diff --git "a/4456.jsonl" "b/4456.jsonl" new file mode 100644--- /dev/null +++ "b/4456.jsonl" @@ -0,0 +1,679 @@ +{"seq_id":"500642784","text":"import clsDDS\r\nimport time\r\nimport random\r\n\r\nif __name__ == \"__main__\":\r\n\r\n sTopic = 'HelloWorldData_Msg'\r\n\r\n oDds = clsDDS.clsDdsMsg()\r\n oDds.AddTopic(sTopic)\r\n oDds.AddSubscriber(sTopic)\r\n\r\n userID = random.randrange(1, 64535)\r\n\r\n i=0\r\n while i<100:\r\n message = \"This is message: \" +str(i)\r\n oDds.Publish(sTopic,userID, message)\r\n\r\n print(str(i))\r\n i = i+1\r\n time.sleep(2)\r\n\r\n","sub_path":"DDS/ExamplePublisher_Msg.py","file_name":"ExamplePublisher_Msg.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"81853824","text":"import xml.etree.ElementTree as ET\n\nimport pystac\nfrom pystac.utils import str_to_datetime\nfrom shapely.geometry import shape\n\nfrom stactools.modis.constants import (ITEM_TIF_IMAGE_NAME, ITEM_METADATA_NAME,\n MODIS_CATALOG_ELEMENTS, MODIS_BAND_DATA,\n ADDITIONAL_MODIS_PROPERTIES)\n\ndef create_collection(catalog_id) -> pystac.Collection:\n \"\"\"Creates a STAC Collection for MODIS data.\n \"\"\"\n\n collection = pystac.Collection(\n id=catalog_id,\n description=MODIS_CATALOG_ELEMENTS[catalog_id].description,\n extent=MODIS_CATALOG_ELEMENTS[catalog_id].extent,\n title=MODIS_CATALOG_ELEMENTS[catalog_id].title,\n providers=MODIS_CATALOG_ELEMENTS[catalog_id].provider,\n stac_extensions=['item-assets'],\n extra_fields={\n 'item_assets': {\n 'image': {\n \"eo:bands\": MODIS_BAND_DATA[catalog_id],\n \"roles\": [\"data\"],\n \"title\": \"RGBIR COG tile\",\n \"type\": pystac.MediaType.COG\n },\n }\n })\n\n return collection\n\ndef create_item(metadata_href):\n \"\"\"Creates a STAC Item from modis data.\n Args:\n metadata_href (str): The href to the metadata for this hdf.\n This function will read the metadata file for information to place in\n the STAC item.\n Returns:\n pystac.Item: A STAC Item representing this MODIS image.\n \"\"\"\n\n metadata_root = ET.parse(metadata_href).getroot()\n\n # Item id\n name = metadata_root.find(\n 'GranuleURMetaData/CollectionMetaData/ShortName').text\n version = metadata_root.find(\n 'GranuleURMetaData/CollectionMetaData/VersionID').text\n short_item_id = '{}/00{}/{}'.format('MODIS', version, name)\n\n image_name = metadata_root.find(\n 'GranuleURMetaData/DataFiles/DataFileContainer/DistributedFileName'\n ).text\n item_id = image_name.replace('.hdf', '')\n\n coordinates = []\n point_ele = '{}/{}'.format(\n 'GranuleURMetaData/SpatialDomainContainer/',\n 'HorizontalSpatialDomainContainer/GPolygon/Boundary/Point')\n for point in metadata_root.findall(point_ele):\n lon = float(point.find('PointLongitude').text)\n lat = float(point.find('PointLatitude').text)\n coordinates.append([lon, lat])\n\n geom = {'type': 'Polygon', 'coordinates': [coordinates]}\n\n bounds = shape(geom).bounds\n\n # Item date\n prod_node = 'GranuleURMetaData/ECSDataGranule/ProductionDateTime'\n prod_dt_text = metadata_root.find(prod_node).text\n prod_dt = str_to_datetime(prod_dt_text)\n\n item = pystac.Item(id=item_id,\n geometry=geom,\n bbox=bounds,\n datetime=prod_dt,\n properties=ADDITIONAL_MODIS_PROPERTIES[short_item_id])\n\n # Common metadata\n item.common_metadata.providers = [\n MODIS_CATALOG_ELEMENTS[short_item_id]['provider']\n ]\n item.common_metadata.description = MODIS_CATALOG_ELEMENTS[short_item_id][\n 'description']\n\n item.common_metadata.instruments = [\n 
metadata_root.find(\n 'GranuleURMetaData/Platform/Instrument/InstrumentShortName').text\n ]\n item.common_metadata.platform = metadata_root.find(\n 'GranuleURMetaData/Platform/PlatformShortName').text\n item.common_metadata.title = MODIS_CATALOG_ELEMENTS[short_item_id]['title']\n\n # Hdf\n item.add_asset(\n ITEM_TIF_IMAGE_NAME,\n pystac.Asset(href=image_name,\n media_type=pystac.MediaType.HDF,\n roles=['data'],\n title=\"hdf image\"))\n\n # Metadata\n item.add_asset(\n ITEM_METADATA_NAME,\n pystac.Asset(href=image_name + '.xml',\n media_type=pystac.MediaType.TEXT,\n roles=['metadata'],\n title='FGDC Metdata'))\n\n # Bands\n item.ext.enable('eo')\n\n if item_id in MODIS_BAND_DATA:\n item.ext.eo.bands = MODIS_BAND_DATA[item_id]\n\n return item\n","sub_path":"src/stactools/modis/stac.py","file_name":"stac.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"645487369","text":"# Copyright 2013 – present by the SalishSeaCast Project contributors\n# and The University of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit tests for SalishSeaCast make_feeds worker.\n\"\"\"\nimport datetime\nimport os\nimport textwrap\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom unittest.mock import Mock, patch\n\nimport arrow\nimport nemo_nowcast\nimport numpy as np\nimport pytest\nfrom nemo_nowcast import WorkerError\n\nfrom nowcast.workers import make_feeds\n\n\n@pytest.fixture()\ndef config(base_config):\n \"\"\":py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests.\"\"\"\n config_file = Path(base_config.file)\n with config_file.open(\"at\") as f:\n f.write(\n textwrap.dedent(\n \"\"\"\\\n ssh:\n tidal predictions: tidal_predictions/\n results archive:\n forecast: /results/SalishSea/forecast/\n figures:\n storage path: /results/nowcast-sys/figures/\n storm surge info portal path: storm-surge/\n storm surge feeds:\n storage path: atom\n domain: salishsea.eos.ubc.ca\n feed entry template: storm_surge_advisory.mako\n feeds:\n pmv.xml:\n title: SalishSeaCast for Port Metro Vancouver\n city: Vancouver\n tide gauge stn: Point Atkinson\n tidal predictions: Point Atkinson_tidal_prediction_01-Jan-2013_31-Dec-2020.csv\n \"\"\"\n )\n )\n config_ = nemo_nowcast.Config()\n config_.load(config_file)\n return config_\n\n\n@patch(\"nowcast.workers.make_feeds.NowcastWorker\", spec=True)\nclass TestMain:\n \"\"\"Unit tests for main() function.\"\"\"\n\n def test_instantiate_worker(self, m_worker):\n m_worker().cli = Mock(name=\"cli\")\n make_feeds.main()\n args, kwargs = m_worker.call_args\n assert args == (\"make_feeds\",)\n assert list(kwargs.keys()) == [\"description\"]\n\n def test_init_cli(self, m_worker):\n m_worker().cli = Mock(name=\"cli\")\n make_feeds.main()\n m_worker().init_cli.assert_called_once_with()\n\n def test_add_run_type_arg(self, m_worker):\n 
m_worker().cli = Mock(name=\"cli\")\n make_feeds.main()\n args, kwargs = m_worker().cli.add_argument.call_args_list[0]\n assert args == (\"run_type\",)\n assert kwargs[\"choices\"] == {\"forecast\", \"forecast2\"}\n assert \"help\" in kwargs\n\n def test_add_run_date_arg(self, m_worker):\n m_worker().cli = Mock(name=\"cli\")\n make_feeds.main()\n args, kwargs = m_worker().cli.add_date_option.call_args_list[0]\n assert args == (\"--run-date\",)\n assert kwargs[\"default\"] == arrow.now().floor(\"day\")\n assert \"help\" in kwargs\n\n def test_run_worker(self, m_worker):\n m_worker().cli = Mock(name=\"cli\")\n make_feeds.main()\n args, kwargs = m_worker().run.call_args\n assert args == (make_feeds.make_feeds, make_feeds.success, make_feeds.failure)\n\n\n@pytest.mark.parametrize(\"run_type\", [\"forecast\", \"forecast2\"])\n@patch(\"nowcast.workers.make_feeds.logger\", autospec=True)\nclass TestSuccess:\n \"\"\"Unit tests for success() function.\"\"\"\n\n def test_success(self, m_logger, run_type):\n parsed_args = SimpleNamespace(\n run_type=run_type, run_date=arrow.get(\"2015-12-21\")\n )\n msg_type = make_feeds.success(parsed_args)\n assert m_logger.info.called\n assert msg_type == f\"success {run_type}\"\n\n\n@pytest.mark.parametrize(\"run_type\", [\"forecast\", \"forecast2\"])\n@patch(\"nowcast.workers.make_feeds.logger\", autospec=True)\nclass TestFailure:\n \"\"\"Unit tests for failure() function.\"\"\"\n\n def test_failure(self, m_logger, run_type):\n parsed_args = SimpleNamespace(\n run_type=run_type, run_date=arrow.get(\"2015-12-21\")\n )\n msg_type = make_feeds.failure(parsed_args)\n assert m_logger.critical.called\n assert msg_type == f\"failure {run_type}\"\n\n\nclass TestMakeFeeds:\n \"\"\"Unit test for make_feeds() function.\"\"\"\n\n @patch(\"nowcast.workers.make_feeds._generate_feed\", autospec=True)\n @patch(\"nowcast.workers.make_feeds._calc_max_ssh_risk\", autospec=True)\n def test_checklist(self, m_cmsr, m_gf, config):\n parsed_args = SimpleNamespace(\n run_type=\"forecast\", run_date=arrow.get(\"2016-11-12\")\n )\n m_cmsr.return_value = {\"risk_level\": None}\n checklist = make_feeds.make_feeds(parsed_args, config)\n expected = {\n \"forecast 2016-11-12\": [\n \"/results/nowcast-sys/figures/storm-surge/atom/pmv.xml\"\n ]\n }\n assert checklist == expected\n\n\nclass TestGenerateFeed:\n \"\"\"Unit test for _generate_feed() function.\"\"\"\n\n @patch(\"nowcast.workers.make_feeds.arrow.utcnow\", autospec=True)\n def test_generate_feed(self, m_utcnow, config):\n m_utcnow.return_value = arrow.get(\"2016-02-20 11:02:42\")\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n fg = make_feeds._generate_feed(\n \"pmv.xml\",\n config[\"storm surge feeds\"],\n os.path.join(storm_surge_path, atom_path),\n )\n feed = fg.atom_str(pretty=True).decode(\"ascii\")\n expected = [\n \"\",\n '',\n \" tag:salishsea.eos.ubc.ca,2015-12-12:/storm-surge/atom/pmv/\"\n \"20160220110242\",\n \" SalishSeaCast for Port Metro Vancouver\",\n ]\n assert feed.splitlines()[:4] == expected\n # The updated element contains a UTC time stamp that we can't\n # mock out easily\n assert feed.splitlines()[4].startswith(\" \")\n assert feed.splitlines()[4].endswith(\"\")\n expected = [\n \" \",\n \" SalishSeaCast Project\",\n \" https://salishsea.eos.ubc.ca/\",\n \" \",\n ' ',\n ' ',\n ' python-feedgen',\n \" Copyright 2015 - present by the SalishSeaCast Project Contributors \"\n \"and The University of British Columbia\",\n \"\",\n 
]\n assert feed.splitlines()[5:] == expected\n\n\n@patch(\"nowcast.workers.make_feeds.arrow.now\", autospec=True)\n@patch(\"nowcast.workers.make_feeds._render_entry_content\", return_value=b\"\", spec=True)\n@patch(\"nowcast.workers.make_feeds.FeedEntry\", autospec=True)\nclass TestGenerateFeedEntry:\n \"\"\"Unit tests for _generate_feed_entry() function.\"\"\"\n\n def test_title(self, m_fe, m_rec, m_now, config):\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n make_feeds._generate_feed_entry(\n \"pmv.xml\", \"max_ssh_info\", config, os.path.join(storm_surge_path, atom_path)\n )\n m_fe().title.assert_called_once_with(\"Storm Surge Alert for Point Atkinson\")\n\n def test_id(self, m_fe, m_rec, m_now, config):\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n m_now.return_value = arrow.get(\"2015-12-24 15:10:42\")\n make_feeds._generate_feed_entry(\n \"pmv.xml\", \"max_ssh_info\", config, os.path.join(storm_surge_path, atom_path)\n )\n m_fe().id.assert_called_once_with(\n make_feeds._build_tag_uri(\n \"2015-12-24\",\n \"pmv.xml\",\n m_now(),\n config[\"storm surge feeds\"],\n os.path.join(storm_surge_path, atom_path),\n )\n )\n\n def test_author(self, m_fe, m_rec, m_now, config):\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n make_feeds._generate_feed_entry(\n \"pmv.xml\", \"max_ssh_info\", config, os.path.join(storm_surge_path, atom_path)\n )\n m_fe().author.assert_called_once_with(\n name=\"SalishSeaCast Project\", uri=\"https://salishsea.eos.ubc.ca/\"\n )\n\n def test_content(self, m_fe, m_rec, m_now, config):\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n make_feeds._generate_feed_entry(\n \"pmv.xml\", \"max_ssh_info\", config, os.path.join(storm_surge_path, atom_path)\n )\n m_fe().content.assert_called_once_with(m_rec(), type=\"html\")\n\n def test_link(self, m_fe, m_rec, m_now, config):\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n make_feeds._generate_feed_entry(\n \"pmv.xml\", \"max_ssh_info\", config, os.path.join(storm_surge_path, atom_path)\n )\n m_fe().link.assert_called_once_with(\n href=\"https://salishsea.eos.ubc.ca/storm-surge/forecast.html\",\n rel=\"alternate\",\n type=\"text/html\",\n )\n\n\nclass TestBuildTagURI:\n \"\"\"Unit test for _build_tag_uri() function.\"\"\"\n\n def test_build_tag_uri(self, config):\n storm_surge_path = config[\"figures\"][\"storm surge info portal path\"]\n atom_path = config[\"storm surge feeds\"][\"storage path\"]\n tag = make_feeds._build_tag_uri(\n \"2015-12-12\",\n \"pmv.xml\",\n arrow.get(\"2015-12-21 09:31:42\"),\n config[\"storm surge feeds\"],\n os.path.join(storm_surge_path, atom_path),\n )\n expected = (\n \"tag:salishsea.eos.ubc.ca,2015-12-12:\"\n \"/storm-surge/atom/pmv/20151221093142\"\n )\n assert tag == expected\n\n\nclass TestRenderEntryContent:\n \"\"\"Unit test for _render_entry_content() function.\"\"\"\n\n @patch(\"nowcast.workers.make_feeds._calc_wind_4h_avg\", autospec=True)\n @patch(\"nowcast.workers.make_feeds.mako.template.Template\", autospec=True)\n @patch(\"nowcast.workers.make_feeds.os.path.dirname\", autospec=True)\n 
@patch(\"nowcast.workers.make_feeds.docutils.core.publish_parts\", spec=True)\n def test_render_entry_content(self, m_pp, m_dirname, m_tmpl, m_cw4a, config):\n max_ssh_info = {\n \"max_ssh\": 5.0319,\n \"max_ssh_time\": arrow.get(\"2015-12-27 15:22:30\"),\n \"risk_level\": \"moderate risk\",\n }\n m_cw4a.return_value = {\"wind_speed_4h_avg\": 0.826, \"wind_dir_4h_avg\": 236.97}\n m_dirname.return_value = \"nowcast/workers/\"\n content = make_feeds._render_entry_content(\"pmv.xml\", max_ssh_info, config)\n m_tmpl.assert_called_once_with(\n filename=\"nowcast/workers/storm_surge_advisory.mako\", input_encoding=\"utf-8\"\n )\n assert m_tmpl().render.called\n assert content == m_pp()[\"body\"]\n\n\nclass TestCalcMaxSshRisk:\n \"\"\"Unit test for _calc_max_ssh_risk() function.\"\"\"\n\n @patch(\"nowcast.workers.make_feeds.stormtools.load_tidal_predictions\", spec=True)\n @patch(\"nowcast.workers.make_feeds._calc_max_ssh\", autospec=True)\n @patch(\"nowcast.workers.make_feeds.stormtools.storm_surge_risk_level\", spec=True)\n def test_calc_max_ssh_risk(self, m_ssrl, m_cms, m_ltp, config):\n run_date = arrow.get(\"2015-12-24\").floor(\"day\")\n max_ssh = np.array([5.09])\n max_ssh_time = np.array([datetime.datetime(2015, 12, 25, 19, 59, 42)])\n m_cms.return_value = (max_ssh, max_ssh_time)\n m_ltp.return_value = (\"ttide\", \"msl\")\n max_ssh_info = make_feeds._calc_max_ssh_risk(\n \"pmv.xml\", run_date, \"forecast\", config\n )\n m_ltp.assert_called_once_with(\n \"tidal_predictions/Point Atkinson_tidal_prediction_\"\n \"01-Jan-2013_31-Dec-2020.csv\"\n )\n m_cms.assert_called_once_with(\n \"pmv.xml\", m_ltp()[0], run_date, \"forecast\", config\n )\n m_ssrl.assert_called_once_with(\"Point Atkinson\", max_ssh, m_ltp()[0])\n np.testing.assert_array_equal(max_ssh_info[\"max_ssh\"], np.array([5.09]))\n np.testing.assert_array_equal(max_ssh_info[\"max_ssh_time\"], max_ssh_time)\n assert max_ssh_info[\"risk_level\"] == m_ssrl()\n\n\n@patch(\"nowcast.workers.make_feeds.logger\", autospec=True)\n@patch(\"nowcast.workers.make_feeds.nc.Dataset\", autospec=True)\n@patch(\"nowcast.workers.make_feeds.nc_tools.ssh_timeseries_at_point\", autospec=True)\n@patch(\"nowcast.workers.make_feeds.nowcast.figures.shared.find_ssh_max\", autospec=True)\nclass TestCalcMaxSsh:\n \"\"\"Unit test for _calc_max_ssh() function.\"\"\"\n\n def test_calc_max_ssh(self, m_fsshmax, m_sshtapt, m_ncd, m_logger, config):\n ssh_ts = namedtuple(\"ssh_ts\", \"ssh, time\")\n m_sshtapt.return_value = ssh_ts(\n np.array([1.93]), np.array([datetime.datetime(2015, 12, 22, 22, 40, 42)])\n )\n m_fsshmax.return_value = (\n np.array([5.09]),\n np.array([datetime.datetime(2015, 12, 22, 22, 40, 42)]),\n )\n max_ssh, max_ssh_time = make_feeds._calc_max_ssh(\n \"pmv.xml\", \"ttide\", arrow.get(\"2015-12-22\").floor(\"day\"), \"forecast\", config\n )\n m_ncd.assert_called_once_with(\n \"/results/SalishSea/forecast/22dec15/PointAtkinson.nc\"\n )\n m_sshtapt.assert_called_once_with(m_ncd(), 0, 0, datetimes=True)\n assert not m_logger.critical.called\n np.testing.assert_array_equal(max_ssh, np.array([5.09]))\n np.testing.assert_array_equal(\n max_ssh_time, np.array([datetime.datetime(2015, 12, 22, 22, 40, 42)])\n )\n\n def test_max_ssh_is_nan(self, m_fsshmax, m_sshtapt, m_ncd, m_logger, config):\n ssh_ts = namedtuple(\"ssh_ts\", \"ssh, time\")\n m_sshtapt.return_value = ssh_ts(\n np.array([np.nan]), np.array([datetime.datetime(2017, 10, 7, 17, 48, 42)])\n )\n m_fsshmax.return_value = (\n np.array([np.nan]),\n np.array([datetime.datetime(2015, 12, 22, 22, 40, 
42)]),\n )\n with pytest.raises(WorkerError):\n max_ssh, max_ssh_time = make_feeds._calc_max_ssh(\n \"pmv.xml\",\n \"ttide\",\n arrow.get(\"2017-10-07\").floor(\"day\"),\n \"forecast\",\n config,\n )\n assert m_logger.critical.called\n","sub_path":"tests/workers/test_make_feeds.py","file_name":"test_make_feeds.py","file_ext":"py","file_size_in_byte":15487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"474021933","text":"import pandas as pd\r\nimport plotly.express as px\r\n\r\ndf = pd.read_csv(\"escape_velocity.csv\")\r\n\r\nvelocity_list = df[\"Velocity\"].tolist()\r\nescaped_list = df[\"Escaped\"].tolist()\r\n\r\nfig = px.scatter(x=velocity_list, y=escaped_list)\r\nfig.show()\r\n\r\nimport numpy as np\r\nvelocity_array = np.array(velocity_list)\r\nescaped_array = np.array(escaped_list)\r\n\r\n#Slope and intercept using pre-built function of Numpy\r\nm, c = np.polyfit(velocity_array, escaped_array, 1)\r\n\r\ny = []\r\nfor x in velocity_array:\r\n y_value = m*x + c\r\n y.append(y_value)\r\n\r\n#plotting the graph\r\nfig = px.scatter(x=velocity_array, y=escaped_array)\r\nfig.update_layout(shapes=[\r\n dict(\r\n type= 'line',\r\n y0= min(y), y1= max(y),\r\n x0= min(velocity_array), x1= max(velocity_array)\r\n )\r\n])\r\nfig.show()\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nX = np.reshape(velocity_list, (len(velocity_list), 1))\r\nY = np.reshape(escaped_list, (len(escaped_list), 1))\r\n\r\nlr = LogisticRegression()\r\nlr.fit(X, Y)\r\n\r\nplt.figure()\r\nplt.scatter(X.ravel(), Y, color='black', zorder=20)\r\n\r\ndef model(x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\n#Using the line formula \r\nX_test = np.linspace(0, 100, 200)\r\nchances = model(X_test * lr.coef_ + lr.intercept_).ravel()\r\n\r\nplt.plot(X_test, chances, color='red', linewidth=3)\r\nplt.axhline(y=0, color='k', linestyle='-')\r\nplt.axhline(y=1, color='k', linestyle='-')\r\nplt.axhline(y=0.5, color='b', linestyle='--')\r\n\r\n# do hit and trial by changing the value of X_test\r\nplt.axvline(x=X_test[23], color='b', linestyle='--')\r\n\r\nplt.ylabel('y')\r\nplt.xlabel('X')\r\nplt.xlim(0, 30)\r\nplt.show()\r\n\r\nvelocity = float(input(\"Enter the velocity:- \"))\r\nescape = model(velocity * lr.coef_ + lr.intercept_).ravel()[0]\r\nif escape <= 0.01:\r\n print(\"The object will not escape the orbit\")\r\nelif escape >= 1:\r\n print(\"The object will escape the orbit\")\r\nelif escape < 0.5:\r\n print(\"The object might not escape the orbit\")\r\nelse:\r\n print(\"The object might escape the orbit\")","sub_path":"p115.py","file_name":"p115.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"462379249","text":"\"\"\"\n Copyright 2018 EPAM Systems, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom botocore.exceptions import ClientError\n\nfrom syndicate.commons.log_helper import get_logger\nfrom syndicate.core import 
CONN\nfrom syndicate.core.resources.alarm_resource import remove_alarms\nfrom syndicate.core.resources.helper import (build_description_obj,\n validate_params)\n\n_LOG = get_logger('syndicate.core.resources.dynamo_db_resource')\n_DYNAMO_DB_CONN = CONN.dynamodb()\n_CW_METRIC = CONN.cw_metric()\n_APP_AS_CONN = CONN.application_autoscaling()\n\n\ndef create_tables_by_10(args):\n \"\"\" Only 10 tables can be created, updated or deleted simultaneously.\n\n :type args: list\n \"\"\"\n response = dict()\n waiters = {}\n start = 0\n end = 8\n while start < len(args):\n tables_to_create = args[start: end]\n for arg_set in tables_to_create:\n name = arg_set['name']\n meta = arg_set['meta']\n response.update(_create_dynamodb_table_from_meta(name, meta))\n table = _DYNAMO_DB_CONN.get_table_by_name(name)\n waiters[table.name] = table.meta.client.get_waiter('table_exists')\n for table_name in waiters:\n waiters[table_name].wait(TableName=table_name)\n start = end\n end += 9\n return response\n\n\ndef describe_table(name, meta, response=None):\n if not response:\n response = _DYNAMO_DB_CONN.describe_table(table_name=name)\n arn = response['TableArn']\n del response['TableArn']\n return {\n arn: build_description_obj(response, name, meta)\n }\n\n\ndef describe_stream(name, meta):\n response = _DYNAMO_DB_CONN.describe_table(meta['table'])\n res_obj = {\n 'StreamSpecification': response['StreamSpecification'],\n 'LatestStreamLabel': response['LatestStreamLabel']\n }\n arn = response['LatestStreamArn']\n return {\n arn: build_description_obj(res_obj, name, meta)\n }\n\n\ndef _create_dynamodb_table_from_meta(name, meta):\n \"\"\" Create Dynamo DB table from meta description after parameter\n validation.\n\n :type name: str\n :type meta: dict\n \"\"\"\n required_parameters = ['hash_key_name', 'hash_key_type', 'read_capacity',\n 'write_capacity']\n validate_params(name, meta, required_parameters)\n\n res = _DYNAMO_DB_CONN.describe_table(name)\n autoscaling_config = meta.get('autoscaling')\n if res:\n _LOG.warn('%s table exists.', name)\n if autoscaling_config:\n res['Autoscaling'] = _describe_autoscaling(autoscaling_config,\n name)\n return describe_table(name, meta, res)\n\n _DYNAMO_DB_CONN.create_table(\n name, meta['hash_key_name'], meta['hash_key_type'],\n meta.get('sort_key_name'), meta.get('sort_key_type'),\n meta['read_capacity'], meta['write_capacity'],\n global_indexes=meta.get('global_indexes'),\n local_indexes=meta.get('local_indexes'),\n wait=False)\n response = _DYNAMO_DB_CONN.describe_table(name)\n if not response:\n raise AssertionError('Table with name {0} has not been created!'\n .format(name))\n # enabling stream if present\n stream_view_type = meta.get('stream_view_type')\n if stream_view_type:\n stream = _DYNAMO_DB_CONN.get_table_stream_arn(name)\n if stream:\n _LOG.warn('Stream %s exists.', name)\n else:\n try:\n _DYNAMO_DB_CONN.enable_table_stream(name, stream_view_type)\n except ClientError as e:\n # handle specific case for fantom stream enabling\n if 'ResourceInUseException' in str(e):\n _LOG.warn('Stream enabling currently in progress,'\n ' table: %s', name)\n else:\n raise e\n if autoscaling_config:\n _LOG.debug('Found autoscaling configuration for resource %s', name)\n sc_res = _enable_autoscaling(autoscaling_config, name)\n response['Autoscaling'] = sc_res\n _LOG.info('Created table %s.', name)\n return describe_table(name, meta, response)\n\n\ndef _describe_autoscaling(autoscaling_config, name):\n targets = []\n policies = []\n for item in autoscaling_config:\n dimension = 
item['dimension']\n resource_name = item['resource_name']\n resource_id = _build_res_id(dimension, resource_name, name)\n sc_targets = _APP_AS_CONN.describe_scalable_targets(\n service_namespace='dynamodb',\n resources_ids=[resource_id],\n scalable_dimension=dimension)\n targets.extend(sc_targets)\n autoscaling_policy = item.get('config')\n if autoscaling_policy:\n policy_name = autoscaling_policy['policy_name']\n sc_policies = _APP_AS_CONN.describe_scaling_policies(\n service_namespace='dynamodb', policy_names=[policy_name],\n resource_id=resource_id, scalable_dimension=dimension)\n policies.extend(sc_policies)\n return {\n 'targets': targets,\n 'policies': policies\n }\n\n\ndef _enable_autoscaling(autoscaling_config, name):\n targets = []\n policies = []\n for item in autoscaling_config:\n autoscaling_required_parameters = ['resource_name', 'dimension',\n 'min_capacity', 'max_capacity',\n 'role_name']\n validate_params(name, item, autoscaling_required_parameters)\n role_name = item['role_name']\n role_arn = CONN.iam().check_if_role_exists(role_name)\n if role_arn:\n dimension = item['dimension']\n resource_id, sc_targets = register_autoscaling_target(dimension,\n item,\n role_arn,\n name)\n targets.extend(sc_targets)\n _LOG.debug('Autoscaling %s is set up for %s', dimension,\n resource_id)\n autoscaling_policy = item.get('config')\n if autoscaling_policy:\n policy_name = autoscaling_policy['policy_name']\n _LOG.debug('Going to set up autoscaling with '\n 'policy %s', policy_name)\n sc_policies = put_autoscaling_policy(autoscaling_policy,\n dimension, policy_name,\n resource_id)\n policies.append(sc_policies)\n _LOG.debug('Policy %s is set up', policy_name)\n else:\n _LOG.warn('Role %s is not found, skip autoscaling config',\n role_name)\n return {\n 'targets': targets,\n 'policies': policies\n }\n\n\ndef put_autoscaling_policy(autoscaling_policy, dimension, policy_name,\n resource_id):\n target_utilization = autoscaling_policy['target_utilization']\n scale_in_cooldown = autoscaling_policy.get('scale_in_cooldown')\n scale_out_cooldown = autoscaling_policy.get('scale_out_cooldown')\n metric_type = 'DynamoDBWriteCapacityUtilization' if 'Write' in dimension \\\n else 'DynamoDBReadCapacityUtilization'\n response = _APP_AS_CONN.put_target_scaling_policy(\n policy_name=policy_name, service_namespace='dynamodb',\n resource_id=resource_id, scalable_dimension=dimension,\n target_value=target_utilization, predefined_metric_type=metric_type,\n scale_in_cooldown=scale_in_cooldown,\n scale_out_cooldown=scale_out_cooldown)\n return response\n\n\ndef register_autoscaling_target(dimension, item, role_arn, table_name):\n resource_name = item['resource_name']\n resource_id = _build_res_id(dimension, resource_name, table_name)\n _APP_AS_CONN.register_target(service_namespace='dynamodb',\n resource_id=resource_id,\n scalable_dimension=dimension,\n min_capacity=str(item['min_capacity']),\n max_capacity=str(item['max_capacity']),\n role_arn=role_arn)\n targets = _APP_AS_CONN.describe_scalable_targets(\n service_namespace='dynamodb',\n resources_ids=[resource_id],\n scalable_dimension=dimension)\n return resource_id, targets\n\n\ndef _build_res_id(dimension, resource_name, table_name):\n resource_id = 'table/{0}'.format(table_name) if 'table' in dimension \\\n else 'table/{0}/index/{1}'.format(table_name, resource_name)\n return resource_id\n\n\ndef remove_dynamodb_tables(args):\n db_names = [x['config']['resource_name'] for x in args]\n _DYNAMO_DB_CONN.remove_tables_by_names(db_names)\n _LOG.info('Dynamo DB 
tables %s were removed', str(db_names))\n alarm_args = []\n for arg in args:\n autoscaling = arg['config']['description'].get('Autoscaling')\n if autoscaling:\n policies = autoscaling['policies']\n for policy in policies:\n if policy:\n alarms = policy.get('Alarms', [])\n alarm_args.extend(map(lambda x: {\n 'arn': x['AlarmARN'],\n 'config': {'resource_name': x['AlarmName']}\n }, alarms))\n\n remove_alarms(alarm_args)\n","sub_path":"syndicate/core/resources/dynamo_db_resource.py","file_name":"dynamo_db_resource.py","file_ext":"py","file_size_in_byte":10006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"646971929","text":"from flask import Flask, request\nfrom requests import get\nfrom PIL import Image\nimport face_recognition\nimport logging\nimport sys\nimport traceback\nfrom time import strftime\n\napp = Flask(__name__)\n\noriginal_photo_filename = 'originalPhoto.jpg'\ntarget_photo_filename = 'targetPhoto.jpg'\n\nlogger = logging.getLogger(\"recognizer\")\nhdlr = logging.FileHandler(\"log.log\")\nformatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\nhdlr.setFormatter(formatter)\nlogger.addHandler(hdlr)\nlogger.setLevel(logging.INFO)\ndefault_tolerance = 0.7\n\n\ndef log_info(content):\n print(content)\n logger.info(content)\n\n\n@app.route('/recognize', methods=['POST'])\ndef recognize():\n\n original_photo_url = request.form['originalPhotoUrl']\n target_photo_url = request.form['targetPhotoUrl']\n\n tolerance = default_tolerance\n if 'tolerance' in request.form:\n tolerance = float(request.form['tolerance'])\n\n log_info(\"Tolerance \" + str(tolerance))\n\n download_file(original_photo_url, original_photo_filename)\n log_info(\"Downloading \" + original_photo_url)\n\n download_file(target_photo_url, target_photo_filename)\n log_info(\"Downloading \" + target_photo_url)\n\n check_dimensions(target_photo_filename)\n\n original_photo = face_recognition.load_image_file(original_photo_filename)\n original_photo_encodings = face_recognition.face_encodings(original_photo)\n\n if len(original_photo_encodings) == 0:\n log_info(\"The original photo doesn't have a valid face\")\n return '', 406\n\n original_photo_encoding = original_photo_encodings[0]\n\n target_photo = face_recognition.load_image_file(target_photo_filename)\n target_photo_encodings = face_recognition.face_encodings(target_photo)\n\n if len(target_photo_encodings) == 0:\n log_info(\"The target photo doesn't have a valid face\")\n return '', 406\n\n target_photo_encoding = target_photo_encodings[0]\n\n results = face_recognition.compare_faces([original_photo_encoding], target_photo_encoding, tolerance)\n\n if results[0]:\n log_info(\"Match!\")\n return '', 200\n else:\n log_info(\"Doesn't match!\")\n return '', 403\n\n\ndef download_file(url, filename):\n with open(filename, \"wb\") as file:\n response = get(url)\n file.write(response.content)\n\n\ndef check_dimensions(filename):\n image = Image.open(filename)\n dimensions = image.size\n if dimensions[0] > dimensions[1]:\n log_info(\"Rotating image\")\n image = image.rotate(90)\n image.save(filename)\n\n\n@app.errorhandler(Exception)\ndef exceptions(e):\n tb = traceback.format_exc()\n timestamp = strftime('[%Y-%b-%d %H:%M]')\n logger.error('%s %s %s %s %s\\n%s',\n timestamp, request.remote_addr, request.method,\n request.scheme, request.full_path, tb)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) > 1:\n app.run(host='0.0.0.0', port=int(args[1]))\n else:\n 
app.run(host='0.0.0.0')\n","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"171416221","text":"# Import standard python modules\nimport time\nimport datetime\nimport os\nimport sys\nimport threading\n\n# Import Adafruit IO Client.\nfrom Adafruit_IO import Client\n\n# Import RPi.GPIO Module\ntry:\n\timport RPi.GPIO as GPIO\nexcept RuntimeError:\n\tprint(\"Error importing RPi.GPIO! This is probably because you need \\\n\tsuperuser privileges. You can achieve \\\n\tthis by using 'sudo' to run your script\")\n\n# Lock used to synchronize the threads\nlock = threading.RLock()\n\n# Container class that shares the current message with the publisher thread\nclass MessageSendControl():\n\tdef __init__(self, message):\n\t\tself.message=message\n\n# Define Functions for Threading\ndef send_message(aioClient, tankMeasureFeedInstance, tankStatusFeedInstance, messageInstance, pinList):\n\tlastState=\"\"\n\twhile True:\n\t\tif(messageInstance.message!=\"\"):\n\t\t\tlock.acquire()\n\t\t\tlogFile=open(\"log fuel.txt\", \"a\", encoding=\"utf8\")\n\t\t\n\t\t\tif(messageInstance.message.isdigit()):\n\t\t\t\taioClient.send(tankMeasureFeedInstance.key, int(messageInstance.message))\n\t\t\t\tlogFile.write(\"{}~{}~{}~{}\\n\".format(datetime.datetime.now(), tankMeasureFeedInstance.key, messageInstance.message, \"Valor de temperatura enviado\"))\n\n\t\t\t\t# Case for trigger a normal status, \n\t\t\t\tif(int(messageInstance.message)>=10 and int(messageInstance.message)<=90):\n\t\t\t\t\taioClient.send(tankStatusFeedInstance.key, \"normal\")\n\t\t\t\t\tlogFile.write(\"{}~{}~{}~{}\\n\".format(datetime.datetime.now(), tankStatusFeedInstance.key, \"normal\", \"Estado de temperatura enviado\"))\n\n\t\t\t\t# LED control\n\t\t\t\ttankStatusFeedData=aioClient.receive(tankStatusFeedInstance.key)\n\t\t\t\tif(lastState!=tankStatusFeedData.value):\n\t\t\t\t\tGPIO.output(list(pinList), GPIO.LOW)\n\t\t\t\t\tGPIO.output(pinList.get(tankStatusFeedData.value), GPIO.HIGH)\n\t\t\t\t\tlogFile.write(\"{}~{}~{}~{}\\n\".format(datetime.datetime.now(), tankStatusFeedInstance.key, tankStatusFeedData, \"Encendiendo LED {} - Estado recibido {}\".format(pinList.get(tankStatusFeedData.value), tankStatusFeedData.value)))\n\t\t\t\t\tlastState=tankStatusFeedData.value\n\t\t\t\t\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprint(\"El dato '{}' no es apto para el envio\".format(messageInstance.message))\n\n\t\t\tlogFile.close()\n\t\t\tlock.release()\n\t\t# Sleep outside the if-block so an empty message does not busy-spin the CPU\n\t\ttime.sleep(10)\n\ndef main():\n\tif(len(sys.argv)!=5):\n\t\tsys.stderr.write('Usage: \"{0}\" $AIOUsername $AIOKey $tankMeasureFeedKey $tankStatusFeedKey\\n'.format(sys.argv[0]))\n\t\tos._exit(1)\n\n\tAIOUsername=sys.argv[1]\n\tAIOKey=sys.argv[2] # Beware, your Key is Secret!\n\ttankMeasureFeedKey=sys.argv[3] # Feed key where tank measure data is received\n\ttankStatusFeedKey=sys.argv[4] # Feed key where tank status data is received\n\n\t# Connect to Adafruit IO Server\n\taio=Client(username=AIOUsername, key=AIOKey)\n\n\t# Link to feeds\n\ttankMeasureFeedInstance=aio.feeds(tankMeasureFeedKey)\n\ttankStatusFeedInstance=aio.feeds(tankStatusFeedKey)\n\t\n\t# Create messageSendControl instance\n\tmessageInstance=MessageSendControl(\"\")\n\n\t# Setup GPIO mode\n\tGPIO.setmode(GPIO.BCM)\n\t\n\t# Dict mapping each tank status to its GPIO pin number\n\tpinList={\"bajo\":10, \"normal\":11, \"alto\":12}\n\t\n\t# write on log file\n\tlogFile=open(\"log fuel.txt\", \"a\", 
encoding=\"utf8\")\n\tlogFile.write(\"{}~{}~{}~{}\\n\".format(datetime.datetime.now(), \"Nulo\", \"Nulo\", \"Aplicación iniciada\"))\n\tlogFile.close()\n\t\n\t# Setup Threading, to publish message every 10 seconds\n\thilo0=threading.Thread(target=send_message, args=[aio, tankMeasureFeedInstance, tankStatusFeedInstance, messageInstance, pinList,])\n\thilo0.start()\n\n\t# Mod publish value\n\twhile True:\n\t\tmessageInstance.message=input(\"Ingrese nuevo valor para el tanque\\n\")\n\t\t# Sincronization threads\n\t\tlock.acquire()\n\t\t# write on log file\n\t\tlogFile=open(\"log fuel.txt\", \"a\", encoding=\"utf8\")\n\t\tlogFile.write(\"{}~{}~{}~{}\\n\".format(datetime.datetime.now(), \"Nulo\", \"Nulo\", \"Valor de temperatura modificado a {}\".format(messageInstance.message)))\n\t\tlogFile.close()\n\t\tlock.release()\n\t\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept:\n\t\tprint(\"{} line {}\".format(sys.exc_info()[0], sys.exc_info()[-1].tb_lineno))\n\t\tGPIO.cleanup()\n\t\t\n ","sub_path":"Project 0 - Exercises essential software/Adafruit Dashboard/send fuel measure with log.py","file_name":"send fuel measure with log.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"508692751","text":"# 1 - Defining a function\n#Vordeklarationen:\ndef main():\n kitten1()\n x = kitten2(5)\n print(x)\n x = kitten3()\n print(x)\n\n # 2 - Function arguments\n # nur die letzten Params können Defaluts-Values haben\n kitten2_1(1)\n\n #Call by Value\n x = 5\n print(id(x))\n kitten2_2(x)\n print(\"{} Meow.\".format(x))\n\n #gleiche ID bei Zuweisung, nach Veränderung aber underschiedliche ID\n\n #int, double, char sind imutable\n #Objekte sind mutable = Call by Ref\n x = 5\n y = x\n print(id(x))\n print(id(y))\n y = 3\n print(id(y))\n\n #mutable Variable => automatisch Call by Ref\n xArray = [5]\n yArray = xArray\n yArray[0] = 3\n print(id(xArray))\n print(id(yArray))\n print(xArray[0])\n print(yArray[0])\n\n # 3 - Argument lists\n #Aufruf mit drei Parametern\n kitten3_1(\"one\", \"two\", \"three\")\n test = (\"one\", \"two\", \"three\")\n #Aufruf mit 1 einem Parameter (=Objekt)\n kitten3_1(test)\n #Aufruf mit * = Arrayelemente als einzelne Parameter behandeln\n kitten3_1(*test) # als Ref aufrufen\n\n # 4 - Keywords arguments\n test = dict(Buffu = \"meow\", Zilla = \"grr\", Angel = \"rawr\")\n kitten4_1(Buffu = \"meow\", Zilla = \"grr\", Angel = \"rawr\")\n kitten4_1(**test)\n\n # 5 - Return values\n print( kitten5_1() )\n print( kitten5_2() )\n\n # 6 - Return values\n\n\n #7 Generators:\n for i in range(10):\n print(i, end=' ')\n print()\n\n for i in inclusive_range(10):\n print(i, end =' ')\n\n #8 - Decorators:\n x = f1\n x()\n\n x=f2\n f2()\n\n x = f21(f23)\n x()\n\n #aufrufen wie oben, aber mit der Wrapper-Aufruf-Kurzschreibweise\n @f21\n def f24():\n print(\"this ist F24\")\n f24()\n\ndef kitten1():\n print(\"Meow.\")\n\ndef kitten2(n):\n print(\"{} Meow\".format(n))\n# in Python return jede Funktion etwas, wenn kein return explizit => None wird return\ndef kitten3():\n return \"Meow.\"\n\n# 2 - Function arguments\n#nur die letzten Params können Defaluts-Values haben\ndef kitten2_1(a, b = 1, c = 0):\n print(\"{} Meow.\".format(c))\n\n#globale + locale Params:\n#Call By Value\ndef kitten2_2(a):\n print(id(a))\n a = 3\n #andrer ID = Speicheradresse\n print(id(a))\n print(\"{} Meow.\".format(a))\n# 3 - Argument lists\ndef kitten3_1(*varArg):\n if len(varArg):\n for s in varArg:\n print(s)\n else:\n 
print(\"Meow.\")\n\n# 4 - Keywords arguments\n# <- = Arrays mit String als Index\n# gute Paxis solche Argumente als kwargs zu nennen\ndef kitten4_1(**kwargs):\n if len(kwargs):\n for k in kwargs: # k = key\n print(\"Kitten {} says {}\".format(k, kwargs[k]))\n\n# 5 - Return values\n\ndef kitten5_1():\n print(\"Test\")\n\ndef kitten5_2():\n print(\"Test\")\n #return \"returned\"\n return [10, 20, 30]\n# 6 - Return values\n\n# 7 - Generators\ndef inclusive_range(*args):\n numargs = len(args)\n start = 0\n step = 1\n\n if numargs < 1:\n raise TypeError(\"expected at least 1 argument, got {}\".format(numargs))\n elif numargs == 1:\n stop = args[0]\n elif numargs == 2:\n (start, stop) = args\n elif numargs == 3:\n (start, stop, step) = args\n else:\n raise TypeError(\"expected at most 3 arguments, got {}\".format(numargs))\n\n i = start\n #eigentliche Implementation des Generators\n while i <= stop:\n yield i # ist wie return nur für Generatoren = nach yeild geht wieder in die Funktion\n i += step\n print()\n\n# 8 - Decorator\n#spezielle Funktion, die Wrapper-Funktion zurückgibt\n#in Python ist alles ein Objekt, auch eine Funktion\ndef f1():\n print(\"Decorator\")\n\n#etwas wirres:\ndef f2():\n #hier wird f3 definiert\n def f3():\n print(\"f3 in f2\")\n return f3() # hiert wird f3 aufgerufen\n#<- man kann hier f2 nicht aufrufen, da es nur innerhalb von f2 bekannt ist. f2 ist Wrapper von f3\ndef f21(f):\n def f22():\n print(\"bevor Funktion-Call\")\n f()\n print(\"after Funtion-Call\")\n return f22\ndef f23():\n print(\"F23\")\n\n#ALSO Decorator = einer Funktion, Funktion als Parameter übergeben\n\n\n\n\n#main-Funktion:\n#__name__ = hat namen des Moduls. -> Wenn diese Datei mit Import irgendwo eingebunden, dann wird diese Datei als Modul gelaufen. und Name des Moduls wird dann in __name__ gespeichert.\n#Da aber hier diese Datei nicht einbebunden wird, sondern als main(-Modul) gelaufen wird =>\n# __main__ = sagt: Das ist kein Modul, dass ist main-Executable\nif __name__ == \"__main__\":\n main()\n","sub_path":"ONLINE-KURSE/Become a Python Developer/4 - Python Essential Training/code-training/7 - Functions/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"616534707","text":"\r\nchoice=0\r\n\r\n\r\n\r\nwhile choice!=3:\r\n \r\n print(\"\"\"My To Do App\r\n ============\r\n 1. Add Task\r\n 2. View All Tasks\r\n 0. 
Exit\r\n \"\"\" )\r\n choice=int(input(\"enter your choice: \"))\r\n if choice==1:\r\n f=open(\"file1.txt\",'a')\r\n n=input(\"enter task name\")\r\n f.write(n+\"\\n\")\r\n f.close()\r\n print(\"task added\")\r\n \r\n \r\n \r\n \r\n elif choice==2:\r\n f=open(\"file1.txt\",\"r\")\r\n a=f.readlines()\r\n for i in a:\r\n print(\"task name is\\n\\n\",i)\r\n f.close()\r\n\r\n elif choice==0:\r\n\r\n print(\"byee\")\r\n break\r\n else:\r\n print('invalid choice')\r\n","sub_path":"assignent/usecase2.py","file_name":"usecase2.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"595818973","text":"from constants import PointState, row_names, point_symbol\r\nfrom boardslice import BoardSlice\r\n\r\n\r\nclass GoBoard(object):\r\n def __init__(self, other=None, size=19):\r\n self._row_labels = None\r\n if other is None:\r\n row_prototype = [PointState.Empty for _ in range(0, size)]\r\n # copy the prototype per row so the rows don't all share one list\r\n self._board = [list(row_prototype) for _ in range(0, size)]\r\n else:\r\n self._board = other.board\r\n\r\n @property\r\n def board(self):\r\n # read from _board, not the board property, to avoid infinite recursion\r\n return [self._board[i].copy() for i in range(0, len(self._board))]\r\n\r\n @property\r\n def row_labels(self):\r\n if self._row_labels is None:\r\n self._row_labels = row_names[0:len(self._board)][::-1]\r\n return self._row_labels\r\n\r\n def _assert_is_square(self):\r\n height = len(self._board)\r\n for row in self._board:\r\n assert len(row) == height, 'Board is not square'\r\n\r\n def __repr__(self):\r\n ret = ' ' + ' '.join([str(int(((i+1) / 10) % 10)) for i in range(0, len(self._board))]) + '\\n'\r\n ret += ' ' + ' '.join(str((i+1) % 10) for i in range(0, len(self._board))) + '\\n'\r\n rows = []\r\n count = 0\r\n for row in self._board:\r\n row_text = self.row_labels[count] + ' '\r\n row_text += '|'.join([point_symbol(point) for point in row])\r\n rows.append(row_text)\r\n count += 1\r\n ret += '\\n'.join(rows)\r\n return ret\r\n\r\n def create_slice(self, row_offset, column_offset, height, width):\r\n self._assert_is_square()\r\n assert row_offset + height <= len(self._board)\r\n assert column_offset + width <= len(self._board)\r\n slice2d = [[self._board[row_i][col_i]\r\n for col_i in range(column_offset, column_offset + width)]\r\n for row_i in range(row_offset, row_offset + height)]\r\n return BoardSlice(row_offset, column_offset, slice2d, self.row_labels[row_offset:row_offset + width], self)\r\n","sub_path":"goboard.py","file_name":"goboard.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"65054750","text":"\"\"\"\n~~~~~~~~~~~~~~~~~\ncommon.consts.py\n\nContains constants which are available to all modules in etl service\n~~~~~~~~~~~~~~~~~\n\"\"\"\n\nfrom etl.data_sources.amazon_sources.file_source import AmazonFileSource\n\nAMAZON_SOURCE_MAPPER = {\n \"file\": AmazonFileSource\n} # Source mapper for the different kinds of Amazon sources. Register all new Amazon sources here\n","sub_path":"etl/common/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"633085329","text":"#This script will build the main subpackages \ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration, get_info\n config = Configuration('tt', parent_package, top_path) \n config.set_options(ignore_setup_xxx_py=True,\n 
assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=False, \n )\n #config.add_include_dirs(tt_fort)\n config.add_extension('quadgauss',sources='hermite_rule.f90')#,include_dirs=inc_dir),#extra_objects=\"../tt-fort/mytt.a\")\n return config\n \n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n\n\n#, include_dirs=None, define_macros=None, undef_macros=None, library_dirs=None, libraries=None, runtime_library_dirs=None, extra_objects=None, extra_compile_args=None, extra_link_args=None, export_symbols=None, swig_opts=None, depends=None, language=None, f2py_options=None, module_dirs\n","sub_path":"quadgauss/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"472424690","text":"from getIDs import *\nimport cPickle as pickle\n\nwith open('/cshome/kzhou3/Data/hash/setid_items.p','rb') as fp:\n\tsetid_items = pickle.load(fp)\nwith open('/cshome/kzhou3/Data/hash/item_setids.p','rb') as fp:\n\titem_setids = pickle.load(fp)\n\ninput_file = open('/cshome/kzhou3/SetExpansion/input/all','r')\noutput_file = open('/cshome/kzhou3/Data/table/only_ground_true','w')\n\nfor seeds in input_file:\n########################## Get Set IDs ###########################################\n\tseeds = seeds.strip().split('\\t')\n\t# pass the commented seeds\n\tif seeds[0] == \"%\":\n\t\tcontinue\n\tsetid_result = getIDs(item_setids, seeds)\n\tfor tt in setid_result:\n\t\toutput_file.write(tt[0] + \"\\t\\t\" + setid_items[tt[0]] + \"\\n\")\n\ninput_file.close()\noutput_file.close()\n","sub_path":"create_ground_true.py","file_name":"create_ground_true.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"444763041","text":"\"\"\"\nThis file is executed when the Python interactive shell is started if\n$PYTHONSTARTUP is in your environment and points to this file. It's just\nregular Python commands, so do what you will. 
Your ~/.inputrc file can greatly\ncomplement this file.\n\n\"\"\"\nimport os\n\ntry:\n import readline\n import rlcompleter\n import atexit\nexcept ImportError:\n print(\"You need readline, rlcompleter, and atexit\")\n\n\nclass Completer(object):\n def __init__(self):\n # Enable a History\n self.HISTFILE=os.path.expanduser(\"%s/.pyhistory\" % os.environ[\"HOME\"])\n\n # Read the existing history if there is one\n if os.path.exists(self.HISTFILE):\n readline.read_history_file(self.HISTFILE)\n\n # Set maximum number of items that will be written to the history file\n readline.set_history_length(300)\n atexit.register(self.savehist)\n\n def savehist(self):\n import readline\n readline.write_history_file(self.HISTFILE)\n\n\nc = Completer()\n\n\nEDITOR = os.environ.get('EDITOR', 'vim')\nEDIT_CMD = '\\e'\n","sub_path":"_pythonrc.py","file_name":"_pythonrc.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"98437003","text":"from heapq import heappush, heappop\n\n\ndef dijkstra(graph, start):\n\n visited = [0] * len(graph)\n distances = [(float('inf'), -1)] * len(graph)\n distances[start] = (0, -1)\n heap = []\n heappush(heap, (distances[start], start))\n\n while heap:\n path, vertex_from = heappop(heap)\n if not visited[vertex_from]:\n visited[vertex_from] = 1\n for vertex_to in graph[vertex_from]:\n path, previous = distances[vertex_to]\n new_path = (\n distances[vertex_from][0] + graph[vertex_from][vertex_to]\n )\n if path > new_path:\n distances[vertex_to] = (new_path, vertex_from)\n if not visited[vertex_to]:\n heappush(heap, (distances[vertex_to], vertex_to))\n return distances\n\n\nif __name__ == '__main__':\n with open('input.txt', 'r') as file:\n vertices, edges = map(int, file.readline().split())\n start, destination = map(int, file.readline().split())\n graph = [{} for _ in range(vertices)]\n for _ in range(edges):\n vert1, vert2, weight = map(int, file.readline().split())\n graph[vert1][vert2] = weight\n graph[vert2][vert1] = weight\n distanses = dijkstra(graph, start)\n if distanses[destination][0] == float('inf'):\n print(-1)\n else:\n final_distance, previous = distanses[destination]\n route = [destination]\n while previous != -1:\n route.append(previous)\n _, previous = distanses[previous]\n\n print(final_distance)\n print(len(route))\n print(' '.join(map(str, route[::-1])))\n\n","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"413424347","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('farmingconcrete', '0008_garden_metric_record_added'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='garden',\n name='measurement_system',\n field=models.CharField(default=b'imperial', help_text='Pick the measurement system that will be used for this garden.', max_length=25, verbose_name='measurement system'),\n ),\n ]\n","sub_path":"barn/farmingconcrete/migrations/0009_garden_measurement_system.py","file_name":"0009_garden_measurement_system.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"91304008","text":"import numpy as np\nimport tensorflow as tf\nfrom utils import fc, conv, ortho_init\nfrom 
baselines.common.mpi_running_mean_std import RunningMeanStd\nimport baselines.common.tf_util as U\n\ndef to2d(x):\n size = 1\n for shapel in x.get_shape()[1:]: size *= shapel.value\n return tf.reshape(x, (-1, size))\n\n\nclass RND(object):\n def __init__(self, name, ph_ob, args):\n self.convfeat = args.convfeat\n self.rep_size = args.rep_size\n self.enlargement = args.enlargement\n self.proportion_of_exp_used_for_predictor_update = args.proportion_of_exp_used_for_predictor_update\n self.scope = name\n\n with tf.variable_scope(self.scope):\n self.build_graph = self.build_graph(ph_ob)\n\n\n def build_graph(self, ph_ob):\n ob = ph_ob[-1]\n assert len(ob.shape.as_list()) == 4 #B, H, W, C\n with tf.variable_scope(\"obfilter\"):\n self.ob_rms = RunningMeanStd(shape = ob.shape.as_list()[1:3] + [1])\n\n ob_norm = ob[:, :, :, -1:]\n ob_norm = tf.cast(ob_norm, tf.float32)\n ob_norm = tf.clip_by_value((ob_norm - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)\n \n # Random target network\n xr = tf.nn.leaky_relu(conv(ob_norm, \"c1r\", nf=self.convfeat*1, rf=8, stride=4, init_scale=np.sqrt(2)))\n xr = tf.nn.leaky_relu(conv(xr, 'c2r', nf=self.convfeat * 2 * 1, rf=4, stride=2, init_scale=np.sqrt(2)))\n xr = tf.nn.leaky_relu(conv(xr, 'c3r', nf=self.convfeat * 2 * 1, rf=3, stride=1, init_scale=np.sqrt(2)))\n rgbr = [to2d(xr)]\n X_r = fc(rgbr[0], 'fc1r', nh=self.rep_size, init_scale=np.sqrt(2))\n\n # Predictor network\n xrp = tf.nn.leaky_relu(conv(ob_norm, 'c1rp_pred', nf=self.convfeat, rf=8, stride=4, init_scale=np.sqrt(2)))\n xrp = tf.nn.leaky_relu(conv(xrp, 'c2rp_pred', nf=self.convfeat * 2, rf=4, stride=2, init_scale=np.sqrt(2)))\n xrp = tf.nn.leaky_relu(conv(xrp, 'c3rp_pred', nf=self.convfeat * 2, rf=3, stride=1, init_scale=np.sqrt(2)))\n rgbrp = to2d(xrp)\n\n X_r_hat = tf.nn.relu(fc(rgbrp, 'fc1r_hat1_pred', nh=256 * self.enlargement, init_scale=np.sqrt(2)))\n X_r_hat = tf.nn.relu(fc(X_r_hat, 'fc1r_hat2_pred', nh=256 * self.enlargement, init_scale=np.sqrt(2)))\n X_r_hat = fc(X_r_hat, 'fc1r_hat3_pred', nh=self.rep_size, init_scale=np.sqrt(2))\n\n self.feat_var = tf.reduce_mean(tf.nn.moments(X_r, axes=[0])[1])\n self.max_feat = tf.reduce_max(tf.abs(X_r))\n self.int_rew = tf.reduce_mean(tf.square(tf.stop_gradient(X_r) - X_r_hat), axis=-1, keep_dims=True)\n\n targets = tf.stop_gradient(X_r)\n # self.aux_loss = tf.reduce_mean(tf.square(noisy_targets-X_r_hat))\n self.aux_loss = tf.reduce_mean(tf.square(targets - X_r_hat), -1)\n\n mask = tf.random_uniform(shape=tf.shape(self.aux_loss), minval=0., maxval=1., dtype=tf.float32)\n mask = tf.cast(mask < self.proportion_of_exp_used_for_predictor_update, tf.float32)\n self.aux_loss = tf.reduce_sum(mask * self.aux_loss) / tf.maximum(tf.reduce_sum(mask), 1.)\n self._predictor = U.function([ob], [self.int_rew])\n \n def predict(self, ob):\n obf = ob[-1]\n if len(obf.shape) == 3: # a single unbatched frame: add a batch dimension\n obf = np.expand_dims(obf, 0)\n int_rew = self._predictor(obf)[0]\n return int_rew\n\n def update_obs_rms(self, ob):\n obf = np.array(list(zip(*ob.tolist()))[1]) # list() so the zip result is subscriptable on Python 3\n self.ob_rms.update(obf)\n\n def get_variables(self):\n return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)\n def get_trainable_variables(self):\n return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)\n\n","sub_path":"ppo_rnd/rnd_cnn.py","file_name":"rnd_cnn.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"25648175","text":"from Bio import SeqIO\nfrom Bio import AlignIO\nimport sys\nimport argparse\n\nparser = 
argparse.ArgumentParser(description='Clean up coding consensus.')\nparser.add_argument(\"--consensus\", action=\"store\", type=str, dest=\"consensus\")\nparser.add_argument(\"--alignment_with_ref\", action=\"store\", type=str, dest=\"alignment_with_ref\")\nparser.add_argument(\"--output_seq\", action=\"store\", type=str, dest=\"output_seq\")\nparser.add_argument(\"--polish_round\", action=\"store\", type=str, dest=\"round\")\nargs = parser.parse_args()\n\nround_name = ''\nif args.round:\n round_name = f\" round_name={args.round}\"\n\n\ndef find_gaps(aln):\n gap_dict = {}\n alignment = AlignIO.read(aln, \"fasta\")\n\n print(f\"Reading in {alignment}.\\nLooking for gaps in coding sequence.\")\n for i in range(len(alignment[0])):\n\n col = alignment[:, i]\n if len(set(col)) >1:\n print(alignment[:, i])\n if '-' in col:\n print(f\"Gap at position {i}: {col}\")\n if col[0]=='-':\n gap_dict[i] = ''\n else:\n gap_dict[i] = col.rstrip('-')+'N'\n return(gap_dict)\n\n#the rule is to replace a gap in the query with 'N' and to force delete a base that causes a gap in the reference\nwith open(args.output_seq, \"w\") as fw:\n\n gap_dict = find_gaps(args.alignment_with_ref)\n \n for record in SeqIO.parse(args.consensus, \"fasta\"):\n \n\n new_seq = list(record.seq)\n\n for key in gap_dict:\n try:\n new_seq[key]= gap_dict[key]\n except IndexError:\n new_seq.append(gap_dict[key])\n new_seq = ''.join(new_seq).upper()\n \n\n fw.write(f\">{record.id}{round_name} length={len(new_seq)}\\n{new_seq}\\n\")\n\n","sub_path":"pipelines/process_unmapped/rules/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"128200145","text":"from shogiPackage import color\r\nfrom shogiPackage import piece\r\nfrom shogiPackage import move\r\nfrom shogiPackage import const\r\n\r\nsize_squares = 9\r\n\r\npiece_symbols = [const.lancer_symbol, const.horse_symbol, const.silver_symbol, const.gold_symbol, const.king_symbol, const.torre_symbol, const.bishop_symbol, const.peon_symbol]\r\npiece_colors = ['black', 'white']\r\n\r\n\r\nmy_board = [\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n]\r\n\r\n\r\ndef create_pieces():\r\n black_color = color.Color(\"black\")\r\n white_color = color.Color(\"white\")\r\n\r\n black_p = []\r\n white_p = []\r\n\r\n for i in range(size_squares):\r\n # Cargamos los peones negros y blancos\r\n black_p.append(piece.Piece(piece_symbols[7], black_color, (2, i), False, False, \"\"))\r\n white_p.append(piece.Piece(piece_symbols[7], white_color, (6, i), False, False, \"\"))\r\n\r\n # L\r\n black_p.append(piece.Piece(piece_symbols[0], black_color, (0, 0), False, False, \"\"))\r\n black_p.append(piece.Piece(piece_symbols[0], black_color, (0, 8), False, False, \"\"))\r\n white_p.append(piece.Piece(piece_symbols[0], white_color, (8, 0), False, False, \"\"))\r\n white_p.append(piece.Piece(piece_symbols[0], white_color, (8, 8), False, False, \"\"))\r\n\r\n # N\r\n black_p.append(piece.Piece(piece_symbols[1], black_color, (0, 1), False, False, \"\"))\r\n black_p.append(piece.Piece(piece_symbols[1], black_color, (0, 7), False, False, \"\"))\r\n white_p.append(piece.Piece(piece_symbols[1], white_color, (8, 1), False, False, \"\"))\r\n 
white_p.append(piece.Piece(piece_symbols[1], white_color, (8, 7), False, False, \"\"))\r\n\r\n    # S\r\n    black_p.append(piece.Piece(piece_symbols[2], black_color, (0, 2), False, False, \"\"))\r\n    black_p.append(piece.Piece(piece_symbols[2], black_color, (0, 6), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[2], white_color, (8, 2), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[2], white_color, (8, 6), False, False, \"\"))\r\n\r\n    # G\r\n    black_p.append(piece.Piece(piece_symbols[3], black_color, (0, 3), False, False, \"\"))\r\n    black_p.append(piece.Piece(piece_symbols[3], black_color, (0, 5), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[3], white_color, (8, 3), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[3], white_color, (8, 5), False, False, \"\"))\r\n\r\n    # K\r\n    black_p.append(piece.Piece(piece_symbols[4], black_color, (0, 4), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[4], white_color, (8, 4), False, False, \"\"))\r\n\r\n    # R\r\n    black_p.append(piece.Piece(piece_symbols[5], black_color, (1, 1), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[5], white_color, (7, 7), False, False, \"\"))\r\n\r\n    # B\r\n    black_p.append(piece.Piece(piece_symbols[6], black_color, (1, 7), False, False, \"\"))\r\n    white_p.append(piece.Piece(piece_symbols[6], white_color, (7, 1), False, False, \"\"))\r\n\r\n    return black_p, white_p\r\n\r\n\r\ndef create_moves(my_arr_black1, my_arr_white1, my_board1):\r\n    black_move = []\r\n    white_move = []\r\n\r\n    for i in range(len(my_arr_black1)):\r\n        black_move.append(move.Move(my_arr_black1[i], my_board1, my_arr_black1, my_arr_white1))\r\n\r\n    for j in range(len(my_arr_white1)):\r\n        white_move.append(move.Move(my_arr_white1[j], my_board1, my_arr_black1, my_arr_white1))\r\n\r\n    return black_move, white_move\r\n\r\n\r\ndef refresh_board(black_p, white_p, my_board1):\r\n\r\n    aux_str = \"\"\r\n\r\n    # Place every piece's symbol on the board grid (use the passed-in board throughout).\r\n    for k in range(len(black_p)):\r\n        for i in range(9):\r\n            for j in range(9):\r\n                if (i, j) == black_p[k].get_actual_position():\r\n                    my_board1[i][j] = black_p[k].get_simbol() + \" \"\r\n\r\n    for k in range(len(white_p)):\r\n        for i in range(9):\r\n            for j in range(9):\r\n                if (i, j) == white_p[k].get_actual_position():\r\n                    my_board1[i][j] = white_p[k].get_simbol() + \" \"\r\n\r\n    for i in range(9):\r\n        for j in range(9):\r\n            if j % 9 == 0:\r\n                aux_str = aux_str + \"\\n\" + \"{} \".format(i)\r\n            if my_board1[i][j] == 0:\r\n                aux_str = aux_str + \" \"\r\n            if my_board1[i][j] != 0:\r\n                aux_str = aux_str + my_board1[i][j]\r\n\r\n    print(\"\\n\")\r\n    aux_numbers = \" 0 1 2 3 4 5 6 7 8\"\r\n    print(aux_numbers, aux_str)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    my_arr_black, my_arr_white = create_pieces()\r\n    my_move_black, my_move_white = create_moves(my_arr_black, my_arr_white, my_board)\r\n\r\n    refresh_board(my_arr_black, my_arr_white, my_board)\r\n\r\n    control = 2\r\n    game = 1\r\n    flag = False\r\n\r\n    while game == 1:\r\n\r\n        if control % 2 == 0:\r\n            print(\"Black's turn\")\r\n\r\n            fd1 = int(input(\"Enter the source row: \"))\r\n            while fd1 < const.min_fil or fd1 > const.max_fil:\r\n                fd1 = int(input(\"Enter the source row: \"))\r\n\r\n            cd1 = int(input(\"Enter the source column: \"))\r\n            while cd1 < const.min_col or cd1 > const.max_col:\r\n                cd1 = int(input(\"Enter the source column: \"))\r\n\r\n            fh1 = int(input(\"Enter the target row: \"))\r\n            while fh1 < const.min_fil or fh1 > const.max_fil:\r\n                fh1 = int(input(\"Enter the target row: \"))\r\n\r\n            ch1 = int(input(\"Enter the target column: \"))\r\n            while ch1 < const.min_col or ch1 > const.max_col:\r\n                ch1 = int(input(\"Enter the target column: \"))\r\n\r\n            if my_board[fd1][cd1] == 0:\r\n                print(\"Invalid move\")\r\n                flag = True\r\n\r\n            for i in range(len(my_arr_black)):\r\n                if my_arr_black[i].get_actual_position() == (fd1, cd1):\r\n                    my_move_black[i].make_move((fd1, cd1), (fh1, ch1))\r\n                    if not my_move_black[i].my_flag:\r\n                        flag = True\r\n                    if my_move_black[i].my_jaque:\r\n                        game = -1\r\n        else:\r\n            print(\"White's turn\")\r\n\r\n            fd2 = int(input(\"Enter the source row: \"))\r\n            while fd2 < const.min_fil or fd2 > const.max_fil:\r\n                fd2 = int(input(\"Enter the source row: \"))\r\n\r\n            cd2 = int(input(\"Enter the source column: \"))\r\n            while cd2 < const.min_col or cd2 > const.max_col:\r\n                cd2 = int(input(\"Enter the source column: \"))\r\n\r\n            fh2 = int(input(\"Enter the target row: \"))\r\n            while fh2 < const.min_fil or fh2 > const.max_fil:\r\n                fh2 = int(input(\"Enter the target row: \"))\r\n\r\n            ch2 = int(input(\"Enter the target column: \"))\r\n            while ch2 < const.min_col or ch2 > const.max_col:\r\n                ch2 = int(input(\"Enter the target column: \"))\r\n\r\n            if my_board[fd2][cd2] == 0:\r\n                print(\"Invalid move\")\r\n                flag = True\r\n\r\n            for j in range(len(my_arr_white)):\r\n                if my_arr_white[j].get_actual_position() == (fd2, cd2):\r\n                    my_move_white[j].make_move((fd2, cd2), (fh2, ch2))\r\n                    if not my_move_white[j].my_flag:\r\n                        flag = True\r\n                    if my_move_white[j].my_jaque:\r\n                        game = -1\r\n\r\n        if flag:\r\n            # Invalid move: add 2 so the same player moves again on the next pass.\r\n            control = control + 2\r\n            flag = False\r\n        else:\r\n            control = control + 1\r\n            flag = False\r\n\r\n        refresh_board(my_arr_black, my_arr_white, my_board)\r\n\r\n    print(\"Game over!\")\r\n\r\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"610475340","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport csv\nimport plotly\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(__file__), '../../resources/hpxml-measures/workflow/tests')))\nfrom compare import BaseCompare, read_csv\n\n\nenum_maps = {'build_existing_model.geometry_building_type_recs': {'Single-Family Detached': 'SFD',\n                                                                  'Mobile Home': 'SFD',\n                                                                  'Single-Family Attached': 'SFA',\n                                                                  'Multi-Family with 2 - 4 Units': 'MF',\n                                                                  'Multi-Family with 5+ Units': 'MF'}}\n\nclass MoreCompare(BaseCompare):\n    def __init__(self, base_folder, feature_folder, export_folder, export_file, map_file):\n        self.base_folder = base_folder\n        self.feature_folder = feature_folder\n        self.export_folder = export_folder\n        self.export_file = export_file\n\n        if map_file:\n            self.map_columns(map_file)\n\n\n    def samples(self):\n\n        def value_counts(df, file):\n            value_counts = []\n            with open(file, 'w', newline='') as f:\n\n                for col in sorted(df.columns):\n                    if col == 'Building':\n                        continue\n\n                    value_count = df[col].value_counts(normalize=True)\n                    value_count = value_count.round(2)\n                    keys_to_values = dict(zip(value_count.index.values, value_count.values))\n                    keys_to_values = dict(sorted(keys_to_values.items(), key=lambda x: (x[1], x[0]), reverse=True))\n                    value_counts.append([col])\n                    value_counts.append(keys_to_values.keys())\n                    value_counts.append(keys_to_values.values())\n                    value_counts.append('')\n\n                w = csv.writer(f)\n                w.writerows(value_counts)\n\n        df = 
read_csv(os.path.join(self.base_folder, 'buildstock.csv'), dtype=str)\n file = os.path.join(self.export_folder, 'base_samples.csv')\n value_counts(df, file)\n\n df = read_csv(os.path.join(self.feature_folder, 'buildstock.csv'), dtype=str)\n file = os.path.join(self.export_folder, 'feature_samples.csv')\n value_counts(df, file)\n\n def convert_units(self, df):\n for col in df.columns:\n units = col.split('_')[-1]\n if units == 'kwh':\n df[col] *= 3412.14/1000000 # to mbtu\n elif units == 'therm':\n df[col] *= 0.1 # to mbtu\n\n return\n\n\n def write_results(self, base_df, feature_df):\n base_df.to_csv(os.path.join(self.base_folder, 'results_output.csv'))\n feature_df.to_csv(os.path.join(self.feature_folder, 'results_output.csv'))\n \n\n def map_columns(self, map_file):\n # This function uses a column mapping csv (specified with the -m argument) with columns \"map_from\" and \"map_to\"\n # If a \"map_from\" column is found in either the base or feature results, the column will be updated to the \"map_to\" value\n # Any columns that do not appear in both base and feature after the mapping will be dropped\n # An entry in the column mapping csv may have multiple column headers separated by a comma, in which case the columns will be summed and first entry will be used as the column header\n\n ## Characteristics\n # This is optional since you aren't necessarily going to visualize by characteristics\n has_characteristics = False\n if os.path.exists(os.path.join(self.base_folder, 'results_characteristics.csv')) and os.path.exists(os.path.join(self.feature_folder, 'results_characteristics.csv')):\n has_characteristics = True\n base_df_char = read_csv(os.path.join(self.base_folder, 'results_characteristics.csv'), index_col=0)\n feature_df_char = read_csv(os.path.join(self.feature_folder, 'results_characteristics.csv'), index_col=0)\n\n ## Outputs\n base_df = read_csv(os.path.join(self.base_folder, 'results_output.csv'), index_col=0)\n feature_df = read_csv(os.path.join(self.feature_folder, 'results_output.csv'), index_col=0)\n\n ## Mapping\n cwd = os.path.dirname(os.path.realpath(__file__))\n map_df = read_csv(map_file, usecols=['map_from','map_to'])\n map_df = map_df.dropna(axis=0)\n map_dict = {k:v for k,v in zip(map_df['map_from'], map_df['map_to'])}\n\n # Set new base and feature folders\n self.base_folder = os.path.join(self.base_folder, 'map')\n self.feature_folder = os.path.join(self.feature_folder, 'map')\n if not os.path.exists(self.base_folder):\n os.makedirs(self.base_folder)\n if not os.path.exists(self.feature_folder):\n os.makedirs(self.feature_folder)\n\n ## Characteristics\n if has_characteristics:\n # Align results_charactersitics columns\n base_cols = ['build_existing_model.' + col if 'build_existing_model' not in col else col for col in base_df_char.columns]\n feature_cols = ['build_existing_model.' 
+ col if 'build_existing_model' not in col else col for col in feature_df_char.columns]\n\n            base_df_char.columns = base_cols\n            feature_df_char.columns = feature_cols\n\n            common_cols = np.intersect1d(base_df_char.columns, feature_df_char.columns)\n            base_df_char = base_df_char[common_cols]\n            feature_df_char = feature_df_char[common_cols]\n\n            base_df_char.to_csv(os.path.join(self.base_folder, 'results_characteristics.csv'))\n            feature_df_char.to_csv(os.path.join(self.feature_folder, 'results_characteristics.csv'))\n\n        # Skip mapping if not needed\n        if set(base_df.columns).issubset(set(feature_df.columns)) or set(feature_df.columns).issubset(set(base_df.columns)):\n            self.write_results(base_df, feature_df)\n            return\n\n        # Sum columns with more than 1 column header in mapping csv\n        results_dfs = {'base': base_df, 'feature': feature_df}\n        map_dict_copy = map_dict.copy()\n        for key, df in results_dfs.items():\n            column_headers = df.columns\n\n            for map_from, map_to in map_dict.items():\n                # Sum 'map to' columns and use first parameter as col name\n                map_to_s = map_to.split(',')\n                if len(map_to_s) > 1:\n                    map_to = map_to_s[0]\n                    if map_to in column_headers:\n                        # sum columns\n                        df[map_to] = df[map_to_s].sum(axis=1)\n                        # update mapping\n                        map_dict_copy[map_from] = map_to\n                        # drop summed columns\n                        df.drop(map_to_s[1:], axis='columns', inplace=True)\n\n                # Sum 'map from' columns and use first parameter as col name\n                map_from_s = map_from.split(',')\n                if len(map_from_s) > 1:\n                    map_from = map_from_s[0]\n                    if map_from in column_headers:\n                        # sum columns\n                        df[map_from] = df[map_from_s].sum(axis=1)\n                        # update mapping\n                        map_dict_copy[map_from] = map_to\n                        # drop summed columns\n                        df.drop(map_from_s[1:], axis='columns', inplace=True)\n\n            results_dfs[key] = df\n\n        base_df = results_dfs['base']\n        feature_df = results_dfs['feature']\n\n        # Convert units\n        self.convert_units(base_df)\n        self.convert_units(feature_df)\n\n        # Map column headers\n        map_dict = map_dict_copy\n        base_df.rename(columns=map_dict, inplace=True)\n        feature_df.rename(columns=map_dict, inplace=True)\n\n        # Output only columns in common\n        common_cols = base_df.columns.intersection(feature_df.columns)\n        base_df = base_df[common_cols]\n        feature_df = feature_df[common_cols]\n\n        base_df = base_df.reindex(sorted(base_df.columns), axis=1)\n        feature_df = feature_df.reindex(sorted(feature_df.columns), axis=1)\n\n        # Store new mapped csvs\n        self.write_results(base_df, feature_df)\n        print(\"Wrote mapped results_output.csv for base and feature results\")\n        return\n\n\n    def timeseries(self):\n        files = []\n        for file in os.listdir(self.base_folder):\n            files.append(file)\n\n        def cvrmse(b, f):\n            if np.all(b == 0):\n                return 'NA'\n\n            s = np.sum((b - f) ** 2)\n            s /= (len(b) - 1)\n            s **= (0.5)\n            s /= np.mean(b)\n            s *= 100.0\n            return s\n\n        def nmbe(b, f):\n            if np.all(b == 0):\n                return 'NA'\n\n            s = np.sum(b - f)\n            s /= (len(b) - 1)\n            s /= np.mean(b)\n            s *= 100.0\n            return s\n\n        metrics = ['cvrmse', 'nmbe']\n\n        for file in sorted(files):\n            base_df = read_csv(os.path.join(self.base_folder, file), index_col=0)\n            feature_df = read_csv(os.path.join(self.feature_folder, file), index_col=0)\n\n            base_df = self.intersect_rows(base_df, feature_df)\n            feature_df = self.intersect_rows(feature_df, base_df)\n\n            cols = sorted(list(set(base_df.columns) & set(feature_df.columns)))\n\n            for time_col in ['Time', 'time']:\n                if time_col in cols:\n                    cols.remove(time_col)\n\n            if not cols:\n                continue  # nothing to compare in this file; don't abort the remaining files\n\n            g = base_df.groupby('PROJECT')\n            groups = g.groups.keys()\n\n            dfs = []\n            for group in groups:\n                b_df = base_df.copy()\n                
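# --- Editorial aside (annotation, not part of compare.py): a quick numeric check of the
# cvrmse()/nmbe() metrics defined above, with b the baseline series and f the feature series:
#     CVRMSE = 100 * sqrt(sum((b - f)**2) / (n - 1)) / mean(b)
#     NMBE   = 100 * sum(b - f) / ((n - 1) * mean(b))
# For b = [2, 2, 2] and f = [1, 2, 3]:
#     sum((b - f)**2) = 2, n - 1 = 2, mean(b) = 2  ->  CVRMSE = 100 * sqrt(1) / 2 = 50.0
#     sum(b - f) = 0                               ->  NMBE   = 0.0
# Both helpers return 'NA' when the baseline is identically zero, avoiding division by zero.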
f_df = feature_df.copy()\n\n cdfs = []\n for col in cols:\n b = b_df.loc[group][col].values\n f = f_df.loc[group][col].values\n\n data = {'CVRMSE (%)': [cvrmse(b, f)], 'NMBE (%)': [nmbe(b, f)]}\n df = pd.DataFrame(data=data, index=[group])\n columns = [(col, 'CVRMSE (%)'), (col, 'NMBE (%)')]\n df.columns = pd.MultiIndex.from_tuples(columns)\n cdfs.append(df)\n\n df = pd.concat(cdfs, axis=1)\n dfs.append(df)\n\n df = pd.concat(dfs).transpose()\n df.to_csv(os.path.join(self.export_folder, 'cvrmse_nmbe_{}'.format(file)))\n\nif __name__ == '__main__':\n\n default_base_folder = 'test/base_results/baseline'\n default_feature_folder = 'test/base_results/results'\n default_export_folder = 'test/base_results/comparisons'\n actions = [method for method in dir(MoreCompare) if method.startswith('__') is False]\n actions += ['timeseries']\n aggregate_columns = ['build_existing_model.geometry_building_type_recs',\n 'build_existing_model.census_region']\n aggregate_functions = ['sum', 'mean']\n display_columns = ['build_existing_model.geometry_building_type_recs',\n 'build_existing_model.geometry_foundation_type',\n 'build_existing_model.census_region']\n map_result_choices = ['base', 'feature']\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--base_folder', default=default_base_folder, help='The path of the base folder.')\n parser.add_argument('-f', '--feature_folder', default=default_feature_folder, help='The path of the feature folder.')\n parser.add_argument('-e', '--export_folder', default=default_export_folder, help='The path of the export folder.')\n parser.add_argument('-x', '--export_file', help='The path of the export file.')\n parser.add_argument('-a', '--actions', action='append', choices=actions, help='The method to call.')\n parser.add_argument('-ac', '--aggregate_column', choices=aggregate_columns, help='On which column to aggregate data.')\n parser.add_argument('-af', '--aggregate_function', choices=aggregate_functions, help='Function to use for aggregating data.')\n parser.add_argument('-dc', '--display_column', choices=display_columns, help='How to organize the subplots.')\n parser.add_argument('-m', '--map_file', help='Column mapping csv path.')\n\n args = parser.parse_args()\n print(args)\n\n if not os.path.exists(args.export_folder):\n os.makedirs(args.export_folder)\n \n compare = MoreCompare(args.base_folder, args.feature_folder, args.export_folder, args.export_file, args.map_file)\n\n if args.actions == None:\n args.actions = [] \n\n for action in args.actions:\n if action == 'samples':\n compare.samples()\n elif action == 'results':\n excludes = ['buildstock.csv']\n compare.results(args.aggregate_column, args.aggregate_function, excludes, enum_maps)\n elif action == 'visualize':\n excludes = ['buildstock.csv', 'results_characteristics.csv']\n categories = ['.component_load_', '.emissions_', '.end_use_', '.energy_use_', '.fuel_use_', '.hot_water_', '.hvac_', '.load_', '.peak_', '.resilience_', '.unmet_hours_', 'report_utility_bills.', 'upgrade_costs.', 'qoi_report.']\n for category in categories:\n export_file, ext = args.export_file.split('.')\n export_file = '{}_{}.{}'.format(export_file, category.strip('.').rstrip('_'), ext)\n cols_to_ignore = ['color_index'] + categories\n cols_to_ignore.remove(category)\n compare = MoreCompare(args.base_folder, args.feature_folder, args.export_folder, export_file, args.map_file)\n compare.visualize(args.aggregate_column, args.aggregate_function, args.display_column, excludes, enum_maps, cols_to_ignore)\n elif action == 
'timeseries':\n            compare.timeseries()\n","sub_path":"test/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":12667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"622602357","text":"import discord\nfrom discord.ext import commands\nimport random\nfrom gtts import gTTS\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nbot_key = os.getenv(\"KEY\")\n\ndescription = '''An example bot to showcase the discord.ext.commands extension\nmodule.\n\nThere are a number of utility commands being showcased here.'''\nbot = commands.Bot(command_prefix='?', description=description)\n\ndef random_zitat():\n    # Reservoir sampling: pick a uniformly random line without reading the file twice.\n    with open('zitate.txt') as afile:\n        line = next(afile)\n        for num, aline in enumerate(afile, 2):\n            if random.randrange(num): continue\n            line = aline\n    return line\n\nclass Music(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def join(self, ctx):\n        channel = ctx.message.author.voice.channel\n\n        if ctx.voice_client is not None:\n            return await ctx.voice_client.move_to(channel)\n\n        await channel.connect()\n\n    async def play(self, ctx, query):\n        # Helper, not a command: play a local audio file in the current voice channel.\n        source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n        ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n\n    @commands.command()\n    async def curb(self, ctx):\n        await self.play(ctx, 'sounds/curb.mp3')\n\n    @commands.command()\n    async def doubt(self, ctx):\n        await self.play(ctx, 'sounds/doubt.mp3')\n\n    @commands.command()\n    async def drum(self, ctx):\n        await self.play(ctx, 'sounds/drum.mp3')\n\n    @commands.command()\n    async def error(self, ctx):\n        await self.play(ctx, 'sounds/error.mp3')\n\n    @commands.command()\n    async def egal(self, ctx):\n        await self.play(ctx, 'sounds/egal.mp3')\n\n    @commands.command()\n    async def standard(self, ctx):\n        await self.play(ctx, 'sounds/standard.mp3')\n\n    @commands.command()\n    async def kot(self, ctx):\n        await self.play(ctx, 'sounds/kot.mp3')\n\n\n    @commands.command()\n    async def zitat(self, ctx):\n        zitat = random_zitat()\n        output = gTTS(zitat, lang='de')\n        output.save('temp.wav')\n        await ctx.send(zitat)\n        await self.play(ctx, 'temp.wav')\n\n\n@bot.event\nasync def on_ready():\n    print('Logged in as')\n    print(bot.user.name)\n    print(bot.user.id)\n    print('------')\n\nbot.add_cog(Music(bot))\nbot.run(bot_key)\n","sub_path":"bruder_wuenstel.py","file_name":"bruder_wuenstel.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"575297713","text":"import test as Arceus\nimport pandas as pd\n\nSalesDataSet = pd.read_pickle('MarylandVehicleSales2002-2021') #load your pkl or use pd.read_csv #1\nprint(SalesDataSet)\nIndicatorDataSet = pd.read_csv('historical_country_United_States_indicator_Cpi_Core_Core.csv') #2 load your indicator\nIndicatorDataSetDates = IndicatorDataSet['DateTime']\nfor x in range(0, IndicatorDataSetDates.size):\n    IndicatorDataSetDates[x] = IndicatorDataSetDates[x][0:10]\nIndicatorDataSet['DateTime'] = IndicatorDataSetDates\ndf = IndicatorDataSet.set_index('DateTime')\ndf1 = df.loc['2002-01-31':'2021-04-30']\ndf1.reset_index(drop=True, inplace=True)\nIndexColumnOfIndicator = 2 #which index is your indicator column #3\nSalesColumnName = 'New' # specify your sales column. New is my sales in this case #4\nTestYear = 2018 # prediction year #5\nNumNodes = 153 # approx 2/3 of the number of your rows. 
Make this divisible by a three #6\nstartYear = 2002 # 7\nendYear = 2021 # 8\nstartMonth = 1 # 9\nendMonth = 4 # 10\n\n\n# Message me on discord if you need help\n# RamenMode#1200\n\n\n\n\n\n\n\n\n\nArceus.Arceus(SalesDataSet, df1, IndexColumnOfIndicator, SalesColumnName, TestYear, NumNodes, startYear, endYear, startMonth, endMonth)","sub_path":"baseCall.py","file_name":"baseCall.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"579927684","text":"import subprocess\nfrom xml.etree import ElementTree\n\nimport pygments\nfrom bs4 import BeautifulSoup\nfrom pygments.formatters import get_formatter_by_name\nfrom pygments.lexers import get_lexer_by_name\n\nlanguageMimeTypeMap = {\n \"kotlin\": \"text/x-kotlin\",\n \"java\": \"text/x-java\",\n \"groovy\": \"text/x-groovy\",\n \"xml\": \"application/xml\",\n \"bash\": \"text/x-sh\",\n \"html\": \"application/xml\",\n \"javascript\": \"text/javascript\",\n \"json\": \"application/json\"\n}\n\ndef customized_markdown(text):\n kramdown = subprocess.Popen(\n \"kramdown --input GFM --no-hard-wrap --smart-quotes apos,apos,quot,quot --no-enable-coderay\",\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n stdout_data, stderr_data = kramdown.communicate(input=text.encode(\"utf8\"))\n return stdout_data.decode(\"utf8\", errors='ignore')\n\n\ndef highlight_code(text):\n tree = BeautifulSoup(text, 'html.parser')\n code_elements = tree.select('pre > code')\n for element in code_elements:\n class_names = element.get(\"class\")\n lang = None\n if class_names is not None:\n for class_name in class_names:\n if class_name.startswith(\"language-\"):\n lang = class_name[len(\"language-\"):]\n if lang is not None:\n element['data-lang'] = languageMimeTypeMap[lang]\n element['class'] = \"code _highlighted\"\n return unicode(str(tree), \"utf8\").replace(\"
\", \"
\")\n\n\ndef jinja_aware_markdown(text, flatPages):\n app = flatPages.app\n template_context = {}\n app.update_template_context(template_context)\n\n env = app.jinja_env\n template = env.from_string(text)\n page_html = customized_markdown(template.render(template_context))\n return highlight_code(page_html)\n","sub_path":"src/markdown/makrdown.py","file_name":"makrdown.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"506822258","text":"import geopandas as gpd\nimport numpy as np\nfrom shapely.geometry import LineString, Point, Polygon\n\n\ndef make_crossings(intersections_dict, sidewalks, debug=False):\n crs = sidewalks.crs\n st_crossings = []\n street_segments = []\n ixn_dat = []\n for i, (ixn, data) in enumerate(intersections_dict.items()):\n ixn_dat.append({\n 'geometry': data['geometry'],\n 'ixn': i\n })\n for street in data['streets']:\n new_crossing = make_crossing(street, sidewalks, data['streets'],\n debug)\n if debug:\n new_crossing, street_segment = new_crossing\n street_segments.append(street_segment)\n if new_crossing is not None:\n st_crossings.append(new_crossing)\n\n st_crossings = gpd.GeoDataFrame(st_crossings)\n st_crossings = gpd.GeoDataFrame(st_crossings[['geometry']])\n st_crossings = st_crossings[st_crossings.type == 'LineString']\n st_crossings = st_crossings[st_crossings.is_valid]\n\n # Remove duplicates\n def comp(geom):\n p1 = np.round(geom.coords[0], 2)\n p2 = np.round(geom.coords[-1], 2)\n return str([p1, p2])\n\n comparison = st_crossings.geometry.apply(comp)\n comparison.name = 'comp'\n unique = st_crossings.groupby(comparison).first()\n st_crossings = gpd.GeoDataFrame(unique.reset_index())\n st_crossings.crs = crs\n\n if debug:\n street_segments = gpd.GeoDataFrame(street_segments)\n street_segments.crs = sidewalks.crs\n return st_crossings, street_segments\n else:\n return st_crossings\n\n\ndef make_crossing(street, sidewalks, streets_list, debug=False):\n '''Attempts to create a street crossing line given a street segment and\n a GeoDataFrame sidewalks dataset. The street and sidewalks should have\n these properties:\n\n (1) The street should start at the street intersection and extend away\n from it.\n (2) The sidewalks should all be LineString geometries.\n\n If a crossing cannot be created that meets certain internal parameters,\n None is returned.\n\n :param street: The street geometry.\n :type street: shapely.geometry.LineString\n :param sidewalks: The sidewalks dataset.\n :type sidewalks: geopandas.GeoDataFrame\n :returns: If a crossing can be made, a shapely Linestring. Otherwise, None.\n :rtype: shapely.geometry.LineString or None\n\n '''\n # 'Walk' along the street in 1-meter increments, finding the closest\n # sidewalk + the distance along each end. Reject those with inappropriate\n # angles and differences in length.\n # TODO: this is a good place for optimizations, it's a search problem.\n # Can probably do something like binary search.\n\n # Clip street in half: don't want to cross too far in.\n # TODO: this should be done in a more sophisticated way. e.g. dead ends\n # shouldn't require this and we should use a max distance value as well\n # street = street.interpolate(0.5, normalized=True)\n\n # New idea: use street buffers of MAX_CROSSING_DIST + small delta, use\n # this to limit the sidewalks to be considered at each point. Fewer\n # distance and side-of-line queries!\n\n # FIXME: use 'z layer' data if available (e.g. 
OSM)\n\n START_DIST = 4\n INCREMENT = 2\n MAX_DIST_ALONG = 25\n MAX_CROSSING_DIST = 30\n OFFSET = MAX_CROSSING_DIST / 2\n\n st_distance = min(street.length / 2, MAX_DIST_ALONG)\n start_dist = min(START_DIST, st_distance / 2)\n\n # Create buffer for the street search area, one for each side, then find\n # the sidewalks intersecting that buffer - use as candidates for\n # right/left\n street_cut = cut(street, st_distance)[0]\n\n if debug:\n street_segment = {'geometry': street_cut, 'issue': 'None'}\n\n sidewalk_sides = {}\n\n for side in ('left', 'right'):\n side_sidewalks = get_side_sidewalks(OFFSET, side, street_cut,\n sidewalks)\n if side_sidewalks.shape[0] < 1:\n # One of the sides has no sidewalks to connect to! Abort!\n if debug:\n street_segment['issue'] = 'no {} sidewalk'.format(side)\n return None, street_segment\n else:\n return None\n sidewalk_sides[side] = side_sidewalks\n\n candidates = []\n for dist in np.arange(start_dist, st_distance, INCREMENT):\n crossing = crossing_from_dist(street, dist,\n sidewalk_sides['left'],\n sidewalk_sides['right'])\n\n # We now have the lines on the left and right sides. Let's now filter\n # and *not* append if either are invalid\n\n # if side.length > MAX_DIST or crosses_streets(side, streets):\n other_streets = [st for st in streets_list if st != street]\n crosses_self, crosses_others = valid_crossing(crossing, street_cut,\n other_streets)\n\n # The sides have passed the filter! Add their data to the list\n if crosses_self and not crosses_others:\n candidates.append({'geometry': crossing, 'distance': dist})\n\n if not candidates:\n if debug:\n street_segment['issue'] = 'no candidates'\n return None, street_segment\n else:\n return None\n\n # Return the shortest crossing.\n # TODO: Should also bias towards *earlier* appearances, i.e. towards\n # corner.\n # lengths = np.array([line['crossing'].length for line in lines])\n # # Inverse distance function (distance from intersection)\n # distance_metric = 1 / np.array([line['distance'] for line in lines])\n\n # lengths * distance_metric\n def metric(candidate):\n return candidate['geometry'].length + 1e-1 * candidate['distance']\n\n best = sorted(candidates, key=metric)[0]\n\n if debug:\n return best, street_segment\n else:\n return best\n\n\ndef get_side_sidewalks(offset, side, street, sidewalks):\n offset = street.parallel_offset(offset, side, 0, 1, 1)\n if offset.type == 'MultiLineString':\n # Convert to LineString\n coords = []\n for geom in offset.geoms:\n coords += list(geom.coords)\n offset = LineString(coords)\n if side == 'left':\n offset.coords = offset.coords[::-1]\n st_buffer = Polygon(list(street.coords) +\n list(offset.coords) +\n [street.coords[0]])\n query = sidewalks.sindex.intersection(st_buffer.bounds, objects=True)\n query_sidewalks = sidewalks.loc[[q.object for q in query]]\n side_sidewalks = query_sidewalks[query_sidewalks.intersects(st_buffer)]\n\n return side_sidewalks\n\n\ndef crossing_from_dist(street, dist, sidewalks_left, sidewalks_right):\n # Grab a point along the outgoing street\n point = street.interpolate(dist)\n\n # Find the closest left and right points\n def closest_line_to_point(point, lines):\n sorted_side = lines.distance(point).sort_values()\n closest = lines.loc[sorted_side.index[0], 'geometry']\n return closest.interpolate(closest.project(point))\n\n left = closest_line_to_point(point, sidewalks_left)\n right = closest_line_to_point(point, sidewalks_right)\n\n # We now have the lines on the left and right sides. 
Let's now filter\n    # and *not* append if either are invalid\n    # (1) They cannot cross any other street line\n    # (2) They cannot be too far away (MAX_DIST)\n    crossing = LineString([left, right])\n\n    return crossing\n\n\ndef valid_crossing(crossing, street, other_streets):\n    crosses_street = street.intersects(crossing)\n    crosses_others = [other.intersects(crossing) for other in other_streets]\n\n    return crosses_street, any(crosses_others)\n\n\ndef cut(line, distance):\n    # Cuts a line in two at a distance from its starting point\n    if distance <= 0.0 or distance >= line.length:\n        return [LineString(line)]\n    coords = list(line.coords)\n    for i, p in enumerate(coords):\n        pd = line.project(Point(p))\n        if pd == distance:\n            return [\n                LineString(coords[:i+1]),\n                LineString(coords[i:])]\n        if pd > distance:\n            cp = line.interpolate(distance)\n            return [\n                LineString(coords[:i] + [(cp.x, cp.y)]),\n                LineString([(cp.x, cp.y)] + coords[i:])]\n","sub_path":"crossify/crossings.py","file_name":"crossings.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"168549401","text":"import re\nimport os\nimport glob\nimport pandas as pd\n\n\ndef split_pages(data):\n    return list(filter(None, map(\n        str.strip, map(str, data.split('\\x0c')))))\n\n\nall_data = []\nbase_dir = r\"J:\\PQM Share\\RDS\"\nsave_dir = r\"\"\nsave_fname = os.path.join(save_dir, 'results.xlsx')\nfpaths = glob.glob(os.path.join(base_dir, '*.txt'))\npage_search_sent = 'ELECTRONIC AND FAX INSTRUCTIONS AUTHORISATION AND INDEMNITY LETTER'\ncompany_pattern = re.compile(r'^Company:\\s+(.*)$', re.IGNORECASE)\ninst_line_pattern = re.compile(r'^\\(([A-H]{1})\\).+\"(.*)\".', re.IGNORECASE)\ninst_pattern = re.compile(r'\"(.*?)\"')\nresults = {\n    'company_name': [],\n    'instructions': []\n}\n\nfor fpath in fpaths:\n    with open(fpath, 'r', encoding='utf8') as infile:\n        data = infile.read()\n    all_data += split_pages(data)\n\nfor data in all_data:\n    if page_search_sent in data:\n        company_name, instructions = [], []\n        for line in data.splitlines():\n            inst_line_match = re.search(inst_line_pattern, line)\n            company_match = re.findall(company_pattern, line)\n            if inst_line_match:\n                instructions.append(re.findall(inst_pattern, line)[0])\n                print(f'Instruction: {re.findall(inst_pattern, line)[0]}')\n            elif company_match:\n                company_name.append(company_match[0])\n                print(f'Company Name: {company_match[0]}')\n        # Guard against pages that match the sentinel but carry no Company line.\n        results['company_name'].append(company_name[0] if company_name else '')\n        results['instructions'].append(', '.join(instructions))\n\ndf = pd.DataFrame(results)\ndf.to_excel(save_fname)\n","sub_path":"regex_search.py","file_name":"regex_search.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"493939858","text":"from . 
import main\nfrom flask import render_template, make_response, send_file, url_for, request, current_app, flash, redirect, \\\n    send_from_directory\nfrom ..models import Carpark, Transaction\nfrom datetime import datetime\nimport os\nfrom bokeh.plotting import figure, output_file, save\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\nfrom werkzeug.utils import secure_filename\nimport pandas as pd\nfrom app import db\n\n@main.route('/')\ndef index():\n    codes = Carpark.get_carparks_with_data()\n    page = request.args.get('page', 1, type=int)\n    pagination = Carpark.query.order_by(Carpark.last_updated_timestamp.desc()).paginate(\n        page, per_page=current_app.config['CARPARKS_PER_PAGE'], error_out=False\n    )\n    carparks = pagination.items\n    return render_template('index.html', carparks=carparks, codes=codes, pagination=pagination)\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1].lower() in current_app.config['ALLOWED_EXTENSIONS']\n\n\n@main.route('/upload', methods=['GET', 'POST'])\ndef upload():\n    if request.method == 'POST':\n        if 'file' not in request.files:\n            flash('No File')\n            return redirect(url_for('main.upload'))\n        file = request.files['file']\n        # FileStorage.filename is the browser-supplied name; .name is only the form field name.\n        if file.filename == '':\n            flash('No selected file')\n            return redirect(url_for('main.upload'))\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n            flash('Uploaded, start updating database...')\n            ### TODO BACKEND\n            try:\n                update_database(filename)\n            except Exception as e:\n                flash('Failed to update the database...')\n                print(e)\n            return redirect(url_for('main.upload'))\n    return render_template('upload.html')\n\ndef update_database(filename):\n    start = datetime.utcnow()\n    FILEPATH = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)\n    df = pd.read_excel(FILEPATH, sheet_name='carparks')\n    end = datetime.utcnow()\n    print('Loading Carpark Tab takes ', end - start)\n\n    for i in range(len(df)):\n        if not Carpark.query.filter_by(code=df['code'][i]).first():\n            carpark = Carpark(\n                code=df['code'][i],\n                name=df['name'][i],\n                description=df['description'][i],\n                city=df['city'][i],\n                address=df['address'][i],\n                num_of_space=int(df['num_of_space'][i]),\n                type=df['type'][i],\n                longtitude=df['longtitude'][i],\n                latitude=df['latitude'][i],\n                demand=df['demand'][i],\n            )\n            db.session.add(carpark)\n            print('Carpark Inserted ', carpark.code)\n            if not os.path.exists(r'app\\static\\carparks\\{}\\plots'.format(df['code'][i])):\n                os.makedirs(r'app\\static\\carparks\\{}\\plots'.format(df['code'][i]))\n    db.session.commit()\n\n    start = datetime.utcnow()\n    df = pd.read_excel(FILEPATH, sheet_name='transactions')\n    end = datetime.utcnow()\n    print('Loading Transactions Tab takes ', end - start)\n\n    df[['entry_timestamp', 'exit_timestamp', 'tx_timestamp']] = df[\n        ['entry_timestamp', 'exit_timestamp', 'tx_timestamp']].apply(lambda x: pd.to_datetime(x))\n    c_id = Carpark.query.filter_by(code=df['code'][1]).first().id\n\n    start = datetime.utcnow()\n    for i in range(len(df)):  # len(df)\n        transaction = Transaction(\n            entry_timestamp=df['entry_timestamp'][i],\n            exit_timestamp=df['exit_timestamp'][i],\n            tx_timestamp=df['tx_timestamp'][i],\n            parker_type=df['parker_type'][i],\n            license=df['license'][i],\n            gross_price=df['gross_price'][i],\n            validation=df['validation'][i],\n            validation_type=df['validation_type'][i],\n            net_price=df['net_price'][i],\n            payment_method=df['payment_method'][i],\n            
payment_location=df['payment_location'][i],\n entry_gate=df['entry_gate'][i],\n exit_gate=df['exit_gate'][i],\n operator=df['operator'][i],\n length_of_stay=df['length_of_stay'][i],\n length_of_stay_until_pay=df['length_of_stay_until_pay'][i],\n carpark_id=c_id\n )\n db.session.add(transaction)\n db.session.commit()\n end = datetime.utcnow()\n print('Inserting data takes ', end - start)\n print('Entries: ', len(df))\n print('Avg: ', (end - start) / len(df))\n\n\n@main.route('/download_template')\ndef download_template():\n return send_from_directory(current_app.config['DATA_TEMPLATE_FOLDER'],'Data Model Template.xlsm')\n\n@main.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory(current_app.config['UPLOAD_FOLDER'],\n filename)\n\n\n@main.route('/carpark/')\ndef carpark(code):\n carpark = Carpark.query.filter_by(code=code).first_or_404()\n has_data = code in Carpark.get_carparks_with_data()\n # If no analytics data, return template\n if not has_data:\n return render_template('carpark.html', carpark=carpark, has_data=has_data)\n\n data = {}\n\n # Statics\n statics_file = r'app/static/carparks/{}/plots/statics.html'.format(code)\n if not os.path.exists(statics_file):\n statics_df = carpark.get_statics()\n statics_df.to_html(statics_file, float_format='%.1f')\n data['statics'] = open(statics_file).read()\n\n # Volume\n vol_file = r'app/static/carparks/{}/plots/volume.png'.format(code)\n if not os.path.exists(vol_file):\n plt.clf()\n vol_df = carpark.get_volume()\n plt.style.use('ggplot')\n ax = vol_df.plot(kind='bar', figsize=(12, 5), stacked=True, x=vol_df.index)\n ticklabels = [''] * len(vol_df.index)\n ticklabels[::7] = [item.strftime('%b %d') for item in vol_df.index[::7]]\n ax.set_xlabel('Daily Volume')\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n plt.gcf().autofmt_xdate()\n plt.savefig(vol_file, format='png')\n\n # plot = figure(plot_width=1200, plot_height=500, x_axis_type=\"datetime\")\n # plot.line(vol_df.index, vol_df.values, color='navy', alpha=0.5)\n # output_file(vol_file)\n # save(plot)\n data['volume'] = url_for('static', filename='carparks/{}/plots/volume.png'.format(code))\n\n # Occupancy\n occ_file = r'app/static/carparks/{}/plots/occupancy.png'.format(code)\n if not os.path.exists(occ_file):\n plt.clf()\n occ_chart_data = carpark.get_occupancy()\n plt.style.use('ggplot')\n fig, ax = plt.subplots(figsize=(12, 5))\n occ_chart_data.plot(ax=ax, kind='bar', stacked=True)\n from matplotlib.ticker import PercentFormatter\n ax.yaxis.set_major_formatter(PercentFormatter())\n plt.savefig(occ_file, format='png')\n data['occupancy'] = url_for('static', filename='carparks/{}/plots/occupancy.png'.format(code))\n\n # LOS\n los_file = r'app/static/carparks/{}/plots/los.png'.format(code)\n if not os.path.exists(los_file):\n plt.clf()\n los_df = carpark.get_los()\n los_dist = los_df['los_group'].value_counts()\n los_dist.plot.pie(autopct='%.1f%%', figsize=(6, 6))\n plt.savefig(los_file, format='png')\n # To Be Polished\n data['los'] = url_for('static', filename='carparks/{}/plots/los.png'.format(code))\n\n\n\n\n return render_template('carpark.html', carpark=carpark, has_data=has_data, data=data)\n\n\n@main.route('/transactions/')\ndef transactions(code):\n carpark = Carpark.query.filter_by(code=code).first_or_404()\n page = request.args.get('page', 1, type=int)\n pagination = carpark.transactions.order_by(Transaction.exit_timestamp.desc()).paginate(\n page, per_page=current_app.config['TRANSACTIONS_PER_PAGE'], error_out=False\n )\n 
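# --- Editorial aside (annotation, not part of views.py): the carpark() view above renders
# each chart once and caches it as a static file, regenerating only when the file is missing.
# The pattern, reduced to a sketch (cached_plot and render_fn are illustrative names, not
# part of the app):
#
#     def cached_plot(path, render_fn):
#         if not os.path.exists(path):
#             render_fn(path)      # e.g. build the figure and plt.savefig(path)
#         return path
#
# One consequence of this design: charts go stale after new uploads unless the cached
# files are deleted by hand.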
transactions = pagination.items\n    return render_template('transactions.html', carpark=carpark, transactions=transactions, pagination=pagination)\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"226475709","text":"import sys\nimport pytest\n\nfrom jinja2 import Template\n\n\n@pytest.mark.skipif(sys.version_info < (3, 5),\n                    reason='Requires 3.5 or later')\ndef test_generator_stop():\n    class X(object):\n        def __getattr__(self, name):\n            raise StopIteration()\n\n    t = Template('a{{ bad.bar() }}b')\n    with pytest.raises(RuntimeError):\n        t.render(bad=X())\n","sub_path":"tests/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"251370031","text":"import os\nimport subprocess\n\nimport discord\nfrom discord.ext import commands\n\nfrom utils.checks import checks\nfrom utils.functions import pagify\n\n\nclass Core(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(description=\"List all modules on the bot\")\n    async def modules(self, ctx):\n        cog_list, cogs_loaded, cogs_unloaded = [], \"```diff\\n+\\t\", \"\"\n        event_list, events_loaded, events_unloaded = [], \"```diff\\n+\\t\", \"\"\n        cogs, events = [], []\n        bot_cogs = {}\n        em = discord.Embed(color=self.bot.color)\n        em.set_author(name=\"Bot modules:\")\n        em.set_thumbnail(url=self.bot.user.avatar_url)\n        paths = [\"modules/Commands\", \"modules/Events\"]\n        for path in paths:\n            for file in os.listdir(path):\n                if file.endswith(\".py\"):\n                    if path == paths[0]:\n                        cog_list.append(file[:-3])\n                    else:\n                        event_list.append(file[:-3])\n        for name, obj in self.bot.cogs.items():\n            # Command cogs live under modules.Commands (see the load/unload commands below).\n            if \"modules.Commands\" in str(obj):\n                cogs.append(name)\n            else:\n                events.append(name)\n        bot_cogs[\"cogs\"] = cogs\n        bot_cogs[\"events\"] = events\n        for k, v in bot_cogs.items():\n            if k == \"cogs\":\n                for cog in v:\n                    if cog in cog_list:\n                        cog_list.remove(cog)\n            else:\n                for event in v:\n                    if event in event_list:\n                        event_list.remove(event)\n        cogs_loaded += \", \".join(bot_cogs[\"cogs\"])\n        cogs_unloaded += \", \".join(cog_list)\n        events_loaded += \", \".join(bot_cogs[\"events\"])\n        events_unloaded += \", \".join(event_list)\n        cogs_loaded += f\"\\n-\\t{cogs_unloaded}```\" if cogs_unloaded else \"```\"\n        events_loaded += f\"\\n-\\t{events_unloaded}```\" if events_unloaded else \"```\"\n        em.add_field(name=\"Cogs:\", value=cogs_loaded)\n        em.add_field(name=\"Events:\", value=events_loaded)\n        await ctx.send(embed=em)\n\n    @checks.is_owner()\n    @commands.group(hidden=True, case_insensitive=True, description=\"Load a module\")\n    async def load(self, ctx):\n        \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n        if not ctx.invoked_subcommand:\n            return await ctx.send_help(ctx.command)\n\n    @load.command(name=\"cog\", aliases=[\"c\"], description=\"Load a cog\")\n    async def load_cog(self, ctx, cog_name: str):\n        \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n        cog_name = cog_name.replace(\".py\", \"\")\n        try:\n            self.bot.load_extension(f\"modules.Commands.{cog_name}\")\n        except commands.ExtensionAlreadyLoaded:\n            return await ctx.send_error(f\"Cog {cog_name} is already loaded!\")\n        except commands.ExtensionNotFound:\n            return await ctx.send_error(f\"Cog {cog_name} could not be found!\")\n        await ctx.send(f\"Cog {cog_name} has now been 
loaded!\")\n\n @load.command(name=\"event\", aliases=[\"e\"], description=\"Load an event\")\n async def load_event(self, ctx, event_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n event_name = event_name.replace(\".py\", \"\")\n try:\n self.bot.load_extension(f\"modules.Events.{event_name}\")\n except commands.ExtensionAlreadyLoaded:\n return await ctx.send_error(f\"Event {event_name} is already loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Event {event_name} could not be found!\")\n await ctx.send(f\"Event {event_name} has now been loaded!\")\n\n @checks.is_owner()\n @commands.group(hidden=True, case_insensitive=True, description=\"Unload a module\")\n async def unload(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n if not ctx.invoked_subcommand:\n return await ctx.send_help(ctx.command)\n\n @unload.command(name=\"cog\", aliases=[\"c\"], description=\"Unload a cog\")\n async def unload_cog(self, ctx, cog_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n cog_name = cog_name.replace(\".py\", \"\")\n try:\n self.bot.unload_extension(f\"modules.Cogs.{cog_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Cog {cog_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Cog {cog_name} could not be found!\")\n await ctx.send(f\"Cog {cog_name} is now unloaded!\")\n\n @unload.command(name=\"event\", aliases=[\"e\"], description=\"Unload an event\")\n async def unload_event(self, ctx, event_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n event_name = event_name.replace(\".py\", \"\")\n try:\n self.bot.unload_extension(f\"modules.Events.{event_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Event {event_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Event {event_name} could not be found!\")\n await ctx.send(f\"Event {event_name} is now unloaded!\")\n\n @checks.is_owner()\n @commands.group(hidden=True, case_insensitive=True, description=\"Reload a module\")\n async def reload(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n if not ctx.invoked_subcommand:\n return await ctx.send_help(ctx.command)\n\n @reload.command(name=\"cog\", aliases=[\"c\"], description=\"Reload a cog\")\n async def reload_cog(self, ctx, cog_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n cog_name = cog_name.replace(\".py\", \"\")\n try:\n self.bot.reload_extension(f\"modules.Commands.{cog_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Cog {cog_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Cog {cog_name} could not be found!\")\n await ctx.send(f\"Cog {cog_name} has been reloaded!\")\n\n @reload.command(name=\"event\", aliases=[\"e\"], description=\"Reload an event\")\n async def reload_event(self, ctx, event_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n event_name = event_name.replace(\".py\", \"\")\n try:\n self.bot.reload_extension(f\"modules.Events.{event_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Event {event_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Event {event_name} could not be found!\")\n await 
ctx.send(f\"Event {event_name} has been reloaded!\")\n\n @checks.is_owner()\n @commands.command(hidden=True, description=\"Pull updates from git\")\n async def pull(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n paged = pagify(\n subprocess.Popen(\n [\"git\", \"pull\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n ).stdout.read().decode()\n )\n for page in paged:\n p = f\"```css\\n{page}```\"\n await ctx.send(p)\n\n @checks.is_owner()\n @commands.command(name=\"raise\", hidden=True, description=\"Raise a test exception\")\n async def _raise(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n await ctx.send(\"Raising a test exception..\")\n raise Exception(f\"Exception raised by {ctx.author}\")\n\n @checks.is_owner()\n @commands.command(hidden=True, description=\"Force a user to run a command\")\n async def sudo(self, ctx, user: discord.Member, *, command):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n message = ctx.message\n prefix = await self.bot.get_prefix(message)\n message.author = user\n message.content = prefix + command\n await self.bot.invoke(await self.bot.get_context(message))\n\n\ndef setup(bot):\n bot.add_cog(Core(bot))\n","sub_path":"modules/Commands/Core.py","file_name":"Core.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"429526844","text":"#!/usr/bin/env python\nimport os\nfrom distutils.core import setup\n\ndef get_files(directory, install_base):\n file_list = []\n files=os.listdir(directory)\n found_files = []\n for file in files:\n if ( os.path.isdir(directory + \"/\" + file) ):\n if ( not file == \".svn\"):\n file_list += get_files(directory + \"/\" + file, install_base)\n else:\n found_files.append(directory + \"/\" + file)\n \n if ( len(found_files) > 0 ):\n file_list.append((install_base + \"/\" + directory, found_files))\n return file_list\n\nmedia_files = get_files(\"media\", \"share/saxs/\")\n\nsetup(\n name='saxs-style-glareindark',\n version='0.0.1',\n packages=['saxs_style_glareindark'],\n package_data={'saxs_style_glareindark': ['templates/*', 'templatetags/*']},\n data_files = media_files,\n)\n","sub_path":"saxs-style-glareindark/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"42429278","text":"import graphene\nfrom graphene import relay\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphene_django import DjangoObjectType\nfrom .models import Inmate\nfrom graphql_relay import from_global_id, to_global_id\n\n\nclass InmateNode(DjangoObjectType):\n\n class Meta:\n model = Inmate\n interfaces = (relay.Node,)\n filter_fields = {\n 'id': ['exact'],\n # 'number': ['exact'],\n 'last_name': ['exact', 'icontains', 'istartswith'],\n 'first_name': ['exact', 'icontains', 'istartswith'],\n 'middle_name': ['exact', 'icontains', 'istartswith'],\n 'agency': ['exact', 'icontains', 'istartswith'],\n 'date_created': ['exact', 'icontains', 'istartswith']\n }\n row_id = graphene.Int(source='id')\n # @staticmethod\n # def resolve_row_id(self):\n\n # return self.id\n\n\nclass Query(graphene.ObjectType):\n inmate = relay.Node.Field(InmateNode)\n inmates = DjangoFilterConnectionField(InmateNode)\n\n\nclass CreateInmate(graphene.relay.ClientIDMutation):\n\n class Input:\n # number = graphene.Int()\n last_name = 
graphene.String()\n first_name = graphene.String()\n middle_name = graphene.String()\n agency = graphene.String()\n\n inmate = graphene.Field(InmateNode)\n\n def mutate_and_get_payload(self, info, **input):\n # row_id = graphene.Int(source='id')\n inmate = Inmate(\n # number=row_id,\n last_name=input.get('last_name'),\n first_name=input.get('first_name'),\n middle_name=input.get('middle_name'),\n agency=input.get('agency')\n )\n inmate.save()\n return CreateInmate(inmate=inmate)\n\n\nclass Mutation(graphene.AbstractType):\n create_inmate = CreateInmate.Field()\n","sub_path":"jpacks/relay-schema.py","file_name":"relay-schema.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"150530404","text":"import config\nfrom discord.ext.commands import Bot\nimport asyncio\nimport re\nimport datetime\nimport time\n\nBOT_PREFIX = ('!')\nTOKEN = config.token\n\nclass Track:\n def __init__(self, title=None, length=None, user=None, np=False):\n self.title = title\n self.length = length\n self.user = user\n self.np = np\n\n def __str__(self):\n return '{0}\\\"{1}\\\" {2} ({3})'.format('▶ ' if self.np else ' ', self.title, sec2str(self.length), self.user)\n\nclient = Bot(command_prefix=BOT_PREFIX)\n\n#--------------------#\n\n@client.event\nasync def on_ready():\n print('Logged in as {0}'.format(client.user.name))\n #await client.send_message(destination=client.get_channel('356866879392055307'), content='harlo harlo')\n\n@client.command(name = 'harlo',\n pass_context = True)\nasync def cmd_harlo(context):\n msg = 'harlo harlo {0}!!!'.format(context.message.author.mention)\n await client.say(msg)\n\n#--------------------#\n\n@client.command(name = 'start',\n pass_context = True)\nasync def cmd_start(context):\n tracklist = await update_list_from_q(context)\n\n for i, track in enumerate(tracklist):\n print('{0}. {1}'.format(i, track))\n\ndef get_status_and_update_tl(tl, q_ts, sec_remaining):\n now_ts = datetime.datetime.utcnow()\n sec_since_q = (now_ts - q_ts).total_seconds()\n\n cur_q_pos = sum(t.length for t in tl) - sec_remaining + sec_since_q\n cur_pos = cur_q_pos\n cur_index = 0\n for i, t in enumerate(tl):\n if t.length - cur_pos <= 0:\n cur_pos -= t.length\n else:\n cur_index = i\n break\n\n #print('now_ts = {0}'.format(now_ts))\n #print('total_q_len = {0}'.format(sec2str(sum(t.length for t in tl))))\n #print('sec_since_q = {0}'.format(sec2str(sec_since_q)))\n #print('sec_remaining = {0}'.format(sec2str(sec_remaining)))\n #print('cur_q_pos = {0}'.format(sec2str(cur_q_pos)))\n print('rn: {0} {1}'.format(cur_index, sec2str(cur_pos)))\n return (cur_index, cur_pos)\n\nasync def update_list_from_q(context):\n q_ts = None\n sec_remaining = None\n tl = []\n regexes = {\n 'q_title': r'.*There (?:is|are) \\*\\*(\\d+)\\*\\* tracks? 
with a remaining length of \\*\\*\\[([\\d:]+)\\]\\*\\* in the queue\\..*',\n 'q_page': r'^Page \\*\\*(\\d+)\\*\\* of \\*\\*(\\d+)\\*\\*\\.$',\n 'q_track': r'^`\\[\\d+\\]` (\\\\▶)?\\*\\*(.*)\\*\\* added by \\*\\*(.*)\\*\\* `\\[([\\d:]+)\\]`$',\n }\n\n channel = context.message.channel\n\n # TODO: add after to limit msgs from last X hours\n found_q = False\n async for message in client.logs_from(channel):\n # Look for ;;q msg\n match = re.search(regexes['q_title'], message.content)\n if match:\n found_q = True\n q_ts = message.timestamp\n #q_ts = datetime.datetime(2018, 11, 12, 19, 23, 5, 605000)\n #q_ts = datetime.datetime.utcnow()\n sec_remaining = str2sec(match.group(2))\n\n # Scrape ;;q line-by-line for data\n page_cur = 0\n page_tot = 0\n for line in iter(message.content.splitlines()):\n match = re.search(regexes['q_page'], line)\n if match:\n page_cur = int(match.group(1))\n page_tot = int(match.group(2))\n match = re.search(regexes['q_track'], line)\n if match:\n length = match.group(4)\n \n track = Track(title = match.group(2),\n length = str2sec(match.group(4)),\n user = match.group(3),\n np = match.group(1))\n tl.append(track)\n\n print('\\nq_time: {0}\\n'\n 'page {1}/{2}, {3} tracks, {4} remaining\\n'.format(q_ts, page_cur, page_tot, len(tl), sec2str(sec_remaining)))\n\n get_status_and_update_tl(tl, q_ts, sec_remaining)\n break\n\n if not found_q:\n await client.say('No ;;q message found. Send a ;;q and try again')\n\n return tl\n\n#--------------------#\n\ndef utc2local(utc):\n epoch = time.mktime(utc.timetuple())\n offset = datetime.datetime.fromtimestamp(epoch) - datetime.datetime.utcfromtimestamp(epoch)\n return utc + offset\n\ndef str2sec(string):\n match = re.match(r'^\\d{2}:\\d{2}$', string)\n if match:\n t = time.strptime(string, '%M:%S')\n match = re.match(r'^\\d{2}:\\d{2}:\\d{2}$', string)\n if match:\n t = time.strptime(string, '%H:%M:%S')\n return datetime.timedelta(hours=t.tm_hour, minutes=t.tm_min, seconds=t.tm_sec).total_seconds()\n\ndef sec2str(sec):\n return str(datetime.timedelta(seconds=sec))\n\nif __name__ == '__main__':\n client.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"169483523","text":"import numpy as np # linear algebra\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Dense, Activation, Conv2D, Flatten, Conv1D\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom trainer.replay_buffer import ReplayBuffer\nfrom trainer.dense_nn import DenseNN\nfrom kaggle_environments.envs.halite.helpers import Board, ShipAction, ShipyardAction, Observation\n\n\ndef build_conv_dqn(lr, n_actions, input_dims, fc1_dims):\n model = Sequential()\n model.add(Conv2D(filters=32, kernel_size=8, strides=4, activation='relu',\n input_shape=(input_dims,None), data_format='channels_first'))\n model.add(Conv2D(filters=64, kernel_size=4, strides=2, activation='relu',\n data_format='channels_first'))\n model.add(Conv2D(filters=64, kernel_size=3, strides=1, activation='relu',\n data_format='channels_first'))\n model.add(Flatten())\n model.add(Dense(fc1_dims, activation='relu'))\n model.add(Dense(n_actions))\n\n model.compile(optimizer=Adam(lr=lr), loss='mean_squared_error')\n\n return model\n\n\ndef build_dense_dqn(lr, n_actions, input_dims, fc1_dims, fc2_dims, fc3_dims=8):\n model = keras.Sequential(\n [keras.layers.Dense(fc1_dims, 
input_shape=(input_dims,)),\n keras.layers.Activation('relu'),\n keras.layers.Dense(fc2_dims),\n keras.layers.Activation('relu'),\n keras.layers.Dense(fc3_dims),\n keras.layers.Activation('relu'),\n keras.layers.Dense(n_actions)]\n )\n\n model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss='mse')\n\n return model\n\n\nclass Agent(object):\n\n def __init__(self,\n alpha,\n gamma,\n n_actions,\n epsilon,\n batch_size,\n input_dims,\n epsilon_dec=0.985,\n epsilon_end=0.05,\n win_reward=5,\n replace=20,\n fc1_dims=16,\n fc2_dims=16,\n mem_size=100_000,\n fname='dqn_model.h5',\n verbose=False,\n agent_type='default',\n nnet_type='dense'\n ):\n \"\"\"\n gamma: discount factor\n epsilon: how often we choose the random action\n\n \"\"\"\n self.action_space = [i for i in range(n_actions)]\n self.n_actions = n_actions\n self.gamma = gamma\n self.epsilon = epsilon\n self.epsilon_end = epsilon_end\n self.epsilon_dec = epsilon_dec\n self.batch_size = batch_size\n self.model_file = fname\n self.win_reward = win_reward\n self.replace = replace\n self.learn_step = 0\n # self.state_converter = state_converter\n self.memory = ReplayBuffer(mem_size, input_dims, n_actions, discrete=True)\n self.verbose = verbose\n self.chose_random = False\n self.agent_type = agent_type\n\n if nnet_type == 'dense':\n dense_nn = DenseNN(\n n_actions=n_actions,\n unit_scale=2,\n observation_shape=input_dims\n )\n self.q_online = dense_nn.compile('q_online', 'mse')\n self.q_offline = dense_nn.compile('q_offline', 'mse')\n else:\n self.q_online = build_conv_dqn(alpha, fc1_dims=fc1_dims, input_dims=input_dims, n_actions=n_actions)\n self.q_offline = build_conv_dqn(alpha, fc1_dims=fc1_dims, input_dims=input_dims, n_actions=n_actions)\n\n def remember(self, state, action, reward, new_state, done):\n self.memory.store_transition(state, action, reward, new_state, done)\n return reward\n\n def replace_target_network(self):\n if self.replace is not None and self.learn_step % self.replace == 0:\n self.q_offline.set_weights(self.q_online.get_weights())\n\n def get_action(\n self,\n state,\n game,\n step,\n verbose=True\n ):\n \"\"\"\n Given a particular state, select the 2 highest values actions, or 2 random actions.\n\n :param state:\n :param game: Which game number this is (used for\n :return:\n \"\"\"\n\n rand = np.random.rand()\n if rand < self.epsilon:\n top_action_index = np.random.choice(self.action_space, 1)[0]\n # see here!\n actions = np.zeros(len(self.action_space))\n actions[top_action_index] = 1\n self.chose_random = True\n else:\n state = np.array([state])\n # pass the state through the network\n # and select the best action\n\n pred = self.q_online.predict(state)\n action_values = pred[0]\n\n top_action_index = np.argmax(action_values)\n actions = np.zeros(len(self.action_space))\n\n # try:\n actions[top_action_index] = 1\n\n self.chose_random = False\n\n if verbose and ((game % 10) == 0) and (step % 10 == 0):\n print(f'Game: {game}, Step: {step}')\n print('action values')\n print(action_values)\n print('top_action_index')\n print(top_action_index)\n print('actions')\n print(actions)\n\n return top_action_index\n\n def learn(self, step_num, episode_num):\n verbose = self.verbose\n\n # print(f'Learning with agent type: {self.agent_type}')\n\n verbose = verbose and (self.memory.mem_ctr == self.batch_size)\n\n # this is a temporal difference learning method --> we learn on each step\n # when we start, do we start with random or all zeros?\n if self.memory.mem_ctr < self.batch_size:\n return\n\n self.learn_step += 1\n\n 
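# --- Editorial aside (annotation, not part of agent.py): replace_target_network(), called
# just below, copies the online weights into the frozen "offline" network every `replace`
# learn steps. The Bellman target assembled afterwards is, per sampled transition i:
#     y_i = r_i + gamma * max_a' Q_offline(s'_i, a') * (1 - done_i)
# and only the entry for the action actually taken is overwritten in q_network before
# train_on_batch, so the loss is driven solely by the visited (state, action) pairs.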
self.replace_target_network()\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('\\n ================')\n print('learning - game: {}, iteration: {}'.format(episode_num, step_num))\n print('Mean reward: {}'.format(np.mean(self.memory.reward_memory)))\n\n # Here we sample non-sequential memory. We don't want to sample sequential\n # memory because this results in correlation (23:45 in video)\n state, action, reward, new_state, done = \\\n self.memory.sample_buffer(self.batch_size)\n\n if verbose:\n print('states: {}, actions: {}, rewards: {}'.format(len(state), len(action), len(reward)))\n\n # feed set of states through the model\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('Predicting q_target with input {}'.format(state.shape))\n q_network = self.q_offline.predict(state)\n if verbose:\n print('Predicting q_next with {}'.format(state.shape))\n q_next = self.q_offline.predict(new_state)\n\n q_network = q_network.copy()\n\n # this is a point of contention\n # TODO: return to this (22:00 in video)\n batch_index = np.arange(self.batch_size, dtype=np.int32)\n\n q_network[batch_index, action] = reward + \\\n self.gamma * np.max(q_next, axis=1) * (1 - done)\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('Updated q_target with shape: {}'.format(q_network.shape))\n\n self.q_online.train_on_batch(state, q_network)\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('Training complete')\n\n self.epsilon = self.epsilon * self.epsilon_dec if self.epsilon > self.epsilon_end \\\n else self.epsilon\n\n def save_weights(self, model_path):\n if not model_path:\n model_path = self.model_file\n self.q_online.save_weights(model_path)\n\n def load_weights(self, model_path):\n if not model_path:\n model_path = self.model_file\n self.q_online.load_weights(model_path)\n","sub_path":"code/v2/rl_on_gcp/trainer/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"5578522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 14 20:17:58 2017\n\n@author: Karthikeya\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\n# Filenames as per tweet_postag.py\nst_train = [\"norm_trn_ath.csv\", \"norm_trn_cli.csv\", \"norm_trn_fem.csv\", \"norm_trn_hil.csv\", \"norm_trn_leg.csv\"]\nst_test = [\"norm_tst_ath.csv\", \"norm_tst_cli.csv\", \"norm_tst_fem.csv\", \"norm_tst_hil.csv\", \"norm_tst_leg.csv\"]\nacc_st = []\nfor i in range(5):\n trn_f = st_train[i]\n tst_f = st_test[i]\n \n train = pd.read_csv(trn_f, engine='python')\n test = pd.read_csv(tst_f, engine='python')\n \n train_features = train.iloc[:-1,:-1]\n train_target = train.iloc[:-1,-1]\n \n test_features = test.iloc[:-1,:-1]\n test_target = test.iloc[:-1,-1]\n \n clf = RandomForestClassifier()\n clf = clf.fit(train_features, train_target)\n \n pred_target = clf.predict(test_features)\n acc_st.append(accuracy_score(test_target, pred_target))\n print(accuracy_score(test_target, pred_target))","sub_path":"stance_random_forests.py","file_name":"stance_random_forests.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"590457472","text":"\"\"\"Wrapper code to test flatten functions.\r\n\"\"\"\r\nfrom compiler.ast import flatten\r\nimport random\r\nimport 
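# ---- editorial sketch (annotation; not part of the dataset records) ----
# The agent's learn() above builds its TD targets by fancy-indexing the online
# network's outputs with (batch_index, action). The same vectorised Bellman
# update, self-contained with made-up shapes:
import numpy as np

batch_size, gamma = 4, 0.99
q_online = np.random.rand(batch_size, 3)    # Q(s, .) from the online net
q_next = np.random.rand(batch_size, 3)      # Q(s', .) from the target net
action = np.array([0, 2, 1, 0])
reward = np.array([1.0, 0.0, 0.5, -1.0])
done = np.array([0, 0, 1, 0])

targets = q_online.copy()
idx = np.arange(batch_size)
# Bellman target r + gamma * max_a' Q(s', a'), zeroed on terminal transitions:
targets[idx, action] = reward + gamma * q_next.max(axis=1) * (1 - done)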
sys\r\nimport timeit\r\n\r\nfrom my_flatten import my_flatten\r\n\r\nrandom.seed(\"listoflists\")\r\n\r\nN_ITEMS = 10 ** 2\r\nTIMEIT_ITERATIONS = 10 ** 2\r\n\r\nrand_digit = lambda: random.randint(0,9)\r\ngo_deeper = lambda: random.choice([True, False])\r\n\r\n\r\ndef rand_list(curr_depth, max_len=5, max_depth=5):\r\n list_ = []\r\n for _ in range(random.choice(range(max_len + 1))):\r\n if go_deeper() and curr_depth < max_depth:\r\n list_.append(rand_list(curr_depth + 1))\r\n else:\r\n list_.append(rand_digit())\r\n return list_\r\n\r\n\r\ndef rand_list_of_lists(n_items):\r\n list_of_lists = []\r\n for _ in range(n_items):\r\n list_of_lists.append(rand_list(0))\r\n return list_of_lists\r\n\r\nif __name__ == \"__main__\":\r\n setup = \"\"\"\r\nfrom flatten_timeit import rand_list, rand_list_of_lists\r\nfrom my_flatten import my_flatten\r\n\r\nlol = rand_list_of_lists({0})\r\n\"\"\".format(N_ITEMS)\r\n stmt = \"my_flatten(lol)\"\r\n\r\n # check my_flatten first\r\n lol = rand_list_of_lists(N_ITEMS)\r\n assert flatten(lol) == my_flatten(lol), \"Your my_flatten did not flatten the list of lists properly.\"\r\n\r\n # time my_flatten second\r\n sys.stdout.write(\"Your my_flatten function took: {0} seconds to run {1} times.\\n\".format(\r\n timeit.timeit(stmt, setup=setup, number=TIMEIT_ITERATIONS), TIMEIT_ITERATIONS))\r\n","sub_path":"iterative/flatten_timeit.py","file_name":"flatten_timeit.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"549234868","text":"#!/usr/bin/env python\r\n# -*- coding: cp1251 -*-\r\n#\r\n# grep logs by max procedures timings, threads activities processed\r\n\r\n# return file system objects list\r\ndef getFsoList(fso_path):\r\n\r\n import os, time\r\n\r\n files = [os.path.join(fso_path, f) for f in os.listdir(fso_path)]\r\n\r\n # sort list by file time modified\r\n return sorted(files, key=lambda x: time.ctime(os.path.getmtime(x)))\r\n\r\n#\r\ndef miner(fso_list):\r\n\t\r\n import re\r\n from datetime import datetime\r\n from datetime import timedelta\r\n\r\n grape_di = {}\r\n result_li = []\r\n\r\n for fso_name in fso_list:\r\n f = open(fso_name, 'r')\r\n print (fso_name)\r\n for line in f:\r\n time_str = re.search(r'(^\\d{2}:\\d{2}:\\d{2},\\d{3})(.*\\.)(0x\\d{2}) - (В.*ходные) параметры вызова .*', line)\r\n if time_str:\r\n th_time = datetime.strptime(time_str.group(1), '%H:%M:%S,%f')\r\n th_num = int(time_str.group(3)[2:])\r\n th_queue = time_str.group(2)\r\n if time_str.group(4) == 'Входные':\r\n if th_num not in grape_di.keys():\r\n grape_di[th_num] = [[th_time, th_queue]]\r\n else:\r\n grape_di[th_num].append([th_time, th_queue])\r\n if time_str.group(4) == 'Выходные':\r\n if th_num in grape_di.keys():\r\n if grape_di[th_num] != []:\r\n for i in range(len(grape_di[th_num]),0,-1):\r\n if grape_di[th_num][i-1][1] == th_queue:\r\n last_time = grape_di[th_num][i-1][0]\r\n del grape_di[th_num][i-1]\r\n result_li.append(['{:%H:%M:%S}'.format(last_time), (th_time - last_time).seconds, th_num])\r\n break\r\n\r\n return sorted(result_li), max(grape_di.keys())\r\n\r\n\r\n# --select max(proc_timing) from (init_time, proc_time, th_num) group by init_time, th_num\r\ndef group_by(in_list):\r\n\r\n from itertools import groupby\r\n\r\n# for key, group in groupby(in_list, lambda x: x[0]):\r\n# print (key)\r\n# print (max(group, key=lambda x: x[1]))\r\n\r\n return [[max(group, key=lambda x: x[1])] for key, group in groupby(in_list, lambda x: x[0])]\r\n\r\n# convert to excel 
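# ---- editorial sketch (annotation; not part of the dataset records) ----
# The timing harness above imports a my_flatten module that is not included in
# this record. One plausible iterative implementation it could be benchmarking,
# using an explicit stack of iterators (an assumption, not the actual file):
def my_flatten(nested):
    # Depth-first, order-preserving flatten without recursion.
    out, stack = [], [iter(nested)]
    while stack:
        for item in stack[-1]:
            if isinstance(item, list):
                stack.append(iter(item))
                break
            out.append(item)
        else:
            stack.pop()
    return out

assert my_flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]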
view\r\ndef to_excel(in_list, max_th):\r\n\r\n out_list = []\r\n\r\n for i in in_list:\r\n tmp = [i[0][0]] + [''] * (max_th+1)\r\n tmp[i[0][2]+1] = str(i[0][1])\r\n out_list.append(tmp)\r\n\r\n return out_list\r\n\r\n# main module\r\ndef main():\r\n\r\n# inpath = 'c:/project1/grep/test4'\r\n# inpath = 'c:/project1/grep/test2'\r\n inpath = 'c:/project1/grep/in'\r\n\r\n # get files list\r\n in_fso_list = getFsoList(inpath)\r\n\r\n a,m = miner(in_fso_list)\r\n# print ('\\nthis is extractor action!')\r\n# for i in a: print (i)\r\n\r\n b = group_by(a)\r\n# print ('\\nthis is group by action!')\r\n# for i in b: print (i)\r\n \r\n c = to_excel(b,m)\r\n print ('\\nthis is excel view!')\r\n for i in c: print (i)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # main module call\r\n main()\r\n","sub_path":"src/grepit4_3.py","file_name":"grepit4_3.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"635378945","text":"from Karumanchi.Queue import Queue\n\ndef rearrange(input_que):\n temp_que = Queue.Queue1()\n is_length_odd = True if input_que.size%2 ==1 else False\n mid = input_que.size//2\n for i in range(mid):\n temp_que.enqueue(input_que.dequeue())\n while not temp_que.is_empty():\n input_que.enqueue(temp_que.dequeue())\n input_que.enqueue(input_que.dequeue())\n if is_length_odd:\n input_que.enqueue(input_que.dequeue())\n return input_que\n\nif __name__==\"__main__\":\n que = Queue.Queue1()\n for i in range(11,22):\n que.enqueue(i)\n rearrange(que)\n while not que.is_empty():\n print(que.dequeue())","sub_path":"Karumanchi/Queue/Rearrange.py","file_name":"Rearrange.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"393804757","text":"from pathlib import Path\nimport pytest\nimport subprocess\n\nTEST_CASES = [\n # Skip modules tests\n # \"../day3/modules/solutions/exercise1/exercise1.py\",\n # \"../day3/modules/solutions/exercise2/__init__.py\",\n # \"../day3/modules/solutions/exercise2/mod1.py\",\n # \"../day3/modules/solutions/exercise2/mod2.py\",\n # \"../day3/modules/solutions/exercise2/mod3.py\",\n # Skip linting tests\n # \"../day3/linting/solutions/exercise1_pylint.py\",\n # \"../day3/linting/solutions/devices.py\",\n # \"../day3/linting/solutions/exercise1_pep8.py\",\n # \"../day3/linting/solutions/exercise2.py\",\n \"../day3/parsers/solutions/exercise2.py\",\n \"../day3/parsers/solutions/exercise3.py\",\n \"../day3/parsers/solutions/exercise4.py\",\n \"../day3/serialization/solutions/exercise1.py\",\n \"../day3/serialization/solutions/exercise2.py\",\n \"../day3/serialization/solutions/exercise3.py\",\n \"../day3/data_struct/solutions/exercise1.py\",\n \"../day3/api/solutions/exercise1.py\",\n \"../day3/api/solutions/exercise2.py\",\n \"../day3/nxapi/solutions/exercise1.py\",\n \"../day3/nxapi/solutions/exercise2.py\",\n \"../day3/recap/solutions/exercise1.py\",\n]\n\n\ndef subprocess_runner(cmd_list, exercise_dir):\n with subprocess.Popen(\n cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=exercise_dir\n ) as proc:\n std_out, std_err = proc.communicate()\n return (std_out.decode(), std_err.decode(), proc.returncode)\n\n\ndef test_parsers_ex1():\n \"\"\"\n Execute textfsm.py:\n\n $ textfsm.py exercise1.tpl ex1_show_int_status.txt\n \"\"\"\n cmd_list = [\"textfsm.py\", \"exercise1.tpl\", \"ex1_show_int_status.txt\"]\n script_dir = \"../day3/parsers/solutions/\"\n std_out, std_err, 
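# ---- editorial sketch (annotation; not part of the dataset records) ----
# itertools.groupby, used by the log miner's group_by above, only groups
# *adjacent* items, so the sorted() applied to result_li is load-bearing.
# A small illustration of the max-per-key aggregation:
from itertools import groupby

rows = [('09:00', 3, 1), ('09:00', 7, 2), ('09:05', 2, 1)]  # (init_time, secs, thread)
worst = [max(g, key=lambda r: r[1]) for _, g in groupby(rows, key=lambda r: r[0])]
assert worst == [('09:00', 7, 2), ('09:05', 2, 1)]
# With unsorted input the same key can appear in several runs,
# silently splitting what should be one group.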
return_code = subprocess_runner(cmd_list, script_dir)\n assert return_code == 0\n assert std_err == \"\"\n\n\n@pytest.mark.parametrize(\"test_case\", TEST_CASES)\ndef test_runner(test_case):\n path_obj = Path(test_case)\n python_script = path_obj.name\n script_dir = path_obj.parents[0]\n cmd_list = [\"python\", python_script]\n std_out, std_err, return_code = subprocess_runner(cmd_list, script_dir)\n assert return_code == 0\n assert std_err == \"\"\n","sub_path":"tests/test_day3.py","file_name":"test_day3.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"236834479","text":"# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nfrom bancochile_cl.items import BancochileClItem\nfrom datetime import datetime\nfrom fuzzywuzzy import process\nfrom itertools import zip_longest\n\n\nclass BancoChileSpider(scrapy.Spider):\n\n # Name of the spider\n name = 'banco_chile'\n allowed_domains = ['bancochile.cl']\n \n # The 6 links holding all the required benefits\n start_urls = [\n 'https://ww3.bancochile.cl/wps/wcm/connect/Personas/Portal/programa-travel/beneficios/vestuario-calzado/',\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/beneficios/salud-y-belleza/portada\",\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/beneficios/hogar/hogar\",\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/beneficios/servicios/portada\",\n 'https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/panoramas/restaurantes/portada',\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/panoramas/entretencion/portada\"\n ]\n # Iterates over all start urls\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url=url, callback=self.parse, meta={\"from_url\": url})\n\n # Creates absolute links for parse_page to crawl, extract and output\n # Gets the category name and passes it the parse_page as meta \n def parse(self, response):\n\n from_url = response.meta['from_url']\n base_url = 'https://ww3.bancochile.cl'\n\n # Restaurantes benefits has different html structure so the url must be checked first\n if from_url == 'https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/panoramas/restaurantes/portada':\n links = response.xpath('//div[@class=\"content\"]/div/a/@href').extract()\n else:\n links = response.xpath('//div[@class=\"benef-cont\"]/a/@href').extract()\n\n # The category to be transfered as meta data to parse_page()\n categoria = response.xpath('//head/title/text()').extract_first()\n\n for i in links:\n # A URL from the bancochile website for buying tickets. Irrelevant to the other pages for scraping\n cine_url_to_avoid = \"/wps/wcm/connect/personas/portal/programa-travel/panoramas/entretencion/cines\"\n if i == cine_url_to_avoid:\n continue\n else:\n url = base_url + i\n yield scrapy.Request(url = url, callback=self.parse_page, meta={\"Categoria\": categoria})\n\n\n def parse_page(self, response):\n \n # TODO Logo for banco de chile\n pais = \"Chile\"\n\n # The logo of the benefit. Is a list since image_urls requires it being a list\n benefit_logo = ['https://ww3.bancochile.cl' + response.xpath('//div[@class=\"content\"]/div[@class=\"content-left\"]//@src').extract_first()]\n\n # Regex patterns to match available phone numbers. 
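# ---- editorial sketch (annotation; not part of the dataset records) ----
# The subprocess_runner helper above wraps Popen/communicate by hand. On
# Python >= 3.7 the same capture can be written with subprocess.run (an
# equivalent sketch, not a drop-in replacement for the test file):
import subprocess

def run_script(cmd_list, cwd='.'):
    proc = subprocess.run(cmd_list, capture_output=True, text=True, cwd=cwd)
    return proc.stdout, proc.stderr, proc.returncode

out, err, rc = run_script(['python', '-c', "print('ok')"])
assert rc == 0 and out.strip() == 'ok'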
Since there are different possibilities of written numbers, \n # different patterns are implemented and compiled as one\n phone_regex = re.compile(r'(\\d{11})|(\\d{10})|(\\d{9})|(\\d{8})|(\\d\\s\\d{8})|(\\d{2}\\s\\d{7})|(\\+\\d{2}\\-\\d\\-\\s\\d{4}\\-\\d{4})|(\\d{3}\\s\\d{6})|(\\(\\d\\)\\d{8})|(\\d{5}\\s\\d{4})|(\\(\\+\\d{3}\\)\\s\\d{8})|(\\(\\+\\d{4}\\)\\s\\d{4}\\s\\d{3})|(\\(\\+\\d{4}\\)\\s\\d{3}\\s\\d{4})|(\\+\\d{3}\\s\\d{8})|(\\(\\+\\d{4}\\)\\s\\d{3}\\s\\d{3})|(\\+\\d{2}\\-\\d{2}\\-\\d{6})|(\\+\\d{2}\\-\\d{2}\\-\\d{7})|(\\+\\d{2}\\-\\d{2}\\-\\d{3}\\-\\d{2}\\-\\d{2})|(\\+\\d{2}\\-\\d{2}\\-\\d{3}\\-\\d{4})|(\\(\\+\\d{3}\\)\\s\\d{4}\\s\\d{4})|(\\+\\d{2}\\-\\d\\-\\d{4}\\-\\d{4})|(\\+\\d{2}\\-\\d{2}\\-\\d{3}\\-\\d{3})|(\\(\\d\\)\\d{8})|(\\d{5}\\s\\d{4})|(\\d{2}\\-\\d{3}\\-\\d{3})|(\\+\\d{2}\\-\\d{2}\\–\\d{6})|(\\d{3}\\s\\d{4}\\s\\d{4})|(\\+\\d{2}\\s\\d\\s\\d{4}\\s\\d{4})')\n \n # Create a dictionary with each month and its corresponding number \n # Will be used if a month is located anywhere in the \"Valid until\" text and substituted with its number so the date in \n # starting and ending columns is properly outputted\n month_dict = {\n 'enero': \"01\" , 'febrero': \"02\", 'marzo': \"03\", 'abril': \"04\", 'mayo': \"05\", 'junio': \"06\", \n 'julio': \"07\", 'agosto': \"08\", 'septiembre': \"09\", 'octubre': \"10\", 'noviembre': \"11\", 'diciembre': \"12\"}\n # Exctract the name of the benefit\n nombre_del_beneficio = response.xpath('//h3//text()').extract()\n\n # We get the name of the business from the url \n nombre_del_comercio = response.url.split('/')[-1]\n\n # Extract the retailer decription\n descripcion_del_comercio = response.xpath('//section[@class=\"section-grey benef-ficha\"]/a/text()').extract_first()\n if response.xpath('//div[@class=\"content-right\"]/div/div[@class=\"ConDescu\"]'):\n terminos_y_condiciones_del_beneficio = response.xpath('//div[@class=\"content-right\"]/div/div/ul/li//text()').extract()\n \n else:\n terminos_y_condiciones_del_beneficio = response.xpath('//div[@class=\"content-right\"]/ul/li//text()').extract()\n\n # Extracts the whole paragraph ( to search for a percentage sign - % ). 
If unavailable - assigns the terminos_y_condiciones to it\n if not response.xpath('//div[@class=\"content-right\"]/p//text()'):\n percentage_paragraph = terminos_y_condiciones_del_beneficio\n else:\n percentage_paragraph = response.xpath('//div[@class=\"content-right\"]/p//text()').extract()\n \n # Get the terminos y condiciones text \n # Also to search for % sign or email below in the iteration\n email = \"\"\n \n # Assign an empty string variable for the type of benefit\n tipo_de_beneficio = \"\"\n\n # Assign an empty string for the retail website URL \n web_comercio = \"\"\n # String searching is implemented below so the index of the % sign is needed if found\n index_of_percent = 0\n first_index = 0\n \n # Iterate over the name of the benefit, the paragraph and the unordered list to search for the % sign\n # If found in any of them, extract 2 previous indexes to get the whole number % - 20%, 50% etc.\n # Or if \"Dólares-Premio\" found in any of them it will be assigned as tipo_de_beneficio\n tipo_found = False\n for i,j,k in zip_longest(nombre_del_beneficio, percentage_paragraph, terminos_y_condiciones_del_beneficio):\n if i:\n if '%' in i:\n tipo_found = True\n index_of_percent = i.index('%')\n first_index = index_of_percent - 2\n tipo_de_beneficio = str(i[first_index: index_of_percent + 1]) + \" de descuento\"\n elif \"Dólares-Premio\" in i:\n tipo_found = True\n tipo_de_beneficio = nombre_del_beneficio\n if not tipo_found:\n if j:\n if '%' in j:\n tipo_found = True\n index_of_percent = j.index('%')\n first_index = index_of_percent - 2\n tipo_de_beneficio = str(j[first_index: index_of_percent + 1]) + \" de descuento\"\n elif \"Dólares-Premio\" in j:\n tipo_found = True\n tipo_de_beneficio = nombre_del_beneficio\n if not tipo_found:\n if k:\n if '%' in k:\n tipo_found = True\n index_of_percent = k.index('%')\n first_index = index_of_percent - 2\n tipo_de_beneficio = str(k[first_index: index_of_percent + 1]) + \" de descuento\"\n elif \"Dólares-Premio\" in k:\n tipo_found = True\n tipo_de_beneficio = nombre_del_beneficio\n\n # Iterate over all of the 3 for a website URL\n # Iterate over the name of the benefit, the paragraph and the unordered list to search for the website\n # If found in any - the string will be split so only the website is extracted\n # Assign it to its variable accordingly afterwards\n for i,j,k in zip_longest(nombre_del_beneficio, percentage_paragraph, terminos_y_condiciones_del_beneficio):\n if i:\n if 'www' in i:\n new_list = i.split()\n for string in new_list:\n if 'www' in string:\n web_comercio = string\n elif j:\n if 'www' in j:\n new_list = j.split()\n for string in new_list:\n if 'www' in string:\n web_comercio = string\n elif k:\n if 'www' in k:\n new_list = k.split()\n for string in new_list:\n if 'www' in string:\n web_comercio = string\n\n # Exctract the benefit description and remove trailing newlines and whitespaces from the string\n descripcion_del_beneficio = response.xpath('//div[@class=\"content-bottom\"]/p//text()').extract_first().strip()\n\n # Find where in descripcion_del_beneficio is the first '2020' or '2021' located. 
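# ---- editorial sketch (annotation; not part of the dataset records) ----
# The three near-identical '%'-scanning branches above can collapse into one
# pass over the chained string groups. A condensed equivalent (illustrative
# names; note the fixed two-character slice, like the original's, misreads
# single-digit and three-digit percentages):
from itertools import chain

def find_discount(*string_groups):
    # First '%' wins; grab up to two preceding characters for the figure.
    for s in chain.from_iterable(g or [] for g in string_groups):
        if s and '%' in s:
            i = s.index('%')
            return s[max(0, i - 2):i + 1] + ' de descuento'
    return ''

assert find_discount(['Hasta 20% en tiendas'], None) == '20% de descuento'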
It is the only constant found in each benefit's description string\n # Split the descripcion_del_beneficio on '2020' or on '2021' and take the first part\n year = \"\"\n promocion_valida = \"\"\n if '2020' in descripcion_del_beneficio:\n promocion_valida = descripcion_del_beneficio.split('2020')[0]\n year = '2020'\n elif '2021'in descripcion_del_beneficio:\n promocion_valida = descripcion_del_beneficio.split('2021')[0]\n year = '2021'\n # Check for 'desde ' - if found - there is a starting date. So far there are only 2 possiblities listed on the website for the promocion_valida string:\n # Either it has 'desde ' or it has 'hasta ' so we search for either one of them\n # If 'desde' is found - split the string on it, take the second part and make a list out of it for iteration below\n if 'desde ' in promocion_valida:\n promocion_valida = promocion_valida.split('desde ')[1].split()\n \n # If 'hasta ' is found - split the string on it, take the second part and make a list out of it for iteration below\n elif 'hasta ' in promocion_valida:\n promocion_valida = promocion_valida.split('hasta ')[1].split()\n\n # Search for 'el', 'al', 'de' and 'del' keywords and if found - remove them so only the actual dates are left in the list( promocion_valida )\n for i in promocion_valida:\n if 'el' in promocion_valida:\n promocion_valida.remove('el')\n elif 'al' in promocion_valida:\n promocion_valida.remove('al')\n elif 'de' in promocion_valida:\n promocion_valida.remove('de')\n elif 'del' in promocion_valida:\n promocion_valida.remove('del')\n\n # Substitute each month found in promocion_valida list with its corresponding number\n for k, v in month_dict.items():\n if k in promocion_valida:\n subs = promocion_valida.index(k)\n promocion_valida[subs] = str(month_dict[k])\n\n # Assign fetcha_de_initio to today's date. 
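# ---- editorial sketch (annotation; not part of the dataset records) ----
# The validity-window tokens built above are turned into ISO dates just below
# by branching on token count. The whole round trip, condensed and standalone
# (month table abbreviated; helper name is hypothetical):
MONTHS = {'enero': '01', 'marzo': '03', 'abril': '04'}

def parse_window(text, year='2020'):
    tokens = [t for t in text.split() if t not in ('el', 'al', 'de', 'del')]
    tokens = [MONTHS.get(t, t) for t in tokens]
    if len(tokens) == 4:    # day month day month
        return ('%s-%s-%s' % (year, tokens[1], tokens[0]),
                '%s-%s-%s' % (year, tokens[3], tokens[2]))
    if len(tokens) == 3:    # day day month -> shared month
        return ('%s-%s-%s' % (year, tokens[2], tokens[0]),
                '%s-%s-%s' % (year, tokens[2], tokens[1]))
    # two tokens: only an end date ('hasta' case); start defaults to today
    return None, '%s-%s-%s' % (year, tokens[1], tokens[0])

assert parse_window('14 de marzo al 20 de abril') == ('2020-03-14', '2020-04-20')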
If another starting date is found in the promocion valida list, this will be substituted.\n fecha_de_initio = datetime.today().strftime('%Y-%m-%d')\n fecha_de_termino = \"\"\n\n # Checks the length of promocion valida:\n # If the length == 4 then there is a month for fecha_de_initio and a month for fecha_de_termino\n # If the length == 3 then both fecha_de_initio and fecha_de_termino have the same month\n # If the length == 2, fecha_de_initio stays set to today's date and only fecha_de_termino is set according to the benefit's information\n if len(promocion_valida) == 2:\n fecha_de_termino = f'{year}-' + promocion_valida[1] + '-' + promocion_valida[0]\n elif len(promocion_valida) == 3:\n fecha_de_initio = f'{year}-' + promocion_valida[2] + '-' + promocion_valida[0]\n fecha_de_termino = f'{year}-' + promocion_valida[2] + '-' + promocion_valida[1]\n elif len(promocion_valida) == 4:\n fecha_de_initio = f'{year}-' + promocion_valida[1] + '-' + promocion_valida[0]\n fecha_de_termino = f'{year}-' + promocion_valida[3] + '-' + promocion_valida[2]\n\n # Extract the type of program\n programa_de_beneficios = response.xpath('//div[@class=\"navbar-header\"]//img/@title').extract_first()\n # Company of the Benefit Program\n empresa_del_programa = \"Banco de Chile\"\n\n # A nested dictionary holding all counties with their respective cities and regions in Chile\n chile_communas = {\n \"Arica and Parinacota\": {\n \"Arica\": [\"Camarones\", \"Arica\"],\n \"Parinacota\": [\"Putre\", \"General Lagos\"]\n },\n \"Tarapacá\": {\n \"Iquique\": [\"Iquique\", \"Alto Hospicio\"],\n \"Tamarugal\": [\"Pozo Almonte\", \"Pica\", \"Huara\", \"Colchane\", \"Camiña\"]\n },\n \"Antofagasta\": {\n \"Antofagasta\": [\"Taltal\", \"Sierra Gorda\", \"Mejillones\", \"Antofagasta\"],\n \"El Loa\": [\"San Pedro de Atacama\", \"Ollagüe\", \"Calama\"],\n \"Tocopilla\": [\"Tocopilla\", \"María Elena\"]\n },\n \"Atacama\": {\n \"Chañaral\": [\"Diego de Almagro\", \"Chañaral\"],\n \"Copiapó\": [\"Tierra Amarilla\", \"Copiapó\", \"Caldera\"],\n \"Huasco\": [\"Vallenar\", \"Huasco\", \"Freirina\", \"Alto del Carmen\"]\n },\n \"Coquimbo\": {\n \"Choapa\": [\"Salamanca\", \"Los Vilos\", \"Illapel\", \"Canela\"],\n \"Elqui\": [\"Vicuña\", \"Paiguano\", \"La Serena\", \"La Higuera\", \"Coquimbo\", \"Andacollo\"],\n \"Limarí\": [\"Río Hurtado\", \"Punitaqui\", \"Ovalle\", \"Monte Patria\", \"Combarbalá\"]\n },\n \"Valparaíso\": {\n \"Isla de Pascua\": [\"Isla de Pascua\"],\n \"Los Andes\": [\"San Esteban\", \"Rinconada\", \"Los Andes\", \"Calle Larga\"],\n \"Marga Marga\": [\"Villa Alemana\", \"Quilpué\", \"Limache\", \"Olmué\"],\n \"Petorca\": [\"Zapallar\", \"Petorca\", \"Papudo\", \"La Ligua\", \"Cabildo\"],\n \"Quillota\": [\"Quillota\", \"Nogales\", \"La Cruz\", \"La Calera\", \"Hijuelas\"],\n \"San Antonio\": [\"Santo Domingo\", \"San Antonio\", \"El Tabo\", \"El Quisco\", \"Cartagena\", \"Algarrobo\"],\n \"San Felipe\": [\"Santa María\", \"San Felipe\", \"Putaendo\", \"Panquehue\", \"Llaillay\", \"Catemu\"],\n \"Valparaíso\": [\"Viña del Mar\", \"Valparaíso\", \"Quintero\", \"Puchuncaví\", \"Concón\", \"Juan Fernández\", \"Casablanca\"]\n },\n \"Metropolitana\": {\n \"Chacabuco\": [\"Tiltil\", \"Lampa\", \"Colina\"],\n \"Cordillera\": [\"San José de Maipo\", \"Puente Alto\", \"Pirque\"],\n \"Maipo\": [\"San Bernardo\", \"Paine\", \"Calera de Tango\", \"Buin\"],\n \"Melipilla\": [\"San Pedro\", \"Melipilla\", \"María Pinto\", \"Curacaví\", \"Alhué\"],\n \"Santiago\": [\n \"Vitacura\", \"Santiago\", \"San Ramón\", \"San Miguel\", 
\"San Joaquín\", \"Renca\", \"Recoleta\", \"Quinta Normal\", \"Quilicura\", \"Pudahuel\",\n \"Providencia\", \"Peñalolén\", \"Pedro Aguirre Cerda\", \"Ñuñoa\", \"Maipú\", \"Macul\", \"Lo Prado\", \"Lo Espejo\", \"Lo Barnechea\", \"Las Condes\", \n \"La Reina\", \"La Pintana\", \"La Granja\", \"La Florida\", \"La Cisterna\", \"Independencia\", \"Huechuraba\", \"Estación Central\", \"El Bosque\", \"Conchalí\",\n \"Cerro Navia\", \"Cerrillos\"\n ],\n \"Talagante\":[\"Talagante\", \"Peñaflor\", \"Padre Hurtado\", \"Isla de Maipo\", \"El Monte\"]\n },\n \"O'Higgins\": {\n \"Cachapoal\": [\n \"San Vicente\", \"Requínoa\", \"Rengo\", \"Rancagua\", \"Quinta de Tilcoco\", \"Pichidegua\", \"Peumo\", \"Olivar\", \"Mostazal\", \n \"Malloa\", \"Machalí\", \"Las Cabras\", \"Graneros\", \"Doñihue\", \"Coltauco\", \"Coinco\", \"Codegua\"\n ],\n \"Cardenal Caro\": [\"Pichilemu\", \"Paredones\", \"Navidad\", \"Marchihue\", \"Litueche\", \"La Estrella\"],\n \"Colchagua\": [\"Santa Cruz\", \"San Fernando\", \"Pumanque\", \"Placilla\", \"Peralillo\", \"Palmilla\", \"Nancagua\", \"Lolol\", \"Chimbarongo\", \"Chépica\"]\n },\n \"Maule\": {\n \"Cauquenes\": [\"Pelluhue\", \"Chanco\", \"Cauquenes\"],\n \"Curicó\": [\"Vichuquén\", \"Teno\", \"Sagrada Familia\", \"Romeral\", \"Rauco\", \"Molina\", \"Licantén\", \"Hualañé\", \"Curicó\"],\n \"Linares\": [\"Yerbas Buenas\", \"Villa Alegre\", \"San Javier\", \"Retiro\", \"Parral\", \"Longaví\", \"Linares\", \"Colbún\"],\n \"Talca\": [\"Talca\", \"San Rafael\", \"San Clemente\", \"Río Claro\", \"Pencahue\", \"Pelarco\", \"Maule\", \"Empedrado\", \"Curepto\", \"Constitución\"]\n },\n \"Ñuble\": {\n \"Diguillín\": [\"Chillán Viejo\", \"Chillán\", \"Bulnes\", \"El Carmen\", \"Pemuco\", \"Pinto\", \"Quillón\", \"San Ignacio\", \"Yungay\"],\n \"Itata\": [\"Cobquecura\", \"Coelemu\", \"Ninhue\", \"Portezuelo\", \"Quirihue\", \"Ránquil\", \"Treguaco\"],\n \"Punilla\": [\"Coihueco\", \"Ñiquén\", \"San Carlos\", \"San Fabián\", \"San Nicolás\"]\n },\n \"Biobío\": {\n \"Arauco\": [\"Tirúa\", \"Los Álamos\", \"Lebu\", \"Curanilahue\", \"Contulmo\", \"Cañete\", \"Arauco\"],\n \"Biobío\": [\n \"Yumbel\", \"Tucapel\", \"Santa Bárbara\", \"San Rosendo\", \"Quilleco\", \"Quilaco\", \"Negrete\", \n \"Nacimiento\", \"Mulchén\", \"Los Ángeles\", \"Laja\", \"Cabrero\", \"Antuco\", \"Alto Biobío\"\n ],\n \"Concepción\": [\n \"Tomé\", \"Talcahuano\", \"Santa Juana\", \"San Pedro de la Paz\", \"Penco\", \"Lota\", \n \"Hualqui\", \"Hualpén\", \"Florida\", \"Coronel\", \"Concepción\", \"Chiguayante\"\n ]\n },\n \"Araucanía\": {\n \"Cautín\": [\n \"Villarrica\", \"Vilcún\", \"Toltén\", \"Teodoro Schmidt\", \"Temuco\", \"Saavedra\", \"Pucón\", \n \"Pitrufquén\", \"Perquenco\", \"Padre Las Casas\", \"Nueva Imperial\", \"Melipeuco\", \"Loncoche\", \"Lautaro\",\n \"Gorbea\", \"Galvarino\", \"Freire\", \"Curarrehue\", \"Cunco\", \"Cholchol\", \"Carahue\"\n ],\n \"Malleco\": [\"Victoria\", \"Traiguén\", \"Renaico\", \"Purén\", \"Lumaco\", \"Los Sauces\", \"Lonquimay\", \"Ercilla\", \"Curacautín\", \"Collipulli\", \"Angol\"],\n },\n \"Los Ríos\": {\n \"Ranco\": [\"Río Bueno\", \"Lago Ranco\", \"La Unión\", \"Futrono\"],\n \"Valdivia\": [\"Valdivia\", \"Panguipulli\", \"Paillaco\", \"Mariquina\", \"Máfil\", \"Los Lagos\", \"Lanco\", \"Corral\"]\n },\n \"Los Lagos\": {\n \"Chiloé\": [\"Quinchao\", \"Quemchi\", \"Quellón\", \"Queilén\", \"Puqueldón\", \"Dalcahue\", \"Curaco de Vélez\", \"Chonchi\", \"Castro\", \"Ancud\"],\n \"Llanquihue\": [\"Puerto Varas\", \"Puerto Montt\", \"Maullín\", \"Los Muermos\", 
\"Llanquihue\", \"Frutillar\", \"Fresia\", \"Cochamó\", \"Calbuco\"],\n \"Osorno\": [\"San Pablo\", \"San Juan de la Costa\", \"Río Negro\", \"Puyehue\", \"Purranque\", \"Puerto Octay\", \"Osorno\"],\n \"Palena\": [\"Palena\", \"Hualaihué\", \"Futaleufú\", \"Chaitén\"]\n },\n \"Aysén\": {\n \"Aysén\": [\"Guaitecas\", \"Cisnes\", \"Aysén\"],\n \"Capitán Prat\": [\"Tortel\", \"O'Higgins\", \"Cochrane\"],\n \"Coyhaique\": [\"Lago Verde\", \"Coihaique\"],\n \"General Carrera\": [\"Río Ibáñez\", \"Chile Chico\"]\n },\n \"Magallanes\": {\n \"Antártica Chilena\": [\"Cabo de Hornos\", \"Antártica\"],\n \"Magallanes\": [\"San Gregorio\", \"Río Verde\", \"Punta Arenas\", \"Laguna Blanca\"],\n \"Tierra del Fuego\": [\"Timaukel\", \"Primavera\", \"Porvenir\"],\n \"Última Esperanza\": [\"Torres del Paine\", \"Natales\"]\n }\n\n }\n\n all_cities = []\n # Gets all cities in a list for lighter search\n for d1, d2 in chile_communas.items():\n for k, v in d2.items():\n all_cities.append(k)\n\n # Gets the html directions box\n direction_box = \"\"\n if response.xpath('//div[@class=\"contBlokAcordeon\"]'):\n direction_box = response.xpath('//div[@class=\"contBlokAcordeon\"]')\n\n # Checks how many \"Direcciones\" are listed so one can iterate over them - usually one or two ( ex. Santiago, Regiones )\n # Gets the direction name to run a light city search instead of full - i.e. if \"Santiago\" --> get it's counties and only iterate over them\n first_direction_name = \"\"\n first_direction_box = \"\"\n second_direction_name = \"\"\n second_direction_box = \"\"\n if direction_box:\n \n if len(direction_box) == 1:\n first_direction_box = direction_box[0].xpath('.//ul/li')\n first_direction_name = direction_box[0].xpath('./div[@class=\"ContTitulo\"]/h2/text()').extract_first() # Get the first name ( ex Santiago )\n elif len(direction_box) == 2:\n first_direction_box = direction_box[0].xpath('.//ul/li')\n first_direction_name = direction_box[0].xpath('./div[@class=\"ContTitulo\"]/h2/text()').extract_first()\n second_direction_box = direction_box[1].xpath('.//ul/li')\n second_direction_name = direction_box[1].xpath('./div[@class=\"ContTitulo\"]/h2/text()').extract_first() # Get the second name (ex. 
Regiones )\n\n # Assign an empty value for a city\n city_found = False\n # A list to check how many cities match the search below\n total_cities_found = []\n # A list to hold the addresses\n address_list = []\n # A dictionary to hold the counties if a city is matched\n my_dict = {}\n \n # Checks if \"Direcciones\" exists on the page\n if first_direction_name:\n\n # Assigns addresses and strips them of empty spaces\n for i in first_direction_box:\n address = i.xpath('.//text()').extract()\n address = [j.strip() for j in address]\n address_list.append(address)\n\n # Flattens the nested lists and concatenate the strings in the addresses lists so the phone numbers correspond to their listed address\n joined_list = [' '.join(x) for x in address_list]\n\n # A list of words for string matching\n phone_words = [\"Teléfono:\" , \"Reservas al fono\", \"Teléfono\", \"Reservas:\", \"Reservas al\", \"Reservas\", \"Tel\"]\n\n # Creates a dictionary to add the address with it's corresponding number if available\n address_phone_dict = {}\n\n # Used below to check if a city is matched\n city_match = \"\"\n\n # Checks for a phone-related word and website - if found - gets the phone number and/ or website name\n for word in phone_words:\n for address in joined_list:\n\n # Checks for a website\n if 'www' in address:\n web_comercio = address\n joined_list.remove(address)\n else:\n if word in address:\n address_only = address.split(word)[0]\n phone = address.split(word)[1]\n joined_list.remove(address)\n cleaned_phone_number = phone.replace(\"(\", \"\").replace(\")\", \"\").replace(\"+\", \"\").replace(\"-\",\"\").replace(\":\", \"\")\n if address_only not in address_phone_dict.keys():\n address_phone_dict[address_only] = cleaned_phone_number\n \n # Checks for a phone regex match if the previous for loop didn't missed a match\n for address in joined_list:\n # Searches through all the addresses\n phone_number = \"\"\n\n # Search for a matching regex pattern for a phone number \n # If a matching regex is found - assigns it as the value to the address key in the address_phone_dict\n # Checks to see if the \"Local\" keyword exists right before the found number to avoid writing part of the address\n # i.e. Local A200-A202-A204\n # (? with the features names\n '''\n if verbose:\n print('Fetching {:}...'.format(corpus_exception_file))\n \n corpus_exceptions_path = '{:}{:}'.format(CORPUS_EXCEPTIONS_DIR, corpus_exception_file)\n df = pd.read_csv(corpus_exceptions_path, sep='\\t')\n if verbose:\n print('Fetching {:}...done'.format(corpus_exception_file)) \n\n return set(df['TOKEN'].values)\n\n\n\ndef preprocess(lexicon, word2vec, verbose=True): \n '''\n 1. for NER entities within exception file\n replace by the tag organization, person, location\n 2. for smaller than 5 tokens replace by one hot encoding \n 3. include time i.e 20h30, 9h in number embeddings '0'\n 4. include ordinals 2º 2ª in number embeddings '0'\n 5. 
include tel._38-4048 in numeber embeddings '0'\n\n New Word embedding size = embedding size + one-hot enconding of 2 \n '''\n # define outputs\n total_words = len(lexicon)\n lexicon2token = dict(zip(lexicon, ['unk']*total_words))\n\n # fetch exceptions list\n pers = fetch_corpus_exceptions('corpus-word-missing-pers.txt', verbose=verbose)\n locs = fetch_corpus_exceptions('corpus-word-missing-locs.txt', verbose=verbose)\n orgs = fetch_corpus_exceptions('corpus-word-missing-orgs.txt', verbose=verbose)\n\n\n #define regex\n re_punctuation = re.compile(r'[{:}]'.format(string.punctuation), re.UNICODE)\n re_number = re.compile(r'^\\d+$')\n re_tel = re.compile(r'^tel\\._')\n re_time = re.compile(r'^\\d{1,2}h\\d{0,2}$')\n re_ordinals = re.compile(r'º|ª')\n\n for word in list(lexicon):\n # some hiffenized words belong to embeddings\n # ex: super-homem, fim-de-semana, pré-qualificar, caça-níqueis\n token = word.lower() \n if token in word2vec: \n lexicon2token[word]= token\n else:\n # if word in ['Rede_Globo', 'Hong_Kong', 'Banco_Central']:\n token = re_tel.sub('', token)\n token = re_ordinals.sub('', token)\n token = re_punctuation.sub('', token)\n\n token = re_time.sub('0', token)\n token = re_number.sub('0', token)\n\n if token in word2vec:\n lexicon2token[word]= token.lower()\n else:\n if word in pers:\n lexicon2token[word] = 'pessoa'\n else:\n if word in orgs:\n lexicon2token[word] = 'organização'\n else:\n if word in locs:\n lexicon2token[word] = 'local'\n\n total_tokens = len([val for val in lexicon2token.values() if not val in ('unk')])\n if verbose:\n print('Preprocess finished. Found {:} of {:} words, missing {:.2f}%'.format(total_tokens,\n total_words, 100*float(total_words-total_tokens)/ total_words)) \n\n return lexicon2token\n\ndef get_index(columns_list, columns_dims_dict, column_name):\n '''\n Returns column index from descriptor\n args:\n columns_list .: list input features + target\n columns_dims_dict .: dict holding the columns\n column_name .: str name of the column to get the index from\n\n returns:\n '''\n features_set = set(config.CATEGORICAL_FEATURES).union(config.EMBEDDED_FEATURES)\n used_set = set(columns_list)\n descriptor_list = sorted(list(features_set - used_set))\n index = 0\n for descriptor in descriptor_list:\n if descriptor == column_name:\n break\n else:\n index += columns_dims_dict[descriptor]\n return index\n\n\ndef get_dims(labels_list, labels_dim_dict):\n return sum([labels_dim_dict[label] for label in labels_list])\n\n\ndef get_binary(ds_type, embeddings, version='1.0'):\n if ds_type not in ('train', 'test', 'valid', 'deep'):\n raise ValueError('Invalid dataset label {:}'.format(ds_type))\n\n prefix = '' if ds_type in ('deep') else 'db'\n ext = 'pickle' if ds_type in ('deep') else 'tfrecords'\n dbname = '{:}{:}_{:}.{:}'.format(prefix, ds_type, embeddings, ext)\n return '{:}{:}/{:}'.format(INPUT_DIR, version, dbname)\n\n\ndef get_db_bounds(ds_type, version='1.0'):\n '''Returns upper and lower bound proposition for dataset\n\n Dataset breakdowns are done by partioning of the propositions\n\n Arguments:\n ds_type {str} -- Dataset type this must be `train`, `valid`, `test`\n\n Retuns:\n bounds {tuple({int}, {int})} -- Tuple with lower and upper proposition\n for ds_type\n\n Raises:\n ValueError -- [description]\n '''\n ds_tuple = ('train', 'valid', 'test',)\n version_tuple = ('1.0', '1.1',)\n\n if not(ds_type in ds_tuple):\n _msg = 'ds_type must be in {:} got \\'{:}\\''\n _msg = _msg.format(ds_tuple, ds_type)\n raise ValueError(_msg)\n\n if not(version in 
version_tuple):\n _msg = 'version must be in {:} got \\'{:}\\''\n _msg = _msg.format(version_tuple, version)\n raise ValueError(_msg)\n else:\n size_dict = config.DATASET_PROPOSITION_DICT[version]\n\n lb = 1\n ub = size_dict['train']\n\n if ds_type == 'train':\n return lb, ub\n else:\n lb += ub\n ub += size_dict['valid']\n if ds_type == 'valid':\n return lb, ub\n elif ds_type == 'test':\n lb += ub\n ub += size_dict['test']\n return lb, ub","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"267013272","text":"import sys\nsys.stdin = open('input.txt','r')\nT = int(input())\n\n\ndef Aton(inputlist):\n alist = [\"ZRO\", \"ONE\", \"TWO\", \"THR\", \"FOR\", \"FIV\", \"SIX\", \"SVN\", \"EGT\", \"NIN\"]\n result = []\n for j in inputlist:\n for i, v in enumerate(alist):\n if j == v:\n result.append(i)\n return result\n\ndef NtoA(inputlist):\n alist = [\"ZRO\", \"ONE\", \"TWO\", \"THR\", \"FOR\", \"FIV\", \"SIX\", \"SVN\", \"EGT\", \"NIN\"]\n result = []\n for j in inputlist:\n for i, v in enumerate(alist):\n if j == i:\n result.append(alist[i])\n return result\n\nfor tc in range(1,T+1):\n n = list(input().split())[1]\n\n\n a = list(input().split())\n a = sorted(Aton(a))\n b = NtoA(a)\n print(f'#{tc}')\n for i in b:\n print(i, end=\" \")","sub_path":"190220/aton/이중봉.py","file_name":"이중봉.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"636855074","text":"import Histogram\nimport HistogramMain\n\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport signal\nfrom scipy import stats\nimport sys\nimport operator\n\ndef process_args():\n # Required arguments - input file, D T L costs\n parser = argparse.ArgumentParser(\"\")\n parser.add_argument(\"--input\", metavar=\"\", required=True,\n help=\"The path to a folder of .newick files.\")\n parser.add_argument(\"-d\", type=int, metavar=\"\", required=True,\n help=\"The relative cost of a duplication.\")\n parser.add_argument(\"-t\", type=int, metavar=\"\", required=True,\n help=\"The relative cost of a transfer.\")\n parser.add_argument(\"-l\", type=int, metavar=\"\", required=True,\n help=\"The relative cost of a loss.\")\n parser.add_argument(\"--timeout\", type=int, metavar=\"\", required=False, default=300,\n help=\"The amount of time a single tree file can run before timing out.\")\n parser.add_argument(\"--min-mprs\", type=int, metavar=\"<#MPRs>\", required=False, default=10000,\n help=\"The minimum number of MPRs a reconciliation must have to use it.\")\n args = parser.parse_args()\n return args\n\nclass TimeoutError(Exception):\n pass\n\ndef timeout_handler(signum, frame):\n raise TimeoutError\n\ndef sample_hist(hist, n):\n s_v = sum(list(hist.values()))\n # Do not sample if the histogram is smaller than the desired sample size\n if s_v < n:\n # The population for each key\n key_population = [[k]*v for k,v in hist.items()]\n # Flatten that list\n r = reduce(operator.concat, key_population)\n return r\n else:\n k = list(hist.keys())\n v = list(hist.values())\n # Convert v to a probability distribution\n p_v = [float(i)/s_v for i in v]\n # would use random.choices in 3.6\n return np.random.choice(k, n, p=p_v)\n\ndef hierarchical_cluster(hists):\n pass\n\n#TODO: consider smoothing\n# Although smoothing will remove even/odd parity differences.\ndef hist_to_array(hist):\n n = 
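# ---- editorial sketch (annotation; not part of the dataset records) ----
# preprocess() in models/utils.py above canonicalises out-of-vocabulary tokens
# with a regex cascade before falling back to NER classes. The core
# normalisation, isolated with a stubbed vocabulary; note re.escape() here,
# since the original interpolates string.punctuation (which contains ']' and
# '\') straight into a character class:
import re
import string

re_punct = re.compile('[%s]' % re.escape(string.punctuation))
re_time = re.compile(r'^\d{1,2}h\d{0,2}$')
re_num = re.compile(r'^\d+$')

def normalize(token, vocab):
    t = token.lower()
    if t in vocab:
        return t
    t = re_punct.sub('', t)
    t = re_time.sub('0', t)   # 20h30, 9h -> number token
    t = re_num.sub('0', t)    # any digit run -> number token
    return t if t in vocab else 'unk'

vocab = {'0', 'banco'}
assert normalize('20h30', vocab) == '0'
assert normalize('Banco', vocab) == 'banco'
assert normalize('???', vocab) == 'unk'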
max(list(hist.keys()))\n l = []\n for i in range(n):\n if i in hist:\n l.append(hist[i])\n else:\n l.append(0)\n return np.array(l)\n\n# Shift-invariant Jensen-Shannon distance\ndef array_dist(a1, a2):\n # Begin by aligning the arrays via correlation\n c = np.correlate(a1, a2, mode='same')\n m = np.argmax(c)[0]\n shift = np.array([0] * m)\n l1 = len(a1)\n l2 = len(a2)\n if l1 < l2:\n new_a1 = np.concatenate(shift, a1)\n new_a2 = a2\n else:\n new_a1 = a1\n new_a2 = np.concatenate(shift, a2)\n return scipy.spatial.distance.jensenshannon(new_a1, new_a2)\n\ndef find_hists(pathstr, d, t, l, timeout=10, min_mprs=0, normalize=False, zero_loss=False):\n p = Path(pathstr)\n all_files = [f for f in p.glob(\"**/*\") if f.is_file()]\n tree_files = [f for f in all_files if f.suffix == \".newick\"]\n filenames = []\n histograms = []\n times = []\n for (i, f) in enumerate(tree_files):\n sys.stdout.write(\"{}/{}\\r\".format(i, len(tree_files)))\n sys.stdout.flush()\n # Time out if it's taking too long to calculate the histogram\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(timeout)\n try:\n hist, time = HistogramMain.calc_histogram(str(f), d,t,l, True, normalize, zero_loss)\n except TimeoutError:\n print(\"\")\n print(\"{} timed out\".format(f))\n continue\n except AssertionError:\n print(\"\")\n print(\"{} asserted\".format(f))\n continue\n signal.alarm(0)\n h_d = hist.histogram_dict\n s_v = sum(list(h_d.values()))\n # 20 is the minimum sample size for statistical testing to make sense\n # Also make sure it has above the minimum number of MPRs\n if s_v >= 20 and h_d[0] > min_mprs:\n filenames.append(f)\n histograms.append(hist)\n times.append(time)\n print(\"\")\n return filenames, histograms, times\n\ndef normality(hist_sample):\n # p is 1-probability of rejecting null hypothesis\n # So if p < 0.05 we can say with 95% confidence that the sample is not normal\n _,p = stats.normaltest(hist_sample)\n return p\n\ndef normal_sort(names, hists):\n samples = [sample_hist(h, 10000) for h in hists]\n normalities = [normality(s) for s in samples]\n z = zip(names, hists, samples, normalities)\n z = sorted(z, key=lambda x: x[3])\n return z\n\nif __name__ == \"__main__\":\n args = process_args()\n names, hists, times = find_hists(args.input, args.d, args.t, args.l, timeout=args.timeout, min_mprs=10000, normalize=True, zero_loss=True)\n print(\"DATA\")\n # Compute the timing information\n print(\"Timing:\")\n for i in range(len(names)):\n print(str(names[i]), times[i])\n time_mean = np.mean(times)\n time_std = np.std(times)\n time_max = np.max(times)\n print(\"Time:\")\n print(\"Mean: {}\".format(time_mean))\n print(\"Standard Deviation: {}\".format(time_std))\n print(\"Maximum: {}\".format(time_max))\n # Find the mean and standard deviation of the histograms\n all_hists = Histogram.Histogram.sum(hists)\n m = all_hists.mean()\n s = all_hists.standard_deviation()\n print(\"Distance:\")\n print(\"Mean: {}\".format(m))\n print(\"Standard Deviation: {}\".format(s))\n # Sort them by normality\n hist_ds = [h.histogram_dict for h in hists]\n l = normal_sort(names, hist_ds)\n print(\"Normality:\")\n for i in l:\n print(str(i[0]), i[3])\n \n","sub_path":"HistogramNormal.py","file_name":"HistogramNormal.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"593591701","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 10:08:26 2018\n\n@author: monish.mukherjee\n\"\"\"\nimport matplotlib.pyplot as 
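# ---- editorial sketch (annotation; not part of the dataset records) ----
# As written, array_dist above cannot run: np.argmax returns a scalar (not
# indexable with [0]), np.concatenate takes a tuple of arrays, and the module
# references scipy.spatial.distance and reduce without importing them. One way
# to make the idea executable, keeping the author's alignment heuristic
# (whether argmax of the correlation is the intended shift is left as-is):
import numpy as np
from scipy.spatial import distance

def array_dist(a1, a2):
    # Align by peak cross-correlation, then pad both to a common length.
    m = int(np.argmax(np.correlate(a1, a2, mode='same')))
    if len(a1) < len(a2):
        a1 = np.concatenate((np.zeros(m), a1))
    else:
        a2 = np.concatenate((np.zeros(m), a2))
    n = max(len(a1), len(a2))
    a1 = np.pad(a1, (0, n - len(a1)))
    a2 = np.pad(a2, (0, n - len(a2)))
    return distance.jensenshannon(a1, a2)

d = array_dist(np.array([0., 1., 2., 1.]), np.array([1., 2., 1., 0.]))
assert d >= 0.0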
plt\nimport time\nimport helics as h\nimport logging\nimport pandas as pd\n\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG)\n\n\ndef destroy_federate(fed):\n h.helicsFederateFinalize(fed)\n h.helicsFederateFree(fed)\n h.helicsCloseLibrary()\n\n\nif __name__ == \"__main__\":\n\n ################################# Registering federate from json ########################################\n\n fed = h.helicsCreateCombinationFederateFromConfig(\"Control.json\")\n federate_name = h.helicsFederateGetName(fed)\n print(federate_name)\n endpoint_count = h.helicsFederateGetEndpointCount(fed)\n subkeys_count = h.helicsFederateGetInputCount(fed)\n print(subkeys_count)\n print(endpoint_count)\n ###################### Reference to Publications and Subscription form index #############################\n endid = {}\n subid = {}\n for i in range(0, endpoint_count):\n endid[\"m{}\".format(i)] = h.helicsFederateGetEndpointByIndex(fed, i)\n end_name = h.helicsEndpointGetName(endid[\"m{}\".format(i)])\n logger.info(\"Registered Endpoint ---> {}\".format(end_name))\n\n for i in range(0, subkeys_count):\n subid[\"m{}\".format(i)] = h.helicsFederateGetInputByIndex(fed, i)\n status = h.helicsInputSetDefaultComplex(subid[\"m{}\".format(i)], 0, 0)\n sub_key = h.helicsSubscriptionGetKey(subid[\"m{}\".format(i)])\n logger.info(\"Registered Subscription ---> {}\".format(sub_key))\n\n print(\n \"###############################################################################################\"\n )\n print(\n \"######################## Entering Execution Mode ##########################################\"\n )\n ###################### Entering Execution Mode ##########################################################\n h.helicsFederateEnterExecutingMode(fed)\n\n hours = 24\n total_inteval = int(60 * 60 * hours)\n grantedtime = -1\n update_interval = 5 * 60\n feeder_limit_upper = 4 * (1000 * 1000)\n feeder_limit_lower = 2.7 * (1000 * 1000)\n k = 0\n data = {}\n time_sim = []\n feeder_real_power = []\n feeder_imag_power = []\n for t in range(0, total_inteval, update_interval):\n\n while grantedtime < t:\n grantedtime = h.helicsFederateRequestTime(fed, t)\n time.sleep(0.1)\n\n time_sim.append(t / 3600)\n ############################# Subscribing to Feeder Load from to GridLAB-D ##############################################\n key = []\n Real_demand = []\n Imag_demand = []\n for i in range(0, subkeys_count):\n sub = subid[\"m{}\".format(i)]\n rload, iload = h.helicsInputGetComplex(sub)\n sub_key = h.helicsSubscriptionGetKey(sub)\n print(sub_key)\n if \"totalLoad\" in str(sub_key):\n key_feeder_load = sub_key\n distribution_fed_name = str(key_feeder_load.split(\"/totalLoad\")[0])\n Real_feeder_load = rload\n Imag_feeder_load = iload\n feeder_real_power.append(rload / 1000)\n feeder_imag_power.append(iload / 1000)\n else:\n try:\n data[sub_key].append(rload / 1000)\n except KeyError:\n data[sub_key] = [rload / 1000]\n\n key.append(sub_key)\n Real_demand.append(rload)\n Imag_demand.append(iload)\n\n logger.info(\"EV Controller grantedtime = {}\".format(grantedtime))\n\n logger.info(\"Total Feeder Load is {} + {} j\".format(Real_feeder_load, Imag_feeder_load))\n\n if Real_feeder_load > feeder_limit_upper:\n logger.info(\"Total Feeder Load is over the Feeder Upper Limit\")\n logger.info(\"Warning ----> Feeder OverLimit ---> Turn off EV\")\n\n if k < endpoint_count:\n end = endid[\"m{}\".format(k)]\n logger.info(\"endid: {}\".format(endid))\n end_name = 
str(h.helicsEndpointGetName(end))\n logger.info(\"Sending endpoint name: {}\".format(end_name))\n destination_name = end_name.replace(federate_name, distribution_fed_name)\n logger.info(\n \"Endpoint destination: {}\".format(h.helicsEndpointGetDefaultDestination(end))\n )\n status = h.helicsEndpointSendMessageRaw(end, \"\", str(\"0 + 0 j\")) #\n logger.info(\"Endpoint sending status: {}\".format(status))\n logger.info(\"Turning off {}\".format(end_name))\n k = k + 1\n else:\n logger.info(\"All EVs are Turned off\")\n\n if Real_feeder_load < feeder_limit_lower:\n logger.info(\"Total Feeder Load is under the Feeder Lower Limit\")\n logger.info(\"Feeder Can Support EVs ------> Turn on EV\")\n if k > 0:\n k = k - 1\n end = endid[\"m{}\".format(k)]\n end_name = h.helicsEndpointGetName(end)\n destination_name = end_name.replace(federate_name, distribution_fed_name)\n print(\"Endpoint Destination {}\".format(destination_name))\n status = h.helicsEndpointSendMessageRaw(end, \"\", str(\"200000 + 0 j\"))\n logger.info(\"Turning on {}\".format(end_name))\n else:\n logger.info(\"All EVs are Turned on\")\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n i = 1\n for keys in data:\n ax = fig.add_subplot(2, 3, i)\n ax.plot(time_sim, data[keys])\n ax.set_ylabel(\"EV Output in kW\")\n ax.set_xlabel(\"Time \")\n ax.set_title(keys)\n i = i + 1\n\n plt.show(block=True)\n data[\"time\"] = time_sim\n data[\"feeder_load(real)\"] = feeder_real_power\n pd.DataFrame.from_dict(data=data).to_csv(\"EV_Outputs.csv\", header=True)\n\n t = 60 * 60 * 24\n while grantedtime < t:\n grantedtime = h.helicsFederateRequestTime(fed, t)\n logger.info(\"Destroying federate\")\n destroy_federate(fed)\n","sub_path":"docs/user-guide/examples/user_guide_examples/Example_1b/EV_Controller/EV_Controller.py","file_name":"EV_Controller.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"196537492","text":"\"\"\"A logger. 
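# ---- editorial sketch (annotation; not part of the dataset records) ----
# Stripped of the HELICS plumbing, the EV controller above is a hysteresis
# band: shed one EV endpoint while feeder load is above the upper limit,
# restore one while below the lower limit, do nothing in between. The bare
# control law (no co-simulation calls; names are illustrative):
def control_step(real_load, k, n_endpoints, upper=4.0e6, lower=2.7e6):
    """Return the updated count k of switched-off EVs."""
    if real_load > upper and k < n_endpoints:
        k += 1          # turn one more EV off
    elif real_load < lower and k > 0:
        k -= 1          # turn one EV back on
    return k

k = 0
for load in (4.2e6, 4.1e6, 3.0e6, 2.5e6):
    k = control_step(load, k, n_endpoints=5)
assert k == 1   # two sheds, no action in the dead band, one restore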
Because apparently the default one isn't good enough.\"\"\"\n\nfrom collections import defaultdict\nimport datetime\n\nDEFAULT_DATE_FORMAT = \"%Y-%m-%d %H:%M:%S%z\"\nDEFAULT_FORMAT = \"[{datetime}] {level}: {message}\"\nDEFAULT_LEVEL = \"INFO\"\nNEWLINE = '\\n'\n\nLEVELS = {\"NOTSET\": 00,\n \"DEBUG\": 10,\n \"INFO\": 20,\n \"NOTICE\": 30,\n \"WARNING\": 40,\n \"ERROR\": 50,\n \"CRITICAL\": 60}\n\nclass Writer(object):\n def __init__(self, output, tags=None, level=DEFAULT_LEVEL,\n format=DEFAULT_FORMAT, date_format=DEFAULT_DATE_FORMAT):\n self.output = output\n self.tags = tags if tags is not None else ['*']\n self.level = level\n self.int_level = LEVELS.get(level, 0)\n self.format = format\n self.date_format = date_format\n \n def write(self, line):\n self.output.write(line)\n self.output.write(NEWLINE)\n self.output.flush()\n \n def _do_write(self, message):\n line = self._pre_write(message)\n self.write(line)\n \n def _pre_write(self, message):\n args = message.args()\n args['datetime'] = args['datetime'].strftime(self.date_format)\n line = self.format.format(**args)\n return line\n\n## IRC ERRORS:\nclass NoHandlerError(NotImplementedError):\n pass\n\nclass IRCWriter(Writer):\n def __init__(self, output, tags=None, level=DEFAULT_LEVEL,\n format=DEFAULT_FORMAT, date_format=DEFAULT_DATE_FORMAT,\n irc_handler=None):\n Writer.__init__(self, output, tags, level, format, date_format)\n self.irc_handler = None\n \n def write(self, line):\n if self.irc_handler is None:\n raise NoHandlerError\n \n self.irc_handler.send_message(self.output, message)\n \n def add_irc_handler(self, handler):\n self.irc_handler = handler\n\nclass Message(object):\n def __init__(self, message, level=DEFAULT_LEVEL,\n tags=None, *args, **kwargs):\n self.tags = [] if tags is None else tags\n self.raw_message = message\n self.message = message.format(*args, **kwargs)\n self.level = level\n self.datetime = datetime.datetime.today()\n \n def args(self):\n new_dict = {}\n new_dict.update(self.__dict__)\n return new_dict\n\nclass Logger(object):\n \n instances = {}\n \n def __new__(cls, name=\"k-eight\", *args, **kwargs):\n if name in cls.instances:\n return cls.instances[name]\n else:\n new = object.__new__(cls, *args, **kwargs)\n new.name = name\n cls.instances[name] = new\n return new\n \n def __init__(self, name=\"k-eight\", writers=None):\n if not hasattr(self, 'writers'):\n self.writers = [] if writers is None else writers\n \n def log(self, message, tags=None, level=DEFAULT_LEVEL, *args, **kwargs):\n message = Message(message, level, tags, *args, **kwargs)\n if tags is None:\n for writer in self.writers:\n if '*' in writer.tags:\n if writer.int_level <= LEVELS.get(message.level, 0):\n writer._do_write(message)\n tags = []\n for tag in tags:\n for writer in self.writers:\n if tag in writer.tags or '*' in writer.tags:\n if writer.int_level <= LEVELS.get(message.level, 0):\n writer._do_write(message)\n \n def debug(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"DEBUG\", *args, **kwargs)\n \n def info(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"INFO\", *args, **kwargs)\n \n def notice(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"NOTICE\", *args, **kwargs)\n \n def warning(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"WARNING\", *args, **kwargs)\n \n def error(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"ERROR\", *args, **kwargs)\n \n def critical(self, message, 
tags=None, *args, **kwargs):\n self.log(message, tags, level=\"CRITICAL\", *args, **kwargs)\n \n def add_writers(self, *writers):\n self.writers.extend(writers)\n \n def add_writer(self, writer):\n self.writers.append(writer)","sub_path":"tools/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"115758959","text":"from flask import Flask, jsonify, make_response, abort, request\nfrom models import biblioteka\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"nininini\"\n\n@app.route(\"/api/v1/biblioteka/\", methods=[\"GET\"])\ndef biblioteka_list_api_v1():\n return jsonify(biblioteka.all())\n\n@app.route(\"/api/v1/biblioteka/\", methods=[\"GET\"])\ndef get_book(book_id):\n book = biblioteka.get(book_id)\n if not book:\n abort(404)\n return jsonify({\"book\": book})\n\n@app.route(\"/api/v1/biblioteka/\", methods=[\"POST\"])\ndef create_book():\n if not request.json or not 'title' in request.json:\n abort(400)\n\n book = {\n 'author': request.json.get('author', 'None'),\n 'id': biblioteka.all()[-1]['id'] + 1,\n 'title': request.json['title'],\n 'year': request.json.get('year', 0),\n 'read': False\n }\n biblioteka.create(book)\n return jsonify({'book': book}), 201\n\n@app.route(\"/api/v1/biblioteka/\", methods=['DELETE'])\ndef delete_book(book_id):\n result = biblioteka.delete(book_id)\n if not result:\n abort(404)\n return jsonify({'result': result})\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found', 'status_code': 404}), 404)\n\n@app.errorhandler(400)\ndef bad_request(error):\n return make_response(jsonify({'error': 'Bad request', 'status_code': 400}), 400)\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"501503725","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\nfrom django import forms\nfrom .models import Profesor, Asignatura, AsignaturaDicatada, Estudiante\n\n\nclass ProfesorForm(ModelForm):\n class Meta:\n model = Profesor\n fields = ['profesor_nombre']\n labels = {'profesor_nombre': 'Nombre'}\n\n\nclass AsignaturaForm(ModelForm):\n class Meta:\n model = Asignatura\n fields = ['idAsignatura', 'asignatura_nombre']\n labels = {\n 'idAsignatura': 'Código de la Asignatura',\n 'asignatura_nombre': 'Nombre de la Asignatura'\n }\n\n\nclass AsignaturaDictadaForm(ModelForm):\n class Meta:\n model = AsignaturaDicatada\n fields = ['idAsignatura', 'idProfesor', 'asignatura_dictada_periodo']\n labels = {\n 'idAsignatura': 'Asignatura',\n 'idProfesor': 'Profesor',\n 'asignatura_dictada_periodo': 'Dictada'\n }\n\n\nclass EstudianteForm(ModelForm):\n \"\"\"\n ModelForm para un estudiante\n \"\"\"\n class Meta:\n model = Estudiante\n fields = ['idUniversidad']\n labels = {'idUniversidad': 'Universidad', }","sub_path":"apps/universidad/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"606174156","text":"'''\nCreated on Dec 21, 2016\n\n@author: Ahmed Sirage\n'''\nfrom nltk import SnowballStemmer\nfrom tensorflow.python.framework.tensor_shape import vector\nfrom nltk import word_tokenize\n\n\ndef get_line_stem(word):\n s = SnowballStemmer(\"english\")\n return 
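# ---- editorial sketch (annotation; not part of the dataset records) ----
# The Logger above implements per-name singletons by caching instances in
# __new__; note that __init__ still re-runs on every call, which is why it
# guards its state with hasattr. A minimal reproduction of the pattern:
class Named:
    _instances = {}

    def __new__(cls, name):
        if name not in cls._instances:
            cls._instances[name] = super().__new__(cls)
        return cls._instances[name]

    def __init__(self, name):
        if not hasattr(self, 'items'):   # guard: __init__ re-runs on reuse
            self.items = []
        self.name = name

a, b = Named('x'), Named('x')
assert a is b and a.items is b.items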
s.stem(word)\n\n\ndef init_lexicon_dictionary():\n lexicon_dictionary = {} \n lexicon_stemmed_dictionary = {}\n ld_file = open('lexicon_dictionary.txt','r')\n for line in ld_file:\n word = line[:-22]\n vector = line[-21:-2].split()\n lexicon_dictionary[word] = vector\n lexicon_stemmed_dictionary[get_line_stem(word)] = vector\n ld_file.close()\n \n return lexicon_dictionary, lexicon_stemmed_dictionary\n\n\nclass sentence:\n '''\n classdocs\n '''\n \n \n\n\n\n\n lexicon_dictionary, lexicon_stemmed_dictionary = init_lexicon_dictionary()\n length = 16\n bayz=0\n kolo=0\n\n def __init__(self, text,emoj):\n '''\n Constructor\n '''\n self.text = text\n self.emoj = emoj\n self.words = {}\n self.tokens = word_tokenize(text, \"english\")\n\n\n for word in self.tokens:\n if word.lower() in sentence.lexicon_dictionary:\n if '1' in sentence.lexicon_dictionary[word.lower()]:\n self.words[word.lower()] = sentence.lexicon_dictionary[word.lower()]\n elif word.lower() in sentence.lexicon_stemmed_dictionary:\n if '1' in sentence.lexicon_stemmed_dictionary[word.lower()]:\n self.words[word.lower()] = sentence.lexicon_stemmed_dictionary[word.lower()]\n else:\n word_stem = get_line_stem(word) \n if word_stem.lower() in sentence.lexicon_dictionary:\n if '1' in sentence.lexicon_dictionary[word_stem]:\n self.words[word.lower()] = sentence.lexicon_dictionary[word_stem]\n elif word_stem.lower() in sentence.lexicon_stemmed_dictionary:\n if '1' in sentence.lexicon_stemmed_dictionary[word_stem]:\n self.words[word.lower()] = sentence.lexicon_stemmed_dictionary[word_stem]\n \n \n \n def get_words(self):\n return self.words \n \n def get_text(self):\n return self.text \n \n def get_len(self):\n return sentence.length \n \n \n def get_emoj(self):\n return self.emoj \n \n \n def init_sent_vector(self):\n self.vector=[]\n temp=[0,0,0,0,0,0,0,0,0,0]\n for word in self.words:\n for vec in self.words[word]:\n self.vector.append(int(vec))\n if len(self.vector) < (sentence.length*10):\n for i in range((sentence.length*10)-len(self.vector)):\n self.vector.append(0)\n for word in self.words:\n print(self.words[word])\n d=0\n for i in self.words[word]:\n if(temp[d] == 1 or i == '1'):\n temp[d] = 1\n d=d+1\n \n for i in temp:\n self.vector.append(i)\n \n \n def get_sentence_vector(self):\n return self.vector \n \n \n \n ","sub_path":"website/sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"197454133","text":"import os, sys\nfrom string import ascii_lowercase\n\ndirname, filename = os.path.split(os.path.abspath(sys.argv[0]))\n\ndef is_same_type(a, b):\n return a.lower() == b.lower()\n\ndef is_opposite_polarity(a, b):\n return (a.islower() and b.isupper()) or (a.isupper() and b.islower())\n\ndef do_react(a, b):\n return is_same_type(a, b) and is_opposite_polarity(a, b)\n\ndef remove_unit(unit, polymer):\n polymer = polymer.replace(unit.lower(), \"\")\n polymer = polymer.replace(unit.upper(), \"\")\n\n return polymer\n\npolymer = \"\"\n\n##open file and store \"polymer\"\nwith open(os.path.join(dirname, \"input.txt\")) as fileobj:\n for line in fileobj: \n for ch in line: \n polymer += ch\n\n##initialize min\noriginal_polymer = polymer\nmin = len(polymer)\n\n##loop through alphabet\nfor c in ascii_lowercase:\n\n i = 0\n polymer = remove_unit(c, polymer)\n\n ##loop through polymer\n while i < len(polymer): \n ##if this is the last unit, then we can stop the loop\n if i >= len(polymer) - 1:\n break\n\n 
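# ---- editorial sketch (annotation; not part of the dataset records) ----
# init_sent_vector in sentence.py above concatenates per-word one-hot emotion
# rows, zero-pads to a fixed width (length * 10), then appends an element-wise
# OR across all words as a 10-bit summary. The same construction in a few
# lines (schematic; also trims sentences longer than max_words, which the
# original does not):
def sentence_vector(word_vecs, max_words=16, dim=10):
    flat = [bit for vec in word_vecs for bit in vec][:max_words * dim]
    flat += [0] * (max_words * dim - len(flat))
    pooled = [int(any(vec[i] for vec in word_vecs)) for i in range(dim)]
    return flat + pooled

v = sentence_vector([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]])
assert len(v) == 170 and v[160:162] == [1, 1]   # 16*10 body + 10 pooled bits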
##otherwise, continue checking for reactive units\n current_unit = polymer[i]\n next_unit = polymer[i + 1]\n\n ##if the units react, remove them and start over at previous unit (if exists)\n if do_react(current_unit, next_unit):\n polymer = polymer[:i] + polymer[i + 2:]\n if i > 0:\n i -= 1\n else:\n i = 0\n ##otherwise just go to next unit\n else:\n i += 1\n \n ##if the length of this polymer is the new smallest, save it\n if len(polymer) < min:\n min = len(polymer)\n \n polymer = original_polymer\n\nprint(min)\n\n","sub_path":"5/5-2.py","file_name":"5-2.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"230091656","text":"import os\nimport config\n\nos.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id\nimport shutil\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\nfrom tensorboardX import SummaryWriter\n\nfrom model.model import East\nfrom model.loss import EastLoss\nfrom dataset.data_utils import custom_dset, collate_fn\nimport config\nfrom utils import *\nfrom eval import eval\n\n\ndef train_epoch(model, optimizer, scheduler, train_loader, device, criterion, epoch, all_step, writer, logger):\n model.train()\n train_loss = 0.\n start = time.time()\n lr = scheduler.get_lr()[0]\n\n for i, (img, score_map, geo_map, training_mask) in enumerate(train_loader):\n cur_batch = img.size()[0]\n img, score_map, geo_map, training_mask = img.to(device), score_map.to(device), geo_map.to(\n device), training_mask.to(device)\n\n f_score, f_geometry = model(img)\n loss = criterion(score_map, f_score, geo_map, f_geometry, training_mask)\n\n # backward\n scheduler.step()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss = loss.item()\n train_loss += loss\n cur_step = epoch * all_step + i\n writer.add_scalar(tag='Train/loss', scalar_value=loss, global_step=cur_step)\n writer.add_scalar(tag='Train/lr', scalar_value=lr, global_step=cur_step)\n\n if i % config.display_interval == 0:\n batch_time = time.time() - start\n logger.info(\n '[{}/{}], [{}/{}], step: {}, {:.3f} samples/sec, batch_loss: {:.4f} time:{:.4f}, lr:{}'.format(\n epoch, config.epochs, i, all_step, cur_step, config.display_interval * cur_batch / batch_time,\n loss, batch_time, lr))\n start = time.time()\n\n return train_loss / all_step, lr\n\n\n\ndef main():\n if config.output_dir is None:\n config.output_dir = 'output'\n if config.restart_training:\n shutil.rmtree(config.output_dir, ignore_errors=True)\n if not os.path.exists(config.output_dir):\n os.makedirs(config.output_dir)\n logger = setup_logger(os.path.join(config.output_dir, 'train_log'))\n\n torch.manual_seed(config.seed) # 为CPU设置随机种子\n if config.gpu_id is not None and torch.cuda.is_available():\n torch.backends.cudnn.benchmark = True\n logger.info('train with gpu {} and pytorch {}'.format(config.gpu_id, torch.__version__))\n device = torch.device(\"cuda:0\")\n torch.cuda.manual_seed(config.seed) # 为当前GPU设置随机种子\n torch.cuda.manual_seed_all(config.seed) # 为所有GPU设置随机种子\n else:\n logger.info('train with cpu and pytorch {}'.format(torch.__version__))\n device = torch.device(\"cpu\")\n writer = SummaryWriter(config.output_dir)\n # Model\n model = East()\n if not config.pretrained and not config.restart_training:\n init_weights(model, init_type=config.init_type)\n num_gpus = torch.cuda.device_count()\n if num_gpus > 1:\n model = nn.DataParallel(model)\n model = model.to(device)\n\n train_data = 
custom_dset(config.trainroot)\n train_loader = DataLoader(train_data, batch_size=config.train_batch_size_per_gpu * num_gpus,\n shuffle=True, collate_fn=collate_fn, num_workers=config.workers)\n criterion = EastLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n\n if config.checkpoint != '' and not config.restart_training:\n start_epoch = load_checkpoint(config.checkpoint, model, logger, device)\n start_epoch += 1\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.lr_decay_step, gamma=config.lr_gamma,\n last_epoch=start_epoch)\n else:\n start_epoch = config.start_epoch\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.lr_decay_step, gamma=config.lr_gamma)\n\n all_step = len(train_loader)\n logger.info('train dataset has {} samples, {} in dataloader'.format(train_data.__len__(), all_step))\n best_model = {'recall': 0, 'precision': 0, 'f1': 0, 'model': ''}\n\n try:\n for epoch in range(start_epoch, config.epochs):\n start = time.time()\n train_loss, lr = train_epoch(model, optimizer, scheduler, train_loader, device, criterion, epoch, all_step,\n writer, logger)\n logger.info('[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(\n epoch, config.epochs, train_loss, time.time() - start, lr))\n if epoch % 4 == 0 or train_loss < 0.005:\n recall, precision, f1 = eval(model, os.path.join(config.output_dir, 'output'), config.testroot, device)\n logger.info('test: recall: {:.6f}, precision: {:.6f}, f1: {:.6f}'.format(recall, precision, f1))\n\n net_save_path = '{}/PSENet_{}_loss{:.6f}_r{:.6f}_p{:.6f}_f1{:.6f}.pth'.format(config.output_dir, epoch,\n train_loss,\n recall,\n precision,\n f1)\n save_checkpoint(net_save_path, model, optimizer, epoch, logger)\n if f1 > best_model['f1']:\n best_model['recall'] = recall\n best_model['precision'] = precision\n best_model['f1'] = f1\n best_model['model'] = net_save_path\n writer.add_scalar(tag='Test/recall', scalar_value=recall, global_step=epoch)\n writer.add_scalar(tag='Test/precision', scalar_value=precision, global_step=epoch)\n writer.add_scalar(tag='Test/f1', scalar_value=f1, global_step=epoch)\n writer.close()\n except KeyboardInterrupt:\n save_checkpoint('{}/final.pth'.format(config.output_dir), model, optimizer, epoch, logger)\n finally:\n if best_model['model']:\n shutil.copy(best_model['model'],\n '{}/best_r{:.6f}_p{:.6f}_f1{:.6f}.pth'.format(config.output_dir, best_model['recall'],\n best_model['precision'], best_model['f1']))\n logger.info(best_model)\n\n # for epoch in range(start_epoch, config.max_epochs):\n #\n # train(train_loader, model, criterion, scheduler, optimizer, epoch)\n #\n # if epoch % config.eval_iteration == 0:\n #\n # # create res_file and img_with_box\n # output_txt_dir_path = predict(model, criterion, epoch)\n #\n # # Zip file\n # submit_path = MyZip(output_txt_dir_path, epoch)\n #\n # # submit and compute Hmean\n # hmean_ = compute_hmean(submit_path)\n #\n # if hmean_ > hmean:\n # is_best = True\n #\n # state = {\n # 'epoch' : epoch,\n # 'state_dict' : model.state_dict(),\n # 'optimizer' : optimizer.state_dict(),\n # 'is_best' : is_best,\n # }\n # save_checkpoint(state, epoch)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"156810033","text":"#!/usr/bin/python3\n# Connect to TTS MQTT Server and receive uplink messages using the Paho MQTT Python client library\n#\n# Original source:\n# 
https://github.com/descartes/TheThingsStack-Integration-Starters/blob/main/MQTT-to-Tab-Python3/TTS.MQTT.Tab.py\n#\n# Instructions to use Eclipse Paho MQTT Python client library:\n# https://www.thethingsindustries.com/docs/integrations/mqtt/mqtt-clients/eclipse-paho/)\n#\nimport os\nimport sys\nimport logging\nimport paho.mqtt.client as mqtt\nimport json\nimport csv\nimport random\nfrom datetime import datetime\n\n# Procedure to get the USER, PASSWORD, PUBLIC_TLS_ADDRESS and PUBLIC_TLS_ADDRESS_PORT:\n# 1. Login to The Things Stack Community Edition console\n# https://console.cloud.thethings.network/\n# 2. Select Go to applications\n# 3. Select your application\n# 4. On the left hand side menu, select Integrations | MQTT\n# 5. See Connection credentials\n# 6. For the password press button: Generate new API key\n# Each time you press this button a new password is generated!\n# The password looks like:\n# NNSXS.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n#\nUSER = \"pepsi@ttn\"\nPASSWORD = \"NNSXS.ZQDPGYRR4TRCXUGJUFKN4WEEJO2ZQDNCQC2JL6A.I4HU2GYDHU4VQ7K2MA7HEMFFNJXZQFLGJJTV7MC3RZKICJ2YE5XA\"\nPUBLIC_TLS_ADDRESS = \"eu1.cloud.thethings.network\"\nPUBLIC_TLS_ADDRESS_PORT = 8883\nDEVICE_ID = \"eui-0004a30b001e5e43\"\nALL_DEVICES = True\n\n# Meaning Quality of Service (QoS)\n# QoS = 0 - at most once\n# The client publishes the message, and there is no acknowledgement by the broker.\n# QoS = 1 - at least once\n# The broker sends an acknowledgement back to the client.\n# The client will re-send until it gets the broker's acknowledgement.\n# QoS = 2 - exactly once\n# Both sender and receiver are sure that the message was sent exactly once, using a kind of handshake\nQOS = 0\n\nDEBUG = True\n\n\ndef get_value_from_json_object(obj, key):\n try:\n return obj[key]\n except KeyError:\n return '-'\n\n\ndef stop(client):\n client.disconnect()\n print(\"\\nExit\")\n sys.exit(0)\n\n\n# Write uplink to tab file\ndef save_to_file(some_json):\n end_device_ids = some_json[\"end_device_ids\"]\n device_id = end_device_ids[\"device_id\"]\n application_id = end_device_ids[\"application_ids\"][\"application_id\"]\n received_at = some_json[\"received_at\"]\n\n if 'uplink_message' in some_json:\n uplink_message = some_json[\"uplink_message\"]\n f_port = get_value_from_json_object(uplink_message, \"f_port\")\n\n # check if f_port is found\n if f_port != '-':\n f_cnt = get_value_from_json_object(uplink_message, \"f_cnt\")\n frm_payload = uplink_message[\"frm_payload\"]\n # If decoded_payload is a json object or a string \"-\" it will be converted to string\n decoded_payload = str(get_value_from_json_object(uplink_message, \"decoded_payload\"))\n rssi = get_value_from_json_object(uplink_message[\"rx_metadata\"][0], \"rssi\")\n snr = get_value_from_json_object(uplink_message[\"rx_metadata\"][0], \"snr\")\n data_rate_index = get_value_from_json_object(uplink_message[\"settings\"], \"data_rate_index\")\n consumed_airtime = get_value_from_json_object(uplink_message, \"consumed_airtime\")\n\n # Daily log of uplinks\n now = datetime.now()\n path_n_file = now.strftime(\"%Y%m%d\") + \".txt\"\n print(path_n_file)\n if not os.path.isfile(path_n_file):\n with open(path_n_file, 'a', newline='') as tabFile:\n fw = csv.writer(tabFile, dialect='excel-tab')\n fw.writerow([\"received_at\", \"application_id\", \"device_id\", \"f_port\", \"f_cnt\", \"rssi\", \"snr\",\n \"data_rate_index\", \"consumed_airtime\", \"frm_payload\", \"decoded_payload\"])\n\n with open(path_n_file, 'a', newline='') 
as tabFile:\n fw = csv.writer(tabFile, dialect='excel-tab')\n fw.writerow([received_at, application_id, device_id, f_port, f_cnt, rssi, snr,\n data_rate_index, consumed_airtime, frm_payload, decoded_payload])\n\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"\\nConnected successfully to MQTT broker\")\n else:\n print(\"\\nFailed to connect, return code = \" + str(rc))\n\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, message):\n print(\"\\nMessage received on topic '\" + message.topic + \"' with QoS = \" + str(message.qos))\n\n parsed_json = json.loads(message.payload)\n\n if DEBUG:\n print(\"Payload (Collapsed): \" + str(message.payload))\n print(\"Payload (Expanded): \\n\" + json.dumps(parsed_json, indent=4))\n\n save_to_file(parsed_json)\n\n\n# mid = message ID\n# It is an integer that is a unique message identifier assigned by the client.\n# If you use QoS levels 1 or 2 then the client loop will use the mid to identify messages that have not been sent.\ndef on_subscribe(client, userdata, mid, granted_qos):\n print(\"\\nSubscribed with message id (mid) = \" + str(mid) + \" and QoS = \" + str(granted_qos))\n\n\ndef on_disconnect(client, userdata, rc):\n print(\"\\nDisconnected with result code = \" + str(rc))\n\n\ndef on_log(client, userdata, level, buf):\n print(\"\\nLog: \" + buf)\n logging_level = client.LOGGING_LEVEL[level]\n logging.log(logging_level, buf)\n\n\n# Generate client ID with pub prefix randomly\nclient_id = f'python-mqtt-{random.randint(0, 1000)}'\n\nprint(\"Create new mqtt client instance\")\nmqttc = mqtt.Client(client_id)\n\nprint(\"Assign callback functions\")\nmqttc.on_connect = on_connect\nmqttc.on_subscribe = on_subscribe\nmqttc.on_message = on_message\nmqttc.on_disconnect = on_disconnect\n# mqttc.on_log = on_log # Logging for debugging OK, waste\n\n# Setup authentication from settings above\nmqttc.username_pw_set(USER, PASSWORD)\n\n# IMPORTANT - this enables the encryption of messages\nmqttc.tls_set() # default certification authority of the system\n\n# mqttc.tls_set(ca_certs=\"mqtt-ca.pem\") # Use this if you get security errors\n# It loads the TTI security certificate. 
Download it from their website from this page: \n# https://www.thethingsnetwork.org/docs/applications/mqtt/api/index.html\n# This is normally required if you are running the script on Windows\n\nprint(\"Connecting to broker: \" + PUBLIC_TLS_ADDRESS + \":\" + str(PUBLIC_TLS_ADDRESS_PORT))\nmqttc.connect(PUBLIC_TLS_ADDRESS, PUBLIC_TLS_ADDRESS_PORT, 60)\n\n\nif ALL_DEVICES:\n print(\"Subscribe to all topics (#) with QoS = \" + str(QOS))\n mqttc.subscribe(\"#\", QOS)\nelif len(DEVICE_ID) != 0:\n topic = \"v3/\" + USER + \"/devices/\" + DEVICE_ID + \"/up\"\n print(\"Subscribe to topic \" + topic + \" with QoS = \" + str(QOS))\n mqttc.subscribe(topic, QOS)\nelse:\n print(\"Can not subscribe to any topic\")\n stop(mqttc)\n\n\nprint(\"And run forever\")\ntry:\n run = True\n while run:\n mqttc.loop(10) # seconds timeout / blocking time\n # print(\".\", end=\"\", flush=True) # feedback to the user that something is actually happening\nexcept KeyboardInterrupt:\n stop(mqttc)\n","sub_path":"uplink_download.py","file_name":"uplink_download.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"24706495","text":"# -*- coding:utf-8 -*-\r\nclass Solution:\r\n\r\n def VerifySquenceOfBST(self, sequence):\r\n # write code here\r\n if not sequence:\r\n return True\r\n root = sequence[-1]\r\n # find the first value larger than the root (start of the right subtree)\r\n for i in range(len(sequence)):\r\n if sequence[i] > root:\r\n break\r\n # check that no value in the right part violates the BST property\r\n for i in range(i, len(sequence) - 1):\r\n if sequence[i] < root:\r\n return False\r\n # recurse on the left and right parts\r\n return self.VerifySquenceOfBST(sequence[:i]) \\\r\n and self.VerifySquenceOfBST(sequence[i:-1])\r\n\r\nif __name__ == '__main__':\r\n # arr = [5, 7, 6, 9, 11, 10, 8]\r\n arr = []\r\n solo = Solution()\r\n result = solo.VerifySquenceOfBST(arr)\r\n print(result)","sub_path":"towords_offer/tree/33 二叉搜索树的后续遍历序列.py","file_name":"33 二叉搜索树的后续遍历序列.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"314903122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 20:01:57 2018\n\n@author: dell\n\"\"\"\nimport pandas as pd\nimport os\nimport re\nimport urllib\nimport pymysql\nimport numpy as np\n\n\nf = open('C:/Users/htc/Desktop/总和.txt')\na = f.read()\nb = a.strip()\ncids = re.findall('data-cid=\"(\\d+)\"', b)\nnames = re.findall('data-name=\"([^\"].*)\"', b)\ndp = pd.DataFrame({'cid':cids,'pro':names})\n#pieces = dict(list(dp.groupby('cid')))\nCheck = list(set(cids))\nindustry = ['彩妆/香水/美妆工具','美容护肤/美体/精油','运动服/休闲服装','保健食品/膳食营养补充食品','奶粉/辅食/营养品/零食','运动鞋new']\n\n# newly added products\nbrand_ = pd.read_excel('C:/Users/htc/Desktop/result.xlsx').values.T.tolist()\nurl = brand_[1]\ncvid = brand_[0]\null = list(zip(url,cvid))\nbrand_append_1 = []\nbrand_append_2 = []\nfor i in ull:\n itembrand = urllib.parse.unquote(re.findall('name=(\\S+)', i[0])[0])\n a = re.findall('data-cid=\"(\\d+)\"', i[1])\n b = re.findall('\">(\\D+)', i[1])# because there are no digits\n caid = list(zip(a,b))\n for o in caid:\n if re.findall('(\\S+)', o[1])[0] in industry:\n if len(re.findall('(\\S+)', o[1])) == 3:\n brand_append_1.append((itembrand,o[0]))\n brand_append_2.append(o[0])\nCheck = list(set(Check+brand_append_2))\ndp1 = pd.DataFrame(brand_append_1,columns=['pro','cid'])\ndp1 = dp1[['cid','pro']]\ndp = pd.concat([dp,dp1],axis =0).reset_index()\npieces = dict(list(dp.groupby('cid')))\n\nbrand_out = pd.read_excel('C:/Users/htc/Desktop/country.xlsx')[0].tolist()\n'''\n# connect to the database\nconn = 
pymysql.connect(host=\"rm-bp12z8rh0j5503p6p2o.mysql.rds.aliyuncs.com\", \n user=\"wangquan\",\n passwd=\"Wq5985790\",\n db='tbdata',\n port=3306,\n charset='utf8')\ncur = conn.cursor()\nconn.commit()\ncur.execute('create table group_byss(itemName varchar(100), itemUrl varchar(80), itemBrand varchar(50), storeName varchar(50), storeCredit varchar(20), itemPromotion varchar(30), salesFaverite decimal, salesReview decimal, listPrice decimal(20,2) ,salesPrice decimal(20,2) , salesQty decimal, salesAmount decimal(20,2), catIVID decimal,catI varchar(50),catII varchar(50),catIII varchar(50),catIV varchar(50),dataPeriod varchar(20),主键 INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT, group1 decimal, group2 varchar(80))' )\ncur.execute('create table groups(itemName varchar(100), itemUrl varchar(80), itemBrand varchar(50), catIV varchar(50), primaryKey decimal, group1 decimal)' )# 'group' has a special meaning (reserved word); like a stray ' it cannot be parsed as a name\n# create the tables\ncur.execute(\"truncate table group_byss\")\ncur.execute(\"drop table group_bys\")\n# clean up'''\n\n#ci = '50010815'\nh = 0\nj = 0\nfor ci in Check:\n h += 1\n #if h <= 82:\n # continue\n brand = pieces[ci]['pro'].tolist()\n brand = list(set(brand))\n for bra in brand:\n if bra not in brand_out:\n continue\n j += 1\n #if j < 4892:\n # continue\n bra = bra.replace('amp;','')\n # normalize the brand name\n bra_down=bra.replace(\"'\",'’')\n bra = urllib.parse.quote(bra,safe='@')\n if bra[-1] == '.':\n bra = bra.replace('.','。')\n os.chdir('D:/线下行业数据库/%s/%s/' % (ci,bra))\n filelist = os.listdir()\n list_1 = []\n for i in filelist:\n aa = pd.read_excel(i)\n aa['itemBrand'] = bra_down\n aa['group1'] = aa['itemUrl'].str.split('=').str[1]\n aa['group2'] = ''\n list_1.append(aa)\n # data cleaning\n if str(aa['catI'].tolist()[0]) not in industry:\n list_1 = []\n continue\n if list_1 == []:# guard against empty folders and drop bad files\n continue\n \n bigdata_ = pd.concat(list_1)\n bigdata_ = bigdata_.reset_index(drop=True)\n bigdata_.loc[bigdata_[bigdata_['dataPeriod']=='2019年04月'].index.tolist(),'group2'] = bigdata_.loc[bigdata_[bigdata_['dataPeriod']=='2019年04月'].index]['group1']\n \n # deduplicate; some slips are inevitable\n bigdata_.duplicated()\n df = bigdata_.drop_duplicates()\n df = df.reset_index(drop=True)\n df = df.loc[df['itemName'].dropna(axis=0).index]\n \n df = df.sort_values('group1')\n \n # upload the data\n from sqlalchemy import create_engine\n engine = create_engine(\"mysql+pymysql://wangquan:Wq5985790@rm-bp12z8rh0j5503p6p2o.mysql.rds.aliyuncs.com:3306/tbdata?charset=utf8\")\n df.to_sql(name = 'group_byss',con = engine,if_exists = 'append',index = False,index_label = False)\n \n#df.to_excel(\"C:/Users/htc/Desktop/shiyan1.xlsx\")\n#conn.close() \n #ss\n #sz","sub_path":"text_in.py","file_name":"text_in.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"241223015","text":"import os.path\nimport pickle\nimport requests\nfrom pyquery import PyQuery as pq\nfrom collections import OrderedDict\nfrom unicodecsv import DictWriter\n\nimport util\n\nbase_url = 'http://localhost:8080/towns/govikon'\nusername = 'admin@example.org'\npassword = 'etamogimarusakot'\n\nif 'localhost' not in base_url:\n print('Are you sure you want to do this (yes)?')\n if raw_input() != 'yes':\n exit()\n\ntagmap = {\n 'Kirche': 'Religion',\n 'Politik': 'Politik',\n 'Sport': 'Sport',\n}\n\n# Fetch event categories\nif os.path.exists('event_urls.p'):\n with open(\"event_urls.p\", \"rb\") as dumpfile:\n event_urls = pickle.load(dumpfile)\n\nelse:\n event_urls = {\n 'all': {\n 'url': (\n 
'http://www.rueti.ch/dorfleben/veranstaltungen/'\n 'veranstaltungskategorien'\n )\n }\n }\n page = pq(requests.get(event_urls['all']['url']).text)\n for a in page('li.section-veranstaltungskategorien li a'):\n event_urls[pq(a).text()] = {'url': pq(a).attr('href')}\n\n # Fetch event urls\n for key in event_urls:\n event_urls[key]['events'] = []\n next_url = event_urls[key]['url']\n while next_url:\n page = pq(requests.get(next_url).text)\n event_urls[key]['events'].extend(\n [pq(a).attr('href') for a in page('a.summary.url')]\n )\n next_url = page('span.next > a').attr('href')\n\n with open(\"event_urls.p\", \"wb\") as dumpfile:\n pickle.dump(event_urls, dumpfile)\n\n\n# Fetch events\nif os.path.exists('events.p'):\n with open(\"events.p\", \"rb\") as dumpfile:\n events = pickle.load(dumpfile)\n\nelse:\n events = []\n print('Fetching {} events'.format(len(event_urls['all']['events'])))\n for event in event_urls['all']['events']:\n page = pq(requests.get(event).text)\n events.append(OrderedDict((\n ('title', page('h1').text()),\n ('start', page('#parent-fieldname-startDate').attr('title')),\n ('end', page('#parent-fieldname-endDate').attr('title')),\n ('timezone', 'Europe/Zurich'),\n # 'recurrence'\n ('tags', ','.join([\n tagmap.get(key, '') for key in event_urls\n if event in event_urls[key]['events']\n and key != 'all' and tagmap.get(key, '')\n ])),\n ('location', page('#parent-fieldname-location').text()),\n ('content_description', (u'{}\\n\\n{}\\n\\n{}\\n{}\\n{}\\n{}'.format(\n page('#parent-fieldname-description').text(),\n page('#parent-fieldname-text').text(),\n page('a.email').text(),\n page('#parent-fieldname-contactPhone').text(),\n page('a.email').attr('href').replace('mailto:', ''),\n page('#parent-fieldname-eventUrl').attr('href') or ''\n ))),\n ('meta_submitter_email', 'info@rueti.ch'),\n )))\n\n with open(\"events.p\", \"wb\") as dumpfile:\n pickle.dump(events, dumpfile)\n\n# Write output\nwith open('output.csv', 'w') as csvfile:\n writer = DictWriter(csvfile, fieldnames=events[0].keys())\n writer.writeheader()\n for event in events:\n writer.writerow(event)\n\n# Submit events\nfor event in events:\n util.submit_event(\n email=event['meta_submitter_email'],\n title=event['title'],\n description=event['content_description'],\n location=event['location'],\n start_date=event['start'].split('T')[0],\n start_time=event['start'].split('T')[1][:5],\n end_time=event['end'].split('T')[1][:5],\n base_url=base_url\n )\n\n# Publish events & close tickets\ncookies = util.login(username, password, base_url=base_url)\ntickets = util.get_tickets(cookies, category='EVN', state='open',\n base_url=base_url)\nfor ticket in tickets:\n util.accept_ticket(cookies, ticket)\n util.accept_event(cookies, ticket)\n util.close_ticket(cookies, ticket)\n","sub_path":"process_events.py","file_name":"process_events.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"429638407","text":"import numpy as np\n\ndef readfile(filename='articlewordmatrix.txt'):\n fileInfo=open('articlesmetadata.txt')\n fileMatrix=open(filename)\n\n lines = [line for line in fileMatrix]\n # First line is the column titles\n colnames = lines[0].strip().split('\\t')[1:]\n rownames = []\n data = []\n for line in lines[1:]:\n p = line.strip().split('\\t')\n # First column in each row is the rowname\n rownames.append(p[0])\n # The data for this row is the remainder of the row\n data.append([float(x) for x in p[1:]])\n sources = []\n authors = 
[] \n url = []\n #published = []\n #for line in fileInfo:\n # p = line.strip().split('\\t')\n # if p == ['error']:\n # p = ['None']*4\n # sources.append(p[1])\n # authors.append(p[2])\n # url.append(p[3])\n #published.append(p[4])\n fileMatrix.close()\n fileInfo.close()\n return rownames, colnames, data\n\ndef writefile(rownames, colnames, data):\n fileMatrix=open('articlewordmatrix_processed.txt','w')\n\n fileMatrix.write('Article')\n for word in colnames: fileMatrix.write('\\t%s' % word)\n fileMatrix.write('\\n')\n\n for i in range(len(rownames)):\n fileMatrix.write(rownames[i])\n row = '\\t' + '\\t'.join(map(str, data[i])) + '\\n'\n fileMatrix.write(row)\n \n fileMatrix.close()\n\n\n#Convert the data matrix to a tf matrix\ndef tf(data):\n return [termfreq(v) for v in data]\n\n#Convert the data matrix to a tf-idf matrix\ndef tfidf(data):\n data_tf = tf(data)\n idf_vec = invdocfreq(data)\n return [[c * idf for c, idf in zip(v,idf_vec)] for v in data_tf]\n\n#term frequency\ndef termfreq(v):\n return [c != 0 and 1 + np.log(c) or 0 for c in v]\n\n#inverse document frequency\t\ndef docap(data):\n N = len(data)\n return [o/N for o in [sum([c and 1 for c in x]) for x in zip(*data)]]\n\ndef invdocfreq(data):\n return [np.log(1/x) for x in docap(data)]\n\ndef pruning(data,colnames,lowerthreshold,higherthreshold):\n matrix, words = data[:], colnames[:]\n apvec = docap(matrix)\n wordstodel = []\n for i in range(len(apvec)):\n if not lowerthreshold < apvec[i] < higherthreshold:\n wordstodel = [i] + wordstodel # or append than reverse?\n for wordid in wordstodel:\n for v in matrix:\n del v[wordid]\n del words[wordid]\n return matrix,words\n\ndef truncation(data):\n pass\n\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"271612564","text":"import glob\nimport os\nfrom PIL import Image\nfrom tqdm import tqdm\nimport random\nimport subprocess\nimport sys\n\n\ndef main():\n process_dir = \"./data/img/\"\n \n try:\n os.mkdir(\"./data\")\n except OSError:\n print(\"Creation of the directory failed\")\n else:\n print(\"Successfully created the directory\")\n\n try:\n os.mkdir(process_dir)\n except OSError:\n print(\"Creation of the directory failed\")\n else:\n print(\"Successfully created the directory\")\n\n\n dir = input(\"Enter image directory: \")\n image_path = \"{}/*.jpg\".format(dir)\n\n for file_path in tqdm(glob.glob(image_path)):\n\n file, ext = os.path.splitext(file_path)\n file_name = file_path.split(\"\\\\\")[-1].split(\".\")[0]\n\n # file_name = file_path.split(\"\\\\\")[-1]\n im = Image.open(file_path)\n width, height = im.size\n\n im.thumbnail((width / 2, height / 2))\n im.save(process_dir + file_name + \".resize.jpeg\", \"JPEG\")\n\n rotate_image90 = im.rotate(angle=90)\n rotate_image90.save(process_dir +\n file_name + \".rotate90.jpeg\", \"jpeg\")\n\n rotate_image180 = im.rotate(angle=180)\n rotate_image180.save(process_dir +\n file_name + \".rotate180.jpeg\", \"jpeg\")\n\n rotate_image270 = im.rotate(angle=270)\n rotate_image270.save(process_dir +\n file_name + \".rotate270.jpeg\", \"jpeg\")\n\n split_data_set(process_dir)\n\n\ndef split_data_set(image_dir):\n\n f_val = open(\"data/test.txt\", 'w')\n f_train = open(\"data/train.txt\", 'w')\n\n path, dirs, files = next(os.walk(image_dir))\n data_size = len(files)\n\n ind = 0\n data_test_size = int(0.1 * data_size)\n test_array = random.sample(range(data_size), k=data_test_size)\n count 
= 0\n for f in os.listdir(image_dir):\n count += 1\n print(count)\n file_extension = f.split(\".\")[-1]\n local_file_reference = image_dir.split(\"/\")\n\n if(file_extension == \"jpg\" or file_extension == \"jpeg\"):\n ind += 1\n\n if ind in test_array:\n f_val.write(local_file_reference[1]+ '/' + local_file_reference[2] + '/' +f+'\n')\n else:\n f_train.write(local_file_reference[1]+ '/' + local_file_reference[2] + '/' +f+'\n')\n\n # close both split files so every written line is flushed to disk\n f_val.close()\n f_train.close()\n\nif __name__ == \"__main__\":\n main()","sub_path":"prepareDataset.py","file_name":"prepareDataset.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"163482574","text":"import sys\nimport logging\n\nimport grpc\nimport concurrent.futures as futures\n\nimport service.common\nimport service.image_recon as img_recon\nfrom service import flowers_map_names, dogs_map_names, cars_map_names\n\n# Importing the generated codes from buildproto.sh\nimport service.service_spec.image_recon_pb2_grpc as grpc_bt_grpc\nfrom service.service_spec.image_recon_pb2 import Result\n\nlogging.basicConfig(level=10, format=\"%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s\")\nlog = logging.getLogger(\"image_recon_service\")\n\n\n# Create a class to be added to the gRPC server\n# derived from the protobuf codes.\nclass FlowersServicer(grpc_bt_grpc.FlowersServicer):\n def __init__(self):\n self.model = \"ResNet152\"\n self.img_path = \"\"\n self.result = \"Fail\"\n\n # Just for debugging purpose.\n log.debug(\"FlowersServicer created\")\n\n # The method that will be exposed to the snet-cli call command.\n # request: incoming data\n # context: object that provides RPC-specific information (timeout, etc).\n def flowers(self, request, context):\n # In our case, request is a Numbers() object (from .proto file)\n self.img_path = request.img_path\n self.model = request.model\n\n map_names = flowers_map_names\n image_dims = (3, 224, 224)\n json_result = img_recon.image_recognition(\"flowers\", self.model, map_names, self.img_path, image_dims)\n\n # To respond we need to create a Result() object (from .proto file)\n self.result = Result()\n self.result.top_5 = str(json_result[\"top_5\"]).encode(\"utf-8\")\n self.result.delta_time = str(json_result[\"delta_time\"]).encode(\"utf-8\")\n log.debug(\"flowers({},{})={}\".format(self.model, self.img_path, self.result.top_5))\n return self.result\n\n\nclass DogsServicer(grpc_bt_grpc.DogsServicer):\n def __init__(self):\n self.model = \"ResNet152\"\n self.img_path = \"\"\n self.result = \"Fail\"\n log.debug(\"DogsServicer created\")\n\n def dogs(self, request, context):\n\n self.img_path = request.img_path\n self.model = request.model\n\n map_names = dogs_map_names\n image_dims = (3, 224, 224)\n json_result = img_recon.image_recognition(\"dogs\", self.model, map_names, self.img_path, image_dims)\n\n self.result = Result()\n self.result.top_5 = str(json_result[\"top_5\"]).encode(\"utf-8\")\n self.result.delta_time = str(json_result[\"delta_time\"]).encode(\"utf-8\")\n log.debug(\"dogs({},{})={}\".format(self.model, self.img_path, self.result.top_5))\n return self.result\n\n\nclass CarsServicer(grpc_bt_grpc.CarsServicer):\n def __init__(self):\n self.model = \"ResNet152\"\n self.img_path = \"\"\n self.result = \"Fail\"\n log.debug(\"CarsServicer created\")\n\n def cars(self, request, context):\n\n self.img_path = request.img_path\n self.model = request.model\n\n map_names = cars_map_names\n image_dims = (3, 224, 224)\n json_result = 
img_recon.image_recognition(\"cars\", self.model, map_names, self.img_path, image_dims)\n\n self.result = Result()\n self.result.top_5 = str(json_result[\"top_5\"]).encode(\"utf-8\")\n self.result.delta_time = str(json_result[\"delta_time\"]).encode(\"utf-8\")\n log.debug(\"cars({},{})={}\".format(self.model, self.img_path, self.result.top_5))\n return self.result\n\n\n# The gRPC serve function.\n#\n# Params:\n# max_workers: pool of threads to execute calls asynchronously\n# port: gRPC server port\n#\n# Add all your classes to the server here.\ndef serve(max_workers=10, port=7777):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))\n grpc_bt_grpc.add_FlowersServicer_to_server(FlowersServicer(), server)\n grpc_bt_grpc.add_DogsServicer_to_server(DogsServicer(), server)\n grpc_bt_grpc.add_CarsServicer_to_server(CarsServicer(), server)\n server.add_insecure_port(\"[::]:{}\".format(port))\n return server\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Runs the gRPC server to communicate with the Snet Daemon.\n \"\"\"\n parser = service.common.common_parser(__file__)\n args = parser.parse_args(sys.argv[1:])\n service.common.main_loop(serve, args)\n","sub_path":"Services/gRPC/cntk-image-recon/service/image_recon_service.py","file_name":"image_recon_service.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"31991556","text":"\n\nfrom xai.brain.wordbase.nouns._ambulance import _AMBULANCE\n\n#calss header\nclass _AMBULANCES(_AMBULANCE, ):\n\tdef __init__(self,): \n\t\t_AMBULANCE.__init__(self)\n\t\tself.name = \"AMBULANCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"ambulance\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ambulances.py","file_name":"_ambulances.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"91204885","text":"#bingo program LC\n#made by Casper Dik\n\ndef create_bingo_card():\n import random\n import numpy\n\n read = open(\"terms.txt\").read()\n read = read[0:]\n list = read.split()\n list_3 = list.copy()\n card = []\n\n for i in range(25):\n random_word = random.choice(list_3)\n card.append(random_word) # create list with 25 terms\n list_3.remove(random_word)\n\n bingo_card = numpy.array(card)\n bingo_card = bingo_card.reshape([5, 5]) # reshape list in 5x5 grid\n print(bingo_card)\n numpy.savetxt(\"bingo_card.txt\", numpy.array(bingo_card), fmt=\"%s\")\n\ndef generate_bingo_cards():\n new_card = \"no\"\n create_bingo_card()\n new_card = input(\"Do you want to generate a new bingo card: \")\n while (new_card == \"yes\"):\n create_bingo_card()\n new_card = input(\"Do you want to generate a new bingo card: \")\n\ngenerate_bingo_cards()","sub_path":"assessment_basic_track/create_card.py","file_name":"create_card.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"79788134","text":"#!/usr/bin/env python\n\"\"\"\nAuthor: Tianyi Gu\nDate: Mar / 7 / 2017\nDesc: Controller node \n\"\"\"\nimport sys\nsys.path.insert(0, \"../../../doc/motionPrimitive/\")\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import Vector3\nfrom threading import Thread\nfrom primutils import Primitive, read_primitives, read_primitives_with_duration\nfrom collections import defaultdict\nimport itertools\nimport tf\nfrom 
nav_msgs.msg import Odometry\n\nactionQueue = []\nmotions = defaultdict(list)\nduration = 0.0\ncurrentTwist = Twist()\n\n\ndef executive_callback(data):\n global actionQueue\n rospy.loginfo(rospy.get_caller_id() + 'Executive give me the motion: %s',\n data.data)\n actionQueue.append(data.data)\n\ndef executive_listener():\n rospy.Subscriber('controller_msg', String, executive_callback)\n rospy.spin()\n\ndef pose_callback(data):\n global currentTwist\n rospy.loginfo(rospy.get_caller_id() + 'Get latest pose info: \\n' + \n \"linear x: %.2f\" % data.twist.linear.x + \"\\n\" + \n \"linear y: %.2f\" % data.twist.linear.y+\"\\n\"+\n \"linear z: %.2f\" % data.twist.linear.z+\"\\n\"+\n \"angular x: %.2f\" % data.twist.angular.x+\"\\n\"+\n \"angular y: %.2f\" % data.twist.angular.y+\"\\n\"+\n \"angular z: %.2f\" % data.twist.angular.z+\"\\n\")\n currentTwist = data.twist\n\ndef pose_listener():\n rospy.Subscriber('pose', Odometry, pose_callback)\n rospy.spin()\n\ndef move():\n global actionQueue\n #here,we publish actions to the topic 'cmd_vel'\n pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)\n speedListener = tf.TransformListener()\n #rospy.init_node('controller_publisher', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n while not rospy.is_shutdown():\n while actionQueue:\n motionStr = actionQueue.pop(0)\n currentMotion = motions[motionStr]\n motion = currentTwist\n motion.linear.x += currentMotion[0] * duration\n motion.angular.z += currentMotion[1] * duration\n rospy.loginfo(\"controller publish action: \\n\" + \n \"linear x: %.2f\" % (motion.linear.x) + \"\\n\" + \n \"linear y: %.2f\" % motion.linear.y+\"\\n\"+\n \"linear z: %.2f\" % motion.linear.z+\"\\n\"+\n \"angular x: %.2f\" % motion.angular.x+\"\\n\"+\n \"angular y: %.2f\" % motion.angular.y+\"\\n\"+\n \"angular z: %.2f\" % motion.angular.z+\"\\n\" +\n \"duration: \" + str(duration))\n pub.publish(motion)\n rospy.sleep(rospy.Duration(duration))\n motion = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))\n rospy.logerr(\"Action Queue is empty!\")\n pub.publish(motion)\n rate.sleep()\n\ndef init_motions():\n global duration\n (primitives, duration) = read_primitives_with_duration(\"../../../doc/motionPrimitive/primitives.txt\")\n dupMotions = [[p.name, p.va, p.wa] for p in primitives]\n dupMotions.sort()\n filterMotions = [m for m, _ in itertools.groupby(dupMotions)]\n for i in filterMotions:\n motions[i[0]].append(i[1])\n motions[i[0]].append(i[2])\n # print i\n #print motions[\"a6\"] \n\nif __name__ == '__main__':\n init_motions();\n rospy.init_node('controller_node', anonymous=True)\n execListernerThread = Thread(target=executive_listener, args=())\n poseListernerThread = Thread(target=pose_listener, args=())\n #execListernerThread.setDaemon(True)\n execListernerThread.start()\n controllerPublisherThread = Thread(target=move, args=())\n #controllerPublisherThread.setDaemon(True)\n controllerPublisherThread.start()\n \n # try:\n # move()\n # except rospy.ROSInterruptException:\n # pass\n","sub_path":"src/pioneer_hallway/controller/obsolete/v1_bak_controller_node.py","file_name":"v1_bak_controller_node.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"620903593","text":"from flask_restful import Resource, reqparse\nfrom flask import request\nfrom models.promocode import PromoCodeModel\nfrom flask_restful_swagger import swagger\nfrom datetime import datetime\n\nclass PromoCode(Resource):\n\n\tparser = 
reqparse.RequestParser()\n\tparser.add_argument('promo_code',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Code is Required\"\n\t\t\t)\n\tparser.add_argument('promo_discount_per',\n\t\t\ttype = int,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Code Discount Percentage is Required\"\n\t\t\t)\n\tparser.add_argument('promo_validity',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Validity is Required. Format: YYYY-MM-DD\"\n\t\t\t)\n\tparser.add_argument('promo_wallet',\n\t\ttype = bool,\n\t\trequired = True,\n\t\thelp = \"Cashback on wallet or Discount on Order ?? \")\n\tparser.add_argument('promo_user', \n\t\ttype=bool,\n\t\trequired = True,\n\t\thelp = \"Promo Code Valid For all Users Or Not\")\n\tparser.add_argument('promo_description',\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Promo Code Description\")\n\tparser.add_argument('promo_url',\n\t\ttype = str,\n\t\trequired = False)\n\n\t@swagger.operation(\n\t\tnotes='Adding A Promo Code',\n\t\tnickname='POST',\n\t\tparameters=[\n\t\t\t{\n\t\t\t\t\"name\": \"promo_code\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"string\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_discount_per\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\":\"int\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_validity\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Date\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\":\"promo_wallet\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Boolean\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_user\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Boolean\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_description\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"String\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_url\",\n\t\t\t\t\"required\": False,\n\t\t\t\t\"dataType\": \"String\"\n\t\t\t}]\n\t\t)\n\tdef post(self):\n\t\tdata = PromoCode.parser.parse_args()\n\t\tdatetime_object = datetime.strptime(data['promo_validity'],'%Y-%m-%d')\n\t\tif data['promo_url'] is None:\n\t\t\tpromocode = PromoCodeModel(data['promo_code'], data['promo_discount_per'], datetime_object, data[\"promo_wallet\"], data[\"promo_user\"], data[\"promo_description\"],None)\n\t\telse:\n\t\t\tpromocode = PromoCodeModel(data['promo_code'], data['promo_discount_per'], datetime_object, data[\"promo_wallet\"], data[\"promo_user\"], data[\"promo_description\"],data[\"promo_url\"])\n\t\ttry:\n\t\t\tpromocode.save_to_db()\n\t\texcept:\n\t\t\treturn {'data':{\"status\": False}}, 500\n\t\t\n\t\treturn {'data':{'status': True, 'promocode': promocode.json()}}, 201\n\n\t@swagger.operation(\n\t\tnotes='Get List of all Promo Code',\n\t\tnickname='GET'\n\t\t)\n\n\tdef get(self):\n\n\t\treturn {'data':{'promocode': [promo.json() for promo in PromoCodeModel.query.all()]}}\n\nclass PromoCodeEdit(Resource):\n\n\tparser = reqparse.RequestParser()\n\tparser.add_argument('promo_validity',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Code Validity is Required.. 
Format: YYYY-MM-DD\"\n\t\t\t)\n\tparser.add_argument('promo_description',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \" Promo Code Description\")\n\n\t@swagger.operation(\n\t\tnotes='Edit a Promo Code Validity',\n\t\tnickname='PUT',\n\t\tparameters=[\n\t\t\t{\n\t\t\t\t\"name\": \"promo_id\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"int\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_validity\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Date\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_description\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"String\"\n\t\t\t}]\n\t\t)\n\n\tdef put(self, promo_id):\n\n\t\tpromocode = PromoCodeModel.find_by_promo_id(promo_id)\n\t\tdata = PromoCodeEdit.parser.parse_args()\n\t\tif promocode:\n\t\t\tpromocode.promo_validity = data['promo_validity']\n\t\t\tpromocode.promo_description = data['promo_description']\n\t\t\tpromocode.save_to_db()\n\t\t\treturn {'data':{'status': True, 'promocode': promocode.json()}}\n\n\t\treturn {'data': {'status': False}}\n\n\t# @swagger.operation(\n\t# \tnotes='Delete a Promo Code',\n\t# \tnickname='DELETE',\n\t# \tparameters=[\n\t# \t\t{\n\t# \t\t\t\"name\": \"promo_id\",\n\t# \t\t\t\"required\": True,\n\t# \t\t\t\"dataType\": \"int\"\n\t# \t\t}]\n\t# \t)\n\n\t# def delete(self, promo_id):\n\n\t# \tpromocode = PromoCodeModel.find_by_promo_id(promo_id)\n\t# \tif promocode:\n\t# \t\tpromocode.delete_from_db()\n\t# \t\treturn {'data': {'status': True}}\n\t# \treturn {'data': {'status': False}}\n\nclass PromoCodeForAll(Resource):\n\n\n\t@swagger.operation(\n\t\tnotes = \"List All Promo Codes Valid For All Users\",\n\t\tnickname = 'GET')\n\n\tdef get(self):\n\t\tdate = datetime.now().date()\n\n\t\treturn {'data':{'promocode': [promo.json() for promo in PromoCodeModel.query.filter(PromoCodeModel.promo_validity >= date, PromoCodeModel.promo_user == 0)]}}\n\n\n\n\n\n \n\n","sub_path":"resources/promocode.py","file_name":"promocode.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"117594392","text":"import numpy as np\nimport math\n\n\n# helper class to load and store input data\nclass Data(object):\n def __init__(self, file, subsample_rate=1e-4):\n\n print('loading corpus...')\n # initialize data, vocab, negative, freq, and context\n self.init_vocab(file)\n print('init negative sampling..')\n self.init_negative()\n\n print('subsampling...')\n self.subsample(subsample_rate)\n\n print('initializing contexts...')\n self.init_context()\n\n def init_vocab(self, file):\n buf = open(file).read()\n self.tks = buf.split(' ')\n self.vocab = {}\n self.freq = []\n self.data = []\n\n for tk in self.tks:\n if len(tk) == 0:\n continue\n if tk not in self.vocab:\n index = len(self.vocab) // 2\n self.vocab[tk] = index\n self.vocab[index] = tk\n self.freq.append(0)\n wid = self.vocab[tk]\n self.data.append(wid)\n self.freq[wid] += 1\n\n def init_negative(self):\n # initialize list to select negative samples\n self.negative = []\n for i, count in enumerate(self.freq):\n if count < 5:\n continue\n n = int(math.pow(count * 1.0, 0.75))\n self.negative += [i for _ in range(n)]\n\n def subsample(self, t=1e-3):\n # initialize probability to remove frequent tokens\n subsampling = [0 for i in range(len(self.vocab))]\n corpus_size = float(len(self.data))\n\n for i, count in enumerate(self.freq):\n f = count / corpus_size\n keep_probability = (math.pow(t / f, 0.5)) + (t / f)\n subsampling[i] = keep_probability\n\n # remove 
based on subsampling\n # one random draw per position; a dropped occurrence also removes exactly one count from freq\n delete = [False for i in range(len(self.data))]\n for i in range(len(self.data)):\n if np.random.rand() > subsampling[self.data[i]]:\n self.freq[self.data[i]] -= 1\n delete[i] = True\n\n self.data[:] = [self.data[i] for i in range(len(self.data)) if not delete[i]]\n\n def init_context(self):\n # map vocab to position for context evaluation\n self.context = [[] for word in self.vocab]\n for i in range(len(self.data)):\n self.context[self.data[i]].append(i)\n def get_data(self):\n return self.data, self.negative, self.vocab, self.freq\n\ndef print_window(data, index, window_size=5):\n str = ''\n\n for i in range(2 * window_size + 1):\n curr = i - window_size + index\n if 0 <= curr < len(data.data):\n str += data.vocab[data.data[curr]] + ' '\n print(str)\n\n\n# loads a word embedding file into an embed matrix\ndef load_wordvec(file, vocab={}):\n print('loading word embeddings...')\n\n # fill a plain list first (np.array has no append()); convert on return\n embed = [None for i in range(len(vocab))]\n index = 0\n no_vocab = len(vocab) == 0\n with open(file, 'r') as f:\n for line in f:\n buf = line.split(' ')\n word = buf[0]\n vector = [float(f) for f in buf[1:-1]]\n if no_vocab:\n embed.append(vector)\n vocab[word] = index\n index += 1\n else:\n embed[vocab[word]] = np.array(vector)\n\n return vocab, np.array(embed)\n\n\n# returns the avg windows of embedded contexts for the specific word\ndef get_contexts(word, data, embed, window_size=5):\n index = data.vocab[word]\n contexts = data.context[index]\n\n embed_contexts = np.empty(shape=(0, len(embed[0])))\n for c in contexts:\n window = []\n\n for i in range(window_size * 2 + 1):\n idx = c + i - window_size\n # strict upper bound: idx is used as an index into data.data\n if i != window_size and 0 <= idx < len(data.data):\n if embed[data.data[idx]] is not None:\n window.append(data.data[idx])\n context = np.average(embed[window], axis=0).reshape(1, len(embed[0]))\n embed_contexts = np.append(embed_contexts, context, axis=0)\n\n return embed_contexts\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"380386389","text":"from django.db import models\nfrom datetime import datetime\nfrom users.models import Adduser\n\n# Create your models here.\nclass Blogadd(models.Model):\n author = models.ForeignKey(to=Adduser,on_delete=models.CASCADE)\n title = models.CharField(max_length=100)\n blog = models.CharField(max_length=1000)\n date = models.DateField(default=datetime.now) # pass the callable so the date is taken at save time, not once at import\n\n def __str__(self):\n return f\"{self.author}\"\n","sub_path":"djangoproject/myproject/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"580410410","text":"#\n# @lc app=leetcode id=96 lang=python3\n#\n# [96] Unique Binary Search Trees\n#\nclass Solution:\n def numTrees(self, n: int) -> int:\n '''\n Catalan numbers\n Treat n = 0 as 1, because the empty tree also counts as a binary search tree. The n = 1 case can then be seen as the number of left subtrees times the number of right subtrees; both are empty trees, so 1 * 1 is still 1. For n = 2, either 1 or 2 can be the root: compute each case separately and add them up.\n The n = 2 case follows from the formula below (here dp[i] is the number of BSTs that i numbers can form):\n\n dp[2] = dp[0] * dp[1]   (1 as root: the left subtree cannot exist, the right subtree may hold one number)\n\n     + dp[1] * dp[0]   (2 as root: the left subtree may hold one number, the right subtree cannot exist)\n\n The n = 3 case is written the same way:\n\n dp[3] = dp[0] * dp[2]   (1 as root: the left subtree cannot exist, the right subtree may hold two numbers)\n\n     + dp[1] * dp[1]   (2 as root: the left and right subtrees may hold one number each)\n\n     + dp[2] * dp[0]   (3 as root: the left subtree may hold two numbers, the right subtree cannot exist)\n '''\n if not n: return 1\n dp = [0]*(n+1)\n dp[0], dp[1] = 1, 1\n for i in range(2,n+1):\n for j in range(i):\n dp[i] += 
dp[j]*dp[i-j-1]\n return dp[-1]\n\n","sub_path":"96.unique-binary-search-trees.py","file_name":"96.unique-binary-search-trees.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"176597168","text":"import sys\nimport getopt\nimport logging\nfrom yt_concat.pipeline.pipeline import Pipeline\nfrom yt_concat.utils import Utils\nfrom yt_concat.pipeline.steps.preflight import Preflight\nfrom yt_concat.pipeline.steps.get_video_list import GetVideoList\nfrom yt_concat.pipeline.steps.initialize_yt import InitializeYT\nfrom yt_concat.pipeline.steps.search import Search\nfrom yt_concat.pipeline.steps.download_videos import DownloadVideos\nfrom yt_concat.pipeline.steps.edit_videos import EditVideos\nfrom yt_concat.pipeline.steps.postflight import Postflight\n\n\nCHANNEL_ID = 'UCKSVUHI9rbbkXhvAXK-2uxA'\n\n\ndef print_usage():\n print('python main.py OPTIONS')\n print('OPTIONS:')\n print('{:>6} {:<17} {}'.format('-c', '--channel_id', 'Channel ID of your target youtube channel'))\n print('{:>6} {:<17} {}'.format('-s', '--search_word', 'The word that you want to capture in videos'))\n print('{:>6} {:<17} {}'.format('-l', '--limit', 'The maximum number of capture videos in the output video'))\n print('{:>6} {:<17} {}'.format('-g', '--logging_level', 'The logging level shown on the CMD screen. '\n '[Fill a number only] '\n '[1:DEBUG, 2:INFO, 3:WARNING, 4:ERROR, 5:CRITICAL]'))\n print('{:<24} {}'.format('cleanup', 'Remove all downloaded videos'))\n print('{:<24} {}'.format('fast', 'Skip downloading video list and videos if exist'))\n\n\ndef command_line_arg():\n channel_id = CHANNEL_ID\n search_word = 'incredible'\n limit = 30\n logging_level = logging.DEBUG\n cleanup = False\n fast = False\n short_opt = 'hc:s:l:g:'\n long_opt = 'help channel_id= search_word= limit= logging_level= cleanup fast'.split()\n try:\n opts, args = getopt.getopt(sys.argv[1:], short_opt, long_opt)\n print(opts)\n except getopt.GetoptError:\n print_usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print_usage()\n sys.exit()\n elif opt in (\"-c\", \"--channel_id\"):\n channel_id = arg\n elif opt in (\"-s\", \"--search_word\"):\n search_word = arg\n elif opt in (\"-l\", \"--limit\"):\n limit = int(arg)\n elif opt in (\"-g\", \"--logging_level\"):\n if arg == '1':\n logging_level = logging.DEBUG\n elif arg == '2':\n logging_level = logging.INFO\n elif arg == '3':\n logging_level = logging.WARNING\n elif arg == '4':\n logging_level = logging.ERROR\n elif arg == '5':\n logging_level = logging.CRITICAL\n elif opt == '--cleanup':\n cleanup = True\n elif opt == '--fast':\n fast = True\n return channel_id, search_word, limit, logging_level, cleanup, fast\n\n\ndef config_logger(logging_level):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s:%(asctime)s:%(message)s')\n file_handler = logging.FileHandler('yt_concat_logging.log')\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging_level)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logger\n\n\ndef main():\n channel_id, search_word, limit, logging_level, cleanup, fast = command_line_arg()\n inputs = {\n 'channel_id': channel_id,\n 'search_word': search_word,\n 'limit': limit,\n 'logging_level': logging_level,\n 'cleanup': cleanup,\n 'fast': 
fast,\n }\n\n steps = [\n Preflight(),\n GetVideoList(),\n InitializeYT(),\n Search(),\n DownloadVideos(),\n EditVideos(),\n Postflight(),\n ]\n\n logger = config_logger(logging_level)\n utils = Utils()\n p = Pipeline(steps)\n p.run(inputs, utils, logger)\n\n\nif __name__ == '__main__':\n main()","sub_path":"yt_concat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"482628563","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets, Qt\r\nimport sys\r\nimport time\r\nfrom winchat import *\r\n\r\n\r\nclass Main_1(object):\r\n\r\n def setupUi(self, Form):\r\n Form.setObjectName(\"Form\")\r\n Form.resize(275, 700)\r\n Form.setWindowFlags(QtCore.Qt.FramelessWindowHint)\r\n Form.setMinimumSize(QtCore.QSize(275, 700))\r\n Form.setMaximumSize(QtCore.QSize(275, 700))\r\n # font = QtGui.QFont()\r\n # font.setFamily(\"Jokerman\")\r\n # Form.setFont(font)\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\"images/climb.jpg\"),\r\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n Form.setWindowIcon(icon)\r\n\r\n font1 = QtGui.QFont()\r\n font1.setFamily(\"Arial\")\r\n font1.setPointSize(14)\r\n font1.setBold(False)\r\n # font1.setWeight(10)\r\n self.label_char = QtWidgets.QLabel(Form)\r\n self.label_char.setGeometry(QtCore.QRect(0, 0, 80, 25))\r\n self.label_char.setText(\" iClimb\")\r\n self.label_char.setObjectName(\"label_char\")\r\n self.label_char.setFont(font1)\r\n\r\n font2 = QtGui.QFont()\r\n font2.setFamily(\"Gulim\")\r\n font2.setPointSize(18)\r\n font2.setBold(False)\r\n # font2.setWeight(10)\r\n font2.setUnderline(False)\r\n self.label_mini = QtWidgets.QLabel(Form)\r\n self.label_mini.setGeometry(QtCore.QRect(225, 0, 25, 25))\r\n self.label_mini.setText(\r\n \"-\")\r\n self.label_mini.linkActivated.connect(self.showMinimized)\r\n self.label_mini.setObjectName(\"label_mini\")\r\n self.label_mini.setFont(font2)\r\n self.label_close = QtWidgets.QLabel(Form)\r\n self.label_close.setGeometry(QtCore.QRect(250, 0, 25, 25))\r\n self.label_close.setText(\r\n \"×\")\r\n self.label_close.linkActivated.connect(self.close)\r\n self.label_close.setObjectName(\"label_close\")\r\n self.label_close.setFont(font2)\r\n\r\n self.graphicsView_profile = QtWidgets.QGraphicsView(Form)\r\n self.graphicsView_profile.setGeometry(QtCore.QRect(15, 35, 90, 90))\r\n self.graphicsView_profile.setObjectName(\"graphicsView_profile\")\r\n self.textBrowser_weather = QtWidgets.QTextBrowser(Form)\r\n self.textBrowser_weather.setGeometry(QtCore.QRect(120, 30, 140, 70))\r\n self.textBrowser_weather.setObjectName(\"textBrowser_weather\")\r\n self.textEdit_city = QtWidgets.QTextEdit(Form)\r\n self.textEdit_city.setGeometry(QtCore.QRect(120, 105, 140, 20))\r\n self.textEdit_city.setObjectName(\"textEdit_city\")\r\n\r\n self.tabWidget = QtWidgets.QTabWidget(Form)\r\n self.tabWidget.setGeometry(QtCore.QRect(0, 150, 275, 500))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Viner Hand ITC\")\r\n font.setPointSize(10)\r\n font.setBold(False)\r\n # font.setWeight(50)\r\n self.tabWidget.setFont(font)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n\r\n self.tab_chatting = QtWidgets.QWidget()\r\n self.tab_chatting.setObjectName(\"tab_chatting\")\r\n self.listWidget_chatting = QtWidgets.QListWidget(self.tab_chatting)\r\n self.listWidget_chatting.setGeometry(QtCore.QRect(0, 0, 275, 500))\r\n self.listWidget_chatting.setIconSize(QtCore.QSize(70, 
70))\r\n self.listWidget_chatting.setObjectName(\"listWidget_chatting\")\r\n item = QtWidgets.QListWidgetItem()\r\n self.listWidget_chatting.addItem(item)\r\n self.tabWidget.addTab(self.tab_chatting, \"\")\r\n\r\n self.tab_friends = QtWidgets.QWidget()\r\n self.tab_friends.setObjectName(\"tab_friends\")\r\n self.listWidget_friends = QtWidgets.QListWidget(self.tab_friends)\r\n self.listWidget_friends.setGeometry(QtCore.QRect(0, 0, 275, 500))\r\n self.listWidget_friends.setIconSize(QtCore.QSize(70, 70))\r\n self.listWidget_friends.setObjectName(\"listWidget_friends\")\r\n item = QtWidgets.QListWidgetItem()\r\n self.listWidget_friends.addItem(item)\r\n self.tabWidget.addTab(self.tab_friends, \"\")\r\n\r\n self.tab_groups = QtWidgets.QWidget()\r\n self.tab_groups.setObjectName(\"tab_groups\")\r\n self.listWidget_groups = QtWidgets.QListWidget(self.tab_groups)\r\n self.listWidget_groups.setGeometry(QtCore.QRect(0, 0, 275, 500))\r\n self.listWidget_groups.setIconSize(QtCore.QSize(50, 50))\r\n self.listWidget_groups.setObjectName(\"listWidget_groups\")\r\n item = QtWidgets.QListWidgetItem()\r\n self.listWidget_groups.addItem(item)\r\n self.tabWidget.addTab(self.tab_groups, \"\")\r\n\r\n self.tab_apps = QtWidgets.QWidget()\r\n self.tab_apps.setObjectName(\"tab_apps\")\r\n self.calendarWidget = QtWidgets.QCalendarWidget(self.tab_apps)\r\n self.calendarWidget.setGeometry(QtCore.QRect(0, 250, 271, 221))\r\n self.calendarWidget.setObjectName(\"calendarWidget\")\r\n self.label = QtWidgets.QLabel(self.tab_apps)\r\n self.label.setGeometry(QtCore.QRect(10, 20, 54, 12))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Small Fonts\")\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label.setFont(font)\r\n self.label.setTextFormat(QtCore.Qt.RichText)\r\n self.label.setObjectName(\"label\")\r\n self.label_2 = QtWidgets.QLabel(self.tab_apps)\r\n self.label_2.setGeometry(QtCore.QRect(10, 130, 54, 12))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Small Fonts\")\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_2.setFont(font)\r\n self.label_2.setObjectName(\"label_2\")\r\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_3.setGeometry(QtCore.QRect(20, 50, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_3.setFont(font)\r\n self.pushButton_3.setObjectName(\"pushButton_3\")\r\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_4.setGeometry(QtCore.QRect(20, 160, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_4.setFont(font)\r\n self.pushButton_4.setObjectName(\"pushButton_4\")\r\n self.pushButton_5 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_5.setGeometry(QtCore.QRect(100, 50, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_5.setFont(font)\r\n self.pushButton_5.setObjectName(\"pushButton_5\")\r\n self.pushButton_6 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_6.setGeometry(QtCore.QRect(180, 50, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_6.setFont(font)\r\n self.pushButton_6.setObjectName(\"pushButton_6\")\r\n self.pushButton_7 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_7.setGeometry(QtCore.QRect(100, 160, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_7.setFont(font)\r\n self.pushButton_7.setObjectName(\"pushButton_7\")\r\n self.pushButton_8 = QtWidgets.QPushButton(self.tab_apps)\r\n 
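# sixth and last launcher button; buttons 3-8 form a 3x2 grid under the TOOLS and GAMES labels\r\n        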
self.pushButton_8.setGeometry(QtCore.QRect(180, 160, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_8.setFont(font)\r\n self.pushButton_8.setObjectName(\"pushButton_8\")\r\n self.tabWidget.addTab(self.tab_apps, \"\")\r\n\r\n self.pushButton_set = QtWidgets.QPushButton(Form)\r\n self.pushButton_set.setGeometry(QtCore.QRect(0, 675, 25, 25))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Arial\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n font.setKerning(False)\r\n self.pushButton_set.setFont(font)\r\n self.pushButton_set.setObjectName(\"pushButton_set\")\r\n self.pushButton_add = QtWidgets.QPushButton(Form)\r\n self.pushButton_add.setGeometry(QtCore.QRect(25, 675, 25, 25))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Arial\")\r\n font.setPointSize(18)\r\n font.setBold(False)\r\n font.setWeight(50)\r\n self.pushButton_add.setFont(font)\r\n self.pushButton_add.setAutoRepeatInterval(102)\r\n self.pushButton_add.setObjectName(\"pushButton_add\")\r\n self.lcdNumber_time = QtWidgets.QLCDNumber(Form)\r\n self.lcdNumber_time.setGeometry(QtCore.QRect(100, 675, 150, 25))\r\n self.lcdNumber_time.setObjectName(\"lcdNumber_time\")\r\n\r\n self.retranslateUi(Form)\r\n self.tabWidget.setCurrentIndex(0)\r\n QtCore.QMetaObject.connectSlotsByName(Form)\r\n\r\n def retranslateUi(self, Form):\r\n _translate = QtCore.QCoreApplication.translate\r\n Form.setWindowTitle(_translate(\"Form\", \"iClimb\"))\r\n __sortingEnabled = self.listWidget_chatting.isSortingEnabled()\r\n self.listWidget_chatting.setSortingEnabled(False)\r\n item = self.listWidget_chatting.item(0)\r\n item.setText(_translate(\"Form\", \"New Item\"))\r\n\r\n self.listWidget_chatting.setSortingEnabled(__sortingEnabled)\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_chatting), _translate(\"Form\", \"Chatting\"))\r\n __sortingEnabled = self.listWidget_friends.isSortingEnabled()\r\n self.listWidget_friends.setSortingEnabled(False)\r\n item = self.listWidget_friends.item(0)\r\n item.setText(_translate(\"Form\", \"New Item\"))\r\n\r\n self.listWidget_friends.setSortingEnabled(__sortingEnabled)\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_friends), _translate(\"Form\", \"Friends\"))\r\n __sortingEnabled = self.listWidget_groups.isSortingEnabled()\r\n self.listWidget_groups.setSortingEnabled(False)\r\n item = self.listWidget_groups.item(0)\r\n item.setText(_translate(\"Form\", \"New Item\"))\r\n\r\n self.listWidget_groups.setSortingEnabled(__sortingEnabled)\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_groups), _translate(\"Form\", \"Groups\"))\r\n self.label.setText(_translate(\"Form\", \"TOOLS\"))\r\n self.label_2.setText(_translate(\"Form\", \"GAMES\"))\r\n self.pushButton_3.setText(_translate(\"Form\", \"BLOG\"))\r\n self.pushButton_4.setText(_translate(\"Form\", \"Pan\"))\r\n self.pushButton_5.setText(_translate(\"Form\", \"BLOG\"))\r\n self.pushButton_6.setText(_translate(\"Form\", \"BLOG\"))\r\n self.pushButton_7.setText(_translate(\"Form\", \"Pan\"))\r\n self.pushButton_8.setText(_translate(\"Form\", \"Pan\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_apps), _translate(\"Form\", \"APPS\"))\r\n self.pushButton_set.setText(_translate(\"Form\", \"≡\"))\r\n self.pushButton_add.setText(_translate(\"Form\", \"+\"))\r\n\r\n\r\nclass MainWin(Main_1, QtWidgets.QWidget):\r\n\r\n def __init__(self, info_my, parent=None):\r\n super().__init__(parent)\r\n self.setupUi(self)\r\n 
self.info_my = info_my\r\n        # self.start_m(self.info_my)\r\n        self.chat = {}\r\n        self.m_drag = False  # initialize drag state so mouseMoveEvent is safe before any press\r\n\r\n    def mousePressEvent(self, event):\r\n        if event.button() == QtCore.Qt.LeftButton:\r\n            self.m_drag = True\r\n            self.m_DragPosition = event.globalPos() - self.pos()\r\n            event.accept()\r\n            self.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))\r\n\r\n    def mouseMoveEvent(self, QMouseEvent):\r\n        if QMouseEvent.buttons() == QtCore.Qt.LeftButton and self.m_drag:\r\n            self.move(QMouseEvent.globalPos() - self.m_DragPosition)\r\n            QMouseEvent.accept()\r\n\r\n    def mouseReleaseEvent(self, QMouseEvent):\r\n        self.m_drag = False\r\n        self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\r\n\r\n    def start_m(self, info_my):\r\n        for i in info_my[1]:\r\n            item1 = QtWidgets.QListWidgetItem()\r\n            icon = QtGui.QIcon()\r\n            icon.addPixmap(QtGui.QPixmap(r\"images/\" + info_my[1][i] + \".ico\"),\r\n                           QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n            item1.setIcon(icon)\r\n            item1.setText(i)\r\n            self.listWidget_friends.addItem(item1)\r\n        self.listWidget_friends.itemDoubleClicked.connect(self.chatting)\r\n        for j in info_my[2]:\r\n            item = QtWidgets.QListWidgetItem()\r\n            icon = QtGui.QIcon()\r\n            icon.addPixmap(QtGui.QPixmap(r\"images/群聊.jfif\"),\r\n                           QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n            item.setIcon(icon)\r\n            item.setText(j)\r\n            self.listWidget_groups.addItem(item)\r\n        self.listWidget_groups.itemDoubleClicked.connect(self.chatting)\r\n\r\n    def chatting(self, item):\r\n        self.chat[item.text()] = ChatWin()\r\n        self.chat[item.text()].show()\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n#     app = QtWidgets.QApplication(sys.argv)\r\n\r\n    # mainw.show()\r\n    # sys.exit(app.exec_())\r\n","sub_path":"AID1806项目/聊天室2/client10-13/2018-9-25/winmain.py","file_name":"winmain.py","file_ext":"py","file_size_in_byte":13009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"194872870","text":"# count lines that match with regex\r\n\"\"\"import re\r\n\r\nfname = 'mbox.txt'\r\nhandler = open(fname)\r\n\r\nuser_regex = input('Enter a regular expression: ')\r\n\r\nnrgx = 0\r\nfor line in handler:\r\n    if re.findall(f'{user_regex}', line):\r\n        nrgx += 1\r\n\r\nprint(f'{fname} had {nrgx} lines that matched {user_regex}')\"\"\"\r\n\r\n#-----------------------------------------------\r\n# 2nd version with input already written\r\nimport re\r\n\r\nfname = 'mbox.txt'\r\nhandler = open(fname)\r\n\r\ninput = ['^Author', '^X-', 'java$']  # note: shadows the built-in input()\r\n\r\nfor i in range(len(input)):\r\n    nrgx = 0\r\n    handler.seek(0)  # rewind: the file iterator is exhausted after the first pattern\r\n    for line in handler:\r\n        if re.findall(input[i], line):\r\n            nrgx += 1\r\n    print(f'{fname} had {nrgx} lines that matched {input[i]}')\r\n","sub_path":"ex_11_regex/ex_11_01.py","file_name":"ex_11_01.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"76986005","text":"#!/usr/bin/env python3\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Test client for Buggy Server.\")\nparser.add_argument(\"--buggy-name\", type=str, help=\"Which buggy name to use.\",\n                    default=\"Transistor\")\nparser.add_argument(\"--team-name\", type=str, help=\"Which team name to use.\",\n                    default=\"RoboBuggy\")\nparser.add_argument(\"--hostname\", type=str, help=\"Which hostname to use for the\"\n                    \" data server connection.\", default=\"localhost\")\nparser.add_argument(\"--port\", type=int,\n                    help=\"Which port to use for the data server connection.\",\n                    default=4242)\nparser.add_argument(\"--key\", type=str,\n                    help=\"Path to key to use for authentication to the server.\",\n
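# A hedged aside: the --gui/--no-gui style pairs registered below follow a stock
# argparse pattern; a sketch of a helper (the name add_bool_pair is hypothetical,
# everything else is standard-library argparse):
#
#     def add_bool_pair(parser, name, default=False):
#         parser.add_argument('--' + name, dest=name, action='store_true')
#         parser.add_argument('--no-' + name, dest=name, action='store_false')
#         parser.set_defaults(**{name: default})
#
#     add_bool_pair(parser, 'gui', default=True)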
default=\"\")\n\nparser.add_argument('--gui', dest='gui', action='store_true')\nparser.add_argument('--no-gui', dest='gui', action='store_false')\nparser.set_defaults(gui=True)\n\nparser.add_argument('--webcam', dest='webcam', action='store_true')\nparser.add_argument('--no-webcam', dest='webcam', action='store_false')\nparser.set_defaults(webcam=False)\nparser.add_argument('--camera', dest='camera', action='store_true')\nparser.add_argument('--no-camera', dest='camera', action='store_false')\nparser.set_defaults(camera=False)\nparser.add_argument('--imu', dest='imu', action='store_true')\nparser.add_argument('--no-imu', dest='imu', action='store_false')\nparser.set_defaults(imu=False)\nparser.add_argument('--gps', dest='gps', action='store_true')\nparser.add_argument('--no-gps', dest='gps', action='store_false')\nparser.set_defaults(gps=False)\nparser.add_argument('--status', dest='status', action='store_true')\nparser.add_argument('--no-status', dest='status', action='store_false')\nparser.set_defaults(status=False)\n\nimport logging\nimport math\nimport random\nimport sys\nimport time\n\nimport cv2\nimport numpy as np\nimport tornado\nfrom auth_client import AuthClient\nfrom protos.message_pb2 import DataMessage\nfrom protos.message_pb2 import ImuMessage\nfrom protos.message_pb2 import GpsMessage\nfrom protos.message_pb2 import LogMessage\nfrom packet import Packet\n\nwords = \"\"\"\nThere is a theory which states that if ever anyone discovers exactly what the\nUniverse is for and why it is here, it will instantly disappear and be replaced\nby something even more bizarre and inexplicable. There is another theory which\nstates that this has already happened.\n\"\"\".split()\n\n\n\nclass Client(AuthClient):\n\n def __init__(self, key, team_name, buggy_name, *args, **kwargs):\n super().__init__(key, team_name, buggy_name, *args, **kwargs)\n if not cl_args.webcam:\n self.camera = None\n self.image_color = np.zeros(3, np.uint8)\n else:\n try:\n self.camera = cv2.VideoCapture(0)\n except:\n pass\n # try:\n # # Uncomment this to switch to generated colors\n # raise Exception()\n # self.camera = cv2.VideoCapture(0)\n # except:\n # self.camera = None\n # self.image_color = np.zeros(3, np.uint8)\n\n # IMU initialization\n self.imu_start = time.time()\n self.imu_period = 1 # Every second, do a full revolution\n self.imu = ImuMessage()\n\n # GPS initialization\n self.course_points = np.array([\n (40.441760, -79.941561),\n (40.440168, -79.942258),\n (40.440078, -79.943041),\n (40.439090, -79.944125),\n (40.438665, -79.945648),\n (40.438878, -79.946421),\n (40.439735, -79.946818),\n (40.440723, -79.948255),\n (40.441507, -79.947225),\n (40.440437, -79.942140),\n ])\n\n self.point_distances = np.zeros(len(self.course_points) - 1)\n for i in range(len(self.course_points) - 1):\n self.point_distances[i] = np.linalg.norm(\n self.course_points[i] - self.course_points[(i + 1)])\n self.course_distances = np.cumsum(self.point_distances)\n self.course_distances = np.insert(self.course_distances, [0], [0])\n self.total_distance = self.course_distances[-1]\n\n self.gps_start = time.time()\n self.gps_period = 120 # seconds, slightly faster than the record.\n self.gps = GpsMessage()\n self.gps.lat = self.course_points[0][0]\n self.gps.long = self.course_points[0][1]\n self.gps_distance = 0\n\n def make_timestamp(self, timestamp):\n now = time.time()\n seconds = int(now)\n nanos = int((now - seconds) * 10**9)\n timestamp.seconds = seconds\n timestamp.nanos = nanos\n\n def make_gps_data(self, data):\n 
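        \"\"\"Advance the simulated buggy along the course polyline.

        Sketch of the idea, grounded in the arrays built in __init__: elapsed
        time scales the total course distance, a modulo wraps full laps, and
        np.searchsorted on course_distances picks the segment whose endpoints
        the fix is linearly interpolated between. E.g. with course_distances
        [0, 10, 25] and gps_distance 15, the point lies 5/15 along segment 1.
        \"\"\"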
self.gps_distance += self.total_distance * (time.time() -\n self.gps_start) / self.gps_period\n self.gps_start = time.time()\n self.gps_distance %= self.total_distance\n point_left = np.searchsorted(self.course_distances, self.gps_distance)\n point_right = np.searchsorted(self.course_distances, self.gps_distance,\n \"right\")\n point_left -= point_left == point_right\n distance_from_prev_point = (self.gps_distance -\n self.course_distances[point_left])\n distance_to_next_point = self.point_distances[point_left]\n slope = self.course_points[point_right] - \\\n self.course_points[point_left]\n change = slope * (distance_from_prev_point / distance_to_next_point)\n point = self.course_points[point_left] + change\n self.gps.lat = point[0]\n self.gps.long = point[1]\n self.make_timestamp(self.gps.time)\n data.gps.CopyFrom(self.gps)\n data.data_type = DataMessage.GPS\n\n def make_status_data(self, data):\n # Just generate some fake text to make my life interesting.\n level = random.choice([\"DEBUG\", \"WARNING\", \"INFO\", \"ERROR\", \"FATAL\"])\n data.status.log_level = getattr(LogMessage, level)\n data.status.text = \" \".join(\n [random.choice(words).strip() for _ in range(10)])\n self.make_timestamp(data.status.time)\n data.data_type = DataMessage.STATUS\n\n def make_imu_data(self, data):\n time_diff = (time.time() - self.imu_start)\n self.imu.roll += ((time_diff / self.imu_period) * 2 * math.pi)\n self.imu.roll = self.imu.roll % (2 * math.pi)\n # self.imu.pitch += ((time_diff / self.imu_period) * 2 * math.pi)\n # self.imu.pitch = self.imu.pitch % (2 * math.pi)\n self.imu_start = time.time()\n # data.imu.roll = random.uniform(-1, 1)\n # data.imu.pitch = random.uniform(-2, 2)\n # data.imu.yaw = random.uniform(-3, 3)\n self.make_timestamp(self.imu.time)\n data.imu.CopyFrom(self.imu)\n data.data_type = DataMessage.IMU\n\n def make_camera_data(self, data):\n data.camera.width = 300\n data.camera.height = 300\n\n # Lets you switch between camera and generated imagery\n image = None\n if self.camera is not None:\n image = self.camera.read()[1]\n image = cv2.resize(image, (0, 0), fx=.5, fy=.5)\n data.camera.width = image.shape[1]\n data.camera.height = image.shape[0]\n\n if image is None:\n image = np.ones(\n (data.camera.height, data.camera.width, 3), np.uint8)\n image *= self.image_color\n to_add = np.array([0, 0, 0], np.uint8)\n to_add[random.randint(0, len(to_add) - 1)] = random.randint(0, 10)\n self.image_color += to_add\n\n data.camera.image = cv2.imencode(\".png\", image)[1].tostring()\n self.make_timestamp(data.camera.time)\n data.data_type = DataMessage.CAMERA\n if (cl_args.gui):\n cv2.imshow(\"TEST CLIENT\", image)\n cv2.waitKey(1)\n\n def async_send_stream(self, gen_fn):\n async def send():\n if self.stream_ok:\n try:\n data = DataMessage()\n data.robot_name = cl_args.buggy_name\n gen_fn(data)\n await self.stream.write(Packet.make_packet_from_bytes(\n data.SerializeToString()))\n except tornado.iostream.StreamClosedError as e:\n pass\n # logging.warning(\n # \"%s, unable to send message. 
[Hint: server may be down!]\", e)\n        return send\n\n\nif __name__ == \"__main__\":\n    global cl_args\n    cl_args = parser.parse_args()\n    logging.warning(cl_args)\n\n    # Setup the client\n    logging.basicConfig(level=logging.DEBUG)\n    if (cl_args.key):\n        with open(cl_args.key) as key_file:\n            key = key_file.read().strip()\n        client = Client(key, cl_args.team_name, cl_args.buggy_name,\n                        cl_args.hostname, cl_args.port)\n\n        # Every second, try to authenticate and establish a connection.\n        tornado.ioloop.PeriodicCallback(client.make_connection, 1000).start()\n        # Periodically send various types of messages\n        if cl_args.status:\n            tornado.ioloop.PeriodicCallback(client.async_send_stream(\n                client.make_status_data), 5).start()  # 200 hz\n        if cl_args.imu:\n            tornado.ioloop.PeriodicCallback(client.async_send_stream(\n                client.make_imu_data), 20).start()  # 50 hz\n        if cl_args.gps:\n            tornado.ioloop.PeriodicCallback(client.async_send_stream(\n                client.make_gps_data), 500).start()  # 2 hz (500 ms period)\n        if cl_args.camera:\n            tornado.ioloop.PeriodicCallback(client.async_send_stream(\n                client.make_camera_data), 50).start()  # 20 hz (50 ms period)\n\n    else:\n        logging.error(\"Key is invalid! Quitting program.\")\n        sys.exit(1)\n\n    tornado.ioloop.IOLoop.instance().start()\n","sub_path":"test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":9819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"563498790","text":"from django.urls import path\nfrom django.views.generic import RedirectView\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import views as auth_views\nfrom .views import ThreadBanCreate, UserPostBanCreate, TransgressionList, ReportedThreadList, ReportedUserPostList, ThreadReportDismiss, UserPostReportDismiss\n\nurlpatterns = [\n    path('thread//ban/', ThreadBanCreate.as_view(), name='moderation_thread_ban'),\n    path('post//ban/', UserPostBanCreate.as_view(), name='moderation_userpost_ban'),\n    path('reports/threads/', ReportedThreadList.as_view(), name='moderation_thread_report_list'),\n    path('reports/posts/', ReportedUserPostList.as_view(), name='moderation_userpost_report_list'),\n    path('reports/dismiss/thread//', ThreadReportDismiss.as_view(), name='moderation_thread_report_dismiss'),\n    path('reports/dismiss/post//', UserPostReportDismiss.as_view(), name='moderation_userpost_report_dismiss'), \n    path('banned/', TransgressionList.as_view(), name='moderation_ban_page'),\n    path('login/', auth_views.login, {'template_name': 'moderation/login.html'}, name='login'),\n    path('logout/', auth_views.logout, {'template_name': 'moderation/logged_out.html', 'extra_context': {'form': AuthenticationForm}}, name='logout'),\n    path('', RedirectView.as_view(pattern_name='dj-mod:moderation_ban_page', permanent=False))\n\n] \n","sub_path":"imagenaut/moderation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"537126648","text":"\"\"\"\npull_ocdid_data.py\n\nusage:\n    python pull_ocdid_data.py\n\ncreates an SQLite3 database:\n    ocd-id.sqlite.db\n\n\nThis script pulls OCD-ID data from the opencivicdata/ocd-division-ids\nGitHub repo* and loads it into a SQLite3 database for inspection.\n\n    *https://github.com/opencivicdata/ocd-division-ids/\n     blob/master/identifiers/country-us.csv\n\n\nThe datasets:\n\n    OCDEP 2: Open Civic Data Divisions\n    This script reads the division IDs from the file\n    _identifiers/country-us.csv_ in this repo,\n    
https://github.com/opencivicdata/ocd-division-ids/,\n    and loads them into a sqlite database named _ocd-id.db_,\n    under the table name `country_us`.\n    \n    In a second table, named `lookup`, the OCD-ID types are\n    broken into columns to be joined to. The goal is to make\n    a lookup table that maps from the column name and value in\n    each state's voter file to the correct OCD-ID.\n    \n    The OCD-IDs are globally unique identifiers for political divisions.\n    Defined in:\n        http://docs.opencivicdata.org/en/latest/proposals/0002.html\n    Implemented in:\n        https://github.com/opencivicdata/ocd-division-ids\n    Identifier format: \n        ocd-division/country:(/:)*\n\"\"\"\nfrom __future__ import print_function\nimport collections\n#import csv\nimport io\nimport os\nimport sqlite3\ntry:\n    import urllib.request as request  # Python 3\n    from csv import DictReader\n\n    def get_iostream(response):\n        return io.StringIO(response.read().decode('utf-8'))\n\n    def utf8(text):\n        return text\n    \nexcept ImportError:\n    import urllib2 as request  # Python 2\n    import codecs\n    from csv import DictReader  #import csv\n\n    def get_iostream(response):\n        return io.BytesIO(response.read())\n\n    def utf8(text):\n        return unicode(text, 'utf-8')\n\n\nOCDID_US_DATA_URI = (\n    'https://github.com/opencivicdata/ocd-division-ids/'\n    'raw/master/identifiers/country-us.csv'\n)\n\nrunning_in_docker = os.path.exists('/.dockerenv')\nif running_in_docker:\n    DATABASE_NAME = '/national-voter-file/data/ocd-id.sqlite.db'\nelse:\n    DATABASE_NAME = 'ocd-id.sqlite.db'\n\n\nprint('Downloading from\\n', OCDID_US_DATA_URI)\nresponse = request.urlopen(OCDID_US_DATA_URI)\niostream = get_iostream(response)\nrdr = DictReader(iostream)\nfieldnames = rdr.fieldnames\nall_rows = [row for row in rdr]\nids = [row['id'] for row in all_rows]\n\n\n# First, put the whole dataset into sqlite3 as it is.\n# Create the table\nprint('Writing to', DATABASE_NAME)\nconn = sqlite3.connect(DATABASE_NAME)\nc = conn.cursor()\nc.execute(\"\"\"\n    CREATE TABLE IF NOT EXISTS\n    country_us (ocdid TEXT PRIMARY KEY\n        ,{},\n    CONSTRAINT unique_ocdid UNIQUE (ocdid) ON CONFLICT IGNORE\n    );\n    \"\"\".format('\\n        ,'.join(\n        '{} TEXT'.format(k) for k in fieldnames[1:]))\n)\n\n# Populate the table\ninsertion = \"\"\" \n    INSERT INTO country_us\n    (ocdid\\n,{})\n    VALUES ({})\n\"\"\".format('\\n,'.join(fieldnames[1:]), ', '.join(['?'] * len(fieldnames)))\nc.executemany(\n    insertion,\n    [tuple(utf8(row[f]) for f in fieldnames) for row in all_rows])\n\n\n# Now, get the hierarchy of region types by looking at the first column.\n# OCD-ID values are of the form \n#     ocd-division/country:(/:)*\nsplits = [id.split('/') for id in ids]\ntypes = [(s[2], tuple(sub.split(':')[0] for sub in s[2:])) for s in splits if len(s) > 2]\ntype_hierarchy = {}\nfor locale, entry in types:\n    if locale not in type_hierarchy:\n        type_hierarchy[locale] = {}\n    sub_type = type_hierarchy[locale]\n    for type in entry:\n        if type not in sub_type:\n            sub_type[type] = {'COUNT': 0}\n        sub_type[type]['COUNT'] += 1\n        sub_type = sub_type[type]\n\n\n# -----------------------\n# Get all of the possible columns in the dataset\ntmp = set([sub.split(':')[0] for s in splits if len(s) > 2 for sub in s[2:]])\nall_possible_columns = sorted([s.lower() for s in tmp])\n\nc.execute(\"\"\"\n    CREATE TABLE IF NOT EXISTS\n    lookup (\n        ocdid TEXT PRIMARY KEY\n        ,{},\n    CONSTRAINT unique_lookup_ocid UNIQUE (ocdid) ON CONFLICT IGNORE\n    );\n    \"\"\".format('\\n        ,'.join(\n        '{} TEXT'.format(k) for k in all_possible_columns))\n)\n\ninsertion_template = \"\"\" \n    INSERT INTO lookup\n    
(ocdid, {})\n VALUES ('{}', {})\n\"\"\"\n\nfor id, split in zip(ids, splits):\n if len(split) > 2:\n all_keys, all_vals = zip(*[utf8(s).split(':') for s in split[2:]])\n insertion = insertion_template.format(\n ', '.join(all_keys),\n id,\n ','.join(['?'] * len(all_vals))\n )\n c.execute(insertion, all_vals)\n\nconn.commit()\nconn.close()\n","sub_path":"src/python/utils/ocdidreporter/pull_ocdid_data.py","file_name":"pull_ocdid_data.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"610692725","text":"\n\n#--------------------------------------------------------------------------------------------\n# it will take a context free grammer and calculate its PDA step by step as the time goes by\n#--------------------------------------------------------------------------------------------\n\nimport sys, os, time\n# grammer class\nclass Grammer:\n\n\tdef __init__(self, grammer):\n\t\tself._grammer = grammer\n\t\tself._stack = [] # instance variable unique to each instance; our pda stack\n\n\t# printing the context free grammer\n\tdef show(self):\n\t\tif self.cf():\n\t\t\tprint(\"[+] Context Free Detected: {}\".format(self._grammer))\n\t\telse:\n\t\t\tprint(\"[!] Not A Context Free!\")\n\t\t\tsys.exit(1)\n\n\t# checking context free procedure\n\tdef cf(self):\n\t\ti = 0\n\t\twhile i < len(self._grammer):\n\t\t\tif len(self._grammer[i][1].split(\"->\")[0])>1:\n\t\t\t\treturn False\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\treturn True\n\n\t# checking string derivation procedure\n\t# we know that every variable is reachable from S in the grammer; we assume its a normal grammer!\n\t# so if we derivate S till we found a match between our str and the derivation itself we can produce our considered string, cause there is a variables\n\t# which will end up to the lambda or a terminal.\n\tdef strdev(self, st):\n\t\t# checking if the string is a derivation from our grammer or not\n\t\t# it'll return True if there was any match or false for none\n\t\t# we derivate S till we find a match between our str and the derivation itself\n\t\ti = j = k = 0\n\t\t# cause every varible is reachable from S so we store the first rule which is S in our pdev stack to derivate it till the end of our string\n\t\t#pdev = [self._grammer[0][1]]\n\t\tterminal_or_lambda_rule = []\n\t\tif st[-1:]!=self._grammer[0][1].split('->')[1][-1:]:\n\t\t\tprint(\"[-] Can't Derivate, Last Character '{}' of Input String Detected!\".format(st[-1:]))\n\t\t\treturn False\n\t\t# finding varibles which end up to terminal or lambda in every rule\n\t\tfor i in range(len(self._grammer)):\n\t\t\tif len(self._grammer[i][1].split(\"->\")[1])==1 and self._grammer[i][1].split(\"->\")[1].islower():\n\t\t\t\tterminal_or_lambda_rule.append(self._grammer[i][1])\n\t\t\tif len(self._grammer[i][1].split(\"->\")[1])==1 and self._grammer[i][1].split(\"->\")[1]==\"^\":\n\t\t\t\tterminal_or_lambda_rule.append(self._grammer[i][1])\n\t\t\tif len(self._grammer[i][1].split(\"->\")[1])==2:\n\t\t\t\tterminal_or_lambda_rule.append(self._grammer[i][1])\n\t\t#print(terminal_or_lambda_rule)\n\t\t# every variable is reachable from S so we'll find those variables which is in terminal_or_lambda_rule stack in S and replace them with lambda or their terminals\n\t\t# and we'll do this till we find a match between our input string and S produced derivation \n\t\t# ISSUE: it can't derivate the grammer n times, kind of AI required to detect the number of derivations from our input string; doesn't 
accept \"aababab\"!!!!\n\t\t''' FIXED: we should derivate our devstr as the length as of our input string; \n\t\t\t\t for example if len(st) is equal to 7 then the length of our devstr must be 7\n\t\t\t\t (number of lambdas won't calculate, cause they will increase the length of our devstr) '''\n\t\tdevstr = self._grammer[0][1].split('->')[1]\n\t\tinput_string_length = len(st)\n\t\tdevstr_length = len(devstr)\n\t\t# we derivate the devstr till we find a match between the length of our input string and the length of devstr itself\n\t\twhile devstr_length!=input_string_length:\n\t\t\tfor k in range(len(terminal_or_lambda_rule)):\n\t\t\t\tif terminal_or_lambda_rule[k].split(\"->\")[0] in devstr and '^' not in terminal_or_lambda_rule[k].split(\"->\")[1]:\n\t\t\t\t\tif terminal_or_lambda_rule[k].split(\"->\")[0]=='S':\n\t\t\t\t\t\tdevstr = devstr.replace(terminal_or_lambda_rule[k].split(\"->\")[0], devstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdevstr = devstr.replace(terminal_or_lambda_rule[k].split(\"->\")[0], terminal_or_lambda_rule[k].split(\"->\")[1])\n\t\t\tdevstr_length = len(devstr)\n\t\t# we found the desired derivation for our input string, then we need to produce our string from our S derivation \n\t\tprint(devstr) # debug purposes\n\t\t# so we replace varibles in S with their terminals or lambdas\t\t\n\t\tfor k in range(len(terminal_or_lambda_rule)):\n\t\t\tif terminal_or_lambda_rule[k].split(\"->\")[0] in devstr:\n\t\t\t\tdevstr += \"*->\"+devstr.replace(terminal_or_lambda_rule[k].split(\"->\")[0], terminal_or_lambda_rule[k].split(\"->\")[1])\n\t\tprint(\"[+] Processing Derivation...\")\n\t\ttime.sleep(1)\n\t\tprint(devstr) # debug purposes\n\t\t# removing lambdas from our derivation string \n\t\tprint(\"[+] Removing Lambdas...\")\n\t\tif \"^\" in devstr:\n\t\t\tdevstr = devstr.replace(\"^\", '')\n\t\ttime.sleep(1)\n\t\tprint(devstr) # debug purposes\n\t\tif st in devstr:\n\t\t\tprint(\"[+] Derivation Found!\")\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"[-] Unable To Find Derivation!\")\n\t\t\treturn False\n\n\n\t# computing pda for our input string right after we derivated it from our grammer\n\tdef pda(self, st):\n\t\tpass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntry:\n\t# getting & storing the rules\n\tn = int(input(\"[+] Number Of Rules: \"))\n\ti = j = 0\n\tgr = []\n\twhile i < n:\n\t\tgr.append(input(\"[+] Enter Grammer: \"))\n\t\ti += 1\n\t# removing | from grammer\n\t# for j in range(len(gr)):\n\t# \tif '|' in gr[j].split(\"->\")[1]:\n\t# \t\tgr.append(gr[j].split(\"->\")[0]+\"->\"+gr[j].split(\"->\")[1].split(\"|\")[0])\n\t# \t\tgr.append(gr[j].split(\"->\")[0]+\"->\"+gr[j].split(\"->\")[1].split(\"|\")[1])\n\t# \t\tdel gr[j]\n\t# \telse:\n\t# \t\tpass\n\tgr = list(enumerate(gr))\n\t# initializing grammer instance\n\tg = Grammer(gr)\n\t# printing the considered rules\n\tg.show()\n\t# getting string from user\n\tst = input(\"[+] Input String: \")\n\t# string derivation process\n\tg.strdev(st)\n\t# if g.strdev(st):\n\t# \t# if we found our string in some derivation of S the there is a pda that accept this string/input\n\t# \tg.pda(st) \n\n\n# user input to stop the script like ctrl+C\nexcept KeyboardInterrupt:\n\tprint(\"\\n[*] Ctrl + C pressed\")\n\tsys.exit(1)","sub_path":"PDA.py","file_name":"PDA.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"273832161","text":"import asyncio\nimport json\nimport random\n\nimport aiohttp\nimport discord\nfrom discord.ext import commands\nfrom 
libneko.aggregates import Proxy\n\nfrom synth.utils import errors, extras\n\n\nclass API:\n \"\"\"Overall manager for all API's this bot is using.\"\"\"\n def __init__(self, bot: commands.Bot):\n self.session = aiohttp.ClientSession()\n self.colorapi = ColorAPI(self.session)\n self.pypi = PyPI(self.session)\n self.urbandictionary = UrbanDictionary(self.session)\n self.ksoft = Ksoft(self.session, bot.config.ksoft)\n self.nekobot = NekoBot(self.session)\n\nclass PyPI:\n \"\"\"Docs: https://warehouse.readthedocs.io/api-reference/json\"\"\"\n\n class PyPIResponse:\n def __init__(self, response):\n self._info = response[\"info\"]\n self.author = self._info[\"author\"]\n self.name = self._info[\"name\"]\n self.summary = self._info[\"summary\"] if self._info[\"summary\"] != \"UNKNOWN\" else None\n self.link = self._info[\"package_url\"]\n self.classifiers = self.__get_classifiers()\n self.releases = len(response[\"releases\"])\n if self._info['project_urls']:\n self.urls = \"\\n\".join([f\"[{url}]({self._info['project_urls'][url]})\" for url in self._info[\"project_urls\"] if self._info['project_urls'][url] != \"UNKNOWN\"])\n else:\n self.urls = None\n\n self.embed = self.__generate_embed()\n\n def __get_classifiers(self, limit=5):\n classifiers = self._info[\"classifiers\"]\n if classifiers:\n sliced = classifiers[:limit]\n if len(classifiers) > limit:\n sliced.append(f\"... and {len(classifiers) - len(sliced)} more.\")\n return \"\\n\".join(sliced) \n else:\n return \"No classifiers for this project.\" \n\n def __generate_embed(self):\n embed = discord.Embed(color=discord.Color.blue(), description=self.summary)\n embed.set_author(name=self.name, url=self.link)\n embed.set_thumbnail(url=\"https://i.imgur.com/obx8Wis.png\")\n embed.add_field(name=\"Classifiers\", value=self.classifiers, inline=False)\n embed.add_field(name=\"Links\", value=self.urls or \"No valid project urls.\", inline=False)\n embed.set_footer(text=f\"{self.releases} releases\")\n\n return embed\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"https://pypi.org/pypi/\"\n\n async def get_project(self, project_name: str):\n project_name = project_name.lower()\n async with self.session.get(self.base_url + project_name + \"/json\") as resp:\n if resp.status != 200:\n raise commands.BadArgument\n \n return self.PyPIResponse(await resp.json())\n\nclass UrbanDictionary:\n \"\"\"Docs: None (lol)\"\"\"\n\n class UrbanDictionaryResponse:\n def __init__(self, response):\n self._list = response[\"list\"]\n if self._list:\n self._list = self._list[0]\n self.word = self._list[\"word\"]\n self.definition = self.__format(self._list[\"definition\"])\n self.example = self.__format(self._list[\"example\"])\n self.thumbs_up = self._list[\"thumbs_up\"]\n self.thumbs_down = self._list[\"thumbs_down\"]\n self.link = self._list[\"permalink\"]\n self.icon = \"https://i.imgur.com/35mpixh.png\"\n\n self.embed = self.__generate_embed()\n else:\n raise commands.BadArgument\n\n def __generate_embed(self) -> discord.Embed:\n embed = discord.Embed(color=discord.Color.orange())\n embed.set_author(name=f\"{self.word} [{self.thumbs_up} 👍 | {self.thumbs_down} 👎]\", icon_url=self.icon, url=self.link)\n embed.add_field(name=\"Definition\", value=self.definition, inline=False)\n embed.add_field(name=\"Example\", value=self.example, inline=False)\n\n return embed\n\n def __format(self, string: str):\n s = string.replace(\"[\", \"**\").replace(\"]\", \"**\").replace(\"`\", \"’\")\n return s if not len(s) >= 1024 else 
s[:1020] + \" ...\"\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"http://api.urbandictionary.com/v0/define?term=\"\n\n async def get_term(self, term: str):\n term = term.lower()\n try:\n async with self.session.get(self.base_url + term) as resp:\n return self.UrbanDictionaryResponse(response=await resp.json())\n except:\n raise errors.APIError\n\nclass ColorAPI:\n \"\"\"Docs: https://www.thecolorapi.com/docs\"\"\"\n\n class ColorAPIResponse:\n def __init__(self, response):\n try:\n self.hex = response[\"hex\"][\"clean\"]\n self.rgb = response[\"rgb\"][\"value\"]\n self.hsl = response[\"hsl\"][\"value\"]\n self.hsv = response[\"hsv\"][\"value\"]\n self.cmyk = response[\"cmyk\"][\"value\"]\n self.xyz = response[\"XYZ\"][\"value\"]\n self.color = self.__format_color(response[\"rgb\"])\n self.name = response[\"name\"][\"value\"]\n self.link = \"https://www.thecolorapi.com/id?format=html&hex=\" + self.hex\n\n self.embed = self.__generate_embed()\n except:\n raise commands.BadArgument\n\n def __format_color(self, rgb):\n dummy = Dummy()\n dummy.r = rgb[\"r\"]\n dummy.g = rgb[\"g\"]\n dummy.b = rgb[\"b\"]\n\n return dummy\n\n def __generate_embed(self) -> discord.Embed:\n embed = discord.Embed(color=discord.Color.from_rgb(self.color.r, self.color.g, self.color.b))\n embed.set_author(name=self.name, url=self.link, icon_url=\"https://www.htmlcsscolor.com/preview/32x32/\" + self.hex + \".png\")\n embed.add_field(name=\"Hex\", value=f\"#{self.hex}\")\n embed.add_field(name=\"RGB\", value=self.rgb)\n embed.add_field(name=\"HSL\", value=self.hsl)\n embed.add_field(name=\"HSV\", value=self.hsv)\n embed.add_field(name=\"CMYK\", value=self.cmyk)\n embed.add_field(name=\"XYZ\", value=self.xyz)\n\n return embed\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"https://www.thecolorapi.com/\"\n\n async def get_color(self, color):\n color = color.strip(\"#\")\n async with self.session.get(self.base_url + \"id?hex=\" + color) as resp:\n return self.ColorAPIResponse(await resp.json())\n\nclass Ksoft:\n def __init__(self, session: aiohttp.ClientSession, token):\n self.session = session\n self.token = token\n self.base_url = \"https://api.ksoft.si/\"\n\n async def __request(self, endpoint, **kwargs):\n async with self.session.get(self.base_url + endpoint, headers={\"Authorization\": self.token}, params=kwargs) as resp:\n proxy = Proxy(from_keys=(await resp.json()))\n if self.__validate(proxy):\n return proxy\n else:\n raise errors.KsoftError(message=proxy[\"message\"])\n\n def __validate(self, proxy):\n try:\n if proxy.code == 404:\n return False\n except:\n return True \n else:\n return True \n\n async def random_image(self, tag):\n \"\"\"Gets random image from the specified tag.\"\"\"\n return await self.__request(\"images/random-image\", tag=tag)\n\n async def tags(self):\n \"\"\"Retrieve the list of all available tags.\"\"\"\n return await self.__request(\"images/tags\")\n\n async def tag_search(self, search):\n \"\"\"Search for tags.\"\"\"\n return await self.__request(\"images/tags/\" + search)\n\n async def image_from_snowflake(self, snowflake):\n \"\"\"Retrieve image data.\"\"\"\n return await self.__request(\"images/image/\" + snowflake)\n\n async def random_meme(self):\n \"\"\"Retrieves a random meme from the cache. 
Source: reddit\"\"\"\n        return await self.__request(\"images/random-meme\")\n\n    async def random_wikihow(self):\n        \"\"\"Retrieves weird images from WikiHow.\"\"\"\n        return await self.__request(\"images/random-wikihow\")\n\n    async def random_aww(self):\n        \"\"\"Get random cute pictures, mostly animals.\"\"\"\n        return await self.__request(\"images/random-aww\")\n\n    async def random_nsfw(self):\n        \"\"\"Retrieves random NSFW pics. (real life stuff)\"\"\"\n        return await self.__request(\"images/random-nsfw\")\n\n    async def random_image_from_subreddit(self, subreddit, remove_nsfw = True, span = \"day\"):\n        \"\"\"Retrieve images from the specified subreddit.\"\"\"\n        if span not in [\"hour\", \"day\", \"week\", \"month\", \"year\", \"all\"]:\n            raise errors.KsoftError(message=\"Span must be one of the following arguments: hour, day, week, month, year, all\")\n        \n        return await self.__request(\"images/rand-reddit/\" + subreddit, remove_nsfw=remove_nsfw, span=span)\n\n    async def get_ban(self, user):\n        \"\"\"Get more information about a ban.\"\"\"\n        return await self.__request(\"bans/info\", user=user)\n\n    async def check_ban(self, user):\n        \"\"\"Simple way to check if the user is banned.\"\"\"\n        return await self.__request(\"bans/check\", user=user)\n\n    async def get_bans(self):\n        \"\"\"Pagination of bans, you can request up to 1000 records per page, default is 20.\"\"\"\n        return await self.__request(\"bans/list\")\n\n    async def get_ban_updates(self, timestamp):\n        \"\"\"Gets updates from the previous update.\"\"\"\n        return await self.__request(\"bans/updated\", timestamp=timestamp)\n\n    async def get_map(self, query):\n        \"\"\"You can get coordinates and more information about the searched location, if needed image of the area is generated.\"\"\"\n        return await self.__request(\"kumo/gis\", q=query)\n\n    async def weather(self, query, report_type = \"currently\"):\n        \"\"\"Gets weather from a location.\"\"\"\n        report_types = [\"currently\", \"minutely\", \"hourly\", \"daily\"]\n        if report_type not in report_types:\n            raise errors.KsoftError(message=f\"Report type must be one of the following arguments: {', '.join(report_types)}\")\n\n        return await self.__request(\"kumo/weather/\" + report_type, q=query)\n\n    async def weather_advanced(self):\n        \"\"\"Gets weather by coordinates, this endpoint is faster than weather - easy, because it doesn't need to lookup the location.\n        https://api.ksoft.si/kumo/weather/\"\"\"\n        raise NotImplementedError\n\n    async def geoip(self, ip):\n        \"\"\"Gets location data from the IP address.\"\"\"\n        return await self.__request(\"kumo/geoip\", ip=ip)\n\n    #async def currency(self, from, to, value):\n    #    \"\"\"Convert currency\"\"\"\n    #    \"\"\"https://en.wikipedia.org/wiki/ISO_4217#Active_codes\"\"\"\n    #    return await self.__request(\"kumo/currency\", from=from, to=to, value=value)\n\n    async def lyrics(self, query):\n        \"\"\"Searches for lyrics and returns a list of results.\"\"\"\n        return await self.__request(\"lyrics/search\", q=query)\n\n    async def artist(self, id):\n        \"\"\"Retrieves all albums and songs by that artist.\"\"\"\n        return await self.__request(\"lyrics/artist/\" + id)\n\n    async def album(self, id):\n        \"\"\"Retrieves artist name and all tracks in the album.\"\"\"\n        return await self.__request(\"lyrics/album/\" + id)\n    \n    async def track(self, id):\n        \"\"\"Get info about a song.\"\"\"\n        return await self.__request(\"lyrics/track/\" + id)\n\nclass NekoBot:\n    \"\"\"Docs: https://docs.nekobot.xyz\"\"\"\n\n    class NekoBotResponse:\n        def __init__(self, response):\n            self.image = 
response[\"message\"]\n\n self.embed = self.__generate_embed()\n\n def __generate_embed(self):\n embed = discord.Embed(color=discord.Color.greyple())\n embed.set_image(url=self.image)\n\n return embed\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"https://nekobot.xyz/api/\"\n\n async def __request(self, **kwargs):\n async with self.session.get(self.base_url + \"imagegen\", params=kwargs) as resp:\n return self.NekoBotResponse(await resp.json())\n\n async def threats(self, url: str):\n return await self.__request(type=\"threats\", url=url)\n\n async def baguette(self, url: str):\n return await self.__request(type=\"baguette\", url=url)\n\n async def clyde(self, text: str):\n return await self.__request(type=\"clyde\", text=text)\n\n async def ship(self, user1: str, user2: str):\n return await self.__request(type=\"ship\", user1=user1, user2=user2)\n\n async def captcha(self, url: str, username: str):\n return await self.__request(type=\"captcha\", url=url, username=username)\n\n async def whowouldwin(self, user1: str, user2: str):\n return await self.__request(type=\"whowouldwin\", user1=user1, user2=user2)\n\n async def changemymind(self, text: str):\n return await self.__request(type=\"changemymind\", text=text) \n\n async def jpeg(self, url: str):\n return await self.__request(type=\"jpeg\", url=url)\n \n async def lolice(self, url: str):\n return await self.__request(type=\"lolice\", url=url)\n\n async def kannagen(self, text: str):\n return await self.__request(type=\"kannagen\", text=text)\n\n async def iphonex(self, url: str):\n return await self.__request(type=\"iphonex\", url=url)\n\n async def kms(self, url: str):\n return await self.__request(type=\"kms\", url=url)\n\n async def animeface(self, image: str):\n return await self.__request(type=\"animeface\", image=image)\n\n async def awooify(self, url: str):\n return await self.__request(type=\"awooify\", url=url)\n\n async def trap(self, name: str, author: str, image: str):\n return await self.__request(type=\"trap\", name=name, author=author, image=image) \n\n async def nichijou(self, text: str):\n return await self.__request(type=\"nichijou\", text=text)\n\n async def trumptweet(self, text: str):\n return await self.__request(type=\"trumptweet\", text=text)\n\n async def tweet(self, username: str, text: str):\n return await self.__request(type=\"tweet\", username=username, text=text)\n\n async def kidnap(self, image: str):\n return await self.__request(type=\"kidnap\", image=image)\n\n async def deepfry(self, image: str):\n return await self.__request(type=\"deepfry\", image=image)\n\n async def blurpify(self, image: str):\n return await self.__request(type=\"blurpify\", image=image)\n\n async def phcomment(self, image: str, text: str, username: str):\n return await self.__request(type=\"phcomment\", image=image, text=text, username=username)\n\n async def magik(self, image: str):\n return await self.__request(type=\"magik\", image=image, intensity=random.randint(0, 10))\n\n async def clickforhentai(self, image: str, fontsize: int):\n return await self.__request(type=\"clickforhentai\", image=image, fontsize=fontsize)\n\n async def trash(self, url: str):\n return await self.__request(type=\"trash\", url=url)\n\nclass Dummy:\n pass\n","sub_path":"synth/utils/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":15309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"75218567","text":"import markdown\nfrom 
django.contrib.syndication.views import Feed\nfrom django.core.paginator import Paginator, PageNotAnInteger, InvalidPage\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, reverse\nfrom .models import Paper, Tag, Comment, Category, User\n\n\n# Create your views here.\ndef render_page(request, papers=None, html_path='Index/index.html', paper=None):\n    \"\"\"\n    Render a page with the given data.\n    :param request: the HTTP request (POST or GET)\n    :param papers: a collection of articles\n    :param html_path: path to the HTML template\n    :param paper: a single article --- for the detail page\n    :return: render(something)\n    \"\"\"\n    page_range = None  # default when there is nothing to paginate\n    if papers:\n        limit = 3  # paginate at 3 items per page\n        page_number = 3  # number of page links to show\n        paginator = Paginator(papers, limit)\n        if request.method == \"GET\":\n            # get the 'page' parameter from the URL; the home page omits it, so default to 1\n            page = request.GET.get('page')\n            try:\n                papers = paginator.page(page)\n            # be sure to catch the paging exceptions\n            except PageNotAnInteger:\n                # if the requested page number is not an integer, return the first page\n                papers = paginator.page(1)\n            except InvalidPage:\n                # if the requested page does not exist, return an error response\n                return HttpResponse('Page content not found')\n\n            finally:\n                # build the range of page numbers around the current page\n\n                if papers.number+page_number < papers.paginator.num_pages:\n                    page_range = range(papers.number, papers.number+page_number)\n                else:\n                    page_range = range(papers.number, papers.paginator.num_pages)\n\n    dates = Paper.objects.dates('date', 'month')[0:3]\n    tags = Tag.objects.all()\n    categories = Category.objects.all()\n    all_paper = Paper.objects.all()\n    latest_papers = all_paper.order_by('-date')\n    latest_papers = latest_papers[0:4]\n    return render(request, html_path,\n                  {\n                      'papers': papers,\n                      'dates': dates,\n                      'tags': tags,\n                      'latest_papers': latest_papers,\n                      'categories': categories,\n                      'username': request.session.get('username'),\n                      'paper': paper,\n                      'page_range': page_range\n                  })\n\n\ndef detail(request, paper_id):\n    if request.method == 'GET':\n        paper = Paper.objects.get(pk=paper_id)\n        paper.reading += 1\n        paper.save()\n        return render_page(request, paper=paper, html_path='blog/single.html')\n\n    # comment\n    elif request.method == 'POST':\n        comment_ = request.POST.get('comment')\n        paper = Paper.objects.get(pk=paper_id)\n        # TODO: this should use the currently logged-in user\n        user = request.session.get('username')\n        user = User.objects.get(name=user)\n        comment = Comment(content=comment_, paper=paper, user=user)\n        comment.save()\n        return redirect(to=reverse('blog:detail', args=[paper_id]))\n\n\ndef write(request):\n    if request.method == 'GET':\n        return render_page(request, html_path='blog/write.html')\n\n    elif request.method == 'POST':\n        paper = Paper()\n        paper.name = request.POST.get('name')\n        paper.user = User.objects.get(name=request.session['username'])\n        paper.category = Category.objects.get(pk=request.POST.get('category'))\n        paper.save()\n        # paper.tag_set = tags -- many-to-many fields can be added without calling save() first\n        for tag_id in request.POST.getlist('tag'):\n            paper.tag.add(tag_id)\n\n        paper.content = request.POST.get('content')\n        # render markdown to HTML\n        md = markdown.Markdown(extensions=[\n            'markdown.extensions.extra',\n            'markdown.extensions.codehilite',\n            'markdown.extensions.toc',\n        ])\n        paper.content = md.convert(paper.content)\n        paper.toc = md.toc\n        paper.save()\n        return redirect(to=reverse('blog:detail', args=[paper.id]))\n\n\ndef category(request, category_id):\n    category_ = Category.objects.get(pk=category_id)\n    papers = category_.paper_set.all()\n    return render_page(request, papers=papers)\n\n\ndef tag(request, tag_id):\n    tag_ = Tag.objects.get(pk=tag_id)  # look up the Tag, not a Category\n    papers = tag_.paper_set.all()\n    return render_page(request, papers=papers)\n\n\ndef date(request, date_):\n    year = int(date_[0:4])\n    month = int(date_[5:7])\n    papers = 
Paper.objects.filter(date__year=year).filter(date__month=month).all()\n return render_page(request, papers=papers)\n\n\nclass RSSFeed(Feed):\n title = \"RSS feed - article\"\n link = \"/\"\n description = \"RSS feed - blog posts\"\n\n def items(self):\n return Paper.objects.order_by('-date')\n\n def item_title(self, item):\n return item.name\n\n def item_pubdate(self, item):\n return item.date\n\n def item_description(self, item):\n return item.toc\n\n def item_link(self, item):\n return reverse('blog:detail', args=(item.id,))\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"93225762","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\"\"\"This file is part of the django ERP project.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\n__author__ = 'Emanuele Bertoldi '\n__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'\n__version__ = '0.0.5'\n\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\n\nfrom ..models import *\nfrom ..utils import *\n\nclass FakeModel():\n pk = 5\n\nclass CreateBookmarksUtilTestCase(TestCase):\n def test_create_bookmarks_for_user(self):\n \"\"\"Tests creating bookmarks for the given user instance.\n \"\"\"\n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n m, n = create_bookmarks(u1)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(m.slug, \"user_%d_bookmarks\" % u1.pk)\n # NOTE: the bookmark menu was already created by the signal handler.\n self.assertEqual(n, False)\n \n def test_create_bookmarks_for_any_model(self):\n \"\"\"Tests creating bookmarks for a generic model instance.\n \"\"\" \n fm = FakeModel()\n m, n = create_bookmarks(fm)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(m.slug, \"fakemodel_5_bookmarks\")\n self.assertEqual(n, True)\n\nclass DeleteBookmarksUtilTestCase(TestCase):\n def test_delete_bookmarks_for_user(self):\n \"\"\"Tests deleting bookmarks of the given user instance.\n \"\"\"\n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n m, n = create_bookmarks(u1)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(u1)).exists(),\n True\n )\n \n delete_bookmarks(u1)\n\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(u1)).exists(),\n False\n )\n \n def test_delete_bookmarks_for_any_model(self):\n \"\"\"Tests deleting bookmarks of a generic model instance.\n \"\"\" \n fm = FakeModel()\n m, n = create_bookmarks(fm)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n True\n )\n \n delete_bookmarks(fm)\n\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n False\n )\n \n def test_delete_bookmarks_without_bookmarks(self):\n \"\"\"Tests calling \"delete_bookmarks\" on an instance without bookmarks.\n \"\"\" \n fm = FakeModel()\n \n self.assertEqual(\n 
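        # Hedged aside: asserting equality against True/False works, but the
        # idiomatic unittest spelling for these .exists() checks would be, e.g.:
        #
        #     self.assertFalse(Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists())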
Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n False\n )\n \n delete_bookmarks(fm)\n \n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n False\n )\n \nclass GetBookmarksForUtilTestCase(TestCase):\n def test_bookmarks_for_user(self):\n \"\"\"Tests retrieving bookmark list owned by user with a given username.\n \"\"\" \n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n \n self.assertTrue(n)\n \n bookmarks = Menu.objects.get(slug=get_bookmarks_slug_for(u1))\n \n self.assertEqual(get_bookmarks_for(u1.username), bookmarks)\n \nclass GetUserOfUtilTestCase(TestCase): \n def test_user_of_bookmarks(self):\n \"\"\"Tests retrieving the user of bookmarks identified by the given slug.\n \"\"\" \n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n bookmarks = Menu.objects.get(slug=\"user_1_bookmarks\")\n \n self.assertEqual(get_user_of(bookmarks.slug), u1)\n \nclass CreateDetailNavigationTestCase(TestCase):\n def test_create_detail_navigation(self):\n \"\"\"Tests creating a detail view navigation menu.\n \"\"\"\n m, n = create_detail_navigation(FakeModel)\n \n self.assertEqual(m.slug, \"fakemodel_detail_navigation\")\n self.assertEqual(m.description, \"Fakemodel navigation\")\n \nclass CreateDetailActionsTestCase(TestCase):\n def test_create_detail_actions(self):\n \"\"\"Tests creating a detail view action menu.\n \"\"\"\n m, n = create_detail_actions(FakeModel)\n \n self.assertEqual(m.slug, \"fakemodel_detail_actions\")\n self.assertEqual(m.description, \"Fakemodel actions\")\n \nclass CreateListActionsTestCase(TestCase):\n def test_create_list_actions(self):\n \"\"\"Tests creating a list view action menu.\n \"\"\"\n m, n = create_list_actions(FakeModel)\n \n self.assertEqual(m.slug, \"fakemodel_list_actions\")\n self.assertEqual(m.description, \"Fakemodel list actions\")\n","sub_path":"djangoerp/menus/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"517969335","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cal', '0042_auto_20160925_0730'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='gcalendar',\n name='color_index',\n field=models.CharField(default='10', max_length=10, choices=[(b'1', b'#a4bdfc'), (b'2', b'#7ae7bf'), (b'3', b'#dbadff'), (b'4', b'#ff887c'), (b'5', b'#fbd75b'), (b'6', b'#ffb878'), (b'7', b'#46d6db'), (b'8', b'#e1e1e1'), (b'9', b'#5484ed'), (b'10', b'#51b749'), (b'11', b'#dc2127')]),\n preserve_default=False,\n ),\n ]\n","sub_path":"cal/cal/migrations/0043_gcalendar_color_index.py","file_name":"0043_gcalendar_color_index.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"150097728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This module creates the postgres database\n\nfrom troposphere import Parameter, Ref, GetAtt, Tags, Join, Output\nfrom troposphere.rds import DBInstance, DBSubnetGroup\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupRule\n\nimport config as cfn\nfrom config import template, CLOUDENV, CLOUDNAME, DEFAULT_ROUTE\n\ndef emit_configuration():\n vpc = cfn.vpcs[0]\n region = Ref(\"AWS::Region\")\n\n dbname = template.add_parameter(\n Parameter(\n 
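    # Hedged aside on the ConnectionString output at the end of this module:
    # Join(\"\", [address, port]) concatenates host and port with no separator
    # (yielding \"host5432\"); a sketch with an explicit \":\" separator, using
    # stock troposphere Join/GetAtt:
    #
    #     Value=Join(\":\", [
    #         GetAtt(\"RDSPostgresInstance\", \"Endpoint.Address\"),
    #         GetAtt(\"RDSPostgresInstance\", \"Endpoint.Port\"),
    #     ])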
\"RDSDatabaseInstanceName\",\n Default=\"reporting{0}\".format(CLOUDENV),\n Description=\"Postgres Instance Name\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"63\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=\"Must begin with a letter and contain only alphanumeric characters\"\n )\n )\n\n dbuser = template.add_parameter(\n Parameter(\n \"RDSDatabaseUser\",\n Default=\"sa\",\n Description=\"The database admin account username\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"63\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=\"Must being with a letter and be alphanumeric\"\n )\n )\n\n dbpassword = template.add_parameter(\n Parameter(\n \"RDSDatabasePassword\",\n NoEcho=True,\n Description=\"The database admin account password\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"41\",\n AllowedPattern=\"[a-zA-Z0-9]*\",\n ConstraintDescription=\"Must contain only alphanumeric characters.\",\n Default=\"LeafLeaf123\"\n )\n )\n\n dbclass = template.add_parameter(\n Parameter(\n \"RDSInstanceClass\",\n Default=\"db.t2.medium\",\n Description=\"Database instance size\",\n Type=\"String\",\n AllowedValues=[\n \"db.t2.small\", \"db.t2.medium\", \"db.m3.medium\", \"db.m3.large\",\n \"db.m3.xlarge\", \"db.m3.2xlarge\", \"db.r3.large\", \"db.r3.xlarge\",\n \"db.r3.2xlarge\", \"db.r3.4xlarge\", \"db.r3.8xlarge\"\n ]\n )\n )\n\n allocated_storage = template.add_parameter(\n Parameter(\n \"RDSAllocatedStorage\",\n Default=\"100\",\n Description=\"The size of the Postgres Database (GB)\",\n Type=\"Number\",\n MinValue=\"5\",\n MaxValue=\"512\",\n ConstraintDescription=\"Must be between 5 and 512 GB\"\n )\n )\n\n db_subnet_group = template.add_resource(\n DBSubnetGroup(\n \"RDSSubnetGroup\",\n DBSubnetGroupDescription=\"Subnets available for RDS in {0}\".format(CLOUDNAME),\n SubnetIds=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)],\n DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)]\n )\n )\n\n ingress_rules = [\n SecurityGroupRule(\n IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1]\n ) for p in [('tcp', 5432)]]\n\n security_group = template.add_resource(\n SecurityGroup(\n \"RDSDatabaseSecurityGroup\",\n GroupDescription=\"Security group for Postgres Instances\",\n VpcId=Ref(vpc),\n SecurityGroupIngress=ingress_rules,\n DependsOn=vpc.title\n )\n )\n\n database = template.add_resource(\n DBInstance(\n \"RDSPostgresInstance\",\n DBInstanceIdentifier=Ref(dbname),\n AllocatedStorage=Ref(allocated_storage),\n DBInstanceClass=Ref(dbclass),\n Engine=\"postgres\",\n EngineVersion=\"9.3.6\",\n MasterUsername=Ref(dbuser),\n MasterUserPassword=Ref(dbpassword),\n DBSubnetGroupName=Ref(db_subnet_group),\n VPCSecurityGroups=[Ref(security_group)],\n DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)]\n )\n )\n\n template.add_output(\n Output(\n \"ConnectionString\",\n Description=\"JDBC connection string for Postgres\",\n Value=Join(\"\", [\n GetAtt(\"RDSPostgresInstance\", \"Endpoint.Address\"),\n GetAtt(\"RDSPostgresInstance\", \"Endpoint.Port\")\n ])\n )\n )\n","sub_path":"components/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"303095065","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport sys\n# import os\n# 
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nsys.path.append('D:\\\\WorkSpace\\\\question-bank')\n# sys.path.append('/var/www/question-bank.manual')\n\nimport codecs\nimport json\nfrom collections import OrderedDict\nfrom argparse import ArgumentParser\n\n\ndef arg_parser():\n parser = ArgumentParser(prog=__file__)\n parser.add_argument('-sc', '--schemaFile', type=str,\n metavar='XXX.json', required=True,\n help='json file of schemas to update.')\n parser.add_argument('-u', '--usage', type=str, default=None,\n metavar='USAGE', required=True,\n help='schema usage')\n parser.add_argument('-sid', '--subject_id', type=int, default=103,\n metavar='[101|102|103]',\n help='subject_id, default: 103')\n parser.add_argument('-sn', '--schema_name', type=str, default=None,\n metavar='SCHEMA_NAME',\n help='only update this schema.')\n return parser\n\n\ndef update_schema(fname, usage, subject_id, schema_name_to_update):\n if not StoneSchema.objects(usage=usage, deleted_at=None):\n choice = raw_input('\\nusage<{}> does not exist yet, do you wanna add it?\\n\\t'.format(usage))\n if choice.lower() not in ['y', 'yes']:\n sys.exit()\n schemas = OrderedDict()\n\n with codecs.open(fname, mode='r', encoding='utf-8') as f:\n data = json.load(f, object_pairs_hook=OrderedDict)\n schemas.update(OrderedDict(data))\n # print(json.dumps(schemas, indent=4))\n if schema_name_to_update:\n json_schema = schemas.get(schema_name_to_update)\n if json_schema:\n save_schema(schema_name_to_update, subject_id, usage, json_schema)\n else:\n print('schema_name={} NOT found!'.format(schema_name_to_update))\n else:\n for schema_name, json_schema in schemas.items():\n save_schema(schema_name, subject_id, usage, json_schema)\n\n\ndef save_schema(schema_name, subject_id, usage, json_schema):\n schema = StoneSchema.get_by_name(schema_name)\n if not schema:\n schema = StoneSchema()\n schema.subject_id = subject_id\n schema.name = schema_name\n schema.name_display = json_schema.get('title', schema_name)\n schema.usage = usage\n schema.json_schema = json.dumps(json_schema, ensure_ascii=False)\n # print(schema.to_mongo())\n try:\n schema.save()\n except BankValidationError as e:\n print(False, e.error_message())\n\n\nif __name__ == '__main__':\n args = arg_parser().parse_args()\n # print(args)\n from errors import BankValidationError\n from models.stone_models import StoneSchema\n update_schema(args.schemaFile, args.usage, args.subject_id, args.schema_name)\n","sub_path":"schema_manager/update_schema.py","file_name":"update_schema.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"109633921","text":"#!/usr/bin/python\n\nimport logging\nimport re\nimport defusedxml.ElementTree as ET #use hardened xml implementation because reading unknown xml document\n\nlogger = logging.getLogger('opf')\n\n\nDC11_NS = 'http://purl.org/dc/elements/1.1/'\nOPF2_NS = 'http://www.idpf.org/2007/opf'\nAPP_NAME = \"opf\"\n\n\ndef opf(name):\n return '{%s}%s' % (OPF2_NS, name)\n\n\ndef dc(name):\n return '{%s}%s' % (DC11_NS, name)\n\n\nclass Metadata:\n def __init__(self):\n self.title = \"\"\n self.authors = []\n self.series = \"\"\n self.series_index = None\n self.title_sort = \"\"\n self.language = \"en\"\n self.tags = []\n self.publication_year = None\n\n logger.info(\"opf inititalized\")\n\n @classmethod\n def from_file(cls, filename):\n def clean_string(string):\n string = string.strip()\n string = re.sub(\" +\", 
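            # re.sub(\" +\", \" \", s) collapses runs of spaces to a single space,
            # e.g. \"A  Tale   of  Two\" -> \"A Tale of Two\"; a broader variant
            # (an assumption, not what this module does) would also fold tabs
            # and newlines: re.sub(r\"\\s+\", \" \", s)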
\" \", string)\n return string\n\n tree = ET.parse(filename)\n root = tree.getroot()\n result = cls()\n for child in root:\n if child.tag.endswith('metadata'):\n for meta in child:\n logger.debug(\"%s %s %s\", meta.tag, meta.attrib, meta.text)\n if meta.text:\n text = clean_string(meta.text)\n if meta.tag.endswith('title'):\n result.title = text\n elif meta.tag.endswith('language'):\n result.language = text\n elif meta.tag.endswith('creator'):\n result.authors.append(text)\n elif meta.tag.endswith('subject'):\n result.tags.append(text)\n elif meta.tag.endswith('date'):\n year_match = re.match(\"([0-9]{4}).*\", text)\n if year_match:\n result.publication_year = int(year_match.group(1))\n elif meta.tag.endswith('meta'):\n attrib = meta.attrib['name']\n if attrib == 'calibre:title_sort':\n result.title_sort = clean_string(meta.attrib['content'])\n elif attrib == 'calibre:series':\n result.series = clean_string(meta.attrib['content'])\n elif attrib == 'calibre:series_index':\n result.series_index = clean_string(meta.attrib['content'])\n\n return result\n\n def __str__(self):\n series_text = \"\"\n if self.series_index:\n series_text = \" Part %d of series %s\" % (self.series_index, self.series)\n\n return \"Metadata: %s by %s Language: %s Tags: %s\" % (\n self.title, \"; \".join(self.authors), self.language, self.tags) + series_text\n\n","sub_path":"pydb/opf.py","file_name":"opf.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"492805124","text":"#%% Imports\nimport pandas as pd\nimport numpy as np\nimport logging\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport os\nrepopath = '/Users/francescofusaro/Documents/Github/aml2020'\nos.chdir(repopath)\n\nimport hashlib\n\nfrom biosppy.signals import ecg\n#from ecgdetectors import Detectors\n#from hrv import HRV\nimport neurokit2 as nk\n\n#%% Populate container for plot signals\ndef populate_PlotData(PD,i,sample_id,class_id,raw_ecg,rpeaks_biosppy,filtered_biosppy,signals_neurokit):\n PD[i][0]=sample_id\n PD[i][1]=class_id\n PD[i][2]=raw_ecg\n PD[i][3]=rpeaks_biosppy\n PD[i][4]=filtered_biosppy\n PD[i][5]=signals_neurokit\n \n return(PD)\n#%%Load Data Set\ndef load_data(repopath):\n X = pd.read_csv(f'{repopath}/project3_ffu/X_train_small.csv')\n y = pd.read_csv(f'{repopath}/project3_ffu/y_train_small.csv')\n #X_test = pd.read_csv(f'{repopath}/project3_ffu/X_test.csv')\n X_test = 0\n logging.info('Dataset imported')\n \n return (X, y, X_test)\n\n#%%Split Classes\ndef split_classes(X,y):\n class0_ls = y.index[y['y'] == 0].tolist() #healthy\n class1_ls = y.index[y['y'] == 1].tolist() #Arrhythmia1\n class2_ls = y.index[y['y'] == 2].tolist() #Arrhythmia2\n class3_ls = y.index[y['y'] == 3].tolist() #Noise\n \n X0 = X.iloc[class0_ls,:]\n df_X0 = pd.DataFrame(data=X0,columns=X.columns)\n \n X1 = X.iloc[class1_ls,:]\n df_X1 = pd.DataFrame(data=X1,columns=X.columns)\n \n X2 = X.iloc[class2_ls,:]\n df_X2 = pd.DataFrame(data=X2,columns=X.columns)\n \n X3 = X.iloc[class3_ls,:]\n df_X3 = pd.DataFrame(data=X3,columns=X.columns)\n \n return(df_X0, df_X1, df_X2, df_X3)\n\n#%% Define more flexible ecg_process function\ndef ecg_process_AML(ecg_signal, sampling_rate=1000, method=\"neurokit\"):\n \"\"\"Process an ECG signal as original neurokit2 function, see:\n https://neurokit2.readthedocs.io/en/latest/_modules/neurokit2/ecg/ecg_process.html#ecg_process\n \n However to increase flexibility, 'method' parameter is specifically set for each subfuntion call:\n 
\n - ecg_clean methods: Can be one of ‘neurokit’ (default), ‘biosppy’, ‘pantompkins1985’, ‘hamilton2002’, ‘elgendi2010’, ‘engzeemod2012’.\n \n - ecg_peaks methods: Can be one of ‘neurokit’ (default), ‘pantompkins1985’, ‘hamilton2002’, ‘christov2004’, ‘gamboa2008’, ‘elgendi2010’, ‘engzeemod2012’ or ‘kalidas2017’\n \n - ecg_delineate methods: Identify PQRST peaks. Can be one of ‘peak’ (default) for a peak-based method, ‘cwt’ for continuous wavelet transform or ‘dwt’ for discrete wavelet transform.\n see: https://neurokit2.readthedocs.io/en/latest/examples/ecg_delineate.html\n \"\"\"\n \n #clean\n ecg_preprocess_clean_method = 'biosppy' \n ecg_cleaned = nk.ecg.ecg_clean(ecg_signal, sampling_rate=sampling_rate, method=ecg_preprocess_clean_method)\n # R-peaks\n ecg_preprocess_rpeaks_method = 'neurokit'\n instant_peaks, rpeaks = nk.ecg.ecg_peaks(\n ecg_cleaned=ecg_cleaned, sampling_rate=sampling_rate, method=ecg_preprocess_rpeaks_method, correct_artifacts=True\n )\n\n rate = nk.signal_rate(rpeaks, sampling_rate=sampling_rate, desired_length=len(ecg_cleaned))\n\n quality = nk.ecg.ecg_quality(ecg_cleaned, rpeaks=None, sampling_rate=sampling_rate)\n\n signals = pd.DataFrame({\"ECG_Raw\": ecg_signal, \"ECG_Clean\": ecg_cleaned, \"ECG_Rate\": rate, \"ECG_Quality\": quality})\n\n # Additional info of the ecg signal\n ecg_preprocess_delineate_method = 'dwt'\n delineate_signal, delineate_info = nk.ecg.ecg_delineate(\n ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate, method=ecg_preprocess_delineate_method\n )\n \n if ecg_preprocess_delineate_method != 'peak':\n # Unlike the 'peak' method, 'dwt' and 'cwt' do not identify the Q-peaks and S-peaks.\n delineate_signal_peak, delineate_info_peak = nk.ecg.ecg_delineate(\n ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate, method='peak' \n )\n delineate_signal['ECG_Q_Peaks'] = delineate_signal_peak['ECG_Q_Peaks']\n delineate_signal['ECG_S_Peaks'] = delineate_signal_peak['ECG_S_Peaks']\n \n cardiac_phase = nk.ecg.ecg_phase(ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, delineate_info=delineate_info)\n\n signals = pd.concat([signals, instant_peaks, delineate_signal, cardiac_phase], axis=1)\n\n info = rpeaks\n return signals, info\n \n#%% Extracted peaks summary\ndef calc_peak_summary(signals, sampling_rate):\n #peak summary\n summary = []\n sig_qq = signals[signals['ECG_Q_Peaks'] == 1]\n q_count = len(sig_qq)\n sig_rr = signals[signals['ECG_R_Peaks'] == 1]\n r_count = len(sig_rr)\n sig_pp = signals[signals['ECG_P_Peaks'] == 1]\n p_count= len(sig_pp)\n sig_ss = signals[signals['ECG_S_Peaks'] == 1]\n s_count = len(sig_ss)\n sig_tt = signals[signals['ECG_T_Peaks'] == 1]\n t_count = len(sig_tt)\n \n #peak counts\n p_rel = p_count/r_count\n q_rel = q_count/r_count\n s_rel = s_count/r_count\n t_rel = t_count/r_count\n summary.append(p_rel)\n summary.append(q_rel)\n summary.append(r_count)\n summary.append(s_rel)\n summary.append(t_rel)\n \n #peak p amplitude\n p_mean = sig_pp['ECG_Clean'].mean()\n summary.append(p_mean)\n p_std = sig_pp['ECG_Clean'].std()\n summary.append(p_std)\n \n #peak s amplitude\n s_mean = sig_ss['ECG_Clean'].mean()\n summary.append(s_mean)\n s_std = sig_ss['ECG_Clean'].std()\n summary.append(s_std)\n \n #QRS duration\n sig_r_onset = signals[signals['ECG_R_Onsets'] == 1]\n sig_r_offset = signals[signals['ECG_R_Offsets'] == 1]\n if (len(sig_r_onset) == len(sig_r_offset)):\n d_qrs_N = sig_r_offset.index.to_numpy().ravel() - sig_r_onset.index.to_numpy().ravel() #number of samples between R Onset and 
Offset\n d_qrs_t = (d_qrs_N - 1) / sampling_rate\n d_qrs_t_mean = d_qrs_t.mean()\n d_qrs_t_std = d_qrs_t.std()\n else:\n #TODO: in case of uneven R Onset and Offset detection develop a more sophisticated algo to check which peaks can be retained?\n d_qrs_t_mean = np.nan\n d_qrs_t_std = np.nan\n \n \n summary.append(d_qrs_t_mean)\n summary.append(d_qrs_t_std)\n \n return summary\n\n#%% extract features from ECGs\ndef extract_features(df, Fs, feature_list, remove_outlier, biosppy_enabled, ecg_quality_check, ecg_quality_threshold, class_id, verbose):\n \n if remove_outlier:\n logging.info('Removing ecg outliers with pyheart...')\n \n if biosppy_enabled:\n logging.info('Pre-filtering ECG with biosppy')\n \n # Define F array to aggregate extracted sample features\n F=np.zeros([df.shape[0],len(feature_list)])\n \n # Define PD as a list array to aggregate extracted sample infos (for later plotting)\n # PD columns: [0:sample id | 1: class id | 2: raw signal| 3: r_peaks_biosppy | 4: filtered biosppy | 5: signals neurokit ]\n # PD rows: number of ecg signals\n plotData = []\n for n_row in range(df.shape[0]):\n column = []\n for n_col in range(6):\n column.append(0)\n plotData.append(column)\n \n # for all the rows in the df\n for i in range(len(df)):\n sig_i = df.iloc[i,1:] #signal i wo sample id\n sig_i = sig_i.replace(to_replace='NaN',value=np.nan)\n sig_i_np = (sig_i.to_numpy()).ravel()\n sig_i_np = sig_i_np[~np.isnan(sig_i_np)] #this is our ecg raw signal\n \n # remove outliers using pyheart?\n if remove_outlier:\n dummy=1 #TODO: remove outliers using pyheart?\n \n \n # filter ecg signal with biosppy first\n if biosppy_enabled:\n try:\n out = ecg.ecg(signal=sig_i_np, sampling_rate=Fs, show=False)\n \n # ts (array) – Signal time axis reference (seconds).\n # filtered (array) – Filtered ECG signal.\n # rpeaks (array) – R-peak location indices.\n # templates_ts (array) – Templates time axis reference (seconds).\n # templates (array) – Extracted heartbeat templates.\n # heart_rate_ts (array) – Heart rate time axis reference (seconds).\n # heart_rate (array) – Instantaneous heart rate (bpm).\n\n (ts, filtered_biosppy, rpeaks_biosppy, templates_ts, \n templates, heart_rate_ts, heart_rate) = out\n \n no_rpeaks_biosppy = len(rpeaks_biosppy)\n \n except Exception:\n logging.info(f'biosppy crashed for sample {i} in class {class_id}')\n rpeaks_biosppy = np.nan\n no_rpeaks_biosppy = np.nan\n filtered_biosppy = np.nan\n else:\n rpeaks_biosppy = np.nan\n no_rpeaks_biosppy = np.nan\n filtered_biosppy = np.nan\n\n \n # process ecg sample with neurokit\n # signals, info = nk.ecg_process(sig_i_np, sampling_rate=Fs)\n # use customized function\n try: \n signals, info = ecg_process_AML(sig_i_np, sampling_rate=Fs)\n \n if ecg_quality_check:\n #TODO: keep only the signals with ecg quality above threshold?\n dummy=1\n \n # calculate ecg signal HR indicators\n df_analyze = nk.ecg_analyze(signals, sampling_rate=Fs, method='auto')\n \n # filter signals for peak counts, amplitudes, and QRS event duration\n peak_summary_neurokit = calc_peak_summary(signals=signals, sampling_rate=Fs)\n \n # calculate the mean and standard deviation of the signal quality\n ecg_q_mean = signals['ECG_Quality'].mean() \n ecg_q_std = signals['ECG_Quality'].std()\n \n # consolidate the features for sample i\n feat_i = [df.iloc[i,0]] # init a list with sample id\n feat_i.append(ecg_q_mean)\n feat_i.append(ecg_q_std)\n feat_i.append(df_analyze.iloc[0,0]) #ECG_Rate_Mean\n feat_i.append(df_analyze.iloc[0,1]) #HRV_RMSSD\n 
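# NOTE: the append order below must match 'feature_list' defined in the main section\n 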
feat_i.append(len(rpeaks_biosppy)) #no. of detected r-peaks in biosppy\n for elem in peak_summary_neurokit:\n feat_i.append(elem)\n except Exception:\n logging.info(f'neurokit2 crashed for sample {i} in class {class_id}')\n n = len(feature_list)\n feat_i = [np.nan]*n\n feat_i[0] = df.iloc[i,0] # sample id\n feat_i[5] = no_rpeaks_biosppy #maybe biosppy worked\n \n F[i,:] = feat_i\n plotData = populate_PlotData(plotData,i,df.iloc[i,0],class_id,sig_i_np,rpeaks_biosppy,filtered_biosppy,signals)\n if verbose:\n sample_left = df.shape[0]-i\n print(f'Preprocessed ECG sample {i}({df.iloc[i,0]}) in class {class_id}... {sample_left} samples to go!')\n #TODO: in a suitable container collect the sample id and the signals dataframe (output of neurokit),\n #which contains all the info for the plots\n \n feat_df = pd.DataFrame(data=F,columns=feature_list)\n \n return(feat_df, plotData) \n \n#%% Main\n\nrepopath = '/Users/francescofusaro/Documents/Github/aml2020'\nos.chdir(repopath)\n\n#%% Load data from repo (keep sample id for later use)\nX, y, X_test = load_data(repopath)\n\n#%% Split the original dataframe according to class\nX0, X1, X2, X3 = split_classes(X, y)\n\n#%% Define dataframe template which will be filled with the extracted features\nfeature_list = ['Sample_Id', \n 'ECQ_Quality_Mean', 'ECQ_Quality_STD', \n 'ECG_Rate_Mean', 'ECG_Rate_STD',\n 'R_P_biosppy', 'P_P/R_P', 'Q_P/R_P', 'R_P_neurokit' ,'S_P/R_P', 'T_P/R_P', #relative number of peaks TODO\n 'P_Amp_Mean', 'P_Amp_STD', 'S_Amp_Mean', 'S_Amp_STD',\n 'QRS_t_Mean', 'QRS_t_STD']\n\n\n#%% Feature extraction class 0\nX0_features, X0_plotData = extract_features(df=X0,\n Fs = 300,\n feature_list = feature_list, \n remove_outlier=True, \n biosppy_enabled=True, \n ecg_quality_check=True, \n ecg_quality_threshold=0.8, \n class_id=0,\n verbose=True\n )\n\nX0_features.head()\n#%% Feature extraction class 1\nX1_features, X1_plotData = extract_features(df=X1,\n Fs = 300,\n feature_list = feature_list, \n remove_outlier=False, \n biosppy_enabled=True, \n ecg_quality_check=False, \n ecg_quality_threshold=0.8, \n class_id=1,\n verbose=True\n )\nX1_features.head()\n#%% Feature extraction class 2\nX2_features, X2_plotData = extract_features(df=X2,\n Fs = 300,\n feature_list = feature_list, \n remove_outlier=True, \n biosppy_enabled=True, \n ecg_quality_check=True, \n ecg_quality_threshold=0.8, \n class_id=2,\n verbose=True\n )\nX2_features.head()\n#%% Write pickle or similar\nsave_pickle = True\nif save_pickle:\n df_hash_f = lambda obj: hashlib.sha1(pd.util.hash_pandas_object(obj).values).hexdigest()\n X2_features_hash = df_hash_f(X2_features)","sub_path":"project3_ffu/feat_extr_prj3.py","file_name":"feat_extr_prj3.py","file_ext":"py","file_size_in_byte":13764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"450937721","text":"import numpy as np\nfrom rvv_io import *\nfrom rvv_functions import *\nfrom rvv_solution import *\nfrom rvv_pushers import *\nfrom rvv_fields import *\nfrom rel_col18 import implicit_coll\nfrom rel_sdc2 import *\nfrom gauss_legendre import CollGaussLegendre\nfrom gauss_lobatto import CollGaussLobatto\n\nsims = [10,20,40,80,160,320]\n# sims = [100]\ntend = 1\n\nM = 3\nK = 2\n\nc = 29979\nq = 1\n\n# gamma_max = 1.0000000000005\ngamma_max = 5.\nbeta_max = np.sqrt(1-1./gamma_max**2.)\nuy_max = beta_max*c\nnew = True\n\nfor Nt in sims:\n dt = tend/Nt\n\n nq = 1\n\n pos = np.zeros((nq,3),dtype=float)\n vel = np.zeros((nq,3),dtype=float)\n\n vel[:,1] = 
np.linspace(uy_max/8,uy_max,nq)\n vel[:,2] = np.sqrt(1-1./gamma_max**2.)\n\n pos = np.array([[10.,0.,0.]])\n vel = np.array([[100.,0.,100.]])\n\n # gamma = gu(vel,c=c)\n # lfreq = -q*Bfield/(1*c*gamma)\n # larmor = vel[:,1]/gamma/lfreq\n # #larmor = 1*vel[:,1]/(-q*B)\n # pos[:,0] = larmor\n\n t = 0\n\n x_array = [pos]\n x2_array = [pos]\n v_array = [vel]\n t_array = [t]\n\n col = coll(CollGaussLobatto,dt,nq,K=K,M=M,c=c,q=q,predictor=True)\n rx_array = [np.linalg.norm(col.Rx,axis=1)]\n rv_array = [np.linalg.norm(col.Rv,axis=1)]\n\n # Collocation solution stuff\n posc = np.copy(pos)\n velc = np.copy(vel)\n colc = coll(CollGaussLobatto,dt,nq,M=5,K=1,c=c,q=q)\n\n for ti in range(1,Nt+1):\n t = ti*dt\n\n pos, vel, col = boris_SDC(pos,vel,col)\n # posc, velc, colc = implicit_coll(posc,velc,colc)\n rx_array.append(np.linalg.norm(col.Rx,axis=1))\n rv_array.append(np.linalg.norm(col.Rv,axis=1))\n x2_array.append(posc)\n x_array.append(pos)\n v_array.append(vel)\n t_array.append(t)\n\n # colc.calc_residual_2018(1)\n # col.calc_residual_2018(4)\n # errorx = np.abs(col.x[2:,0,:]-np.around(colc.x[2:,0,:],14))/np.abs(np.around(colc.x[2:,0,:],14))\n # errorf = np.abs(col.F[2:,0,:]-np.around(colc.F[2:,0,:],14))/np.abs(np.around(colc.F[2:,0,:],14))\n # erroru = np.abs(col.u[2:,0,:]-np.around(colc.u[2:,0,:],14))/np.abs(np.around(colc.u[2:,0,:],14))\n # print(\"Diff in x: {0}\".format(errorx))\n # print(\"Diff in F: {0}\".format(errorf))\n # print(\"Diff in u: {0}\".format(erroru))\n # print(\"SDC solution: {0}\".format(col.Rv))\n # print(\"Collocation solution: {0}\".format(colc.Rv))\n rx_array = np.array(rx_array)\n rv_array = np.array(rv_array)\n x_array = np.array(x_array)\n x2_array = np.array(x2_array)\n v_array = np.array(v_array)\n t_array = np.array(t_array)\n\n if col.predictor == True:\n rhs = (M-1)*(K+1)*Nt\n else:\n rhs = (M-1)*K*Nt\n\n wp_dump(t_array,x_array,v_array,dt,\"sdc_M{0}K{1}_wp_vvrel.h5\".format(M,K),rhs=rhs,new=new)\n new = False\n\nplot_xres(t_array,rx_array,\"sdc_M{0}K{1}_\".format(M,K)+str(Nt))\nplot_vres(t_array,rv_array,\"sdc_M{0}K{1}_\".format(M,K)+str(Nt))\nplot_isotraj(x_array,\"sdc_\"+str(Nt),label=\"sim\")\n# plot_isotraj(x2_array,\"col2_\"+str(Nt),label=\"sim\")\nplot_vel(t_array,v_array,\"sdc_\"+str(Nt),label=\"sim\")\n","sub_path":"rvv_sdc.py","file_name":"rvv_sdc.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"147878078","text":"import torch\n# import torch should be first. 
Unclear issue, mentioned here: https://github.com/pytorch/pytorch/issues/2083\nimport torch.nn as nn\n\nfrom collections import OrderedDict\n\nfrom models import TernausNet, unet, checkpointed_unet, inception\n\n\ndef maxpool_level(model, num_bands, size):\n \"\"\"Calculate and return the number of maxpool inside the model definition.\n This function is useful during inference in order to calculate the number of pixels required as context.\n \"\"\"\n def register_hook(module):\n def hook(module, input, output):\n class_name = str(module.__class__).split('.')[-1].split(\"'\")[0]\n module_idx = len(summary)\n\n m_key = '%s-%i' % (class_name, module_idx + 1)\n summary[m_key] = OrderedDict()\n\n if not isinstance(module, nn.Sequential) and not isinstance(module, nn.ModuleList) and not (module == model):\n hooks.append(module.register_forward_hook(hook))\n\n input_size = (num_bands, size, size)\n x = torch.rand(1, *input_size).type(torch.FloatTensor)\n\n summary = OrderedDict()\n hooks = []\n model.apply(register_hook)\n model(x)\n # remove these hooks\n for h in hooks:\n h.remove()\n\n maxpool_count = 0\n for layer in summary:\n if layer.startswith(\"MaxPool2d\"):\n maxpool_count += 1\n return {'MaxPoolCount': maxpool_count}\n\n\ndef net(net_params, rtn_level=False):\n \"\"\"Define the neural net\"\"\"\n model_name = net_params['global']['model_name'].lower()\n state_dict_path = ''\n if model_name == 'unetsmall':\n model = unet.UNetSmall(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'],\n net_params['models']['unetsmall']['dropout'],\n net_params['models']['unetsmall']['probability'])\n if net_params['models']['unetsmall']['pretrained']:\n state_dict_path = net_params['models']['unetsmall']['pretrained']\n elif model_name == 'unet':\n model = unet.UNet(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'],\n net_params['models']['unet']['dropout'],\n net_params['models']['unet']['probability'])\n if net_params['models']['unet']['pretrained']:\n state_dict_path = net_params['models']['unet']['pretrained']\n elif model_name == 'ternausnet':\n model = TernausNet.ternausnet(net_params['global']['num_classes'],\n net_params['models']['ternausnet']['pretrained'])\n elif model_name == 'checkpointed_unet':\n model = checkpointed_unet.UNetSmall(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'],\n net_params['models']['unetsmall']['dropout'],\n net_params['models']['unetsmall']['probability'])\n if net_params['models']['unetsmall']['pretrained']:\n state_dict_path = net_params['models']['unetsmall']['pretrained']\n elif model_name == 'inception':\n model = inception.Inception3(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'])\n if net_params['models']['inception']['pretrained']:\n state_dict_path = net_params['models']['inception']['pretrained']\n else:\n raise ValueError('The model name in the config.yaml is not defined.')\n\n if rtn_level:\n lvl = maxpool_level(model, net_params['global']['number_of_bands'], 256)\n return model, state_dict_path, lvl['MaxPoolCount']\n else:\n return model, state_dict_path, model_name\n","sub_path":"models/model_choice.py","file_name":"model_choice.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"512255318","text":"\"\"\"Sorted, reversed and lambda functions\"\"\"\r\n\r\nlst=['james','smith','blake','king','meena']\r\n\r\nfor i in sorted(lst): #Sorted\r\n 
print(i,end=\" \")\r\n\r\nprint()\r\n\r\nfor i in reversed(lst): #reversed\r\n print(i,end=\" \")\r\n\r\n\r\ndef mys(lst):\r\n return lst[-1]\r\n\r\nlst.sort(key=mys)\r\n\r\n\r\nlst.sort(Key=lambda name:name[-1]) #to sort the list based on last letter\r\n #using lamba function\r\n","sub_path":"26-11 sorted.py","file_name":"26-11 sorted.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"227051920","text":"from collections import OrderedDict\nfrom ming import create_datastore\nfrom stores.mongo_store import MongoStore\nfrom unittest import TestCase\n\nurl = 'mongodb://localhost:27017/metahosting_tests'\n\n\nclass MongoStoreTest(TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n conn = create_datastore(url).conn\n conn.drop_database('metahosting_tests')\n\n def get_store(self):\n config = dict()\n config['url'] = url\n config['database'] = 'metahosting_tests'\n config['collection'] = 'tests'\n return MongoStore(config=config)\n\n def test_update(self):\n store = self.get_store()\n self.assertIsNone(store.get('name'))\n store.update('name', 'value')\n self.assertIsNotNone(store.get('name'))\n store.update('name', 'new_value')\n self.assertEqual(store.get('name'), 'new_value')\n\n def test_get(self):\n store = self.get_store()\n self.assertIsNone(store.get('foo'))\n store.update('foo', 'bar')\n self.assertIsNotNone(store.get('foo'))\n self.assertEqual('bar', store.get('foo'))\n\n def test_get_all(self):\n store = self.get_store()\n ret = store.get_all()\n self.assertDictEqual({}, ret)\n a = dict()\n a['foo'] = 'bar'\n a['foo2'] = 'bar2'\n a[1] = 'some'\n a['foo3'] = 21211\n for key, value in a.iteritems():\n store.update(key, value)\n\n self.assertDictEqual(a, store.get_all())\n\n def test_get_all_sorted(self):\n store = self.get_store()\n ret = store.get_all(sort_key='anything')\n self.assertDictEqual({}, ret)\n\n a = OrderedDict()\n a['foo'] = 'bar'\n a['foo2'] = 'bar2'\n a[1] = 'some'\n a['foo3'] = 21211\n for key, value in a.iteritems():\n store.update(key, value)\n\n self.assertDictEqual(a, store.get_all(sort_key='anything'))\n\n def test_update_with_dict(self):\n store = self.get_store()\n simple_dict = dict()\n simple_dict['a'] = {'foo': 'bar'}\n store.update('foo', simple_dict)\n simple_dict['a'] = {'ooo': 'barr'}\n ret = store.get('foo')\n self.assertTrue('a' in ret)\n self.assertFalse('b' in ret)\n ret['c'] = {'foo': 'ba'}\n ret2 = store.get('foo')\n self.assertFalse('c' in ret2)\n\n def test_constrained(self):\n store = self.get_store()\n for i in range(0, 10):\n store.update('key%s' % i, {'foo': 'bar',\n 'numerical': i,\n 'textual': '%sval' % i})\n\n key = 'value.numerical'\n result = store.get_constrained({key: {'$lt': 0}})\n self.assertEqual(len(result), 0)\n\n result = store.get_constrained({key: {'$lt': 5}})\n self.assertEqual(len(result), 5)\n\n result = store.get_constrained({key: {'$lt': 100}})\n self.assertEqual(len(result), 10)\n\n # mongo 2.4 (as provided by travis) does not have $eq\n key = 'value.textual'\n result = store.get_constrained({key: '8val'})\n self.assertEqual(len(result), 1)\n\n key = 'value.foo'\n result = store.get_constrained({key: 'bar'})\n self.assertEqual(len(result), 10)\n\n result = store.get_constrained({})\n self.assertEqual(len(result), 10)\n\n def get_service(self, name, description, availability):\n service = dict()\n service['name'] = name\n service['description'] = description\n service['available'] = availability\n return service\n\n 
def test_alternative_type_retrieval(self):\n store = self.get_store()\n service = self.get_service('neo4j', 'database', True)\n store.update(service['name'], service)\n service = self.get_service('eXist', 'XML', True)\n store.update(service['name'], service)\n service = self.get_service('voyant', 'GUI', False)\n store.update(service['name'], service)\n\n result = store.get_all()\n self.assertEqual(len(result), 3)\n\n result = store.get_constrained(constrain={'value.available': True})\n self.assertEqual(len(result), 2)\n\n result = store.get_constrained(constrain={'value.available': False})\n self.assertEqual(len(result), 1)\n\n service = self.get_service('neo5j', 'New database', True)\n service.pop('available')\n store.update(service['name'], service)\n\n result = store.get_constrained(constrain={'value.available': True})\n self.assertEqual(len(result), 2)\n\n result = store.get_constrained(constrain={'value.available': False})\n self.assertEqual(len(result), 1)\n\n result = store.get_constrained(constrain={})\n self.assertEqual(len(result), 4)\n","sub_path":"tests/test_mongo_store.py","file_name":"test_mongo_store.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"157021048","text":"#!/usr/bin/python\n# -*-coding:utf-8 -*\n\nimport curses\nimport curses.wrapper\nimport curses.panel\nimport curses.textpad\nimport sys\nimport pickle\nimport os\nimport datetime\nimport locale\n\nclass Liste():\n\t\n\tdef __init__(self, index, titre, descr, prior=\"L\"):\n\t\tself.creation = (datetime.date.today())\n\t\tself.ref = index\n\t\tself.titre = titre\n\t\tself.descr = descr\n\t\tself.prior = prior\n\t\t\n\tdef __repr__(self):\n\t\treturn \"({0}) - {3}\\n\t{1}: {2}\\n\".format(\\\n\t\tself.ref, self.titre, self.descr, self.creation)\n\t\n\tdef __getstate__(self):\n\t\tdict_attr = dict(self.__dict__)\n\t\tdict_attr[\"attribut_temporaire\"] = 0\n\t\treturn dict_attr\n\t\n\tdef __setstate__(self, dict_attr):\n\t\tdict_attr[\"attribut_temporaire\"] = 0\n\t\tself.__dict__ = dict_attr\n\ndef getMaxYX(self, char):\n\tmaxYX = self.getmaxyx()\n\tif char == 'X':\n\t\treturn maxYX[1]\n\tif char == 'Y':\n\t\treturn maxYX[0]\n\t\t\n\t\ndef about_box(pan_about, about):\n\tpan_about.show()\n\tabout.getch()\n\tpan_about.hide()\n\t\ndef Index():\n\tif os.path.exists(\"index\"):\n\t\tindex = open(\"index\", \"r\")\n\t\tnbItems = int(index.read())\n\t\tindex.close()\n\t\treturn nbItems\n\telse:\n\t\tindex = open(\"index\", \"w\")\n\t\tindex.write(str(0))\n\t\tindex.close()\n\t\treturn Index()\n\t\t\n\ndef UpdateIndex(nbItems, test):\n\tindex = open(\"index\", \"w\")\n\tif test == \"up\":\n\t\tindex.write(str(nbItems + 1))\n\telse:\n\t\tindex.write(str(nbItems - 1))\n\tindex.close()\n\t\ndef RecupDonneesBis(nbItems):\n\tmesNotes = []\n\tif os.path.exists(\"todo_liste\"):\n\t\twith open('todo_liste', 'rb') as todo:\n\t\t\tmy_pickler = pickle.Unpickler(todo)\n\t\t\ti = 1\n\t\t\twhile i <= nbItems:\n\t\t\t\tmesNotes.append(my_pickler.load())\n\t\t\t\ti += 1\n\t\t\treturn mesNotes\n\telse:\n\t\treturn mesNotes\n\t\t\ndef EnregDonnees(nbItems, mesNotes):\n\twith open('todo_liste', 'wb') as todo:\n\t\tmy_pickler = pickle.Pickler(todo)\n\t\ti = 1\n\t\twhile i <= nbItems:\n\t\t\tmy_pickler.dump(mesNotes[i-1])\n\t\t\ti += 1\n\ndef AffichageNote_bis():\n\tliste_note = []\n\tliste_note = RecupDonneesBis(Index())\n\treturn liste_note\n\t\t\ndef NouvelleNoteBis(titre, descr):\n\t\n\ttitre = titre[2:len(titre)-1]\n\tdescr = 
descr[2:len(descr)-1]\n\t\n\tmesNotes = RecupDonneesBis(Index())\n\tUpdateIndex(Index(), \"up\")\n\tmesNotes.append(Liste(Index(), titre.capitalize(), descr.capitalize()))\n\tEnregDonnees(Index(), mesNotes)\n\t\ndef SupressionNoteBis(id_note):\n\tif Index() != 0:\n\t\tmesNotes = RecupDonneesBis(Index())\n\t\tdel mesNotes[id_note - 1]\n\t\tUpdateIndex(Index(), \"down\")\n\t\ti = 1\n\t\twhile i <= Index():\n\t\t\tmesNotes[i-1].ref = i\n\t\t\ti += 1\n\t\tEnregDonnees(Index(), mesNotes)\n\n\nclass TODO(object):\n\t\t\n\tmenu = [\"n:\",\"New\",\"d:\",\"Delete\",\"t:\",\"Toggle\",\"q:\",\"Quit\",\"a:\",\"About\"]\n\titem = 1\n\t\n\tdef __init__(self, scr):\n\t\tself.scr = scr\n\t\tself.init_curses_mode()\n\t\n\tdef init_curses_mode(self):\n\t\tself.init_curses()\n\t\tself.draw_menu_bottom()\n\t\tself.draw_menu_top()\n\t\tself.draw_notes()\n\t\tself.show_notes()\n\t\tself.create_about()\n\t\tself.handle_key_stroke()\n\n\tdef init_curses(self):\n\t\tlocale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')\n\t\tcurses.noecho()\n\t\tcurses.cbreak()\n\t\tcurses.curs_set(0)\n\t\tself.scr.keypad(1)\n\t\tself.scr.erase()\n\t\tcurses.use_default_colors()\n\t\tcurses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)\n\t\tcurses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)\n\t\tcurses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_RED)\n\t\tcurses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_BLUE)\n\t\tcurses.init_pair(5, curses.COLOR_BLACK, curses.COLOR_YELLOW)\n\t\tcurses.init_pair(6, curses.COLOR_BLUE, -1)\n\t\n\tdef draw_menu_bottom(self):\n\t\tself.maxyx = self.scr.getmaxyx()\n\t\tself.bot = self.scr.subwin(3, self.maxyx[1], self.maxyx[0] - 3, 0)\n\t\t\n\t\tj=2\n\t\tfor i, elt in enumerate(self.menu):\n\t\t\tif i%2 == 0:\n\t\t\t\tself.bot.addstr(1, j, elt, curses.A_BOLD)\n\t\t\t\tj += len(elt)\n\t\t\telse:\n\t\t\t\tself.bot.addstr(1, j, elt, curses.color_pair(1))\n\t\t\t\tj += len(elt) + 2\n\t\t\t\t\n\t\tself.bot.addstr(1, self.maxyx[1] - 5, \"v1.0\")\n\t\t#self.bot.border(0)\n\t\tself.bot.refresh()\n\t\t\n\tdef draw_menu_top(self):\n\t\tself.top = self.scr.subwin(self.maxyx[0] - 3, self.maxyx[1], 0, 0)\n\t\t\n\t\tself.pad = self.top.subpad(3, self.maxyx[1] - 2, 0, 1)\n\t\t\n\t\tfor y in range(0,3):\n\t\t\tfor x in range(0, self.maxyx[1] - 2):\n\t\t\t\ttry: self.pad.addstr(y, x, \" \", curses.color_pair(1))\n\t\t\t\texcept curses.error: pass\n\t\t\t\t\n\t\tself.top.attron(curses.A_BOLD)\t\t\n\t\tself.top.addstr(1, 2, \"TODO - Notes Manager\")\n\t\tself.top.attroff(curses.A_BOLD)\t\n\t\tself.pad.refresh()\n\t\tself.top.refresh()\n\t\t\n\tdef draw_notes(self):\n\t\tself.notes = self.top.subwin(self.maxyx[0] - 6, self.maxyx[1], 3, 0)\n\n\t\tif Index() != 0:\n\t\t\tself.notes.addstr(1, self.maxyx[1] - 15, \"{0} Notes saved\".format(Index()), curses.A_BLINK)\n\t\telse:\n\t\t\tself.notes.addstr(1, self.maxyx[1] - 15, \"0 Note saved\")\n\t\t#self.notes.border(0)\n\t\tself.notes.refresh()\n\t\t\t\n\tdef show_notes(self):\n\t\tmesNotes = []\n\t\tmesNotes = AffichageNote_bis()\n\t\t\n\t\tmaxY = getMaxYX(self.notes, 'Y')\n\t\t\n\t\tj=5\n\t\theight = maxY - 3\n\t\t\n\t\tfor i, elt in enumerate(mesNotes):\n\t\t\t\n\t\t\tif i + 1 >=self.item:\n\t\t\t\tif height >= 4:\n\t\t\t\t\tcadre = \"notes_\" + str(i)\n\t\t\t\t\tself.cadre = self.notes.subwin(4, self.maxyx[1] - 2, j, 1)\n\t\t\t\t\t\n\t\t\t\t\tif mesNotes[i].ref == self.item:\n\t\t\t\t\t\tself.cadre.addstr(1, 4, \"N°{0} - {1}\".format(mesNotes[i].ref, mesNotes[i].titre), curses.color_pair(5))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.cadre.addstr(1, 4, \"N°{0} - 
{1}\".format(mesNotes[i].ref, mesNotes[i].titre), curses.color_pair(1))\n\t\t\t\t\t\t\n\t\t\t\t\tif mesNotes[i].prior == \"L\":\n\t\t\t\t\t\tself.cadre.addstr(1, 2, \"{0}\".format(mesNotes[i].prior), curses.color_pair(2))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.cadre.addstr(1, 2, \"{0}\".format(mesNotes[i].prior), curses.color_pair(3))\n\t\t\t\t\t\t\n\t\t\t\t\tk = 1\n\t\t\t\t\ta = 0\n\t\t\t\t\tb = getMaxYX(self.scr, 'X') - 33\n\t\t\t\t\twhile k<=2:\n\t\t\t\t\t\tchaine = mesNotes[i].descr\n\t\t\t\t\t\tchaine = chaine[a:b]\n\t\t\t\t\t\tself.cadre.addstr(k, 30, \"{0}\".format(chaine))\n\t\t\t\t\t\ta = b\n\t\t\t\t\t\tb = 2 * b\n\t\t\t\t\t\tk += 1\n\t\t\t\t\t\n\t\t\t\t\tself.cadre.addstr(2, 4, \"{0}\".format(mesNotes[i].creation))\n\t\t\t\t\t\n\t\t\t\t\tself.cadre.border(1,1,0,0,0,0,0,0)\n\t\t\t\t\tself.cadre.refresh()\n\t\t\t\t\tj += 4\n\t\t\t\t\tk += 2\n\t\t\t\t\theight -= 4\n\t\tself.notes.vline(3,29,'|',self.maxyx[0] - 8)\n\t\t\t\t\n\tdef navigate_up(self):\n\t\tif(self.item >= 2):\n\t\t\tself.item -= 1\n\t\t\tself.affichage_refresh()\n\t\t\t\n\tdef navigate_down(self):\n\t\tif(self.item < Index()):\n\t\t\tself.item += 1\n\t\t\tself.affichage_refresh()\n\t\t\t\n\tdef suppression(self):\n\t\tSupressionNoteBis(int(self.item))\n\t\tif self.item != 1:\n\t\t\tself.item -= 1\n\t\tself.affichage_refresh()\n\t\t\n\tdef create_about(self):\n\t\theight = 9\n\t\twidth = 47\n\t\t\n\t\tself.about = curses.newwin(height, width, int(1 / 2 * getMaxYX(self.scr, 'Y') - height / 2), int(1 / 2 * getMaxYX(self.scr, 'X') - width / 2))\n\t\tself.about.border(0)\n\t\tself.about.addstr(1, width - 11 , \"About TODO\", curses.color_pair(6))\n\t\tself.about.addstr(2, 1, \" ***** * ** *\")\n\t\tself.about.addstr(3, 1, \" * * * * * * *\")\n\t\tself.about.addstr(4, 1, \" * * * * * * * Brought to you by:\")\n\t\tself.about.addstr(5, 1, \"* * *** * Vincent Cottineau\")\n\t\tfor x in range(1, width - 1):\n\t\t\tself.about.addch(7, x, \"-\")\n\t\tself.about.addstr(8, 15, \"<>\")\n\t\tself.pan_about = curses.panel.new_panel(self.about)\n\t\t\n\tdef affichage_refresh(self):\n\t\tself.scr.erase()\n\t\tself.draw_menu_bottom()\n\t\tself.draw_menu_top()\n\t\tself.draw_notes()\n\t\tself.show_notes()\n\t\tself.create_about()\n\t\tself.scr.refresh()\n\t\tself.bot.refresh()\n\t\tself.top.refresh()\n\t\tself.notes.refresh()\n\t\treturn True\n\t\t\n\tdef toggle(self):\n\t\tmesNotes = []\n\t\tmesNotes = AffichageNote_bis()\n\t\tif(mesNotes[int(self.item) - 1].prior) == 'L':\n\t\t\tmesNotes[int(self.item) - 1].prior = 'H'\n\t\telse:\n\t\t\tmesNotes[int(self.item) - 1].prior = 'L'\n\t\tEnregDonnees(Index(), mesNotes)\n\t\tself.affichage_refresh()\n\t\t\n\tdef nouvelle_entree(self):\n\t\tself.scr.erase()\n\t\tself.draw_menu_top()\n\t\tself.draw_notes()\n\t\tself.bot.refresh()\n\t\t\n\t\tself.cadre_new = self.notes.subwin(4, getMaxYX(self.scr, 'X') - 2, 5, 1)\n\t\t#self.cadre_new.border(1,1,0,0,0,0,0,0)\n\t\t\n\t\tself.cadre_new.addstr(1, 4, \"N°1 -\", curses.color_pair(1))\n\t\tself.cadre_new.addstr(1, 2, \"L\", curses.color_pair(2))\n\t\tself.cadre_new.addstr(2, 4, \"{0}\".format(datetime.date.today()))\n\t\t\t\t\n\t\tcurses.echo()#;curses.nocbreak()\n\t\tcurses.textpad.rectangle(self.cadre_new, 0, 9, 2, 25)\n\t\tinp_1 = self.cadre_new.getstr(1,10,15)\n\t\t\n\t\tcurses.textpad.rectangle(self.cadre_new, 0, 29, 2, getMaxYX(self.scr, 'X') - 4)\n\t\tinp_2 = self.cadre_new.getstr(1, 30, getMaxYX(self.scr, 'X') - 34)\n\t\t\n\t\tNouvelleNoteBis(str(inp_1), 
str(inp_2))\n\t\t\n\t\tself.cadre_new.erase()\n\t\tcurses.noecho()\n\t\tself.affichage_refresh()\n\t\t\n\t\t\t\t\n\tdef handle_key_stroke(self):\n\t\twhile True:\n\t\t\tch = self.scr.getch()\n\t\t\tif ch == ord('q'):\n\t\t\t\tcurses.endwin()\n\t\t\t\tbreak\n\t\t\telif ch == ord('d'):\n\t\t\t\tself.suppression()\n\t\t\telif ch == ord('a'):\n\t\t\t\tabout_box(self.pan_about, self.about)\n\t\t\telif ch == ord('n'):\n\t\t\t\tself.nouvelle_entree()\n\t\t\telif ch == ord('t'):\n\t\t\t\tself.toggle()\n\t\t\telif ch == curses.KEY_RESIZE:\n\t\t\t\tself.affichage_refresh()\n\t\t\telif ch == curses.KEY_UP:\n\t\t\t\tself.navigate_up()\n\t\t\telif ch == curses.KEY_DOWN:\n\t\t\t\tself.navigate_down()\n\t\t\nif __name__ == '__main__':\n\t\t\t\n\tdirectory = os.environ.get(\"HOME\") + \"/.todo\"\n\t\n\tif os.path.isdir(directory) == True:\n\t\tsys.stdout.write(directory)\n\t\tos.chdir(directory)\n\telse:\n\t\tos.mkdir(directory)\n\t\tos.chdir(directory)\n\tcurses.wrapper(TODO)\n\n","sub_path":"2do-curses.py","file_name":"2do-curses.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"283174892","text":"from time import perf_counter\nfrom functools import reduce\n\n\ndef secondsToStr(t):\n return \"%d:%02d:%02d.%03d\" % \\\n reduce(lambda ll, b: divmod(ll[0], b) + ll[1:],\n [(t * 1000,), 1000, 60, 60])\n\n\ndef now():\n return secondsToStr(perf_counter())\n\n\nclass Timing:\n line = \"=\" * 40\n\n def __init__(self, name):\n self.name = name\n self.log(\"Starting Test: \"+name)\n self.start = perf_counter()\n\n def log(self, s, elapsed=None):\n print(self.line)\n print(secondsToStr(perf_counter()), '-', s)\n if elapsed:\n print(\"Elapsed time:\", elapsed)\n print(self.line)\n print()\n\n def end_log(self):\n end = perf_counter()\n elapsed = end - self.start\n self.log(\"End Program\", secondsToStr(elapsed))\n\n#timer = Timing()\n#timer.start = clock()\n#atexit.register(timer.end_log)\n#timer.log(\"Start Program\")\n","sub_path":"CS420 Project/CaffeVideoDetection/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"508824890","text":"\nfrom Parser import Parser\nfrom Colorizer import Model\nfrom skimage.io import imsave\nimport numpy as np\n\nparser = Parser( 64 )\nmodel = Model()\n\nX = parser.prepare_images_from_dir( 'train_images/' , 'grayscale' )\nY = parser.prepare_images_from_dir( 'train_images/' )\ntest_X = parser.prepare_images_from_dir( 'test_images/' , 'grayscale' )\n\nnp.save( 'sample_data/X.npy' , X )\nnp.save( 'sample_data/Y.npy' , Y )\nnp.save( 'sample_data/test_X.npy' , test_X )\nprint( 'data processed' )\n\nmodel.load_model( 'models/final_model.h5' )\n\n#model.fit( X , Y , number_of_epochs=100 )\n#model.save_model( 'models/model.h5')\n\nvalues = model.predict( test_X )\nvalues = np.maximum( values , 0 )\nfor i in range( test_X.shape[0] ):\n image_final = ( values[i] * 255).astype( np.uint8)\n imsave( 'predictions/{}.png'.format( i + 1 ) , image_final )\n\n","sub_path":"MainFile.py","file_name":"MainFile.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"617960704","text":"import sys\nimport tkinter as tk\nfrom tkinter import *\nimport urllib.request\nimport webbrowser\nfrom functools import partial\nfrom tkinter import Tk, StringVar , ttk\nfrom . 
import convert_temp\n\n\ndef TemperatureConverter():\n def convert():\n celTemp = celTempVar.get()\n fahTemp = fahTempVar.get()\n kelTemp = kelTempVar.get()\n\n if celTempVar.get() != 0.0:\n fahTempVar.set(convert_temp.convert_celToFah(celTemp))\n kelTempVar.set(convert_temp.covert_celToKel(celTemp))\n\n elif fahTempVar.get() != 0.0:\n celTempVar.set(convert_temp.convert_fahToCel(fahTemp))\n kelTempVar.set(convert_temp.convert_fahToKel(fahTemp))\n \n elif kelTempVar.get() !=0.0:\n celTempVar.set(convert_temp.convert_kelToCel(kelTemp))\n fahTempVar.set(convert_temp.convert_kelTofah(kelTemp))\n \n\n def reset():\n top = Toplevel(padx=50, pady=50)\n top.grid()\n message = Label(top, text = \"Reset Complete\")\n button = Button(top, text=\"OK\", command=top.destroy)\n\n message.grid(row = 0, padx = 5, pady = 5)\n button.grid(row = 1, ipadx = 10, ipady = 10, padx = 5, pady = 5)\n\n fahTempVar.set(int(0))\n celTempVar.set(int(0))\n kelTempVar.set(int(0))\n \n top = Toplevel()\n top.title(\"Temperature Converter\")\n \n celTempVar = IntVar()\n celTempVar.set(int(0))\n fahTempVar = IntVar()\n fahTempVar.set(int(0))\n kelTempVar = IntVar()\n kelTempVar.set(int(0))\n titleLabel = Label (top, text = \"Temperature Converter\", font = (\"Arial\", 12, \"bold\"), justify = CENTER).grid(column=1,row=1)\n \n\n celLabel = Label (top, text = \"Celsius: \", font = (\"Arial\", 16), fg = \"red\")\n celLabel.grid(row = 2, column = 1, pady = 10, sticky = NW)\n\n fahLabel = Label (top, text = \"Fahrenheit: \", font = (\"Arial\", 16), fg = \"blue\")\n fahLabel.grid(row = 3, column = 1, pady = 10, sticky = NW)\n \n kelLabel = Label (top, text = \"Kelvin: \", font = (\"Arial\", 16), fg = \"black\")\n kelLabel.grid(row = 4, column = 1, pady = 10, sticky = NW)\n\n celEntry = Entry (top, width = 10, bd = 5, textvariable = celTempVar)\n celEntry.grid(row = 2, column = 1, pady = 10, sticky = NW, padx = 125 )\n\n\n fahEntry = Entry (top, width = 10, bd = 5, textvariable = fahTempVar)\n fahEntry.grid(row = 3, column = 1, pady = 10, sticky = NW, padx = 125 )\n \n kelEntry = Entry (top, width = 10, bd = 5, textvariable = kelTempVar)\n kelEntry.grid(row = 4, column = 1, pady = 10, sticky = NW, padx = 125 )\n\n convertButton =Button (top, text = \"Convert\", font = (\"Arial\", 8, \"bold\"), relief = RAISED, bd=5, justify = CENTER, highlightbackground = \"red\", overrelief = GROOVE, activebackground = \"green\", activeforeground=\"blue\", command = convert)\n convertButton.grid(row = 5, column = 1, ipady = 8, ipadx = 12, pady = 5, sticky = NW, padx = 55)\n\n resetButton = Button (top, text = \"Reset\", font = (\"Arial\", 8, \"bold\"), relief = RAISED, bd=5, justify = CENTER, highlightbackground = \"red\", overrelief = GROOVE, activebackground = \"green\", activeforeground=\"blue\", command = reset)\n resetButton.grid(row = 5, column = 2,ipady = 8, ipadx = 12, pady = 5, sticky = NW)","sub_path":"src/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"611875683","text":"__author__ = 'ian'\n\nfrom django.conf.urls import patterns, include, url\n\nfrom abstracts import views\n\nurlpatterns = patterns('',\n # ex: /abstracts/\n url(r'^$', views.index, name='index'),\n url(r'journals$', views.get_all_journals, name='get_all_journals'),\n # ex: /abstracts/nature/abstracts\n\n url(r'^(?P<journal_name>[\s\S]+)/abstracts/$', views.get_all_abstracts_for_journal, 
name=\"abstracts_by_journal\")\n)\n\n\n","sub_path":"Python/AbstractsApp/abstracts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"617703714","text":"import pytest\nfrom app import ProcessPayment\nfrom providers import CheapPaymentGateway, PremiumPaymentGateway, ExpensivePaymentGateway\nimport requests\nimport json\n\n\nurl = 'http://127.0.0.1:5000/api/v1/payment'\n\nPAYMENT_PROCESSED = ({\"message:\" : \"Payment is processed.\"}, 200)\nINVALID_REQUEST = ({\"error:\" : [\"The request is invalid.\"]}, 400)\nINTERAL_SERVER_ERROR = ({\"error:\" : [\"An error occur while processing the request.\"]}, 500)\n\n@pytest.fixture(scope='module')\ndef request_data():\n payment_info = { \"creditCardNumber\": \"6069980060280276\",\n\t\t\t\t \"cardHolder\": 12222.5,\n\t\t\t\t \"expirationDate\": \"11/25\",\n\t\t\t\t \"securityCode\": \"234\",\n\t\t\t\t \"amount\": 501.7 }\n return payment_info\n\n\ndef test_invalid_credit_card_number(request_data):\n\trequest_data[\"creditCardNumber\"] = \"sadndnsad636363\"\n\tres_data = requests.post(url, data = request_data)\n\tassert 500 == res_data.status_code\n\n\ndef test_validation_credit_card_number(request_data):\n\trequest_data[\"creditCardNumber\"] = \"6069980060280276\"\n\tres_data = requests.post(url, data = request_data)\n\tassert 200 == res_data.status_code\n\tjson_response=json.dumps(res_data.json())\n\n\ndef test_validation_invalid_expiration_date(request_data):\n\trequest_data[\"expirationDate\"]= \"25/00\"\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\ndef test_validation_mandatory_data(request_data):\n\tdel request_data[\"expirationDate\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\tdel request_data[\"creditCardNumber\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\tdel request_data[\"amount\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\tdel request_data[\"cardHolder\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PaymentApp/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"567606631","text":"from flask import Flask, request, render_template\n\nfrom apis import cat_img_api, cat_fact_api, cat_video_api\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home_page():\n return render_template('index.html')\n\n\n@app.route('/get-cat')\ndef get_cat():\n category = request.args.get('category') #or 'space' # set a default\n\n cat_img_url = cat_img_api.get_cat(category)\n cat_fact = cat_fact_api.get_random_fact()\n cat_video = cat_video_api.cat_video(category)\n\n if cat_img_url and cat_fact and cat_video:\n return render_template('cat.html', cat_img=cat_img_url, category=category, cat_fact=cat_fact, cat_video=cat_video)\n else:\n return render_template('error.html')\n\n\nif __name__ == '__main__':\n 
app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"365641935","text":"import gym\nimport torch\nimport math\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom csv import writer, reader\n\nimport torch.optim as optim\nimport torch.distributions.categorical as categorical\nfrom gym_miniworld.wrappers import *\nfrom A2CNN3 import *\nfrom rpm2 import rpm\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass Agent(object):\n def __init__(self, **kwargs):\n self.lr_act = 0.0009\n print(self.lr_act)\n self.lr_crit = 0\n self.batch_size = 64\n self.atoms = 80\n self.actions = 3\n self.channels = 9\n self.gamma = 0.65\n self.lambdaEntrop = 0.32\n print(self.lambdaEntrop)\n self.lambdaCrit = 0.41667\n self.weightDecay = False\n self.actor = CNNBase(self.channels, self.actions, self.atoms)\n self.optimizer_actor = optim.RMSprop(self.actor.parameters(), lr= self.lr_act, alpha=0.88, eps=1e-5)#, alpha= 0.99, eps=1e-5)#, weight_decay=self.weightDecay)\n self.memory = rpm(250000)\n self.maxReward = 0\n self.minFrame = 0\n self.AveRew = 0\n self.bestEps = 0\n self.ModUpdate = 0\n self.Good = False\n self.maxSteps = 360\n\n def get_action(self, state):\n with torch.no_grad():\n self.eval()\n state = state.to(dtype=torch.float, device=device)\n state = state.reshape([1] + list(state.shape))\n a, val = self.actor.act(state)\n\n log = 0.99\n return int (a), val, log\n\n def learn(self, frame):\n\n self.train()\n #ac_loss = 0\n _actor_loss = 0\n _critic_loss = 0\n Qval = 0\n Qvals = []\n #state_batch, action_batch, next_state_batch, reward_batch, log_batch, value_batch = self.memory.sample_spec(frame)\n state_batch, action_batch, next_state_batch, reward_batch, log_batch, value_batch, done_batch = self.memory.sample(frame)\n state_batch = state_batch.to(dtype=torch.float, device=device)\n action_batch = action_batch.to(dtype=torch.float, device=device)\n reward_batch = reward_batch.to(dtype=torch.float, device=device)\n next_state_batch = next_state_batch.to(dtype=torch.float, device=device)\n done_batch = done_batch.to(dtype=torch.float, device=device)\n #print(next_state_batch.size()) #[12,3,60,80]\n #print(\"Log\", log_batch.size()) #[12,1]\n\n #print(action_batch)\n vals, logs, entropy = self.actor.evaluate_actions(state_batch, action_batch)\n vals = vals.to(dtype=torch.float, device=device)\n entropy = entropy.to(dtype=torch.float, device=device)\n new_vals, _, _ = self.actor.evaluate_actions(next_state_batch, action_batch)\n new_vals = new_vals.to(dtype=torch.float, device=device)\n advantages = (reward_batch + (1-done_batch)*self.gamma*new_vals- vals).to(device)\n critic_loss = advantages.pow(2).mean()\n actor_loss = -(advantages.detach() * logs).mean()\n loss = (actor_loss+critic_loss*self.lambdaCrit -self.lambdaEntrop*entropy).to(device)\n #print(loss)\n self.optimizer_actor.zero_grad()\n\n # Calculate gradients\n loss.backward()\n #ac_loss.backward()\n # Apply gradients\n self.optimizer_actor.step()\n\n with torch.no_grad():\n #ac_loss = float(ac_loss)\n _actor_loss = float(actor_loss)\n _critic_loss = float(critic_loss)\n\n return loss, actor_loss, critic_loss, entropy\n\n def train(self):\n self.actor.train()\n\n def eval(self):\n self.actor.eval()\n\n def save_model(self):\n torch.save(self.actor.state_dict(),'A2C.pkl')\n 
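# note: torch.save here stores only the network weights; optimizer state is not checkpointed\n 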
#self.memory.save_ipt(path)\n\n def load_model(self, path):\n self.actor.load_state_dict(torch.load(path + 'A2C.pkl'))\n # self.memory.load_ipt(path)\n\n\n def step(self, steps, env, m_obs, i_episode):\n #print(\"steps\", steps) #250\n m_reward = [0 for _ in range(10)]\n m_action = [torch.FloatTensor([0]) for _ in range(10)]\n m_value = [torch.FloatTensor([0]) for _ in range(10)]\n m_log = [torch.FloatTensor([0]) for _ in range(10)]\n m_done = [torch.FloatTensor([0]) for _ in range(10)]\n state = [state_to(m_obs[-3:]) for _ in range(10)] # the last 3 items\n #print(\"state: \", type(state), len(state))\n _reward =[]\n done = False\n frame = 0\n batch_frame = 0\n while frame 0\n reward = sum(_reward)\n if important and not self.Good:\n for i in reversed(range(2,3)):\n gam = pow(self.gamma, i)\n rew = torch.FloatTensor([gam*m_reward[-1]])\n #print(str(-1-i))\n self.memory.push([state[-1-i], m_action[-i], state[-i], rew, m_log[-i], m_value[-i], m_done[-i]],\n important)\n #vf important = r>5\n self.memory.push([state[-2], m_action[-1], state[-1], m_reward[-1], m_log[-1], m_value[-1], m_done[-1]],\n important)\n #print(batch_frame)\n if batch_frame == self.batch_size:\n #print(\"Update time\")\n loss, aclos, critlos, entropy = self.learn(batch_frame)\n batch_frame = 1\n #do_print(loss, aclos, critlos, entropy)\n # if ((entropy < 0.25) and (reward > 1)) and i_episode>5:\n # #print(\"TEST\", reward, entropy)\n # #self.save_model('train/test/')\n # #self.test(m_obs, m_reward, m_log, m_value, m_action, state)\n # #self.Good = True\n # else:\n # self.Good = False\n\n # If done, batch data\n if done:\n obs = env.reset()\n if frame == steps:\n\n loss, aclos, critlos, entropy = self.learn(batch_frame)\n do_print(loss, aclos, critlos, entropy)\n\n #obs = env.reset()\n\n return reward, frame, loss, entropy\n\ndef np2torch(s):\n state = torch.from_numpy(s.copy())\n state.to(dtype=torch.float)\n #state = state.reshape([1] + list(state.shape))\n return state.to(dtype=torch.float, device=device)#, device=device)\n\ndef state_to(pov):\n state = torch.cat(pov, 2) #concatenates given sequence of tensors in given dimension\n state = state.permute(2, 0, 1) #permute dimensions of tensor\n return state.to(dtype=torch.float, device=device)#.to(torch.device('cpu'))\n\ndef do_print(loss, aclos, critlos, entropy):\n print('loss %2.7f acloss %2.7f critloss %2.7f entropy %2.7f' % \\\n (loss, aclos, critlos, entropy))\n\n\ndef envstep(env, action_num):\n reward = 0\n #print(action)\n obs, rew, done, info = env.step(action_num)\n #env.render('human')\n #rew = -0.01\n if rew>0:\n print(\"REWARD\")\n #rew = torch.LongTensor(rew)\n if done:\n done = 1\n else:\n done = 0\n return obs, rew, done, info, 1\n\ndef plotGraph(episodes, codeName, rew_all, Plotrew_all, list_lr, list_ac_loss, i_episode, entropy):\n plt.figure()\n plt.plot(episodes, rew_all, 'r--', episodes, list_lr, 'b.')\n plt.savefig('/home/anna/gym-miniworld/scripts/' + str(codeName) + 'A2C2Episode' + str(i_episode) + '.png')\n plt.close()\n plt.figure()\n plt.plot(episodes, Plotrew_all, 'r--', episodes, list_ac_loss, 'b--')\n plt.savefig('/home/anna/gym-miniworld/scripts/' + str(codeName) + 'A2C2Loss' + str(i_episode) + '.png')\n plt.close()\n\ndef read():\n with open ('A2CResults.csv', 'r') as f:\n Reader1 = reader(f, delimiter=',')\n Rows = list(Reader1)\n Tot_rows = len(Rows)\n return Tot_rows\n\ndef write(Agent1, cdName, AveRew, sum_episodes, tot_frame):\n with open('A2CResults.csv', 'a', newline='') as write_obj:\n csv_writer = writer(write_obj)\n 
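# one summary row per run: identifiers and hyperparameters first, then the aggregated results\n 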
csv_writer.writerow([str(cdName), \"AC2\", str(Agent1.lr_act), str(Agent1.lr_crit),\n str(Agent1.gamma),str(Agent1.lambdaCrit),str(Agent1.lambdaEntrop),\n str(Agent1.weightDecay), str(Agent1.maxReward),\n str(Agent1.minFrame), str(Agent1.bestEps),\n str(AveRew), str(tot_frame), str(Agent1.maxSteps), str(sum_episodes),\n str(Agent1.ModUpdate), str(Agent1.batch_size), str(Agent1.channels)])\n\ndef write_episode(_rew, frame, entropy):\n with open('A2C-EpisodeResults.csv', 'a', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow([_rew, entropy])\n\ndef write_start():\n with open('A2C-EpisodeResults.csv', 'a', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow([\"START\"])\n\ndef train(episode, env):\n\n Agent1 = Agent()\n Agent1.actor= Agent1.actor.to(device = device)\n write_start()\n #Agent1.load_model('train' + str(Agent1.tryNum) + '/')\n sum_episodes = episode\n rew_all = []\n Plotrew_all = []\n codeName = read()\n list_lr = []\n list_ac_loss = []\n list_crit_loss = []\n AveRew = 0\n #Agent1.save_model('train/')\n \n tot_rew = 0\n tot_frame = 0\n write(Agent1, codeName, AveRew, sum_episodes, tot_frame)\n for i_episode in range(sum_episodes):\n print(\"episode: \", i_episode)\n eps = i_episode\n obs = env.reset()\n #env.render('human')\n m_obs = [np2torch(obs) for _ in range(10)]\n _reward = []\n #Agent1.memory.load_ipt('train' + str(Agent1.tryNum) + '/')\n _rew, frame, ac_loss, entropy= Agent1.step(env.max_episode_steps, env, m_obs, i_episode)\n write_episode(_rew, frame, entropy)\n list_ac_loss.append(ac_loss)\n #list_crit_loss.append(crit_loss)\n list_lr.append(Agent1.lr_act)\n rew_all.append(_rew)\n tot_rew += _rew\n\n if _rew >Agent1.maxReward:\n Agent1.maxReward = _rew\n Agent1.minFrame = frame\n Agent1.bestEps = i_episode\n if entropy < 0.7:\n Agent1.save_model()\n if _rew>= 14:\n Agent1.lr_act = 1e-7\n tot_frame += frame\n Plottot_rew = _rew - 1\n Plotrew_all.append(Plottot_rew)\n\n if (i_episode % 100 == 0) and (i_episode != 0) or (i_episode == episode-1):\n episodes = range(0, i_episode+1)\n plotGraph(episodes, codeName, rew_all, Plotrew_all, list_lr, list_ac_loss, i_episode, entropy )\n AveRew = tot_rew / (eps+1)\n #Agent1.save_model('train/')\n #write(Agent1, codeName, AveRew, sum_episodes, tot_frame)\n\n print('epi %d frame %5d loss %2.5f entropy %2.5f reward %2.5f'%\\\n (i_episode, frame, ac_loss, entropy, _rew))\n\n\n AveRew = tot_rew / eps\n #Agent1.save_model('train/')\n write(Agent1, codeName, AveRew, sum_episodes, tot_frame)\n\n\n\nif __name__ == '__main__':\n print(\"Make environment\")\n env = gym.make('MiniWorld-OneRoom-v0')\n #env = RGBImgPartialObsWrapper(env)\n #env = ImgObsWrapper(env)\n #env.render('human')\n #env.framerate = 5\n done = False\n obs = env.reset()\n #a = float(sys.argv[1])\n env.seed(1000)\n #print(obs.shape())\n env.max_episode_steps =1000\n train(700, env)\n","sub_path":"A2C1.py","file_name":"A2C1.py","file_ext":"py","file_size_in_byte":12569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"356169213","text":"import theano\nfrom theano import tensor as T\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport struct\n#import load_mnist\n\ndef load_mnist(path, kind = 'train'):\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte' %kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' %kind)\n\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))# read 8 bytes\n#struct.unpack(fmt, buffer):\n#Unpack from the buffer buffer (presumably packed by pack(fmt, ...)) \n#according to the format string fmt. The result is a tuple even if it contains exactly one item. \n# In the format string:\n#> : Big-endian is an order in which the \"big end\" (most significant value in the sequence) \n#is stored first\n#I = unsigned integer\n\n labels = np.fromfile(lbpath, dtype = np.uint8)\n#Construct an array from data in a text or binary file.\n#A highly efficient way of reading binary data with a known data-type, \n#as well as parsing simply formatted text files. \n#Data written using the tofile method can be read using this function. \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\", imgpath.read(16))# read 16 bytes\n \n images = np.fromfile(imgpath, dtype = np.uint8).reshape(len(labels), 784)\n \n return images, labels\n \n \n \nX_train, y_train = load_mnist('mnist', kind='train')\nX_test, y_test = load_mnist('mnist', kind='t10k')\n\n#print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))\n#print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1])) \n\ntheano.config.floatX = 'float32'\n\nX_train = X_train.astype(theano.config.floatX)\nX_test = X_test.astype(theano.config.floatX)\n\nfrom keras.utils import np_utils\n#print('First 3 labels: ', y_train[:3])\n\ny_train_ohe = np_utils.to_categorical(y_train)\n#print('\\nFirst 3 labels (one-hot):\\n', y_train_ohe[:3])\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\n\nnp.random.seed(1)\n\nmodel = Sequential()\nmodel.add(Dense(input_dim=X_train.shape[1], output_dim=50, init='uniform', activation='tanh'))\nmodel.add(Dense(input_dim=50, output_dim=50, init='uniform', activation='tanh'))\nmodel.add(Dense(input_dim=50, output_dim=y_train_ohe.shape[1], init='uniform', activation='softmax'))\n\nsgd = SGD(lr=0.001, decay=1e-7, momentum=0.9)\nmodel.compile(optimizer=sgd, loss='categorical_crossentropy', metrics = ['accuracy'])\nmodel.fit(X_train, y_train_ohe, nb_epoch=1000, batch_size=15, verbose=1, validation_split=0.1)\n# This kept stalling whenever the batch size went above 15... wasted more than 5 hours because of this...\n\ny_train_pred = model.predict_classes(X_train, verbose=0)\n\nprint('First 3 predictions: ', y_train_pred[:3])\n\ntrain_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]\nprint('Training accuracy: %.2f%%' % (train_acc * 100))\n\ny_test_pred = model.predict_classes(X_test, verbose=0)\ntest_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]\nprint('Test accuracy: %.2f%%' % (test_acc * 100))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"chapter13/chapter13_ex4.py","file_name":"chapter13_ex4.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"308535463","text":"import pickle\nimport os\nfrom Error import *\nfrom auth import *\nfrom auth_admin import *\nfrom product import *\nfrom check import *\nimport base64\nimport random\nfrom cart import *\n#---------------------------------------------------------------------------------------------------------------------------\n# if you run this file all the data will be initialized\n#---------------------------------------------------------------------------------------------------------------------\n\n\ndef initial():\n data=[]\n save_adminDB(data)\n save_cartDB(data)\n save_chatDB(data)\n save_productDB(data)\n save_recordDB(data)\n save_searchDB(data)\n 
save_UserDB(data)\n\n# if you run this function, the userDB will refresh, and will only contain the following data \ndef put_data_into_user():\n #refresh userDB to []\n data=[]\n save_UserDB(data)\n\n #add data\n reed=user_register('z5221388@ad.unsw.edu.au',123,'reed')[\"token\"]\n alex=user_register('z5190777@ad.unsw.edu.au',123,'alex')[\"token\"]\n leslie=user_register('z5212833@ad.unsw.edu.au',123,'leslie')[\"token\"]\n dylan=user_register('z5157999@ad.unsw.edu.au',123,'dylan')[\"token\"]\n tony=user_register('z5238695@ad.unsw.edu.au',123,'tony')[\"token\"]\n robot=user_register('z5555555@ad.unsw.edu.au',123,'robot')[\"token\"]\n user_logout(reed)\n user_logout(alex)\n user_logout(leslie)\n user_logout(dylan)\n user_logout(tony)\n user_logout(robot)\n print(\"Put data into user finish\")\n\n\n# if you run this function, the userDB will refresh, and will only contain the following data \ndef put_data_into_admin():\n #refresh adminDB to []\n data=[]\n save_adminDB(data)\n\n #add data\n admin=admin_add('5hd@ad.unsw.edu.au',123,'5hd')\n print(\"Put data into admin finish\")\n\ndef put_data_into_product():\n #refresh productDB to []\n data=[]\n save_productDB(data)\n\n adminDB=load_adminDB()\n \n if isLoggedin(adminDB[0]['email'])!=1:\n admin_login(adminDB[0]['email'], 123)\n\n token=adminDB[0]['token']\n \n '''\n the following is for category 0, electronic add_product(token, category_id, name, detail, price, stock, tag)\n '''\n #phone\n add_product(token,0,'Apple iPhone 12 128GB (Black)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'black iphone12 128G','','')\n add_product(token,0,'Apple iPhone 12 128GB (Blue)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'blue iphone12 128G','','')\n add_product(token,0,'Apple iPhone 12 128GB (Green)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'green iphone12 128G','','')\n add_product(token,0,'Apple iPhone 12 128GB (White)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'white iphone12 128G','','')\n\n #switch\n add_product(token,0,'Nintendo Switch Console Neon', 'Buy the Nintendo Switch Neon Console online today and experience a full home video game console experience anytime, anywhere!',399, 10, 'red blue switch','','')\n add_product(token,0,'Nintendo Switch Console Mario Red & Blue Edition', 'Nintendo Switch – Mario Red & Blue Edition, with a distinct red-and-blue colour scheme in honour of Mario’s iconic outfit.',449, 10, 'Mario Red Blue Edition switch','','')\n add_product(token,0,'Nintendo Switch Console Grey', 'Buy the Nintendo Switch Grey Console online today and experience a full home video game console experience anytime, anywhere!',399, 10, 'gray switch','','')\n\n #macbook\n add_product(token,0,'Apple MacBook Air 13-inch Space Grey', 'Apple-designed M1 chip for a giant leap in CPU, GPU and machine learning performance Go longer than ever with up to 18 hours of battery life* 8-core CPU delivers up to 3.5x faster performance, to tackle projects faster than ever*',1599, 10, 'laptop macbook air space gray','','')\n add_product(token,0,'Apple MacBook Pro 13-inch Space Grey', 'Apple-designed M1 chip for a giant leap in CPU, 
GPU and machine learning performance Go longer than ever with up to 18 hours of battery life* 8-core CPU delivers up to 3.5x faster performance, to tackle projects faster than ever*',1999, 10, 'laptop macbook pro space gray','','')\n add_product(token,0,'Apple MacBook Air 13-inch Silver', 'Apple-designed M1 chip for a giant leap in CPU, GPU and machine learning performance Go longer than ever with up to 18 hours of battery life* 8-core CPU delivers up to 3.5x faster performance, to tackle projects faster than ever*',1999, 10, 'laptop macbook air silver','','')\n\n '''\n the following is for category 1, book\n '''\n add_product(token,1,'hunger game 1', 'a famous book which talk about the adventure of a girl',50, 100, 'science adventure youth fiction','','')\n add_product(token,1,'hunger game 2', 'a famous book which talk about the adventure of a girl',50, 100, 'science adventure youth fiction','','')\n add_product(token,1,'hunger game 3', 'a famous book which talk about the adventure of a girl',50, 100, 'science adventure youth fiction','','')\n\n add_product(token,1,'Harry Potter 1', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter 2', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter 3', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n\n add_product(token,1,'Hobbits 1', 'The story narrated a sorcerer leads the story which 13 dwarves and east a Huo bit person treasurees hunt',69, 100, 'magic war fiction','','')\n add_product(token,1,'Hobbits 2', 'The story narrated a sorcerer leads the story which 13 dwarves and east a Huo bit person treasurees hunt',69, 100, 'magic war fiction','','')\n \n add_product(token,1,'Romeo and Juliet', 'a story about love',80, 100, 'love romantic youth fiction','','')\n add_product(token,1,'Jane Eyre', 'Jane Eyre ranks as one of the greatest and most perennially popular works of English fiction',85, 100, 'love realism fiction','','')\n\n \n '''\n the following is for category 2, sport\n '''\n add_product(token,2,'air jordan 34', 'a shoes which is belong to nike, air jordan series', 1200, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 1', 'a shoes which is belong to nike, air jordan series', 1100, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 32', 'a shoes which is belong to nike, air jordan series', 800, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 4', 'a shoes which is belong to nike, air jordan series', 500, 30, 'shoes nike AJ','','')\n\n add_product(token,2,'baseball cap', 'a cap for baseball', 40, 30, 'baseball cap','','')\n add_product(token,2,'baseball bat', 'a bat for baseball', 80, 30, 'baseball bat','','')\n\n add_product(token,2,'hair band', 'band for sport', 70, 30, 'nike band','','')\n add_product(token,2,'tennis racket', 'racket for tennis', 76, 30, 'tennis racket','','')\n add_product(token,2,'tennis shoes', 'shoes for tennis', 60, 30, 'tennis shoes','','')\n add_product(token,2,'badminton racket', 'racket for badminton', 90, 30, 'badminton racket','','')\n\n\n\n '''\n the following is for category 3, clothes\n '''\n add_product(token,3,'Balenciaga T shirt', 'a white extravagant t-shirt', 5000, 30, 'Balenciaga T-shirt black','','' )\n add_product(token,3,'Balenciaga sweater', 'a black sweater', 5200, 30, 'Balenciaga sweater black','','' )\n\n\n add_product(token,3,'lv fleece', 'a white fleece', 4000, 30, 'lv fleece white','','' )\n 
add_product(token,3,'lv trousers', 'a white trousers', 3400, 30, 'lv trousers','','' )\n\n add_product(token,3,'burberry fleece', 'a brown fleece', 8000, 30, 'burberry fleece brown','','' )\n add_product(token,3,'burberry shirt', 'a blue shirt', 3400, 30, 'burberry blue shirt','','')\n\n add_product(token,3,'gucci sweater', 'a blue sweater', 9100, 30, 'gucci sweater','','')\n add_product(token,3,'gucci jeans', 'a blue jeans', 9100, 30, 'gucci jeans','','')\n\n add_product(token,3,'nike shorts', 'a black shorts', 900, 30, 'nike shorts','','')\n add_product(token,3,'nike shirt', 'a black shirt', 900, 30, 'nike shirt','','')\n \n '''\n the following is for category 4, home\n '''\n add_product(token,4,'desk', 'a normal desk fro working', 323, 30, 'black medium desk','','' )\n add_product(token,4,'table', 'a normal table for chatting', 360, 30, 'white medium table','','' )\n add_product(token,4,'chair', 'a normal chair', 400, 30, 'chair','','' )\n add_product(token,4,'cabinet', 'a normal cabinet for storing', 500, 30, 'black cabinet','','' )\n add_product(token,4,'sofa', 'a sofa', 3000, 30, 'sofa','','' )\n add_product(token,4,'table lamp', 'a table lamp', 200, 30, 'table lamp','','' )\n add_product(token,4,'refrigerator ', 'a refrigerator ', 700, 30, 'refrigerator','','' )\n add_product(token,4,'quilt', 'a quilt', 300, 30, 'quilt','','' )\n add_product(token,4,'pillow', 'a pillow', 7000, 30, 'pillow','','' )\n add_product(token,4,'washing machine', 'a washing machine', 600, 30, 'washing machine','','' )\n\n '''\n the following is for category 5, toy\n '''\n add_product(token,5,'LEGO JEEP', 'a model for JEEP', 3000, 30, 'model splicing car','','' )\n add_product(token,5,'LEGO benz', 'a model for benz', 3300, 30, 'model splicing car','','' )\n add_product(token,5,'LEGO ship', 'a model for ship', 2000, 30, 'model splicing ship','','' )\n\n add_product(token,5,'telecontrolled car', 'a telecontrolled car', 5000, 30, 'telecontrolled car','','' )\n add_product(token,5,'telecontrolled plane', 'a telecontrolled plane', 5500, 30, 'telecontrolled plane','','' )\n add_product(token,5,'telecontrolled ship', 'a telecontrolled ship', 4000, 30, 'telecontrolled ship','','' )\n\n\n add_product(token,5,'plush toy', 'plush toy', 4000, 30, 'plush toy','','' )\n add_product(token,5,'sliding plate', 'a sliding plate', 4000, 30, 'sliding plate','','' )\n add_product(token,5,'female barbie doll ', 'a female barbie doll', 4000, 30, 'female barbie doll','','' )\n add_product(token,5,'male barbie doll ', 'a male barbie doll', 4000, 30, 'male barbie doll','','' )\n\n # id here start with 60, take care when you give name to the photo\n\n '''\n the following is for category 0, electronic add_product(token, category_id, name, detail, price, stock, tag)\n '''\n add_product(token,0,'Samsung Galaxy 20', 'The 4500mAh (typical) battery gives your phone the juice it needs to outlast your day and power for when you really need it',12499, 30,'samsung phone','','')\n add_product(token,0,'Samsung Galaxy Z Fold2 5G', '7.6-inch Tablet-like Display and Full-viewing Cover Screen',2499, 30, 'samsung phone','','')\n add_product(token,0,'Samsung Galaxy S21+', 'Pro-grade Camera and Intelligent Infinity-O Display',1299, 20,'samsung phone','','')\n add_product(token,0,'HUAWEI P40 Pro 5G','Ultra Vision Leica Quad Camera, VIP Service - Deep Sea Blue',1188,30,'huawei phone','','')\n\n add_product(token,0,'HUAWEI MATE 30 PRO','DUAL-SIM LIO-L29',899,30,'huawei phone','','')\n add_product(token,0,'Huawei P30 Pro','Dual Sim 40MP 8GB 256GB Mobile 
Phone',998,30,'huawei phone','','')\n\n add_product(token,0,'Surface Laptop 3','13.5“,Sandstone(Metal),Intel Core i7',2099,30,'Surface laptop','','')\n add_product(token,0,'Surface Pro 7','Platinum,Intel Core i7',2099,30,'Surface laptop','','')\n add_product(token,0,'Microsoft Surface Laptop 3','13.5“,128GB i5 Platinum',1298,30,'Surface laptop','','')\n\n '''\n the following is for category 1, book\n '''\n add_product(token,1,'Harry Potter and the Goblet of Fire', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter and the Order of the Phoenix', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter and the Half-Blood Prince', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter and the Deathly Hallows', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n\n add_product(token,1,'Wuthering Heights', 'an 1847 novel by Emily Bronte',80, 100, 'romantic realism fiction','','')\n add_product(token,1,'War and Peace', 'a novel by the Russian author Leo Tolstoy',80, 100, 'historical war fiction','','')\n add_product(token,1,'The Great Gatsby', 'a 1925 novel by American writer F. Scott Fitzgerald',80, 100, 'romantic love american-dream fiction','','')\n add_product(token,1,'Norwegian Wood', 'a 1987 novel by Japanese author Haruki Murakami',80, 100, 'nostalgic love fiction','','')\n add_product(token,1,'The lady of the camellias', 'a novel by Alexandre Dumas fils,',80, 100, 'romantic tragedy fiction','','')\n add_product(token,1,'The Hunchback of Notre-Dame', ' a French Gothic novel by Victor Hugo,',80, 100, 'gothic religion love fiction','','')\n\n\n '''\n the following is for category 2, sport\n '''\n add_product(token,2,'air jordan 1 Travis Scott ', 'a shoes which is belong to nike, air jordan series', 4200, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 6', 'a shoes which is belong to nike, air jordan series', 1100, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 32 Golden Harvest', 'a shoes which is belong to nike, air jordan series', 800, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 4 Linen', 'a shoes which is belong to nike, air jordan series', 500, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 11', 'a shoes which is belong to nike, air jordan series', 500, 30, 'shoes nike AJ','','')\n\n add_product(token,2,'NYC Baseball Cap', 'a cap for baseball', 40, 30, 'baseball cap','','')\n add_product(token,2,'Brooklyn Basher Baseball Bat', 'a bat for baseball', 80, 30, 'baseball bat','','')\n\n add_product(token,2,'Tennis Ball', 'Competition used tennis ball ', 76, 30, 'tennis ball','','')\n add_product(token,2,'Wilson Pro Tennis Racket', 'racket for tennis', 76, 30, 'tennis racket','','')\n add_product(token,2,'Adidas tennis shoes', 'shoes for tennis', 60, 30, 'tennis shoes','','')\n add_product(token,2,'Lining badminton racket', 'racket for badminton', 90, 30, 'badminton racket','','')\n '''\n the following is for category 3, clothes\n '''\n add_product(token,3,'Balenciaga T shirt', 'a beige extravagant t-shirt', 5000, 30, 'Balenciaga T-shirt beige','','' )\n add_product(token,3,'Balenciaga Hoodie', 'a black sweater', 5200, 30, 'Balenciaga Hoodie black','','' )\n\n\n add_product(token,3,'lv jacket', 'a black fleece with LV logo', 4000, 30, 'lv black jackte','','' )\n add_product(token,3,'lv logo pants ', 'a black shiny pants', 3400, 30, 'lv black 
pants','','' )\n\n add_product(token,3,'burberry jacket', 'a brown fleece', 8000, 30, 'burberry jacket brown','','' )\n add_product(token,3,'burberry plaid shirt', 'a signature beige plaid shirt', 3400, 30, 'burberry plaid shirt','','')\n\n add_product(token,3,'gucci cardigan', 'a blue&brown cardigan', 9100, 30, 'gucci brown blue cardigan','','')\n add_product(token,3,'gucci jeans skinny', 'a blue jeans', 9100, 30, 'gucci jeans skinny','','')\n\n add_product(token,3,'nike swoosh shorts ', 'a black shorts with swoosh', 900, 30, 'nike shorts black','','')\n add_product(token,3,'nike swoosh T-shirt', 'a black t-shirt', 900, 30, 'nike t-shirt black','','')\n '''\n the following is for category 4, home\n '''\n add_product(token,4,'Wood Desk', 'a normal wood desk fro working', 323, 30, 'wooden brown desk','','' )\n add_product(token,4,'Wooden table', 'a normal table for studying', 360, 30, 'wooden brown table','','' )\n add_product(token,4,'wooden chair', 'a normal chair', 400, 30, 'wooden chair','','' )\n add_product(token,4,'wooden cabinet', 'a normal cabinet for storing', 500, 30, 'wooden brown cabinet','','' )\n add_product(token,4,'Double cloth Sofa', 'a sofa', 3000, 30, 'Cloth sofa','','' )\n add_product(token,4,'pink table lamp', 'a table lamp', 200, 30, 'pink table lamp','','' )\n add_product(token,4,'Hisense refrigerator ', 'a refrigerator ', 700, 30, 'silver refrigerator','','' )\n add_product(token,4,'wool quilt', 'a quilt', 300, 30, 'wool quilt','','' )\n add_product(token,4,'Haier washing machine', 'a washing machine', 600, 30, 'white washing machine','','' )\n '''\n the following is for category 5, toy\n '''\n add_product(token,5,'Lego Harry Potter Castle','71043 Castle Model Building Kit with Harry Potter Figures',519,30,'Lego Model Castle','','')\n add_product(token,5,'Jellycat Bunny','Jellycat Small Bashful Bunny',200,30,'Plush toy ','','')\n add_product(token,5,'JellyCat Dragon','Bashful Dragon medium 31cm soft toy',519,30,'plush soft toy','','')\n add_product(token,5,'JellyCat Curvie Pig',' super fluffy lovable look pig',519,30,'plush soft toy','','')\n add_product(token,5,'JellyCat Amuseable Cloud','dreamy companion cutie cloud',519,30,'plush soft toy','','')\n add_product(token,5,'JellyCat bag','Amusable Pineapple woven bag 33cm',200,30,'plush soft toy bag','','')\n\n\n add_product(token,5,'JellyCat Pear bag','Amusable Pear plushn cross body bag',519,30,'plush soft toy bag','','')\n\n\n add_product(token,5,'Lego Technic Porche','Porche 911 RSR 42096 Building Kit',1000,30,'lego model splicing car','','')\n add_product(token,5,'Lego Star Wars','The Rise of Skywalker Millennium Falcon 75257 Building Kit',1000,30,'lego model splicing ship','','')\n admin_logout(adminDB[0]['token'])\n print(\"put data into product finish\")\n\n\ndef add_photo():\n adminDB=load_adminDB()\n productDB=load_productDB()\n if isLoggedin(adminDB[0]['email'])!=1:\n admin_login(adminDB[0]['email'], 123)\n\n token=adminDB[0]['token']\n\n #this is for add first img\n for i in range(0,len(productDB)):\n url='photo/'+str(i)+'-'+'0'+'.png'\n if os.path.exists(url):\n base_64=change_iml_to_base64(url)\n base_64='data:image/png;base64,'+str(base_64)\n update_first_photo(token,i,base_64)\n #this is for add second img\n for i in range(0,len(productDB)):\n url='photo/'+str(i)+'-'+'1'+'.png'\n if os.path.exists(url):\n base_64=change_iml_to_base64(url)\n base_64='data:image/png;base64,'+str(base_64)\n update_second_photo(token,i,base_64)\n\n admin_logout(adminDB[0]['token'])\n print(\"add photo finish\")\n\ndef 
add_pro():\n token=user_login('z5555555@ad.unsw.edu.au',123)['token']\n productDB=load_productDB()\n id=[]\n for i in productDB:\n id.append(i['id'])\n length=len(productDB)\n l=random.sample(id, length)\n\n j=0\n for i in l:\n j+=1\n add_product_to_cart(token,i)\n if j%10==0 or j==length:\n purchase_product(token)\n user_logout(token)\n print(\"add record\")\ndef change_iml_to_base64(url):\n f=open(url,'rb')# first argument: path to the image file\n ls_f=base64.b64encode(f.read())\n ls_f = ls_f.decode(\"utf-8\")\n f.close()\n return ls_f\n\n\nif __name__ == '__main__':\n initial()\n\n put_data_into_admin()\n\n put_data_into_user()\n\n put_data_into_product()\n\n add_pro()\n\n add_photo()\n\n \n \n \n\n \n \n \n","sub_path":"RecommendWebsite/reed/put_data.py","file_name":"put_data.py","file_ext":"py","file_size_in_byte":19647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"456968908","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nimport sys\n\nclass myContentHandler(ContentHandler):\n\n def __init__ (self):\n self.inItem = False\n self.inContent = False\n self.Content = \"\"\n self.Html = \"\"\n self.Title = \"\"\n\n def startElement (self, name, attrs):\n if name == 'item':\n self.inItem = True\n elif self.inItem:\n if name == 'title' or name == 'link':\n self.inContent = True\n \n def endElement (self, name):\n if name == 'item':\n self.inItem = False\n elif self.inItem:\n if name == 'title':\n self.Title += self.Content\n # To avoid Unicode trouble\n self.inContent = False\n self.Content = \"\"\n elif name == 'link':\n self.Html += (\"<a href='\" + self.Content + \"'>\"\n + self.Title + \"</a><br/>
\\n\")\n self.inContent = False\n self.Content = \"\"\n self.Title = \"\"\n\n def characters (self, chars):\n if self.inContent:\n self.Content = self.Content + chars\n \n# --- Main prog\ndef getNews(): \n # Load parser and driver\n\n theParser = make_parser()\n theHandler = myContentHandler()\n theParser.setContentHandler(theHandler)\n\n # Ready, set, go!\n\n theParser.parse(\"http://barrapunto.com/index.rss\")\n return \"
<h1>News:</h1>
\" + theHandler.Html","sub_path":"cms/xmlparser.py","file_name":"xmlparser.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"29554904","text":"#!/usr/bin/env python3\n# session analysis\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom common import get_t\nimport os\nimport argparse\nimport time\n\ndef read_file(file_name, out_dir=None):\n user_events = defaultdict(list)\n items_view_freq = defaultdict(int)\n items_purchase_freq = defaultdict(int)\n item_list = set()\n behavior_list = set()\n with open(file_name, 'r') as in_f:\n for line in tqdm(in_f):\n ad_id, item_id, behavior, ts = line.strip().split('\\t')\n if item_id:\n item_id = item_id.replace(' ', '')\n user_events[ad_id].append((item_id, behavior, int(ts)))\n if behavior == 'ViewContent':\n items_view_freq[item_id] += 1\n elif behavior == 'revenue':\n items_purchase_freq[item_id] += 1\n item_list.add(item_id)\n behavior_list.add(behavior + ':' + item_id)\n if out_dir:\n with open(f'{out_dir}/items_view_freq.csv', 'w') as f:\n [print(f'{key}\\t{value}', file=f) for key, value in sorted(items_view_freq.items(), key=lambda item: item[1], reverse=True)]\n with open(f'{out_dir}/items_purchase_freq.csv', 'w') as f:\n [print(f'{key}\\t{value}', file=f) for key, value in sorted(items_purchase_freq.items(), key=lambda item: item[1], reverse=True)]\n\n with open(f'{out_dir}/user_idx.csv', 'w') as f:\n [print(f'{key}\\t{idx}', file=f) for idx, key in enumerate(list(user_events.keys()))]\n with open(f'{out_dir}/item_idx.csv', 'w') as f:\n [print(f'{key}\\t{idx}', file=f) for idx, key in enumerate(list(item_list))]\n with open(f'{out_dir}/behavior_idx.csv', 'w') as f:\n [print(f'{key}\\t{idx}', file=f) for idx, key in enumerate(list(behavior_list))]\n return user_events\n\n# session_period : sec\ndef session_process(tmp_user_events, session_period=None, last_N=10):\n user_sessions = defaultdict(list)\n user_last_N_events = defaultdict(list)\n user_events = defaultdict(list)\n for ad_id in tqdm(tmp_user_events):\n if len(tmp_user_events[ad_id]) > 5:\n user_events[ad_id] = tmp_user_events[ad_id]\n\n if session_period:\n for ad_id in tqdm(user_events):\n tmp = []\n user_events[ad_id].sort(key = lambda x:x[-1])\n user_last_N_events[ad_id] = user_events[ad_id][-last_N:]\n for event in user_events[ad_id]:\n if len(tmp) == 0:\n tmp.append(event)\n else:\n if event[-1] - tmp[-1][-1] < session_period:\n tmp.append(event)\n else:\n user_sessions[ad_id].append(tmp)\n tmp = [event]\n if len(tmp) > 0:\n user_sessions[ad_id].append(tmp)\n\n else:\n for ad_id in tqdm(user_events):\n user_events[ad_id].sort(key = lambda x:x[-1])\n user_last_N_events[ad_id] = user_events[ad_id][-last_N:]\n user_sessions[ad_id].append(user_events[ad_id])\n\n return user_sessions, user_last_N_events\n\n\ndef user_events_session_statistic(user_event_session):\n user_count = len(user_event_session)\n sessions_count, session_length = 0, 0\n for idx, user in tqdm(enumerate(user_event_session)):\n sessions = user_event_session[user]\n if len(sessions) < 2:\n continue\n # sessions_count += len(sessions)\n for session in sessions:\n session_length += len(session)\n sessions_count += 1\n if idx < 1000:\n print(f\"{user} ---> {user_event_session[user]}\")\n print(f'user_count:{user_count}, sessions_count:{sessions_count}, avg_session_length:{session_length/sessions_count}')\n\n\ndef save_test_file_new(user_event_session , file_name, last_N=10):\n with open(file_name, 'w') 
as out_f:\n for user in tqdm(user_event_session):\n sessions = user_event_session[user]\n if len(sessions) < 2:\n continue\n \n for idx in range(len(sessions)-1):\n if len(sessions[idx]) >= last_N and len(sessions[idx+1]) >= 5:\n history_events = [f'{s[1]}:{s[0]}' for s in sessions[idx]]\n predict_events = [f'{s[1]}:{s[0]}' for s in sessions[idx+1]]\n if set(history_events) != set(predict_events):\n print(f\"{'#'.join(history_events)}\\t{'#'.join(predict_events)}\", file=out_f)\n\n \ndef save_user_event_seqence(user_event_session, file_name):\n \"\"\"\n - train seqence\n - test prefix 10 seqence\n \"\"\"\n with open(file_name, 'w') as out_f: \n for user in tqdm(user_event_session):\n for session in user_event_session[user]:\n if len(session) < 3: continue\n events = [f'{s[1]}:{s[0]}' for s in session]\n print(' '.join(events), file=out_f)\n \n\n\ndef save_test_file(user_event_session, user_last_N_events, file_name):\n \"\"\"\n ev1, ev2, ev3, ev4, ev5 ......\n ev6, ev7, ev8, ev9, ......\n \"\"\"\n with open(file_name, 'w') as out_f: \n for user in tqdm(user_event_session):\n if user in user_last_N_events:\n print('#'.join([f'{s[1]}:{s[0]}' for s in user_last_N_events[user]]), end='\\t', file=out_f)\n else:\n print('', end='\\t', file=out_f)\n\n for session in user_event_session[user]:\n events = [f'{s[1]}:{s[0]}' for s in session]\n print('#'.join(events), file=out_f)\n \n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"python3 0_raw_data_handler.py\")\n parser.add_argument(\"date\", type=str, help=\"date\")\n parser.add_argument(\"output_dir\", type=str, help=\"output file foler\")\n parser.add_argument(\"--session_period\", type=int, default=None, help=\"how long would consider the new session (sec)\")\n parser.add_argument(\"--last_N\", type=int, default=10, help=\"How many reference events when testing\")\n args = parser.parse_args()\n tr_data = f\"data/{args.date}/tr_data/merged.data\"\n te_data = f\"../data/{args.date}/te_data/merged.data\"\n #te_data = f\"te_sample.data\"\n # train\n print(f\"[{get_t()}] reading train data events\")\n #events = read_file(tr_data, args.output_dir)\n print(f\"[{get_t()}] train data session_process\")\n #sessions, last_N_events = session_process(events, session_period=args.session_period, last_N=args.last_N)\n print(f\"[{get_t()}] train data session_statistic\")\n #user_events_session_statistic(sessions)\n print(f\"[{get_t()}] train data save file\")\n #save_user_event_seqence(sessions, os.path.join(args.output_dir, 'tr_data.csv'))\n # release memory\n events, sessions = None, None\n #time.sleep(15)\n\n # test\n print(f\"[{get_t()}] reading test data events\")\n events = read_file(te_data)\n print(f\"[{get_t()}] test data session_process\")\n sessions, _ = session_process(events, session_period=args.session_period, last_N=args.last_N)\n print(f\"[{get_t()}] test data session_statistic\")\n user_events_session_statistic(sessions)\n save_test_file_new(sessions, f'test.sample.{args.session_period}.csv', last_N=args.last_N)\n\n # print(f\"[{get_t()}] reading sample data events\")\n # sample_events = read_file('data/sample.csv', 'data')\n # sample_sessions, sample_last_N_events = session_process(sample_events, session_period=None)\n # user_events_session_statistic(sample_sessions)\n # save_user_event_seqence(sample_sessions, 'tr_data.csv')\n # save_test_file(sample_sessions, sample_last_N_events, 'te_data.csv')\n # # for u in sample_sessions:\n # # 
print(sample_sessions[u])\n","sub_path":"data_process/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"552984673","text":"# Python standard library modules\nimport asyncio\nimport bisect\nimport io\nimport json\nimport logging\nimport random\nimport sys\nimport time\nfrom urllib.parse import splittype, urljoin\n\n# Third-party modules\nimport aiohttp\nimport click\nimport coloredlogs\nimport jwt\nimport websockets\nfrom jsonrpcserver import method, async_dispatch as dispatch\nfrom jsonrpcserver.response import DictResponse\n\n\nRECSYS_NAME = 'baseline'\n\nENVVAR_PREFIX = 'RENEWAL'\n\nRENEWAL_API_BASE_URI = 'https://api.renewal-research.com/v1'\n\nINITIAL_ARTICLES = 1000\n\"\"\"Number of articles to initialize the in-memory article cache with.\"\"\"\n\nMAX_ARTICLES = 10000\n\"\"\"Maximum number of articles to keep cached in memory.\"\"\"\n\nRECOMMEND_DEFAULT_LIMIT = 30\n\"\"\"Max number of recommendations to return by default.\"\"\"\n\n\nlog = logging.getLogger(RECSYS_NAME)\n# Log all uncaught exceptions\nsys.excepthook = lambda *exc_info: log.exception(\n 'an uncaught exception occurred', exc_info=exc_info)\n\narticles = None\n\"\"\"Articles cache; initialized in `initialize`.\"\"\"\n\n\nasync def initialize(api_base_uri, token):\n \"\"\"Start-up tasks to perform before starting the main client loop.\"\"\"\n\n global articles\n\n log.info(f'initializing articles cache with {INITIAL_ARTICLES} articles')\n headers = {'Authorization': 'Bearer ' + token}\n async with aiohttp.ClientSession(\n headers=headers, raise_for_status=True) as session:\n async with session.get(urljoin(api_base_uri, 'articles'),\n params={'limit': INITIAL_ARTICLES}) as resp:\n articles = ArticleCollection(await resp.json())\n log.debug(f'cached {len(articles)} articles')\n\n\n# RPC methods\n# WARNING: Don't forget to make these functions async even if they\n# don't use await, otherwise the async_dispatch gets confused.\n\n@method\nasync def ping():\n return 'pong'\n\n\n@method\nasync def new_article(article):\n articles.push(article)\n\n\n@method\nasync def recommend(user_id, limit=RECOMMEND_DEFAULT_LIMIT, since_id=None,\n max_id=None):\n \"\"\"Return recommendations for the specified user and article ID range.\"\"\"\n\n # Currently just supports the 'random' strategy: Take a random selection\n # of up to limit articles from the given range.\n if since_id is None:\n # If no since_id is given (i.e. 
we are being asked for the most recent\n # articles, just take the top `limit * 2` articles and then take a\n # random selection from them\n start = -2 * limit\n else:\n start = since_id + 1\n end = max_id\n selection = articles[start:end]\n limit = min(limit, len(selection))\n sample = sorted(random.sample(range(len(selection)), limit), reverse=True)\n return [selection[idx]['article_id'] for idx in sample]\n\n\n# websocket server loops\n\nasync def request_loop(api_base_uri, token):\n \"\"\"\n Main loop of the recsystem application.\n\n Connects to the event stream websocket and starts a loop to receive and\n handle events from the backend.\n \"\"\"\n\n log.info(f'initializing websocket connection to event stream')\n uri = urljoin('ws:' + splittype(api_base_uri)[1], 'event_stream')\n headers = {'Authorization': 'Bearer ' + token}\n async with websockets.connect(uri, extra_headers=headers) as websocket:\n log.info(f'listening to websocket for events...')\n # Incoming RPC requests are added to this queue, and their results are\n # popped off the queue and sent; the queue is used as a means of\n # serializing responses, otherwise we could have multiple coroutines\n # concurrently trying to write to the same websocket\n queue = asyncio.Queue()\n\n # Start the incoming and outgoing message handlers; a slight variant of\n # this pattern:\n # https://websockets.readthedocs.io/en/stable/intro.html#both\n await multiplex_tasks(handle_incoming(websocket, queue),\n handle_outgoing(websocket, queue))\n\n\nasync def multiplex_tasks(*tasks):\n \"\"\"\n Run multiple coroutines simultaneously as tasks, exiting as soon as any one\n of them raises an exception.\n\n The exception from the coroutine is then re-raised.\n \"\"\"\n\n done, pending = await asyncio.wait(tasks,\n return_when=asyncio.FIRST_EXCEPTION)\n\n try:\n for task in done:\n # If one of the tasks exited with an exception\n # Calling .result() re-raises that exception\n task.result()\n finally:\n for task in pending:\n task.cancel()\n\n\nasync def dispatch_incoming(queue, request):\n \"\"\"\n Dispatch incoming messages to the JSON-RPC method dispatcher.\n\n When the result is ready it is placed on the outgoing queue.\n \"\"\"\n\n response = await dispatch(request)\n log.info(format_rpc_call(request, response))\n await queue.put(response)\n\n\nasync def handle_incoming(websocket, queue):\n \"\"\"\n This coroutine checks the websocket for incoming JSON-RPC requests and\n passes them to `dispatch_incoming`.\n \"\"\"\n\n while True:\n request = await websocket.recv()\n asyncio.ensure_future(dispatch_incoming(queue, request))\n\n\nasync def handle_outgoing(websocket, queue):\n \"\"\"\n This coroutine checks the outgoing response queue for results from\n dispatched RPC methods, and sends them on the websocket.\n \"\"\"\n\n while True:\n response = await queue.get()\n if response.wanted:\n await websocket.send(str(response))\n\n\nclass ArticleCollection:\n \"\"\"Maintain a list of articles sorted by article_id (ascending).\"\"\"\n\n def __init__(self, initial=None, max_size=MAX_ARTICLES):\n self.article_ids = []\n self.articles = {}\n self.max_size = max_size\n if initial:\n for item in initial:\n id_ = item['article_id']\n if id_ not in self.articles:\n self.article_ids.append(id_)\n self.articles[id_] = item\n\n self.article_ids = sorted(self.article_ids)\n # Limit to the max_size highest article IDs\n self.article_ids = self.article_ids[-max_size:]\n\n def __len__(self):\n return len(self.article_ids)\n\n def __getitem__(self, article_id):\n 
\"\"\"\n Retrieve items from the collection by article_id or a range of\n article_ids.\n \"\"\"\n\n if not isinstance(article_id, slice):\n # The single article case is simple.\n try:\n return self.article_ids[article_id]\n except KeyError:\n raise IndexError(article_id)\n\n # Select ranges of article IDs--this can be tricky because although\n # self.article_ids is assumed to be sorted, it have missing items in\n # the range\n slc = article_id\n start = slc.start\n stop = slc.stop\n\n if start is not None:\n idx = bisect.bisect_left(self.article_ids, start)\n if idx == len(self.article_ids):\n start = None\n else:\n start = idx\n\n if stop is not None:\n # reverse enumerate\n stop = bisect.bisect_left(self.article_ids, stop)\n\n ids = self.article_ids[start:stop:slc.step]\n\n return [self.articles[id_] for id_ in ids]\n\n def push(self, item):\n \"\"\"\n Push a new article to the collection while maintaining the sort\n invariant.\n\n If the new article is already than the lowest article ID and the\n collection is already at capacity, it is discarded.\n \"\"\"\n\n id_ = item['article_id']\n if (id_ in self.articles or\n (len(self.article_ids) == self.max_size and\n id_ < self.article_ids[0])):\n return\n\n bisect.insort_left(self.article_ids, id_)\n self.articles[id_] = item\n\n if len(self.article_ids) > self.max_size:\n old_id = self.article_ids.pop(0)\n del self.articles[old_id]\n\n self.articles[id_] = item\n\n log.debug(f'new article added to the collection: {item}')\n log.debug(f'article collection size: {len(self)}')\n\n\ndef format_rpc_call(request, response=None):\n \"\"\"\n For debugging purposes, print parsed JSON-RPC requests/responses.\n \"\"\"\n\n if isinstance(request, str):\n request = json.loads(request)\n\n if isinstance(response, DictResponse):\n response = response.deserialized()\n else:\n response = None\n\n method = request['method']\n params = request.get('params', {})\n if isinstance(params, list):\n params = ', '.join(repr(v) for v in params)\n else:\n params = ', '.join(f'{k}={v!r}' for k, v in params.items())\n call = f'{method}({params})'\n\n if response is None:\n return call\n\n if 'error' in response:\n return f'{call} !! {response[\"error\"]!r}'\n else:\n return f'{call} -> {response[\"result\"]!r}'\n\n\nclass FileOrToken(click.File):\n \"\"\"\n Extends `click.File` to also accept a JWT token.\n\n If the input value resembles a properly formatted JWT token its value will\n be taken as-is wrapped in an `io.StringIO`. 
Otherwise the input is assumed\n to be a filename and the file is returned as an open file object.\n \"\"\"\n\n def convert(self, value, param, ctx):\n try:\n jwt.decode(value, verify=False)\n except jwt.DecodeError:\n return super().convert(value, param, ctx)\n\n return io.StringIO(value)\n\n\n@click.command()\n@click.option('-a', '--api-base-uri', default=RENEWAL_API_BASE_URI,\n help='URI for the Renewal HTTP API')\n@click.option('-t', '--token', required=True, type=FileOrToken(),\n help='authentication token for the recsystem; if a valid '\n 'filename is given the token is read from a file instead')\n@click.option('--log-level', default='INFO',\n type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR'],\n case_sensitive=False),\n help='minimum log level to output')\ndef main(api_base_uri, token, log_level):\n logging.basicConfig(level=log_level)\n log.setLevel(log_level)\n coloredlogs.install(level=log_level, logger=log)\n\n if api_base_uri[-1] != '/':\n # Add trailing slash to make it easier to join URL fragments with\n # urljoin()\n api_base_uri += '/'\n\n log.info(f'starting up {RECSYS_NAME} recsystem on {api_base_uri}')\n token = token.read().strip()\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(initialize(api_base_uri, token))\n while True:\n try:\n loop.run_until_complete(request_loop(api_base_uri, token))\n except (websockets.WebSocketException, ConnectionRefusedError):\n log.warning(\n 'lost connection to the backend; trying to re-establish...')\n time.sleep(5)\n except KeyboardInterrupt:\n return\n finally:\n # Cancel all pending tasks\n for task in asyncio.Task.all_tasks(loop=loop):\n task.cancel()\n try:\n # Give the task a chance to finish up\n loop.run_until_complete(task)\n except Exception:\n # This may result in a CancelledError or other miscellaneous\n # exceptions as connections are shut down, but we are exiting\n # anyways so ignore them.\n pass\n\n loop.run_until_complete(loop.shutdown_asyncgens())\n loop.close()\n\n\nif __name__ == '__main__':\n main(auto_envvar_prefix=ENVVAR_PREFIX)\n","sub_path":"recsystems/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":11559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"452518773","text":"import csv\nfrom django.db import IntegrityError\nfrom tot.utils.ingredient_fetch import fetchIngredients\nfrom tot.models import Drink\n\n\n# Parsing csv file to db models\ndef fetchCsv(file):\n\n #line separator\n separator='|'\n\n # new drinks to add\n newDrinks = []\n\n # new ingredients\n newIngredints = []\n\n file.open()\n file_data = file.read().decode(\"utf-8\")\n lines = file_data.split(\"\\n\")\n\n for line in lines[1:-1]:\n parameters = line.split(separator)\n print(parameters)\n name = parameters[0].strip()\n desc = parameters[1].strip()\n alc = int(float(parameters[2].strip()))\n times = int(float(parameters[3].strip()))\n url = parameters[4].strip()\n ingredients = []\n rest = parameters[5:]\n print(\"REST:\", rest)\n for ing in rest:\n if not ing == '':\n ingredients.append(ing)\n print(\"----------------\")\n print(\"NAME:\", name)\n print(\"DESC:\", desc)\n print(\"ALC:\", alc)\n print(\"TIMES\", times)\n print(\"URL:\", url)\n print(\"INGREDIENTS:\", ingredients)\n\n # saving\n try:\n drink = Drink.objects.create(alcohol_level=alc, drink_name=name, description=desc, timies_drank=times,\n image=url)\n drink.save()\n\n # parsing to form accepted by fetch\n ing_string = convertToFetch(ingredients)\n print('FETCH:' + 
ing_string)\n newIng, allIng = fetchIngredients(ing_string, drink)\n newIngredints = newIngredints + newIng\n newDrinks.append(name)\n except (IntegrityError, AttributeError) as e:\n print(e)\n print(name)\n pass\n print(\"added drinks:\")\n print(newDrinks)\n print(\"added ingr:\")\n print(newIngredints)\n\n return newDrinks, newIngredints\n\n\n# converts to form accepted by fetchIngredients function\ndef convertToFetch(ingredients):\n ing_string = ''\n for ing in ingredients:\n ing_replaced = ing.replace(';', ',')\n print(ing_replaced)\n ing_string += ing_replaced\n ing_string += ';'\n return ing_string\n","sub_path":"tot/utils/csv_fetch.py","file_name":"csv_fetch.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"43254282","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom matplotlib.animation import FuncAnimation\nfrom copy import copy\n\nclass Branch:\n \"\"\"A simple example class\"\"\"\n def __init__(self, iter):\n self.x = []\n self.y = []\n self.sep = 0.075\n self.decay_rate = 0.01\n self.iter = iter\n\n def reproduce(self):\n return Branch(self.iter)\n\n def degrees_to_rads(self, degs):\n rads = (np.pi/180*degs)\n return rads\n\n def compute_trajectory(self):\n for i in range(iter):\n direction = random.uniform(self.degrees_to_rads(0),\n self.degrees_to_rads(360))\n if i == 0:\n self.x.append(random.randint(-1, 1))\n self.y.append(random.randint(-1, 1))\n elif i == 10:\n self.x.append(self.x[i-1] + self.sep*np.cos(direction))\n self.y.append(self.y[i-1] + self.sep*np.sin(direction))\n else:\n self.x.append(self.x[i-1] + self.sep*np.cos(direction))\n self.y.append(self.y[i-1] + self.sep*np.sin(direction))\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(1, 1, 1) # specify (nrows, ncols, axnum)\nax2 = ax1.twinx() # specify (nrows, ncols, axnum)\nax1.set_xlim(-3, 3)\nax1.set_ylim(-3, 3)\nimage_1, = ax1.plot([], [], 'o', color='k')\nimage_2, = ax2.plot([], [], 'o', color='k')\nmarkersize = 3\niter = 10000\nk = 10\n\nseed = Branch(iter)\nseed.compute_trajectory()\nnew_branch = Branch(iter)\n\ndef animate(i):\n image_1.set_data(seed.x[:i+1], seed.y[:i+1])\n image_1.set_markersize(markersize)\n image_2.set_data(new_branch.x[:i+1], new_branch.y[:i+1])\n image_2.set_markersize(markersize)\n\nani = FuncAnimation(fig, animate, frames=iter, interval=1)\nplt.show()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"529892693","text":"# Hint built-in modules: datetime, time\n# 1\n# Define a decorator that:\n# Prints the time at which the function started executing\n# Prints the time at which the function finished\n# Prints how many seconds the function ran for\n\n# 2 Define a decorator that creates a log file (a txt file with the activity 'journal') for every decorated function, containing:\n# The start/end time\n# The function name\n# The file should hold 5 entries per function call (5 separate runs recorded in the journal)\n\n# Apply the decorators (first one at a time, then together) on the following functions:\n# A function that iterates up to 1 million and prints every number\n# A function that creates 10 separate txt files and writes 1 to 1 million (each number on its own line) into every file\n# A function that prints 5 words, but each word is printed once every 5 
seconds\n\n# Try calling each function separately, not all at once, since each function can take quite a long time.\n\nfrom time import time, ctime, sleep\n\ndef detalii_functie(funct):\n def wrapper(*args):\n t1 = time()\n rezultat=funct(*args)\n t2 = time()\n print(f\"The function started running at {ctime(t1)} and finished at {ctime(t2)}\")\n timp_rulare=round((t2-t1),4)\n print(f\"The runtime was {timp_rulare} seconds\")\n return rezultat\n return wrapper\n\ndef log_file(funct):\n def wrapper(*args):\n numar_linii = 0\n t1 = time()\n rezultat=funct(*args)\n t2 = time()\n name=funct.__name__\n file = open(f\"fisier functie {name}.txt\", \"a\")\n file.write(f\"Function {name} started running at {ctime(t1)} and finished at {ctime(t2)}.\\nThe runtime was {round((t2-t1),4)} seconds\\n\")\n file.close()\n try:\n file = open(f\"fisier functie {name}.txt\", \"r\")\n for line in file.readlines():\n numar_linii +=1\n file.close()\n except:\n numar_linii = 3\n file = open(f\"fisier functie {name}.txt\", \"a\")\n file.write(f\"End of execution {(numar_linii//4)+1}\\n\"+\"*\"*70+\"\\n\")\n file.close()\n return rezultat\n return wrapper\n\n# @detalii_functie\n@log_file\ndef million():\n for i in range (1000001):\n print (i)\nmillion()\n\n# @detalii_functie\n@log_file\ndef creare_fisiere():\n for i in range(10):\n file = open(f\"fisier.{i+1}.txt\",\"w\")\n for i in range(1,1000001):\n file.write(f\"{i}\\n\")\n file.close()\ncreare_fisiere()\n\n# @detalii_functie\n@log_file\ndef pause_function(*cuvinte):\n for i in cuvinte:\n sleep(5)\n print(i)\npause_function(\"Acesta\",\"nu\",\"este\",\"un\",\"melc\")\n","sub_path":"Tema21/Tema_21.py","file_name":"Tema_21.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"194541971","text":"# -*- coding: UTF-8 -*-\n'''\nAuthor: chenxing\nDate: 2018-04-30\n'''\n\nimport numpy as np\nimport pickle\nimport jieba\nimport time\nimport wave\nfrom pyaudio import PyAudio, paInt16\nfrom aip import AipSpeech\n\nstop_word = [',', '。', '、', '!', '?', ',', '.', '!', '?', ' ', '', '\\n', '(', ')', '(', ')', '\\ufeff']\n'''\n Stop-word set: filler words with no real meaning, such as “啊,吗,嗯”, plus punctuation marks\n'''\n\n'''\n Load the data\n'''\n\n\ndef loadStopword():\n fr = open('stopword.txt', 'r', encoding=('utf-8'))\n lines = fr.readlines()\n for line in lines:\n stop_word.append(line.strip())\n fr.close()\n\n\n'''\n Build the vocabulary set\n params:\n documentSet: the training document set\n return: vocabulary list, used as the bag-of-words space\n'''\n\n\ndef createVocabList(documentSet):\n vocabSet = set([])\n for document in documentSet:\n vocabSet = vocabSet | set(document) # union of the two sets\n return list(vocabSet)\n\n\n'''\n Text processing: for raw text, first tokenize (with jieba), then remove stop words\n'''\n\n\ndef textParse(bigString): # input is big string, #output is word list\n cutted = jieba.cut(bigString, cut_all=False)\n listOfWord = []\n for word in cutted:\n if word not in stop_word:\n listOfWord.append(word)\n return listOfWord\n\n\n'''\n Cross-validation training\n'''\nBAD = 1\nGOOD = 0\n\n\ndef testClassify():\n listAllDoc = []\n listClasses = []\n\n print(\"----loading document list----\")\n\n # 31 documents labelled as negative reviews\n for i in range(1, 32):\n wordList = textParse(open('bad/%d.txt' % i, 'r', encoding=('utf-8')).read())\n listAllDoc.append(wordList)\n listClasses.append(BAD)\n # 31 documents labelled as positive reviews\n for i in range(1, 32):\n wordList = textParse(open('good/%d.txt' % i, 'r', encoding=('utf-8')).read())\n listAllDoc.append(wordList)\n listClasses.append(GOOD)\n\n print(\"----creating vocab list----\")\n # build the bag-of-words model\n listVocab = 
createVocabList(listAllDoc)\n docNum = len(listAllDoc)\n # testSetNum = int(docNum * 0.1)\n testSetNum = 10\n\n trainingIndexSet = list(range(docNum)) # index list the same length as the whole document set\n testSet = [] # empty test set\n\n # draw random indices for the test set, and remove those indices from the training set\n for i in range(testSetNum):\n randIndex = int(np.random.uniform(0, len(trainingIndexSet)))\n testSet.append(trainingIndexSet[randIndex])\n del (trainingIndexSet[randIndex])\n\n trainMatrix = []\n trainClasses = []\n\n for docIndex in trainingIndexSet:\n trainMatrix.append(bagOfWords2VecMN(listVocab, listAllDoc[docIndex]))\n trainClasses.append(listClasses[docIndex])\n\n print(\"----training begin----\")\n pBAD, pGOODV, pCLASS = trainNaiveBayes(np.array(trainMatrix), np.array(trainClasses))\n\n print(\"----training complete----\")\n print(\"pBAD:\", pBAD)\n print(\"pGOODV:\", pGOODV)\n print(\"pCLASS:\", pCLASS)\n print(\"bad: %d, good:%d\" % (BAD, GOOD))\n\n args = dict()\n args['pBAD'] = pBAD\n args['pGOODV'] = pGOODV\n args['pCLASS'] = pCLASS\n\n fw = open(\"args.pkl\", \"wb\")\n pickle.dump(args, fw, 2)\n fw.close()\n\n fw = open(\"vocab.pkl\", \"wb\")\n pickle.dump(listVocab, fw, 2)\n fw.close()\n\n errorCount = 0\n for docIndex in testSet:\n vecWord = bagOfWords2VecMN(listVocab, listAllDoc[docIndex])\n if classifyNaiveBayes(np.array(vecWord), pBAD, pGOODV, pCLASS) != listClasses[docIndex]:\n errorCount += 1\n doc = ' '.join(listAllDoc[docIndex])\n print(\"classification error\", doc)\n print('the error rate is: ', float(errorCount) / len(testSet))\n\n\n# classification method (only binary classification is handled here)\ndef classifyNaiveBayes(vec2Classify, pBADVec, pGOODVec, pClass1):\n pIsBAD = sum(vec2Classify * pBADVec) + np.log(pClass1) # element-wise mult\n pIsGOOD = sum(vec2Classify * pGOODVec) + np.log(1.0 - pClass1)\n\n if pIsBAD > pIsGOOD:\n return BAD\n else:\n return GOOD\n\n\n'''\n Train\n params:\n tranMatrix: matrix of word-space vectors converted from the training documents\n tranClasses: class labels corresponding to those documents\n'''\n\n\ndef trainNaiveBayes(trainMatrix, trainClasses):\n numTrainDocs = len(trainMatrix)\n numWords = len(trainMatrix[0]) # number of matrix columns, i.e. the dimension of each vector\n numIsBAD = len([x for x in trainClasses if x == BAD])\n pCLASS = numIsBAD / float(numTrainDocs)\n pBADNum = np.ones(numWords)\n pGOODNum = np.ones(numWords)\n pBADDemon = 2.0\n pGOODDemon = 2.0\n\n for i in range(numTrainDocs):\n if trainClasses[i] == BAD:\n pBADNum += trainMatrix[i]\n pBADDemon += sum(trainMatrix[i])\n else:\n pGOODNum += trainMatrix[i]\n pGOODDemon += sum(trainMatrix[i])\n\n pBADVect = np.log(pBADNum / pBADDemon)\n pGOODVect = np.log(pGOODNum / pGOODDemon)\n\n return pBADVect, pGOODVect, pCLASS\n\n\n'''\n Convert the input into a vector whose dimension is len(listVocab)\n params: \n listVocab: vocabulary list\n inputSet: tokenized text, stored in a set\n'''\n\n\ndef bagOfWords2VecMN(listVocab, inputSet):\n returnVec = [0] * len(listVocab)\n for word in inputSet:\n if word in listVocab:\n returnVec[listVocab.index(word)] += 1\n return returnVec\n\n\n'''\n Load the saved model and run classification\n'''\n\n\ndef Classify(textList):\n fr = open(\"args.pkl\", \"rb\")\n args = pickle.load(fr)\n pBAD = args['pBAD']\n pGOODV = args['pGOODV']\n pCLASS = args['pCLASS']\n fr.close()\n\n fr = open(\"vocab.pkl\", \"rb\")\n listVocab = pickle.load(fr)\n fr.close()\n\n if len(listVocab) == 0:\n print(\"got no args\")\n return\n\n text = textParse(textList)\n vecWord = bagOfWords2VecMN(listVocab, text)\n class_type = classifyNaiveBayes(np.array(vecWord), pBAD, pGOODV, pCLASS)\n if class_type == 1:\n print(\"classification type: negative review\")\n return BAD\n else:\n print(\"classification type: positive review\")\n return GOOD\n\n\n'''\n Save the audio\n'''\nframerate = 8000 # sampling rate\nNUM_SAMPLES = 2000\nchannels = 1 # 
number of audio channels\nsampwidth = 2 # sample width in bytes\nTIME = 1 # recording time\n\n\ndef save_wave_file(filename, data):\n wf = wave.open(filename, 'wb')\n wf.setnchannels(channels)\n wf.setsampwidth(sampwidth)\n wf.setframerate(framerate)\n wf.writeframes(b\"\".join(data))\n wf.close()\n\n\ndef my_record():\n pa = PyAudio()\n stream = pa.open(format=paInt16, channels=1,\n rate=framerate, input=True,\n frames_per_buffer=NUM_SAMPLES)\n my_buf = []\n count = 0\n while count < TIME * 10: # limit the recording length\n string_audio_data = stream.read(NUM_SAMPLES)\n my_buf.append(string_audio_data)\n count += 1\n print(count, 'sec')\n save_wave_file('01.wav', my_buf)\n stream.close()\n\n\nif __name__ == \"__main__\":\n loadStopword()\n goodCount = 0\n badCount = 0\n # constants below: replace with your own app credentials\n APP_ID = '11177120'\n API_KEY = 'lGIefOgI5IuELBPUYziS4APL'\n SECRET_KEY = 'csbojnHuFzZPL5ZfXxd76EZed01T3b2j'\n while True:\n opcode = input(\"input 1 for training, 2 for Crawler text test, 3 for Audio test, Others for text test: \")\n if opcode.strip() == \"1\":\n begtime = time.time()\n testClassify()\n print(\"cost time total:\", time.time() - begtime)\n elif opcode.strip() == \"2\":\n textList = open('taobao.txt', 'r', encoding=('utf-8')).readlines()\n print(len(textList))\n for text in textList:\n if Classify(text) == BAD:\n badCount += 1\n else:\n goodCount += 1\n print(goodCount)\n print(badCount)\n print(\"positive review rate:\", goodCount / (goodCount + badCount))\n goodCount = 0\n badCount = 0\n elif opcode.strip() == '3':\n my_record()\n # initialise the AipSpeech client\n aipSpeech = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n print('----recording finished----')\n print('----starting speech recognition----')\n result = aipSpeech.asr(open('01.wav', 'rb').read(), 'wav', 8000, {\n 'dev_pid': '1536',\n })\n\n if result['err_msg'] != 'success.':\n print('no speech captured')\n else:\n print('----speech recognition finished----')\n print(result['result'][0])\n text = result['result'][0]\n Classify(text)\n else:\n text = input(\"input the text:\")\n Classify(text)\n","sub_path":"NaiveBayes/NavieBayes.py","file_name":"NavieBayes.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"592592385","text":"'''\nCreated on 13 Aug 2015\n\n@author: NoNotCar\n'''\nimport pygame\n# import pygame._view\nimport sys\nimport UniJoy\n\nscreen = pygame.display.set_mode((400, 256))\n\nimport Img\nfrom Enemies import *\n\n\ndef die(screen):\n pygame.mixer.music.stop()\n pygame.display.flip()\n pygame.time.wait(1000)\n screen.fill((0, 0, 0))\n Img.bcentre(Img.bfont, \"FOOL\", screen, col=(255, 255, 255))\n pygame.display.flip()\n pygame.time.wait(1000)\n\n\ndef Instruct(instructions, time):\n words = instructions.split()\n text = \"\"\n for i in range(len(words)):\n pygame.event.pump()\n if i:\n text += \" \"\n text += words[i]\n screen.fill((255, 255, 255))\n Img.bcentre(Img.dfont, text, screen, col=(0, 0, 0))\n pygame.display.flip()\n pygame.time.wait(time)\n\n\nclass Player(object):\n def __init__(self):\n self.radius = 100\n self.angle = 0.0\n self.direction = 1\n self.speedmult = 1\n self.lasedown = 0\n\n def get_x(self):\n return int(round(self.radius * math.sin(math.radians(self.angle)))) + 128\n\n def get_y(self):\n return int(round(self.radius * math.cos(math.radians(self.angle)))) + 128\n\n def get_speed(self):\n return self.radius ** -1 * 100 * self.speedmult\n\n\nlevels = (([Asteroid], 15, 1), ([Asteroid, BigAsteroid], 20, 1.5), ([Hostage, Asteroid], 30, 1),\n ([BigAsteroid, SmallAsteroid], 30, 2), ([MustShoot], 30, 1),\n ([Asteroid, Obstacle], 30, 1), ([Obstacle2], 30, 1), 
([EnemyShip], 30, 1), ([Ranged], 30, 1),\n ([Obstacle, MustShoot], 30, 1.5), ([EnemyShip2], 60, 2))\nlevel = 0\njnum = pygame.joystick.get_count()\nunijs = [UniJoy.Unijoy(n) for n in range(jnum)]\nassert jnum>0,\"NOT ENOUGH CONTROLLERS\"\nInstruct(\"UP/DOWN TO MOVE\", 500)\npygame.time.wait(500)\nInstruct(\"SHOOT WITH A\", 500)\nwhile True:\n p = Player()\n c = pygame.time.Clock()\n obstacles = []\n plasers = []\n score = 0\n tick = 0\n dead = False\n if level == 4:\n Instruct(\"MUST SHOOT YELLOW\", 500)\n elif level == 5:\n Instruct(\"RED IS IMMORTAL\", 500)\n elif level == 2:\n Instruct(\"DON'T SHOOT PINK\", 500)\n elif level == len(levels) - 1:\n Instruct(\"ULTIMATE DEFENCE\", 1000)\n p.speedmult = levels[level][2]\n if level != 9:\n Instruct(\"LEVEL \" + str(level + 1), 500)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.fill((255, 255, 255) if level != len(levels) - 1 else (255, 150, 0))\n for obsc in levels[level][0]:\n obsc.generate(obstacles)\n ujdir = 0\n for uj in unijs:\n ujd = uj.getdirstick(1)\n if ujd:\n ujdir -= ujd[1]\n if ujdir < -(jnum // 2):\n if p.radius > 30:\n p.radius -= 1\n elif ujdir > jnum // 2:\n if p.radius < 100:\n p.radius += 1\n if any([uj.get_b(\"A\") for uj in unijs]) and not p.lasedown:\n plasers.append([p.get_x() - 8, p.get_y() - 2])\n p.lasedown = 20\n if p.lasedown > 0:\n p.lasedown -= 1\n pygame.draw.circle(screen, (127, 127, 127), (128, 128), p.radius, 1)\n orects = []\n plrects = []\n for obstacle in obstacles:\n orects.append((pygame.draw.rect(screen, obstacle.col,\n pygame.Rect(obstacle.x, obstacle.y, obstacle.w, obstacle.h)), obstacle))\n obstacle.update(obstacles)\n for pos in plasers:\n plrects.append(pygame.draw.rect(screen, (0, 0, 255), pygame.Rect(pos[0], pos[1], 16, 4)))\n pos[0] += 4\n prect = pygame.draw.rect(screen, (0, 0, 0), pygame.Rect(p.get_x() - 8, p.get_y() - 8, 16, 16))\n for ore in [o for o in orects if o[1].plaser]:\n for pr in plrects:\n if ore[0].colliderect(pr):\n obstacles.remove(ore[1])\n if ore[1].hostage:\n die(screen)\n dead = True\n for ore in orects:\n if ore[1].isdeadly and ore[0].colliderect(prect):\n die(screen)\n dead = True\n for obstacle in obstacles:\n if obstacle.x <= -obstacle.w:\n if not obstacle.deadgooff:\n obstacles.remove(obstacle)\n else:\n die(screen)\n dead = True\n if dead:\n break\n for ore in orects:\n if not ore[1].isdeadly and ore[0].colliderect(prect):\n obstacles.remove(ore[1])\n for pos in plasers:\n if pos[0] > 400:\n plasers.remove(pos)\n p.angle = (p.angle - p.get_speed()) % 360\n pygame.display.flip()\n c.tick(60)\n if tick == 60:\n score += 1\n tick = 0\n if score == levels[level][1]:\n pygame.mixer.music.stop()\n Instruct(\"WELL DONE\", 500)\n level += 1\n if level == 10:\n Instruct(\"YOU WIN!\", 2000)\n sys.exit()\n break\n else:\n tick += 1\n","sub_path":"Orbital/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"431978485","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport random\nfrom App.api.utils.FileCommon import FileCommon\nimport App.api.utils.RedisCommon as RedisCommon\nimport App.api.utils.Constants as Constants\nclass TestCommon():\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\n\t@classmethod\n\tdef randomStart(cls, token, typ, difficulty, length):\n\t\tli = cls.genQuestionList(typ, 
length)\n\t\t\"\"\"\n\t\tsave the contents of li to redis\n\t\t\"\"\"\n\t\tRedisCommon.saveQuestion(token, li)\n\t\treturn FileCommon.gen(li[0], difficulty)\n\n\t@classmethod\n\tdef nextQuestion(cls, answer, g):\n\t\ttoken = g.token\n\t\tdifficulty = g.difficulty\n\t\t\"\"\"\n\t\tfetch the remaining question links and answer-result links from redis\n\t\t\"\"\"\n\t\tquestionList = RedisCommon.getQuestion(token)\n\t\tcorrect = RedisCommon.getCorrect(token)\n\t\tcheckList = RedisCommon.getCheck(token)\n\n\t\tresult = cls.check(questionList[len(checkList)], answer)\n\t\tif result:\n\t\t\tcorrect += 1\n\t\t\t\"\"\"\n\t\t\t\tsave the score (number of correct answers)\n\t\t\t\"\"\"\n\t\t\tRedisCommon.saveCorrect(token, correct)\n\n\t\t\"\"\"\n\t\t\tsave the submitted answer\n\t\t\"\"\"\n\t\tRedisCommon.saveCheck(token, answer)\n\t\tprint('%d-%d' %(len(checkList), len(questionList)))\n\t\tif len(checkList) == len(questionList)-1:\n\t\t\treturn Constants.COMPLETE\n\t\treturn FileCommon.gen(questionList[len(checkList)+1], difficulty)\n\n\n\t@classmethod\n\tdef getResult(cls, g):\n\t\ttoken = g.token\n\t\tdifficulty = g.difficulty\n\t\t\"\"\"\n\t\t\tfetch all question links, answer-result links and the score from redis\n\t\t\"\"\"\n\t\tquestionList = RedisCommon.getQuestion(token)\n\t\tcorrect = RedisCommon.getCorrect(token)\n\t\tcheckList = RedisCommon.getCheck(token)\n\n\t\tRedisCommon.saveRanking(token, difficulty)\n\t\tif len(questionList) == 50:\n\t\t\tcls.saveRank(token, difficulty)\n\t\tretList = []\n\t\tfor i in range(len(questionList)):\n\t\t\tretList.append({\n\t\t\t\t\"q\": questionList[i].decode('utf8'),\n\t\t\t\t\"a\": checkList[i].decode('utf8')\n\t\t\t})\n\t\treturn {\n\t\t\t\"data\": retList\n\t\t}\n\n\t@classmethod\n\tdef check(cls, question, answer):\n\t\treturn question == answer\n\n\n\t@classmethod\n\tdef genQuestionList(cls, typ, length):\n\t\treturn random.sample(FileCommon.combine(typ), length)\n\n\n\t@classmethod\n\tdef savePlayer(cls, token, name):\n\t\tRedisCommon.savePlayer(token, name)\n\n\t@classmethod\n\tdef getRank(cls, difficulty):\n\t\treturn RedisCommon.getRanking(difficulty)\n\n\t@classmethod\n\tdef saveRank(cls, token, difficulty):\n\t\tRedisCommon.saveRank(token, difficulty)\n\n\n\n","sub_path":"App/api/utils/TestCommon.py","file_name":"TestCommon.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"44753006","text":"\"\"\"\n================\nPrecision-Recall\n================\n\nExample of Precision-Recall metric to evaluate classifier output quality.\n\nIn information retrieval, precision is a measure of result relevancy, while\nrecall is a measure of how many truly relevant results are returned. A high\narea under the curve represents both high recall and high precision, where high\nprecision relates to a low false positive rate, and high recall relates to a\nlow false negative rate. High scores for both show that the classifier is\nreturning accurate results (high precision), as well as returning a majority of\nall positive results (high recall).\n\nA system with high recall but low precision returns many results, but most of\nits predicted labels are incorrect when compared to the training labels. A\nsystem with high precision but low recall is just the opposite, returning very\nfew results, but most of its predicted labels are correct when compared to the\ntraining labels. 
An ideal system with high precision and high recall will\nreturn many results, with all results labeled correctly.\n\nPrecision (:math:`P`) is defined as the number of true positives (:math:`T_p`)\nover the number of true positives plus the number of false positives\n(:math:`F_p`).\n\n:math:`P = \\\\frac{T_p}{T_p+F_p}`\n\nRecall (:math:`R`) is defined as the number of true positives (:math:`T_p`)\nover the number of true positives plus the number of false negatives\n(:math:`F_n`).\n\n:math:`R = \\\\frac{T_p}{T_p + F_n}`\n\nThese quantities are also related to the (:math:`F_1`) score, which is defined\nas the harmonic mean of precision and recall.\n\n:math:`F1 = 2\\\\frac{P \\\\times R}{P+R}`\n\nIt is important to note that the precision may not decrease with recall. The\ndefinition of precision (:math:`\\\\frac{T_p}{T_p + F_p}`) shows that lowering\nthe threshold of a classifier may increase the denominator, by increasing the\nnumber of results returned. If the threshold was previously set too high, the\nnew results may all be true positives, which will increase precision. If the\nprevious threshold was about right or too low, further lowering the threshold\nwill introduce false positives, decreasing precision.\n\nRecall is defined as :math:`\\\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does\nnot depend on the classifier threshold. This means that lowering the classifier\nthreshold may increase recall, by increasing the number of true positive\nresults. It is also possible that lowering the threshold may leave recall\nunchanged, while the precision fluctuates.\n\nThe relationship between recall and precision can be observed in the\nstairstep area of the plot - at the edges of these steps a small change\nin the threshold considerably reduces precision, with only a minor gain in\nrecall. See the corner at recall = .59, precision = .8 for an example of this\nphenomenon.\n\n.. 
note::\n\n See also :func:`sklearn.metrics.average_precision_score`,\n :func:`sklearn.metrics.recall_score`,\n :func:`sklearn.metrics.precision_score`\n\"\"\"\nprint(__doc__)\n\nimport random\nimport pylab as pl\nimport numpy as np\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import auc\nfrom sklearn.cross_validation import train_test_split\n\n# import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nX, y = X[y != 2], y[y != 2] # Keep also 2 classes (0 and 1)\n\n# Add noisy features\nrandom_state = np.random.RandomState(0)\nn_samples, n_features = X.shape\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# Split into training and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=random_state)\n\n# Run classifier\nclassifier = svm.SVC(kernel='linear', probability=True,\n random_state=random_state)\nprobas_ = classifier.fit(X_train, y_train).predict_proba(X_test)\n\n# Compute Precision-Recall and plot curve\nprecision, recall, thresholds = precision_recall_curve(y_test, probas_[:, 1])\narea = auc(recall, precision)\nprint(\"Area Under Curve: %0.2f\" % area)\n\npl.clf()\npl.plot(recall, precision, label='Precision-Recall curve')\npl.xlabel('Recall')\npl.ylabel('Precision')\npl.ylim([0.0, 1.05])\npl.xlim([0.0, 1.0])\npl.title('Precision-Recall example: AUC=%0.2f' % area)\npl.legend(loc=\"lower left\")\npl.show()\n","sub_path":"examples/plot_precision_recall.py","file_name":"plot_precision_recall.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"302434240","text":"#!/usr/bin/env python3\n\nimport argparse\nimport codecs\nimport http.client\nimport http.server\nimport json\nimport logging\nimport re\nimport sys\nimport time\nimport csv\nimport os\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport webbrowser\nfrom datetime import timedelta\n\nlogging.basicConfig(level=20, datefmt=\"%I:%M:%S\", format=\"[%(asctime)s] %(message)s\")\n\n\nclass SpotifyAPI:\n # Requires an OAuth token.\n def __init__(self, auth):\n self._auth = auth\n\n # Gets a resource from the Spotify API and returns the object.\n def get(self, url, params={}, tries=3):\n # Construct the correct URL.\n if not url.startswith(\"https://api.spotify.com/v1/\"):\n url = \"https://api.spotify.com/v1/\" + url\n if params:\n url += (\"&\" if \"?\" in url else \"?\") + urllib.parse.urlencode(params)\n\n # Try the sending off the request a specified number of times before giving up.\n for _ in range(tries):\n try:\n req = urllib.request.Request(url)\n req.add_header(\"Authorization\", \"Bearer \" + self._auth)\n res = urllib.request.urlopen(req)\n reader = codecs.getreader(\"utf-8\")\n return json.load(reader(res))\n except Exception as err:\n logging.info(\"Couldn't load URL: {} ({})\".format(url, err))\n time.sleep(2)\n logging.info(\"Trying again...\")\n sys.exit(1)\n\n # fetches liked, playlists, podcast episodes and albums then joins them\n def list(self, url, params={}):\n response = self.get(url, params)\n items = response[\"items\"]\n\n # loop through to bring all tracks and their data\n while response[\"next\"]:\n logging.info(f\"Loaded {len(items)}/{response['total']} items\")\n\n response = self.get(response[\"next\"])\n items += response[\"items\"]\n\n return items\n\n # fetches followed artists and joins them\n def list_artists(self, url, params={}):\n 
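# unlike list(), this endpoint nests its items and paging fields under an 'artists' key\n        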
response = self.get(url, params)\n        items = response['artists'][\"items\"]\n\n        # loop through to bring all artists and their data\n        while response['artists'][\"next\"]:\n            logging.info(f\"Loaded {len(items)}/{response['artists']['total']} items\")\n\n            response = self.get(response['artists'][\"next\"])\n            items += response['artists'][\"items\"]\n\n        return items\n\n    # Pops open a browser window for a user to log in and authorize API access.\n    @staticmethod\n    def authorize(client_id, scope):\n        url = \"https://accounts.spotify.com/authorize?\" + urllib.parse.urlencode(\n            {\n                \"response_type\": \"token\",\n                \"client_id\": client_id,\n                \"scope\": scope,\n                \"redirect_uri\": f\"http://127.0.0.1:{SpotifyAPI._SERVER_PORT}/redirect\",\n            }\n        )\n        logging.info(f\"Authorizing... (click if browser doesn't open)\\n{url}\\n\")\n        webbrowser.open(url)\n\n        # Start a simple, local HTTP server to listen for the authorization token... (i.e. a hack).\n        server = SpotifyAPI._AuthorizationServer(\"127.0.0.1\", SpotifyAPI._SERVER_PORT)\n        try:\n            while True:\n                server.handle_request()\n        except SpotifyAPI._Authorization as auth:\n            return SpotifyAPI(auth.access_token)\n\n    # The port that the local server listens on. Don't change this,\n    # as Spotify will only redirect to certain predefined URLs.\n    _SERVER_PORT = 43019\n\n    class _AuthorizationServer(http.server.HTTPServer):\n        def __init__(self, host, port):\n            http.server.HTTPServer.__init__(\n                self, (host, port), SpotifyAPI._AuthorizationHandler\n            )\n\n        # Disable the default error handling.\n        def handle_error(self, request, client_address):\n            raise\n\n    class _AuthorizationHandler(http.server.BaseHTTPRequestHandler):\n        def do_GET(self):\n            # The Spotify API has redirected here, but access_token is hidden in the URL fragment.\n            # Read it using JavaScript and send it to /token as an actual query string...\n            if self.path.startswith(\"/redirect\"):\n                self.send_response(200)\n                self.send_header(\"Content-Type\", \"text/html\")\n                self.end_headers()\n                self.wfile.write(\n                    b'<script>location.replace(\"token?\" + location.hash.slice(1))</script>'\n                )\n\n            # Read access_token and use an exception to kill the server listening...\n            elif self.path.startswith(\"/token?\"):\n                self.send_response(200)\n                self.send_header(\"Content-Type\", \"text/html\")\n                self.end_headers()\n                self.wfile.write(\n                    b\"Thanks! 
You may now close this window.\"\n                )\n\n                access_token = re.search(\"access_token=([^&]*)\", self.path).group(1)\n                logging.info(\"Received access token from Spotify\")\n                raise SpotifyAPI._Authorization(access_token)\n\n            else:\n                self.send_error(404)\n\n        # Disable the default logging.\n        def log_message(self, format, *args):\n            pass\n\n    class _Authorization(Exception):\n        def __init__(self, access_token):\n            self.access_token = access_token\n\n# simple recursive y/n input with default\ndef yesno(question, default=None):\n    ans = input(question).strip().lower()\n\n    if default is not None:\n        if ans == '':\n            if default == 'y':\n                return True\n            return False\n        elif ans not in ['y', 'n']:\n            print(f'{ans} is invalid, please try again...')\n            return yesno(question)\n        if ans == 'y':\n            return True\n    else:\n        if ans not in ['y', 'n']:\n            print(f'{ans} is invalid, please try again...')\n            return yesno(question)\n        if ans == 'y':\n            return True\n\n    return False\n\n# return formatted hh mm ss\ndef timematter(x):\n    s = timedelta(seconds=x)\n\n    if s.days < 1:\n        if s.seconds <= 60 * 60:\n            out = f'{s.seconds//60}m {s.seconds - (s.seconds//60)*60}s'\n        else:\n            out = f'{s.seconds//(60*60)}h {int(s.seconds/60 - (s.seconds//3600)*60)}m {s.seconds - (s.seconds//60)*60}s'\n    else:\n        out = f'{s.days}d {s.seconds//(60*60)}h {int(s.seconds/60 - (s.seconds//3600)*60)}m {s.seconds - (s.seconds//60)*60}s'\n    return out\n\n# save tracks to csv\ndef save_track(filename, track_list):\n    file = open(filename, 'w')\n\n    # init sheet rows\n    fieldnames = [\n        'Track ID',\n        'Album ID',\n        'Track Name',\n        'Album Name',\n        'Artist Name(s)',\n        'Release Date',\n        'Duration',\n        'Explicit',\n        'Album Type',\n        'Popularity',\n        'Added On',\n        'Album Tracks',\n        'Track URL',\n        'Album URL',\n    ]\n\n    # init csv writer\n    writer = csv.DictWriter(file, fieldnames=fieldnames)\n    writer.writeheader()\n\n    # loop through tracks and add them as rows\n    for track in track_list:\n        try:\n            writer.writerow({\n                'Track ID': track['track']['id'],\n                'Album ID': track['track']['album']['id'],\n                'Track Name': track['track']['name'],\n                'Album Name': track['track']['album']['name'],\n                'Album Tracks': track['track']['album']['total_tracks'],\n                'Artist Name(s)': \", \".join([artist['name'] for artist in track['track']['artists']]),\n                'Release Date': track['track']['album']['release_date'],\n                'Duration': timematter(int(track['track']['duration_ms']) / 1000),\n                'Explicit': track['track']['explicit'],\n                'Album Type': track['track']['album']['album_type'],\n                'Popularity': track['track']['popularity'],\n                'Added On': track['added_at'],\n                'Track URL': track['track']['external_urls']['spotify'],\n                'Album URL': track['track']['album']['external_urls']['spotify']\n            })\n        except KeyError:\n            logging.error(f\"Failed to load track {track['track']['name']}\")\n            continue\n\n    file.close()\n\n# save artists to csv\ndef save_artist(filename, artist_list):\n    file = open(filename, 'w')\n\n    # init sheet rows\n    fieldnames = [\n        'ID',\n        'Name',\n        'Type',\n        'Followers',\n        'Popularity',\n        'URL'\n    ]\n\n    # init csv writer\n    writer = csv.DictWriter(file, fieldnames=fieldnames)\n    writer.writeheader()\n\n    # loop through artists and add them as rows\n    for artist in artist_list:\n        try:\n            writer.writerow({\n                'ID': artist['id'],\n                'Name': artist['name'],\n                'Type': artist['type'],\n                'Followers': artist['followers']['total'],\n                'Popularity': artist['popularity'],\n                'URL': artist['external_urls']['spotify'],\n            })\n        except KeyError:\n            logging.error(f\"Failed to load artist {artist['name']}\")\n            continue\n\n    
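# every artist row has been written; close the CSV file handle\n    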
file.close()\n\n# save albums to csv\ndef save_album(filename, album_list):\n    file = open(filename, 'w')\n\n    # init sheet rows\n    fieldnames = [\n        'ID',\n        'Name',\n        'Tracks',\n        'Artist Name(s)',\n        'Release Date',\n        'Label',\n        'Type',\n        'Popularity',\n        'Added On',\n        'URL',\n    ]\n\n    # init csv writer\n    writer = csv.DictWriter(file, fieldnames=fieldnames)\n    writer.writeheader()\n\n    # loop through albums and add them as rows\n    for album in album_list:\n        try:\n            writer.writerow({\n                'ID': album['album']['id'],\n                'Name': album['album']['name'],\n                'Tracks': album['album']['total_tracks'],\n                'Artist Name(s)': \", \".join([artist['name'] for artist in album['album']['artists']]),\n                'Release Date': album['album']['release_date'],\n                'Label': album['album']['label'],\n                'Type': album['album']['album_type'],\n                'Popularity': album['album']['popularity'],\n                'Added On': album['added_at'],\n                'URL': album['album']['external_urls']['spotify']\n            })\n        except KeyError:\n            logging.error(f\"Failed to load album {album['album']['name']}\")\n            continue\n\n    file.close()\n\n# save podcasts to csv\ndef save_podcast(filename, podcast_list):\n    file = open(filename, 'w')\n\n    # init sheet rows\n    fieldnames = [\n        'ID',\n        'Name',\n        'Publisher',\n        'Description',\n        'Episodes',\n        'Type',\n        'Explicit',\n        'Added On',\n        'URL',\n    ]\n\n    # init csv writer\n    writer = csv.DictWriter(file, fieldnames=fieldnames)\n    writer.writeheader()\n\n    # loop through podcasts and add them as rows\n    for podcast in podcast_list:\n        try:\n            writer.writerow({\n                'ID': podcast['show']['id'],\n                'Name': podcast['show']['name'],\n                'Description': podcast['show']['description'],\n                'Episodes': podcast['show']['total_episodes'],\n                'Publisher': podcast['show']['publisher'],\n                'Type': podcast['show']['media_type'],\n                'Explicit': podcast['show']['explicit'],\n                'Added On': podcast['added_at'],\n                'URL': podcast['show']['external_urls']['spotify']\n            })\n        except KeyError:\n            logging.error(f\"Failed to load podcast {podcast['show']['name']}\")\n            continue\n\n    file.close()\n\n# save episodes to csv\ndef save_episode(filename, episode_list):\n    file = open(filename, 'w')\n\n    # init sheet rows\n    fieldnames = [\n        'Episode ID',\n        'Show ID',\n        'Episode Name',\n        'Show Name',\n        'Publisher',\n        'Description',\n        'Release Date',\n        'Duration',\n        'Explicit',\n        'Show Type',\n        'Added On',\n        'Episode URL',\n        'Show URL',\n    ]\n\n    # init csv writer\n    writer = csv.DictWriter(file, fieldnames=fieldnames)\n    writer.writeheader()\n\n    # loop through episodes and add them as rows\n    for episode in episode_list:\n        try:\n            writer.writerow({\n                'Episode ID': episode['episode']['id'],\n                'Show ID': episode['episode']['show']['id'],\n                'Episode Name': episode['episode']['name'],\n                'Show Name': episode['episode']['show']['name'],\n                'Publisher': episode['episode']['show']['publisher'],\n                'Description': episode['episode']['description'],\n                'Release Date': episode['episode']['release_date'],\n                'Duration': timematter(int(episode['episode']['duration_ms']) / 1000),\n                'Explicit': episode['episode']['explicit'],\n                'Show Type': episode['episode']['show']['media_type'],\n                'Added On': episode['added_at'],\n                'Episode URL': episode['episode']['external_urls']['spotify'],\n                'Show URL': episode['episode']['show']['external_urls']['spotify']\n            })\n        except KeyError:\n            logging.error(f\"Failed to load episode {episode['episode']['name']}\")\n            continue\n\n    file.close()\n\n\n# log into the Spotify API.\nspotify = SpotifyAPI.authorize(\n    # id from spotify client app created at\n    # 
https://developer.spotify.com/dashboard/applications\n # it has http://127.0.0.1:43019/redirect as the redirect URI\n client_id=\"fc84b0b659d64f568f72d0d6009ad965\",\n scope=\"playlist-read-private playlist-read-collaborative user-library-read user-follow-read\",\n)\n\n\n# get the ID of the logged in user.\nlogging.info('Loading user info...')\nme = spotify.get('me')\nlogging.info(f\"Logged in as {me['display_name']} ({me['id']})\")\n\n\n# for playlists not owned by user\nsave_foreign_playlists = yesno('Save playlists not owned by you? [y/N]: ', 'n')\n\n\n# create needed dirs\nlogging.info('Creating needed directories')\nos.makedirs('./done/Music/Playlists', exist_ok=True)\nos.makedirs('./done/Podcasts', exist_ok=True)\n\n\n# save liked songs\nlogging.info('Loading liked songs...')\nliked_tracks = spotify.list(f\"users/{me['id']}/tracks\", {'limit': 50})\nlogging.info('Saving liked songs')\nsave_track('done/Music/Liked.csv', liked_tracks)\n\n\n# get all playlist data\nplaylist_data = spotify.list(f\"users/{me['id']}/playlists\", {'limit': 50})\n\n\n# get user's playlist data\nlogging.info(\"Loading user's playlists...\")\nuser_playlists = [playlist for playlist in playlist_data if playlist['owner']['id'] == me['id']]\nlogging.info(f\"Found {len(user_playlists)} user's playlists\")\n\n# saving user's playlist songs\nfor playlist in user_playlists:\n logging.info(f\"Loading user playlist: {playlist['name']} ({playlist['tracks']['total']} songs)\")\n playlist_tracks = spotify.list(playlist['tracks']['href'], {'limit': 100})\n logging.info(f\"Saving {playlist['name']}'s songs\")\n save_track(f\"done/Music/Playlists/{playlist['name']} - {playlist['id']}.csv\", playlist_tracks)\n\n\n# check whether to save foreign playlists\nif save_foreign_playlists:\n # get foreign playlist data\n logging.info(\"Loading foreign playlists...\")\n foreign_playlists = [playlist for playlist in playlist_data if playlist['owner']['id'] != me['id']]\n logging.info(f\"Found {len(foreign_playlists)} foreign playlists\")\n\n # saving foreign playlist songs\n for playlist in foreign_playlists:\n logging.info(f\"Loading foreign playlist: {playlist['name']} ({playlist['tracks']['total']} songs)\")\n playlist_tracks = spotify.list(playlist['tracks']['href'], {'limit': 100})\n logging.info(f\"Saving {playlist['name']}'s songs\")\n save_track(f\"done/Music/Playlists/{playlist['name']} - {playlist['id']}.csv\", playlist_tracks)\n\n\n# following artists data\nlogging.info('Loading followed artists...')\nfollowing_artist_data = spotify.list_artists('me/following', {'type': 'artist', 'limit': 50})\nlogging.info(f'Found {len(following_artist_data)} artists')\nsave_artist('done/Music/Artists.csv', following_artist_data)\n\n\n# saved album data\nlogging.info('Loading saved albums...')\nsaved_album_data = spotify.list('me/albums', {'limit': 50})\nlogging.info(f'Found {len(saved_album_data)} albums')\nsave_album('done/Music/Albums.csv', saved_album_data)\n\n\n# saved podcast shows data\nlogging.info('Loading saved podcast shows...')\nsaved_podcast_data = spotify.list('me/shows', {'limit': 50})\nlogging.info(f'Found {len(saved_podcast_data)} podcasts')\nsave_podcast('done/Podcasts/Shows.csv', saved_podcast_data)\n\n\n# saved podcast episode data\nlogging.info('Loading saved podcast episodes...')\nsaved_episode_data = spotify.list(\"me/episodes\", {'limit': 50})\nlogging.info('Saving episodes')\nsave_episode('done/Podcasts/Episodes.csv', 
saved_episode_data)\n","sub_path":"spotify-backup.py","file_name":"spotify-backup.py","file_ext":"py","file_size_in_byte":16945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"477378935","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nclass Projectile(object):\n    # Class for the information about the object and movement tracking\n\n    def __init__(self, y, length, acc, mass, rad, Cd, Rho, liquidLevel, RhoAir, multiLiquids, pipeLength):\n        # SO MANY VARIABLES\n        self.y = y\n        self.yinit = y\n        self.yprev = self.y\n        self.length = length\n        self.vel = 0\n        self.acc = acc\n        self.top = y + length\n        self.mass = mass\n        self.rad = rad\n        self.Cd = Cd        # Drag Coefficient\n        self.Rho = Rho      # Fluid Density\n        self.RhoAir = RhoAir    # Air Density\n\n        # Fill pipe based on if we want object to fall through air or not\n        if(not multiLiquids):\n            self.liquidLevel = 2 * pipeLength\n        else:\n            self.liquidLevel = liquidLevel\n\n        # data storage\n        self.data = []\n        self.realData = []\n        self.velData = []\n\n        # Accurate volume calculations\n        self.cone = math.pi * self.rad**2 * self.length / 3.0\n        self.sphere = 2.0 / 3.0 * math.pi * self.rad**3\n        self.volume = self.cone + self.sphere\n        self.density = self.mass / self.volume\n        # Print density of object so we know it doesn't float\n        print(\"Density of object: \" + str(self.density))\n\n    # Update position to simulate movement\n    def update(self, dt, time):\n        self.Forces(dt)\n        self.Position(dt, time)\n\n    def checkBlocking(self, gates):\n        for i in range(len(gates)):\n            if self.y < gates[i].y1 and self.top > gates[i].y2:\n                self.data.append([time, gates[i].y1])\n\n    def Forces(self, dt):\n        #Fb = (4 / 3.0) * math.pi * self.rad**3 * self.Rho * 9.81\n        if(self.y < self.liquidLevel):\n            Fb = self.volume * self.Rho * 9.81\n            Fd = 0.5 * self.Cd * self.Rho * (math.pi * self.rad**2) * (self.vel**2)\n        else:\n            #Fb = (4 / 3.0) * math.pi * self.rad**3 * self.RhoAir * 9.81\n            Fb = 0\n            Fd = 0.5 * self.Cd * self.RhoAir * (math.pi * self.rad**2) * (self.vel**2)\n\n        Mg = self.mass * 9.81\n        self.acc = (-Mg + Fd + Fb) / self.mass\n        #self.acc = (-Mg + Fd) / self.mass\n\n\n    def Position(self, dt, time):\n        self.y = self.yprev + self.vel * dt + (0.5) * (self.acc) * (dt**2)\n        self.top = self.y + self.length\n        self.vel = (self.y - self.yprev) / dt\n        self.yprev = self.y\n        self.realData.append([time, self.y])\n        self.velData.append([time, self.vel])\n\n# gate object, didn't have to make it but made it easier to think about in my head\nclass Gate(object):\n    # Class for the transistors\n    def __init__(self, y1, length):\n        self.y1 = y1\n        self.y2 = y1 + length\n\n# Pipe constants\npipe = 10.0 # total length of pipe\nnumSensors = int(pipe / .12) # Number of sensors being used\nstarty = pipe + .1\n\n\n#---------------------#\n#      Simulate       #\n#---------------------#\n\n# Create projectile object\nproj = Projectile(starty,   # ystart(where it starts the object)\n                  .06,      # length\n                  -9.81,    # Gravity\n                  0.150,    # Mass(kg)\n                  .03,      # radius(m)\n                  .09,      # Cd(Coefficient of drag for shape of object)\n                  1000,     # RhoLiquid(Density of liquid at bottom of pipe)\n                  pipe/2,   # liquidLevel(Where in pipe liquid starts)\n                  1.225,    # RhoAir(Density of air at top of pipe)\n                  True,     # Whether or not there are multiple liquids\n                  pipe)     # Height of pipe\n\n#termvel = math.sqrt((2 * proj.mass * 9.81) / (proj.Rho * math.pi * proj.rad**2 * proj.Cd))\n#print(str(termvel))\n\n# Pre-run info\ngates = [] # List of gate objects\ndt = .0001 # Time step length\ntime = 0 # Current 
Time\nhitTerminal = False\ntermvelTime = 0\n\n# Make the gates\nfor i in range(numSensors):\n    gates.append(Gate((pipe * i) / numSensors + .12, .001))\n\n# update function for the while loop of the simulation\ndef update(time):\n    proj.update(dt, time)\n    proj.checkBlocking(gates)\n\nrunning = True\n\nif(proj.density <= proj.Rho):\n    sys.exit(\"Projectile Floats!!\")\n\n#Euler updating loop\nwhile(running):\n    update(time)\n    if(proj.top < 0):\n        running = False\n    # Store time for termvel vertical line in graphs\n    if(proj.acc > -0.005 and hitTerminal == False):\n        hitTerminal = True\n        termvelTime = time\n    time += dt\n\n#print(proj.acc)\n#print(termvelTime)\n#-----------------------#\n#       Plotting        #\n#-----------------------#\n\n# Simulated data gathering of X and Y\nx = []\ny = []\n\n# Actual X and Y calculated by simulation\nrealx = []\nrealy = []\n\n# Instantaneous Velocities\nvelx = []\nvely = []\n\nfor i in range(len(proj.data)):\n    x.append(proj.data[i][0])\n    y.append(proj.data[i][1])\n\nfor i in range(len(proj.realData)):\n    realx.append(proj.realData[i][0])\n    realy.append(proj.realData[i][1])\n\nfor i in range(len(proj.velData)):\n    velx.append(proj.velData[i][0])\n    vely.append(proj.velData[i][1])\n\n# Figure one\n#plt.figure(1)\nplt.subplot(211)\nplt.title(str(proj.mass) + \"kg mass, \" + str(proj.rad) + \"m radius\")\nplt.scatter(realx, realy, color=\"r\")\n#plt.scatter(x, y)\nplt.ylabel(\"pos(m)\")\nplt.xlabel(\"time(s)\")\n\nplt.subplot(212)\nplt.scatter(velx, vely, color = 'g')\nplt.axvline(x = termvelTime, ymin = 0, ymax = 1, color = 'k')\nplt.ylabel(\"vel(m/s)\")\nplt.xlabel(\"time(s)\")\nplt.show()\n\n# Saving to a file\n'''\nfilename = str(proj.mass * 1000).split('.', 1)\nplt.savefig(filename[0] + 'gBothDrop.png')\nplt.show()\n'''\n","sub_path":"capstone/simData.py","file_name":"simData.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"286246841","text":"\"\"\"\r\n771. Jewels and Stones\r\n\r\nGiven a string J representing the types of stones that are jewels, and a string S representing the stones you own, each character of S is a type of stone you have. You want to know how many of the stones you own are also jewels.\r\n\r\nThe letters in J are distinct, and all characters in J and S are letters. Letters are case sensitive, so \"a\" and \"A\" are different types of stones.\r\n\r\nExample 1:\r\n\r\nInput: J = \"aA\", S = \"aAAbbbb\"\r\nOutput: 3\r\nExample 2:\r\n\r\nInput: J = \"z\", S = \"ZZ\"\r\nOutput: 0\r\nNote:\r\n\r\nS and J contain at most 50 letters.\r\n The characters in J are distinct.\r\n\r\nSource: LeetCode China (力扣)\r\nLink: https://leetcode-cn.com/problems/jewels-and-stones\r\nThe copyright belongs to LeetCode China (领扣网络). Please contact them for authorization before commercial reprints, and cite the source for non-commercial reprints.\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n    def numJewelsInStones(self, J: str, S: str) -> int:\r\n        J = dict(zip(J, [0]*len(J)))\r\n        jewels = 0\r\n        for stone in S:\r\n            if stone in J:\r\n                jewels += 1\r\n        return jewels\r\n\r\n\r\nif __name__ == '__main__':\r\n    result = Solution().numJewelsInStones('aA', 'aAAbbbb')\r\n    print(result)\r\n","sub_path":"t771.py","file_name":"t771.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"244123651","text":"n = int(input())\narr = []\nfor _ in range(n):\n    arr.append(list(map(int,input().split())))\n\nsize = len(arr)\nrank = [1] * size\n\nfor i in range(size):\n    for x in range(size):\n        if i == x:\n            continue\n        if arr[i][0] < arr[x][0] and arr[i][1] < arr[x][1]:\n            rank[i]+=1\n\nfor i in rank:\n    print(i,end=' ')","sub_path":"VS 2019/BOJ_Python/단계/11. 
브루트 포스/BOJ_7568.py","file_name":"BOJ_7568.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"250412365","text":"\r\n\r\ndef run():\r\n    #Inputs the reference string\r\n    print(\"Enter the reference string: \", end=\"\")\r\n    refList = list(map(int, input().strip().split()))\r\n\r\n    #Inputs the capacity or size of the page frame\r\n    print(\"Enter the number of frames: \", end=\"\")\r\n    capacity = int(input())\r\n    \r\n    #Initializes the empty page frame list \r\n    page_frame = [] \r\n    #Initializes the page fault counter to 0\r\n    pageFaults = 0\r\n    #Tracks the most recently used page, used when picking a victim\r\n    most_recently_used = None\r\n\r\n    #Loops through the entire reference list to check with the page_frame\r\n    for i in refList:\r\n        #Checks if the element is in the page frame\r\n        if i not in page_frame:\r\n            #If the page frame is empty or less than the capacity\r\n            if len(page_frame) < capacity:\r\n                page_frame.append(i)\r\n            #If the frame is full, evict the most recently used page \r\n            else:\r\n                #Finds the matching page inside the page_frame and replaces it\r\n                index = page_frame.index(most_recently_used)\r\n                page_frame[index] = i\r\n            #Increase the page fault since the page is not in the page frame\r\n            pageFaults +=1\r\n        #Marks the current page as the most recently used.\r\n        most_recently_used = i\r\n    \r\n    #Prints the total number of page faults\r\n    print(\"Total Page Faults: {}\".format(pageFaults))\r\n","sub_path":"Page Fault Algorithm/PAGE_FAULT_CONTENT/MRU.py","file_name":"MRU.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"234196072","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .models import Post\nfrom .forms import PostForm\nfrom django.views.generic import ListView\n\n# Create your views here.\n\n\n\n@login_required(login_url = 'login')\ndef function(request):\n    post = Post.objects.all()\n    context = {\n        'posts': post\n    }\n    return render(request,'home.html', context)\n\ndef fun2(request):\n    return render(request,'about.html')\n\ndef createPost(request):\n    form = PostForm(initial = {'author' : request.user})\n    if request.method == \"POST\":\n        form = PostForm(request.POST)\n        if form.is_valid():\n            form.save()\n            form = PostForm()\n\n    context = {'form': form}\n\n    return render (request,'createPost.html', context)\n\ndef updatePost(request, _id):\n    postid = Post.objects.get(id=_id)\n    form = PostForm(instance = postid)\n    if request.method == \"POST\":\n        form = PostForm(request.POST, instance=postid)\n        if form.is_valid():\n            form.save()\n            return redirect('home')\n\n    context = {'form': form}\n\n    return render (request,'createPost.html', context)\n\ndef myPost(request,_id):\n    user = User.objects.get(id=_id)\n    post = user.post_set.all()\n    context = {'user' : user, 'post': post}\n    return render(request,'mypost.html',context)\n\ndef userPost(request,_id):\n    user = get_object_or_404(User, id=_id)\n    post = Post.objects.filter(author=user)\n    context = { 'post': post}\n    return render(request,'userPost.html',context)\n\nclass UserPostListView(ListView):\n    model = Post\n    template_name = 'userPost.html' # <app>/<model>_<viewtype>.html\n    context_object_name = 'posts'\n\n    def get_queryset(self):\n        user = get_object_or_404(User, username=self.kwargs.get('username'))\n        return 
Post.objects.filter(author=user)\n\n\ndef delete(request,_id):\n    post = Post.objects.get(id=_id)\n    if request.method == \"POST\":\n        # form = PostForm(request.POST, instance=postid)\n        post.delete()\n        return redirect('home')\n\n    context = {'post': post}\n\n    return render(request,'delete.html', context)\n\n\n\n\n","sub_path":"blog/blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"297086166","text":"from os.path import join, dirname, sep, splitext\nfrom path import Path\nfrom glob import glob\nimport pygame\n\nclass Character_assets:\n    def __init__(self, path_str):\n        self.assets = dict()\n\n        # Find the asset files inside the folders\n        path = path_str.split(\"/\")\n        modules = Path(dirname(__file__)).glob(join(*path, \"**\", \"**\", \"*\"))\n\n        for path in modules:\n            # Split the filename from the last two parent folders of the path\n            *directory, dtype, key, filename = path.split(sep)\n\n            # Split the file name from its extension\n            file_split = splitext(filename)\n            name, extension = file_split\n\n            # Register the asset type folder\n            if not dtype in self.assets:\n                self.assets[dtype] = {}\n\n            # Register the asset name\n            if not key in self.assets[dtype]:\n                self.assets[dtype][key] = []\n\n            # Handle image files\n            if extension in (\".png\", \".jpg\"):\n                asset = pygame.image.load(path).convert_alpha()\n\n            # Handle sound files\n            elif extension in (\".ogg\", \".wav\", \".mp3\"):\n                asset = pygame.mixer.Sound(path)\n\n            # Register the asset, or a variation of it, in the program scope\n            self.assets[dtype][key].append(asset)\n\n\n    def get_assets(self):\n        return self.assets\n","sub_path":"src/Character_assets.py","file_name":"Character_assets.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"314454565","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\"\nUnit tests for the TfUtils helper functions\n\n@version: v1.0 \n@author: Jeffrey Wang\n@license: Apache Licence \n@contact: shwangjj@163.com\n@file: Test_TfUtils.py \n@time: 2018/5/6 0006 下午 15:53 \n\"\"\"\nimport unittest\nfrom tools.TfUtils import TfUtils\nimport os\n\nclass TestTfUtils(unittest.TestCase):\n\n    dirname = os.path.dirname(__file__)\n\n    exp_text_filepath = dirname + \"//数据Excel文件_测试数据文件.txt\"\n\n    def setUp(self):\n        # remove files that may have been generated during a previous test run\n        if os.path.exists(self.exp_text_filepath):\n            os.remove(self.exp_text_filepath)\n\n    def test_exportExcelToTextFile(self):\n        # export the contents of an Excel file into a test data text file\n        excel_file = self.dirname + \"//数据Excel文件.xlsx\"\n        text_file = self.exp_text_filepath\n\n        r = TfUtils.exportExcelToTextFile(excel_file, text_file, col_list=\"A,C,D\", txt_title_type=TfUtils.TITLE_SAME_AS_ABC)\n        self.assertTrue(r)\n        # check that the generated file exists\n        self.assertTrue(os.path.exists(text_file))\n        # check that the generated file has a title line\n        c_file = open(self.exp_text_filepath, \"r\", encoding=\"UTF-8\")\n        titleLine = c_file.readline()\n        self.assertEqual(\"A|C|D\", titleLine)\n        c_file.close()\n\n\n    def test_exportExcelToTextFile_源文件不存在的情况(self):\n        # case: the source file does not exist\n        excel_file = self.dirname + \"//xxxx.xlsx\"\n        text_file = self.exp_text_filepath\n\n        r = TfUtils.exportExcelToTextFile(excel_file, text_file, col_list=\"A,C,D\")\n        self.assertFalse(r)\n\n    def test_exportExcelToTextFile_源文件中sheet不存在的情况(self):\n        # case: the requested sheet does not exist\n        excel_file = self.dirname + \"//数据Excel文件.xlsx\"\n        text_file = self.exp_text_filepath\n\n        r = TfUtils.exportExcelToTextFile(excel_file, text_file, 
sheet_name=\"xxxx\", col_list=\"A,C,D\")\n        self.assertFalse(r)\n\n    def test_exportExcelToTextFile_源文件中列名不存在的情况(self):\n        # case: a requested column name does not exist\n        excel_file = self.dirname + \"//数据Excel文件.xlsx\"\n        text_file = self.exp_text_filepath\n\n        r = TfUtils.exportExcelToTextFile(excel_file, text_file, sheet_name=\"xxxx\", col_list=\"A,C,D,777\")\n        self.assertFalse(r)\n\n    def test__getExcelColIndex(self):\n        # test the function that turns ABC column letters into Excel column indexes\n        self.assertEqual([0,1,2,4,25], TfUtils._getExcelColIndex(\"A,B,C,E,Z\"))\n        self.assertEqual([0,26], TfUtils._getExcelColIndex(\"A,AA\") )\n        self.assertEqual([26], TfUtils._getExcelColIndex(\"AA\"))\n        self.assertEqual([1, 27], TfUtils._getExcelColIndex(\"B,AB\"))\n        # inputs containing illegal characters\n        self.assertEqual([1, 27], TfUtils._getExcelColIndex(\"B, AB \"))\n        self.assertEqual([1, 27], TfUtils._getExcelColIndex(\"B, AB0\"))","sub_path":"src_testcase/test_tools/Test_TfUtils.py","file_name":"Test_TfUtils.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"577715683","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as sp\nimport math\nimport random as rm\nimport NumerosGenerados as ng\nimport scipy.interpolate as si\nfrom Testsdiscretos import testPoisson\n\nmu = 6.3 \npoisson = sp.poisson(mu) \nxLine = np.arange(poisson.ppf(0.01),\n              poisson.ppf(0.99))\nfmp = poisson.pmf(xLine)\nplt.plot(xLine, fmp, '--',color = \"red\")\nplt.vlines(xLine, 0, fmp, colors='b', lw=5, alpha=0.5,ec='black')\nplt.title('Distribución Poisson')\nplt.ylabel('probabilidad')\nplt.xlabel('valores')\nplt.show()\nprint(\"Media: \", round(np.mean(xLine),3))\nprint(\"Desvio: \", round(np.sqrt(np.var(xLine)),3))\nprint(\"Varianza: \", round(np.var(xLine),3))\n\n#----------Naylor----------\ncant = 10000\nrandomGCL = ng.generarNumeros(cant)\npoissons = []\n\ndef funPoisson(lamda):\n    for i in range (cant):\n        x = 0\n        b = np.exp(-lamda)\n        tr = 1\n        r = rm.uniform(1,0)\n        tr = tr * r\n        while((tr-b)>=0):\n            x = x + 1\n            r = rm.uniform(1,0)\n            tr = tr * r\n        poissons.append(x)\n    unicos, cuenta = np.unique(poissons, return_counts=True)\n    frec = np.array(cuenta/cant)\n    print(\"Media: \", round(np.mean(poissons),3))\n    print(\"Desvio: \", round(np.sqrt(np.var(poissons)),3))\n    print(\"Varianza: \", round(np.var(poissons),3))\n    plt.title(\"Distribucion de Poisson\")\n    print(unicos,cuenta)\n    xnew = np.linspace(unicos.min(), unicos.max(), 300) \n    spl = si.make_interp_spline(unicos, frec, k=3)\n    frec_suavizada = spl(xnew)\n\n    plt.plot(xLine, fmp, '--',color = \"violet\")\n    plt.vlines(xLine, 0, fmp, colors='black', lw=5, alpha=0.5)\n\n    plt.plot(xnew, frec_suavizada, '--', color = \"brown\")\n    plt.bar(unicos, frec, width=0.2, alpha = 0.7)\n    plt.show()\n\nfunPoisson(mu)\n\npoissonTeorica = np.random.poisson(mu,cant)\n\ntestPoisson(poissons,poissonTeorica)\n","sub_path":"TP2_2/Poisson.py","file_name":"Poisson.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"586434045","text":"c, n = list(map(int, input().split()))\npf = []\nnf = []\nfor _ in range(n):\n    fl = int(input())\n    if fl > 0:\n        pf.append(fl)\n    elif fl < 0:\n        nf.append(fl)\n\nspf = sorted(pf, reverse=True)\nsnf = [abs(x) for x in sorted(nf)]\nmoves = 0\n\nfloors = [spf, snf]\nif len(spf) == 0 and len(snf) == 0:\n    pass\nelif len(spf) == 0:\n    moves -= snf[0]\nelif len(snf) == 0:\n    moves -= spf[0]\nelse:\n    moves -= max(spf[0], snf[0])\n    \n\nfor i, l in enumerate(floors):\n    while 
len(l) > 0:\n moves += l[0] * 2\n if c < len(l):\n l = l[c:]\n else:\n l = []\n\nseconds = moves * 20\nminutes = 0\nhours = 0\nif seconds >= 60:\n minutes = seconds // 60\n seconds = seconds % 60\nif minutes >= 60:\n hours = minutes // 60\n minutes = minutes % 60\n\nif hours % 24 < 3 or hours % 24 > 14:\n tod = 'AM'\nelse:\n tod = 'PM'\n\nhour = (9 + hours) % 12\nif hour == 0:\n hour = 12\nif hour < 10:\n hour = '0' + str(hour)\nif minutes < 10:\n minutes = '0' + str(minutes)\nif seconds < 10:\n seconds = '0' + str(seconds)\n\ntime = str(hour) + ':' + str(minutes) + ':' + str(seconds) + ' ' + tod\nprint(time)\n","sub_path":"elevator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"596314068","text":"import random\nimport string\n\n\nVOWELS = 'aeiouy'\nCONSONANTS = 'bcdfghjklmnpqrstvwxz'\n\n\ndef gen_word(minlength = 4, maxlength = 20):\n result = '' + random.choice(random.choice([CONSONANTS, VOWELS]))\n\n if(result[0] in VOWELS):\n odd_letters = VOWELS\n even_letters = CONSONANTS\n else:\n odd_letters = CONSONANTS\n even_letters = VOWELS\n\n for i in range(random.randint(minlength - 1, maxlength - 1)):\n if i % 2 == 1:\n result += random.choice(odd_letters)\n elif i % 2 == 0:\n result += random.choice(even_letters)\n\n return result\n","sub_path":"src/randstring.py","file_name":"randstring.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"505642989","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef replace_zeros(data: pd.DataFrame, cont_vars: list) -> pd.DataFrame:\n df = data.copy()\n for var in cont_vars:\n mean = df[var].mean()\n df.loc[df[var] == 0.0, var] = mean\n return df\n\n\ndef log_transform(data: pd.DataFrame, cont_vars: list) -> pd.DataFrame:\n X = data.copy()\n for var in cont_vars:\n if var not in [\"age\"]:\n X[var] = np.log(X[var])\n return X\n\n\ndef feature_scaling(data: pd.DataFrame, vars: list):\n df = data.copy()\n # fit scaler\n scaler = MinMaxScaler() # create an instance\n scaler.fit(df[vars])\n df = pd.concat([df['response'].reset_index(drop=True),\n pd.DataFrame(scaler.transform(data[vars]),\n columns=vars)], axis=1)\n\n return df\n","sub_path":"feature_eng.py","file_name":"feature_eng.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"605846836","text":"#!/usr/bin/env python3\n\nimport sys\nimport os.path\n\nfrom functools import reduce\nfrom collections import namedtuple, Counter\n\nAddress = namedtuple(\"Address\", [\"name\", \"v4\", \"v6\", \"lineno\"])\nService = namedtuple(\"Service\", [\"name\", \"tcp\", \"udp\", \"lineno\"])\nInterface = namedtuple(\"Interface\", [\"name\", \"zone\", \"protocols\", \"lineno\"])\nRule = namedtuple(\"Rule\", [\"srczone\", \"dstzone\", \"srcaddr\", \"dstaddr\", \"service\", \"action\", \"lineno\"])\nVirtual = namedtuple(\"Virtual\", [\"srczone\", \"extaddr\", \"intaddr\", \"extservice\", \"intservice\", \"lineno\"])\n\nETC_DIR = \"/etc/microfw\"\n\nif len(sys.argv) > 1:\n ETC_DIR = sys.argv[1]\n\n\ndef read_table(filename):\n columns = {\n \"addresses\": 3,\n \"services\": 3,\n \"interfaces\": 3,\n \"rules\": 6,\n \"virtual\": 5\n }\n\n types = {\n \"addresses\": Address,\n \"services\": Service,\n \"interfaces\": Interface,\n \"rules\": Rule,\n 
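# one namedtuple type per table; read_table() below unpacks each row into these\n        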
\"virtual\": Virtual\n }\n\n if filename not in columns:\n raise RuntimeError(\"table %s does not exist\" % filename)\n\n table = open(os.path.join(ETC_DIR, filename), \"r\")\n for lineno, line in enumerate(table, start=1):\n if not line.strip() or line.startswith(\"#\"):\n continue\n col_data = line.split()\n if len(col_data) != columns[filename]:\n raise ValueError(\n \"%s:%d (%s): Expected %d values, found %d\" % (\n filename, lineno, col_data[0],\n columns[filename], len(col_data)\n )\n )\n yield types[filename]( *(col_data + [lineno]) )\n\n\ndef chain_gen(cmd_gen, next_gen):\n # Take the results from the last step, and pipe every result\n # into the next step individually.\n for cmd in cmd_gen:\n yield from next_gen(cmd)\n\n\ndef printf(fmt, obj):\n \"\"\" Format a string using a namedtuple as args. \"\"\"\n print(fmt % obj._asdict())\n\n\ndef generate_setup():\n # Parse tables\n\n all_addresses = {\n address.name: address\n for address in read_table(\"addresses\")\n }\n all_services = {\n service.name: service\n for service in read_table(\"services\")\n }\n\n all_interfaces = list(read_table(\"interfaces\"))\n all_zones = set( iface.zone for iface in all_interfaces )\n all_rules = list(read_table(\"rules\"))\n all_virtuals = list(read_table(\"virtual\"))\n\n # Validate interfaces, rules and virtuals\n\n for interface in all_interfaces:\n if interface.zone in (\"FW\", \"ALL\"):\n raise ValueError(\n \"interfaces:%d (%s): Interface zone cannot be ALL or FW\" % (\n interface.lineno, interface.name\n )\n )\n\n for rule in all_rules:\n if rule.action not in (\"accept+nat\", \"accept\", \"reject\", \"drop\"):\n raise ValueError(\n \"rules:%d: Invalid action '%s'\" % (\n rule.lineno, rule.action\n )\n )\n if rule.srczone in (\"FW\", \"ALL\"):\n raise ValueError(\"rules:%d: Source Zone cannot be ALL or FW\" % rule.lineno)\n if rule.srczone not in all_zones:\n raise ValueError(\n \"rules:%d: Source zone '%s' does not exist\" % (\n rule.lineno, rule.dstzone\n )\n )\n if rule.dstzone not in all_zones | {\"FW\", \"ALL\"}:\n raise ValueError(\n \"rules:%d: Destination zone '%s' does not exist\" % (\n rule.lineno, rule.dstzone\n )\n )\n if rule.srcaddr != \"ALL\":\n if rule.srcaddr not in all_addresses:\n raise ValueError(\n \"rules:%d: Source Address '%s' does not exist\" % (\n rule.lineno, rule.srcaddr\n )\n )\n if rule.dstaddr != \"ALL\":\n if rule.dstaddr not in all_addresses:\n raise ValueError(\n \"rules:%d: Destination Address '%s' does not exist\" % (\n rule.lineno, rule.dstaddr\n )\n )\n if rule.service != \"ALL\":\n if rule.service not in all_services:\n raise ValueError(\n \"rules:%d: Service '%s' does not exist\" % (\n rule.lineno, rule.service\n )\n )\n\n for virtual in all_virtuals:\n if virtual.srczone in (\"FW\", \"ALL\"):\n raise ValueError(\n \"virtuals:%d: Source zone cannot be ALL or FW\" % virtual.lineno\n )\n if virtual.extaddr == \"ALL\":\n raise ValueError(\"virtuals:%d: External Address cannot be ALL\" % rule.lineno)\n if virtual.extaddr not in all_addresses:\n raise ValueError(\n \"virtuals:%d: External Address '%s' does not exist\" % (\n virtual.lineno, virtual.extaddr\n )\n )\n if virtual.intaddr == \"ALL\":\n raise ValueError(\"virtuals:%d: Internal Address cannot be ALL\" % rule.lineno)\n if virtual.intaddr not in all_addresses:\n raise ValueError(\n \"virtuals:%d: Internal Address '%s' does not exist\" % (\n virtual.lineno, virtual.intaddr\n )\n )\n if \"ALL\" in (virtual.extservice, virtual.intservice):\n if virtual.extservice != virtual.intservice:\n raise 
ValueError(\n \"virtuals:%d: When setting one service to ALL, the other must also be ALL\" % (\n virtual.lineno\n )\n )\n if virtual.extservice != \"ALL\":\n if virtual.extservice not in all_services:\n raise ValueError(\n \"virtuals:%d: External Service '%s' does not exist\" % (\n virtual.lineno, virtual.extservice\n )\n )\n if virtual.intservice != \"ALL\":\n if virtual.intservice not in all_services:\n raise ValueError(\n \"virtuals:%d: Internal Service '%s' does not exist\" % (\n virtual.lineno, virtual.intservice\n )\n )\n\n # For address and service tables, figure out which entries are actually _used_\n\n used_addresses = set(\n all_addresses[rule.srcaddr] for rule in all_rules if rule.srcaddr != \"ALL\"\n ) | set(\n all_addresses[rule.dstaddr] for rule in all_rules if rule.dstaddr != \"ALL\"\n ) | set(\n all_addresses[virtual.extaddr] for virtual in all_virtuals if virtual.extaddr != \"ALL\"\n ) | set(\n all_addresses[virtual.intaddr] for virtual in all_virtuals if virtual.intaddr != \"ALL\"\n )\n\n used_services = set(\n all_services[rule.service] for rule in all_rules if rule.service != \"ALL\"\n ) | set(\n all_services[virtual.extservice] for virtual in all_virtuals if virtual.extservice != \"ALL\"\n ) | set(\n all_services[virtual.intservice] for virtual in all_virtuals if virtual.intservice != \"ALL\"\n )\n\n # Now let's generate a bash script.\n\n print(\"#!/bin/bash\")\n print(\"set -e\")\n print(\"set -u\")\n print(\"\")\n\n # Generate ipsets for the entries we're going to use\n\n for address in sorted(used_addresses, key=lambda x: x.name):\n if address.v4 != '-':\n printf(\"ipset create '%(name)s_v4' hash:net family inet hashsize 1024 maxelem 65536\", address)\n printf(\"ipset add '%(name)s_v4' '%(v4)s'\", address)\n if address.v6 != '-':\n printf(\"ipset create '%(name)s_v6' hash:net family inet6 hashsize 1024 maxelem 65536\", address)\n printf(\"ipset add '%(name)s_v6' '%(v6)s'\", address)\n\n for service in sorted(used_services, key=lambda x: x.name):\n if service.tcp != '-':\n printf(\"ipset create '%(name)s_tcp' bitmap:port range 1-65535\", service)\n printf(\"ipset add '%(name)s_tcp' '%(tcp)s'\", service)\n if service.udp != '-':\n printf(\"ipset create '%(name)s_udp' bitmap:port range 1-65535\", service)\n printf(\"ipset add '%(name)s_udp' '%(udp)s'\", service)\n\n print(\"\")\n\n # Generate implicit accept rules for lo, icmp and related\n\n print(\"iptables -A INPUT -i lo -j ACCEPT\")\n print(\"ip6tables -A INPUT -i lo -j ACCEPT\")\n\n print(\"iptables -A INPUT -p icmp -j ACCEPT\")\n print(\"iptables -A FORWARD -p icmp -j ACCEPT\")\n\n print(\"ip6tables -A INPUT -p icmpv6 -j ACCEPT\")\n print(\"ip6tables -A FORWARD -p icmpv6 -j ACCEPT\")\n\n print(\"iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n print(\"ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n\n print(\"iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n print(\"ip6tables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n\n # Generate action chains\n\n print(\"iptables -N accept\")\n print(\"iptables -A accept -j ACCEPT\")\n\n print(\"iptables -N drop\")\n print(\"iptables -A drop -j DROP\")\n\n print(\"iptables -N reject\")\n print(\"iptables -A reject -m addrtype --src-type BROADCAST -j DROP\")\n print(\"iptables -A reject -s 224.0.0.0/4 -j DROP\")\n print(\"iptables -A reject -p igmp -j DROP\")\n print(\"iptables -A reject -p tcp -j REJECT --reject-with tcp-reset\")\n print(\"iptables -A reject -p udp -j REJECT --reject-with 
icmp-port-unreachable\")\n print(\"iptables -A reject -p icmp -j REJECT --reject-with icmp-host-unreachable\")\n print(\"iptables -A reject -j REJECT --reject-with icmp-host-prohibited\")\n\n print(\"ip6tables -N accept\")\n print(\"ip6tables -A accept -j ACCEPT\")\n\n print(\"ip6tables -N drop\")\n print(\"ip6tables -A drop -j DROP\")\n\n print(\"ip6tables -N reject\")\n print(\"ip6tables -A reject -p tcp -j REJECT --reject-with tcp-reset\")\n print(\"ip6tables -A reject -j REJECT --reject-with icmp6-adm-prohibited\")\n\n # Generate zone-specific chains\n\n for zone in sorted(all_zones):\n print(\"iptables -N '%s_inp'\" % zone)\n print(\"ip6tables -N '%s_inp'\" % zone)\n print(\"iptables -N '%s_fwd'\" % zone)\n print(\"ip6tables -N '%s_fwd'\" % zone)\n\n # Generate rules to route traffic from INPUT and FORWARD to those chains\n\n for interface in all_interfaces:\n if interface.protocols != \"-\":\n for proto in interface.protocols.split(\",\"):\n print(\"iptables -A INPUT -i '%s' -p '%s' -j ACCEPT\" % (interface.name, proto))\n print(\"ip6tables -A INPUT -i '%s' -p '%s' -j ACCEPT\" % (interface.name, proto))\n\n # Route incoming traffic to zone-specific input chains\n printf(\"iptables -A INPUT -i '%(name)s' -j '%(zone)s_inp'\", interface)\n printf(\"ip6tables -A INPUT -i '%(name)s' -j '%(zone)s_inp'\", interface)\n\n # We will never allow hairpin traffic though (traffic cannot be\n # forwarded out the same interface where it came in)\n printf(\"iptables -A FORWARD -i '%(name)s' -o '%(name)s' -j drop\", interface)\n printf(\"ip6tables -A FORWARD -i '%(name)s' -o '%(name)s' -j drop\", interface)\n\n # Route incoming traffic to zone-specific forward chains\n printf(\"iptables -A FORWARD -i '%(name)s' -j '%(zone)s_fwd'\", interface)\n printf(\"ip6tables -A FORWARD -i '%(name)s' -j '%(zone)s_fwd'\", interface)\n\n # Generate rules to implement filtering\n\n for rule in all_rules:\n # cmd is a dictionary that contains all the necessary building blocks for\n # an iptables command.\n # We're gonna pass it through a bunch of generators that each yield a\n # number of combinations for ipv4/ipv6 addresses, tcp/udp services and\n # accept/masquerade rules.\n # So the number of combinations grows with each step along the way.\n # At the end, every combination gets passed into render_cmd which\n # turns it into a string.\n\n def iptables(cmd=None):\n yield dict(cmd=\"iptables\")\n yield dict(cmd=\"ip6tables\")\n\n def chains(cmd):\n # Find out which input/forward chains we need to use\n if rule.dstzone == \"ALL\":\n dstzones = all_zones | {\"FW\"}\n else:\n dstzones = [rule.dstzone]\n\n for dstzone in dstzones:\n # Destination ALL or FW: goto _inp\n if dstzone in (\"FW\", \"ALL\"):\n yield dict(cmd,\n chain=\"%s_inp\" % rule.srczone,\n iface=\"\"\n )\n\n # Destination ALL or specific zone: goto _fwd\n for interface in all_interfaces:\n if dstzone in (interface.zone, \"ALL\"):\n yield dict(cmd,\n chain=\"%s_fwd\" % rule.srczone,\n iface=interface.name\n )\n\n def address(addr, direction):\n def _filter_addr(cmd):\n if addr == \"ALL\":\n yield cmd\n elif cmd[\"cmd\"] == \"iptables\":\n if all_addresses[addr].v4 != '-':\n yield dict(cmd, **{ \"%saddr\" % direction : \"%s_v4\" % addr })\n else:\n if all_addresses[addr].v6 != '-':\n yield dict(cmd, **{ \"%saddr\" % direction : \"%s_v6\" % addr })\n\n return _filter_addr\n\n def service(cmd):\n if rule.service == \"ALL\":\n yield cmd\n else:\n if all_services[rule.service].tcp != '-':\n yield dict(cmd, service='%s_tcp' % rule.service, proto=\"tcp\")\n if 
all_services[rule.service].udp != '-':\n yield dict(cmd, service='%s_udp' % rule.service, proto=\"udp\")\n\n def action(cmd):\n action = \"accept\" if rule.action == \"accept+nat\" else rule.action\n yield dict(cmd, action=action)\n\n def masq(cmd):\n yield cmd\n if rule.action == \"accept+nat\":\n yield dict(cmd, table=\"nat\", chain=\"POSTROUTING\", action=\"MASQUERADE\")\n\n def render_cmd(cmd):\n fmt = \"%(cmd)-9s \"\n if cmd.get(\"table\"):\n fmt += \"-t '%(table)s' \"\n fmt += \"-A '%(chain)s' \"\n if cmd.get(\"iface\"):\n fmt += \"-o '%(iface)s' \"\n if cmd.get(\"srcaddr\"):\n fmt += \"-m set --match-set '%(srcaddr)s' src \"\n if cmd.get(\"dstaddr\"):\n fmt += \"-m set --match-set '%(dstaddr)s' dst \"\n if cmd.get(\"service\"):\n fmt += \"-p '%(proto)s' -m set --match-set '%(service)s' dst \"\n fmt += \"-j %(action)s\"\n yield fmt % cmd\n\n # Create a pipeline of steps ready to be consumed by reduce.\n # The first element we need to invoke manually.\n # The others are invoked by chain_gen.\n pipeline = [\n iptables(),\n chains,\n address(rule.srcaddr, \"src\"),\n address(rule.dstaddr, \"dst\"),\n service,\n action,\n masq,\n render_cmd\n ]\n\n # Now reduce() the pipeline to generate the actual commands.\n for command in reduce(chain_gen, pipeline):\n print(command)\n\n\n # Generate rules to implement virtual services\n\n for virtual in all_virtuals:\n def iptables(cmd=None):\n yield dict(cmd=\"iptables\")\n yield dict(cmd=\"ip6tables\")\n\n def interfaces(cmd):\n for interface in all_interfaces:\n if interface.zone == virtual.srczone:\n yield dict(cmd, iface=interface.name)\n\n def address(addr, which_one):\n def _filter_addr(cmd):\n if cmd[\"cmd\"] == \"iptables\":\n if all_addresses[addr].v4 != '-':\n yield dict(cmd, **{ \"%saddr\" % which_one : all_addresses[addr].v4 })\n else:\n if all_addresses[addr].v6 != '-':\n yield dict(cmd, **{ \"%saddr\" % which_one : all_addresses[addr].v6 })\n return _filter_addr\n\n def service(service, which_one):\n def _filter_service(cmd):\n if service == \"ALL\":\n yield cmd\n else:\n if all_services[service].tcp != '-':\n yield dict(cmd, proto=\"tcp\", **{\n \"%sservice\" % which_one : all_services[service].tcp,\n })\n if all_services[service].udp != '-':\n yield dict(cmd, proto=\"udp\", **{\n \"%sservice\" % which_one : all_services[service].udp,\n })\n return _filter_service\n\n def render_cmd(cmd):\n fmt_dnat = \"%(cmd)s -t 'nat' -A 'PREROUTING' -i '%(iface)s' -d '%(extaddr)s' \"\n fmt_fltr = \"%(cmd)s -t 'filter' -A 'FORWARD' -i '%(iface)s' -d '%(intaddr)s' \"\n\n if cmd.get(\"extservice\"):\n fmt_dnat += \"-p '%(proto)s' -m '%(proto)s' --dport '%(extservice)s' \"\n fmt_fltr += \"-p '%(proto)s' -m '%(proto)s' --dport '%(intservice)s' \"\n\n if virtual.intservice == virtual.extservice:\n fmt_dnat += \"-j DNAT --to-destination '%(intaddr)s'\"\n fmt_fltr += \"-j ACCEPT\"\n else:\n fmt_dnat += \"-j DNAT --to-destination '%(intaddr)s:%(intservice)s'\"\n fmt_fltr += \"-j ACCEPT\"\n\n yield fmt_dnat % cmd\n yield fmt_fltr % cmd\n\n pipeline = [\n iptables(),\n interfaces,\n address(virtual.extaddr, \"ext\"),\n address(virtual.intaddr, \"int\"),\n service(virtual.extservice, \"ext\"),\n service(virtual.intservice, \"int\"),\n render_cmd\n ]\n\n # Now reduce() the pipeline to generate the actual commands.\n for command in reduce(chain_gen, pipeline):\n print(command)\n\n # Generate last-resort reject rules\n\n print(\"iptables -A INPUT -j reject\")\n print(\"ip6tables -A INPUT -j reject\")\n print(\"iptables -A FORWARD -j reject\")\n print(\"ip6tables 
-A FORWARD -j reject\")\n\n\n\nif __name__ == '__main__':\n    generate_setup()\n\n","sub_path":"src/generate_setup.py","file_name":"generate_setup.py","file_ext":"py","file_size_in_byte":18416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"421654868","text":"from django.conf.urls.defaults import patterns, url\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\n\nurlpatterns = patterns('app.asphodel.views',\n    url(r'^$', 'user_status'),\n    url(r'^game/new/?$', 'new_game'),\n    url(r'^game/(?P<game_id>\\d+)/?$', 'game'),\n\n    url(r'(?P<game_id>\\d+)/lanes/build$', 'build_lane'),\n    \n    url(r'(?P<game_id>\\d+)/ships/move$', 'move_ship'),\n    url(r'(?P<game_id>\\d+)/ships/build$', 'build_ship'),\n    url(r'(?P<game_id>\\d+)/ships/deploy$', 'deploy_ship'),\n\n    url(r'(?P<game_id>\\d+)/tech/invest$', 'invest_tech'),\n    \n    url(r'(?P<game_id>\\d+)/change_phase$', 'change_phase'),\n    url(r'(?P<game_id>\\d+)/done_turn$', 'done_turn'),\n    url(r'(?P<game_id>\\d+)/zoom$', 'zoom'),\n    url(r'(?P<game_id>\\d+)/combat$', 'combat'),\n    url(r'(?P<game_id>\\d+)/check_turn$', 'check_turn'),\n    \n    url(r'test_js', TemplateView.as_view(template_name=\"asphodel/test_js.html\")),\n)\n    \n","sub_path":"app/asphodel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"530720037","text":"from tkinter import *\nfrom dipy.viz import fvtk\nfrom dipy.tracking.distances import bundles_distances_mam\nfrom nibabel import trackvis\nfrom dipy.tracking.utils import length\nimport numpy as np\nimport nibabel\nimport os\nimport vtk.util.colors as colors\nimport _tkinter\nimport matplotlib.pyplot as plt\n\n\n\n\n\ndef loadtrkfile(T_filename, threshold_short_streamlines=10.0):\n    \"\"\"Load tractogram from TRK file and remove short streamlines with\n    length below threshold.\n    \"\"\"\n    print(\"Loading %s\" % T_filename)\n    T, hdr = trackvis.read(T_filename, as_generator=False)\n    T = np.array([s[0] for s in T], dtype=np.object)\n    \n\n    \n    return T, hdr\n\n\n\ndef show_tract(segmented_tract, color):\n    ren = fvtk.ren() \n    fvtk.add(ren, fvtk.line(segmented_tract.tolist(),colors=color, linewidth=2,opacity=0.3))\n    fvtk.show(ren)\n    fvtk.clear(ren)\n\n\n\n\ndef countstreamlines():\n    print(\"total %s streamlines\" % ( len(T_A)))\n\n\n\ndef showhistogram():\n    lengths = list(length(T_A))\n    fig_hist, ax = plt.subplots()\n    ax.hist(lengths, color='burlywood')\n    ax.set_xlabel('Length')\n    ax.set_ylabel('Count')\n    plt.show()\n\ndef load():\n    global T_A, hdr\n    T_A, hdr = loadtrkfile(T_A_filename, threshold_short_streamlines=threshold_short_streamlines) \n    \n\nif __name__ == '__main__':\n    \n    print(__doc__)\n    np.random.seed(0)\n\n    T_A_filename = r'F:\\Thesis\\Resources\\CST_L.trk'\n    \n    \n    threshold_short_streamlines = 0.0 \n\n    \n    \n    color=colors.red\n    T_A, hdr = loadtrkfile(T_A_filename, threshold_short_streamlines=threshold_short_streamlines) \n    \n    \n    root = Tk()\n    frame = Frame(root)\n    frame.pack(fill=X)\n\n\n    button1=Button(frame,text=\"Load Tract\",fg=\"blue\",command=load)\n    button2=Button(frame,text=\"Show Tract\",fg=\"blue\",command=lambda: show_tract(T_A, color))\n    button3=Button(frame,text=\"Streamlines Count\",fg=\"blue\",command=countstreamlines)\n    button4=Button(frame,text=\"Show histogram\",fg=\"blue\",command=showhistogram)\n\n    button1.pack(fill=X)\n    button2.pack(fill=X)\n    button3.pack(fill=X)\n    button4.pack(fill=X)\n\n\n\n\n    
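# hand control to Tk's event loop so the button callbacks can fire\n    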
root.mainloop()\n\n\n","sub_path":"firsttask.py","file_name":"firsttask.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"173052328","text":"from datetime import datetime\n\nOFF = 0\nINFO = 5\nDEBUG = 10\nWARNING = 15\nERROR = 20\nCRITICAL = 25\n\nlevels = {}\nlevels[\"OFF\"] = OFF\nlevels[\"INFO\"] = INFO\nlevels[\"DEBUG\"] = DEBUG\nlevels[\"WARNING\"] = WARNING\nlevels[\"ERROR\"] = ERROR\nlevels[\"CRITICAL\"] = CRITICAL\n\ndef isInfo(level):\n    return isDebug(level) or level == INFO\n\ndef isDebug(level):\n    return isWarning(level) or level == DEBUG\n\ndef isWarning(level):\n    return isError(level) or level == WARNING\n\ndef isError(level):\n    return isCritical(level) or level == ERROR\n\ndef isCritical(level):\n    return level == CRITICAL\n\ndef _getTime():\n    return str(datetime.now())[:22]\n\nclass logger():\n    def __init__(self, script=\"Main\", file=INFO, screen=INFO, path=\"log.txt\"):\n        self.script = script\n        self.file = file\n        self.screen = screen\n        self.path = path\n\n    def new(self):\n        f = open(self.path, \"w\")\n        f.close()\n\n    def _construct(self, level, message):\n        return _getTime() + \" - \" + level + \" [\" + self.script + \"] \" + message\n\n    def info(self, message):\n        temp = self._construct(\"INFO\", message)\n        if isInfo(self.file):\n            f = open(self.path, \"a\")\n            f.write(temp + \"\\n\")\n            f.close()\n        if isInfo(self.screen):\n            print(temp)\n    \n\n    def debug(self, message):\n        temp = self._construct(\"DEBUG\", message)\n        if isDebug(self.file):\n            f = open(self.path, \"a\")\n            f.write(temp + \"\\n\")\n            f.close()\n        if isDebug(self.screen):\n            print(temp)\n\n    def warning(self, message):\n        temp = self._construct(\"WARNING\", message)\n        if isWarning(self.file):\n            f = open(self.path, \"a\")\n            f.write(temp + \"\\n\")\n            f.close()\n        if isWarning(self.screen):\n            print(temp)\n\n    def error(self, message):\n        temp = self._construct(\"ERROR\", message)\n        if isError(self.file):\n            f = open(self.path, \"a\")\n            f.write(temp + \"\\n\")\n            f.close()\n        if isError(self.screen):\n            print(temp)\n\n    def critical(self, message):\n        temp = self._construct(\"CRITICAL\", message)\n        if isCritical(self.file):\n            f = open(self.path, \"a\")\n            f.write(temp + \"\\n\")\n            f.close()\n        if isCritical(self.screen):\n            print(temp)\n","sub_path":"python_src/engine/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"330925049","text":"boys = ['Peter', 'Alex', 'John', 'Arthur', 'Richard']\ngirls = ['Kate', 'Liza', 'Kira', 'Emma', 'Trisha']\n\ndef pairs_list(list1, list2):\n    if len(list1) != len(list2):\n        print(\"Unfortunately one list is shorter than the other, so someone will be left without a pair\")\n        return 0\n    else:\n        ideal_pairs = zip(list1, list2)\n        print(\"Ideal pairs:\")\n        for pair in ideal_pairs:\n            print(pair[0], \"and\", pair[1])\n\npairs_list(sorted(boys),sorted(girls))\n","sub_path":"dating.py","file_name":"dating.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"407645048","text":"from sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\nfrom graphs.graphs import *\n\n\ndef build_affinity_matrix(embed_matrix):\n    \n    aff_mat = None\n    \n    return aff_mat\n\n\ndef KNN(X, y, n):\n    l = len(y)\n    y_hat = []\n    \n    for i in range(l): \n        X_train = np.delete(X, i, axis = 0) \n        y_train = np.delete(y, i, 
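A usage sketch for the `logger` class in the record above. Note its threshold logic: a message is emitted only when its severity is at or below the handler's configured level, so a handler set to `CRITICAL` passes everything through, while `OFF` silences it. Path and script name below are made up:

```python
# Usage sketch for the logger class above (names assumed for illustration).
log = logger(script="Engine", file=CRITICAL, screen=CRITICAL, path="engine.log")
log.new()                          # truncate the log file
log.info("starting up")            # written to file and screen
log.error("something went wrong")  # written to file and screen
```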
axis = 0) \n \n neigh = KNeighborsClassifier(n_neighbors = n, n_jobs = -1)\n neigh.fit(X_train, y_train)\n y_hat.extend(neigh.predict(X[i].reshape(1,-1)))\n \n print(\"%d-NN\" %n)\n print(sum(np.array(y_hat) == y) / l)\n return sum(np.array(y_hat) == y) / l\n\ndef KNN_large(X, y, n):\n l = len(y)\n y_hat = []\n \n for i in range(0,l,20):\n \n batch_size = 20\n if i + 20 >= l:\n batch_size = l - i\n \n to_predict = X[i:i+batch_size, :]#.reshape(1,-1)\n X_train = np.delete(X, np.arange(i,i + batch_size), axis = 0)\n y_train = np.delete(y, np.arange(i,i + batch_size), axis = 0)\n \n neigh = KNeighborsClassifier(n_neighbors = n)\n neigh.fit(X_train, y_train)\n \n y_hat.extend(neigh.predict(to_predict))\n \n print(\"%d-NN\" %n)\n print(sum(np.array(y_hat) == y) / l)\n\n \ndef prepare_adj(df, method = 'gaussian', sig = 1, alpha = 1, delta = 20, lazy_flag = True):\n\n \"\"\"\n Input: Adjacency matrix or feature matrix with the last column including the labels\n Output: Row normalized gaussian kernel similarity matrix\n \"\"\"\n X = df.values[:,:-1] #consider X a graph or a feature matrix, both fine\n np.fill_diagonal(X,0) #set diagonal to zero / remove self loops\n Q_index = range(X.shape[0]) # for now always use this\n\n dis = distanceEuclidean(X, Q_index, n_jobs=-1)\n similarity = kerGauss(dis, sigma = sig) #try different sigma\n\n # origianl similarity matrix, using gaussian kernel, row normalize\n if method == 'gaussian':\n graph = RandomWalkNormalize(similarity)\n \n elif method == 'MSTKNN':\n A_KNN = MSTKNN(dis,Q_index,delta,n_jobs=-1,spanning=True)\n A_KNN_ker = A_KNN*similarity\n graph = RandomWalkNormalize(A_KNN_ker)\n \n elif method == 'nnlsw':\n A_KNN = MSTKNN(dis,Q_index,delta,n_jobs=-1,spanning=True)\n graph = multicoreNNLS(X,A_KNN,Q_index,n_jobs=-1)\n \n if lazy_flag:\n graph = lazy(graph, alpha= alpha) # convert to lazy\n \n return graph\n\ndef apply_laplacian(graph):\n\n graph = Symmetricalize(graph)\n graph = LaplacianFilter(graph)\n graph = graph.toarray()\n \n return graph\n\ndef get_train_and_val_mask(train_mask, val_size):\n \n \"\"\"\n Input: Indices of the p rows in the Domain matrix\n \n split the training set into training and validation sets\n train_ind and val_ind are used to filter the base_embeddings during training\n \"\"\"\n p = len(train_mask)\n indices = np.arange(p)\n np.random.shuffle(indices)\n \n train_lim = int((1-val_size)* p) # 90% training, 10% validation\n \n train_ind = [indices[i] for i in range(train_lim)]\n val_ind = [indices[i] for i in range(train_lim, p)]\n \n val_mask = [train_mask[i] for i in val_ind]\n train_mask = [train_mask[i] for i in train_ind]\n \n return train_mask, val_mask, train_ind, val_ind\n ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"226407804","text":"# coding:iso-8859-9 Türkçe\r\n# p_20902.py: Grafik yumruları ve bağlantıları, doğrusal ve dairesel grafik çıktıları örneği.\r\n\r\nimport networkx as nwx\r\nimport matplotlib.pyplot as pp\r\n\r\ngrafik1 = nwx.path_graph (6) # grafik1 dizilimi: [5<--0], sağdan-sola...\r\nşehirler = {0:\"Edirne\", 1:\"İstanbul\", 2:\"Ankara\", 3:\"Malatya\", 4:\"Sivas\", 5:\"Kars\"}\r\ngrafik2 = nwx.relabel_nodes (grafik1, şehirler) # grafik2 dizilimi: [Kars<--Edirne], sağdan-sola\r\n# grafik2 yeniden yaratılır, grafik1 etkilenmez...\r\n# Grafiklerde maalesef isimler görünmüyor, MS Paint ile etiketlenmeli...\r\n\r\nprint (\"Grafik1'in yumruları:\", 
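The `KNN` helper in the utils.py record above implements leave-one-out cross-validation by deleting one row at a time with `np.delete`. The same evaluation is available through scikit-learn's built-in splitter; a sketch, assuming the same `X`/`y` shapes as the record:

```python
import numpy as np
from sklearn.model_selection import LeaveOneOut, cross_val_score
from sklearn.neighbors import KNeighborsClassifier

# Equivalent to the manual np.delete loop in KNN() above: each fold holds
# out a single sample, and the mean of the 0/1 fold scores is the accuracy.
def knn_loo_accuracy(X, y, n):
    clf = KNeighborsClassifier(n_neighbors=n, n_jobs=-1)
    return cross_val_score(clf, X, y, cv=LeaveOneOut()).mean()
```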
grafik1.nodes() )\r\nprint (\"Grafik1'in bağlantıları:\", grafik1.edges() )\r\n\r\nprint (\"\\nGrafik2'in yumruları:\", grafik2.nodes() )\r\nprint (\"Grafik2'in bağlantıları:\", grafik2.edges() )\r\n\r\nnwx.draw (grafik2)\r\n#pp.savefig (\"p_20902a.png\")\r\npp.show()\r\n\r\nprint (\"-\"*75)\r\n#--------------------------------------------------------------------------------------------------\r\n\r\nharitalama1 = dict (zip (grafik1.nodes(), \"abc\"))\r\nnwx.relabel_nodes (grafik1, haritalama1, copy=False) # grafik1 yeni kısagelen isimlerle değiştirilir...\r\n\r\nprint (\"Grafik1'in yumruları:\", grafik1.nodes() )\r\nprint (\"Grafik1'in bağlantıları:\", grafik1.edges() )\r\nprint (\"-\"*75)\r\n#--------------------------------------------------------------------------------------------------\r\n\r\n\r\ngrafik1 = nwx.path_graph (6)\r\nharitalama2 = dict (zip (grafik1.nodes(), (100,101,102,103,104,105) ))\r\nnwx.relabel_nodes (grafik1, haritalama2, copy=False) # grafik1 haritalama fonksiyonuyla tamamen ismen değişir...\r\ngrafik1.add_edge (100, 105) # Sondan başa bağlantı...\r\n\r\nprint (\"Grafik1'in yumruları:\", grafik1.nodes() )\r\nprint (\"Grafik1'in bağlantıları:\", grafik1.edges() )\r\n\r\nnwx.draw (grafik1)\r\n#pp.savefig (\"p_20902b.png\")\r\npp.show()\r\n\r\n\r\n\r\n\"\"\"Çıktı:\r\n>python p_20902.py\r\nGrafik1'in yumruları: [0, 1, 2, 3, 4, 5]\r\nGrafik1'in bağlantıları: [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]\r\n\r\nGrafik2'in yumruları: ['Edirne', 'İstanbul', 'Ankara', 'Malatya', 'Sivas', 'Kars']\r\nGrafik2'in bağlantıları: [('Edirne', 'İstanbul'), ('İstanbul', 'Ankara'), ('Ankara', 'Malatya'), ('Malatya', 'Sivas'), ('Sivas', 'Kars')]\r\n\r\nnx_pylab.py:579: MatplotlibDeprecationWarning:\r\nThe iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead. 
if not cb.iterable(width):\r\n---------------------------------------------------------------------------\r\n\r\nGrafik1'in yumruları: [3, 4, 5, 'a', 'b', 'c']\r\nGrafik1'in bağlantıları: [(3, 4), (3, 'c'), (4, 5), ('a', 'b'), ('b', 'c')]\r\n---------------------------------------------------------------------------\r\n\r\nGrafik1'in yumruları: [100, 101, 102, 103, 104, 105]\r\nGrafik1'in bağlantıları: [(100, 101), (100, 105), (101, 102), (102, 103), (103,104), (104, 105)]\r\n\"\"\"","sub_path":"Bernd Klein (520) ile Python/p_20902.py","file_name":"p_20902.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"461246903","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nimport matplotlib.pyplot as plt\r\nimport pylab\r\nfrom collections import deque\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.optimizers import RMSprop\r\nfrom ple import PLE\r\nfrom ple.games.catcher import Catcher\r\nfrom pygame.constants import K_a, K_d\r\n\r\nos.putenv('SDL_VIDEODRIVER', 'fbcon')\r\nos.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\r\nEPISODES = 100000\r\nnp.random.seed(0)\r\n\r\n\r\ndef process_state(state):\r\n    return np.array([state.values()])\r\n\r\n\r\nclass DQNAgent:\r\n    def __init__(self, env):\r\n        self.env = env\r\n        self.memory = deque(maxlen=200000)\r\n        self.gamma = 0.99\r\n        self.epsilon = 1\r\n        self.epsilon_min = 0.05\r\n        self.epsilon_decay = 0.9984\r\n        self.learning_rate = 1e-5\r\n        self._build_model()\r\n\r\n    def _build_model(self):\r\n        model = Sequential()\r\n        model.add(Dense(100, input_dim=4, activation='tanh', init='he_uniform'))\r\n        model.add(Dense(100, activation='tanh', init='he_uniform'))\r\n        model.add(Dense(3, activation='linear', init='he_uniform'))\r\n        model.compile(loss='mse',\r\n                      optimizer=RMSprop(lr=self.learning_rate))\r\n        self.model = model\r\n\r\n\r\n\r\n    def remember(self, state, action, reward, next_state, done): # store transition in replay memory\r\n        self.memory.append((state, action, reward, next_state, done))\r\n\r\n    def act(self, state):\r\n        if np.random.rand() <= self.epsilon:\r\n            return np.random.choice([K_a, None, K_d])\r\n        act_values = self.model.predict(state)\r\n        # print (act_values)\r\n        return [K_a, None, K_d][np.argmax(act_values[0])]\r\n\r\n    def replay(self, batch_size):\r\n        if len(self.memory) < 120000: # do not train until the memory holds 120000 transitions\r\n            return\r\n        batchs = np.random.choice(len(self.memory), batch_size, replace=False) # sample a random minibatch from memory\r\n        states, targets = [], []\r\n        for i in batchs:\r\n            state, action, reward, next_state, done = self.memory[i]\r\n            #if not done:\r\n            # note: with the guard above left disabled, the target bootstraps even when done is True\r\n            target = reward + self.gamma * \\\r\n                        np.amax(self.model.predict(next_state)[0])\r\n\r\n            target_f = self.model.predict(state)\r\n            target_f[0][action] = target\r\n            states.append(state[0])\r\n            targets.append(target_f[0])\r\n        states = np.array(states)\r\n        targets = np.array(targets)\r\n        self.model.fit(states, targets, nb_epoch=1, verbose=0) # train on the minibatch\r\n        if self.epsilon > self.epsilon_min:\r\n            self.epsilon *= self.epsilon_decay\r\n\r\n    def load(self, name): # load trained network weights\r\n        self.model.load_weights(name)\r\n\r\n    def save(self, name): # save network weights\r\n        self.model.save_weights(name)\r\n\r\nif __name__ == \"__main__\":\r\n    game = Catcher(width=320, height=320)\r\n    env = PLE(game, display_screen=True, state_preprocessor=process_state)\r\n    agent = DQNAgent(env)\r\n    agent.load(\"./save/catcher.h5\")\r\n\r\n    # initialization\r\n    #pylab.title(\"reward\")\r\n    
#pylab.xlabel(\"episodes\")\r\n    #pylab.ylabel(\"rewards\")\r\n    env.init()\r\n    scores, time = [], []\r\n    for e in range(EPISODES):\r\n\r\n        env.reset_game()\r\n        state = env.getGameState()\r\n        state = np.array([list(state[0])])\r\n        score = 0\r\n        for time_t in range(20000):\r\n            action = agent.act(state)\r\n\r\n            reward = env.act(action) # choose and perform an action\r\n            score += reward\r\n\r\n            next_state = env.getGameState()\r\n            next_state = np.array([list(next_state[0])])\r\n\r\n            action = [K_a, None, K_d].index(action)\r\n\r\n            agent.remember(state, action, reward, next_state, env.game_over())\r\n            state = next_state\r\n\r\n            if env.game_over() or time_t == 19999:\r\n                # print stats when the episode ends\r\n                print(\"episode: {}/{}, score: {}, memory size: {}, e: {}\"\r\n                      .format(e, EPISODES, score,\r\n                              len(agent.memory), agent.epsilon))\r\n\r\n                # record reward for plotting\r\n                scores.append(score)\r\n                time.append(e+1)\r\n                if e % 10 == 0:\r\n                    pylab.plot(time, scores, 'b')\r\n                    pylab.savefig(\"./save/catcher_dqn.png\")\r\n                break\r\n\r\n        if e % 100 == 0:\r\n            agent.save(\"./save/catcher.h5\")\r\n\r\n        if time_t % 4 == 3:\r\n            agent.replay(32)\r\n","sub_path":"deep-q-learning/catcher_DQN_plot.py","file_name":"catcher_DQN_plot.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"64041197","text":"# from sqlalchemy.orm import Session, session\n\nfrom api import models, schema\n\nfrom api.db import bookingslips, database\n\n\n\n###########################BOOKING SLIP#####################################\n\nasync def add_slip(source='', destination='', booking_code='', new_booking_code=''):\n    query = bookingslips.insert().values(source=source, destination=destination, booking_code=booking_code, new_booking_code=new_booking_code)\n    return await database.execute(query=query)\n\nasync def get_slip(booking_code):\n    query = bookingslips.select(bookingslips.c.booking_code==booking_code)\n    return await database.fetch_one(query=query)\n\nasync def get_slip_detail(source, destination, booking_code):\n    query = bookingslips.select().where(bookingslips.c.source==source).where(bookingslips.c.destination==destination).where(bookingslips.c.booking_code==booking_code)\n    return await database.fetch_one(query=query)\n\nasync def get_slips(skip: int = 0, limit: int = 10):\n    query = bookingslips.select().order_by(bookingslips.c.id.desc()).offset(skip).limit(limit)\n    return await database.fetch_all(query=query)\n\nasync def delete_slip(id: int):\n    query = bookingslips.delete().where(bookingslips.c.id==id)\n    return await database.execute(query=query)\n\nasync def update_slip(id: int, payload: schema.BookingSlipCreate): #I can't think of a possible usecase for now\n    query = (\n        bookingslips\n        .update()\n        .where(bookingslips.c.id == id)\n        .values(**payload.dict())\n    )\n    return await database.execute(query=query)\n\n\n\n###########################BOOKING SLIP#####################################\n\n# async def get_slips(db: Session, skip: int = 0, limit: int = 100):\n#     return db.query(models.BookingSlip).offset(skip).limit(limit).all()\n\n# async def get_slip(booking_code: str):\n#     query = models.BookingSlip.filter(models.BookingSlip.booking_code == booking_code).first()\n#     return await database.execute(query=query)\n# async def get_slip_detail(db: Session, booking_code: str, source: str, destination: str):\n#     return db.query(models.BookingSlip).filter(models.BookingSlip.booking_code == booking_code)\\\n#         .filter(models.BookingSlip.source == source).filter(models.BookingSlip.destination == 
destination).first()\n\n# async def create_slip(db: Session, source='', destination='', booking_code='', new_booking_code=''):\n# db_slip = models.BookingSlip(source=source, destination=destination, booking_code=booking_code, new_booking_code=new_booking_code)\n# db.add(db_slip)\n# db.commit()\n# db.refresh(db_slip)\n# return db_slip\n \n# async def create_slip_convert(db: Session, _convert: schema.ConvertedSlipCreate, bookingslip_id: int):\n# db_convert = models.ConvertedSlip(**_convert.dict(), booking_slip_id=bookingslip_id)\n# db.add(db_convert)\n# db.commit()\n# db.refresh(db_convert)\n# return db_convert\n\n","sub_path":"api/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"99195938","text":"#original idea from https://gist.github.com/frederic/cd56923c8af46ae44fd5\n#GPLv3\n\nimport struct #struct kinda sucks\nimport sys\n\n# /* msm8960 bootloader.img format */\n# #define BOOTLDR_MAGIC \"BOOTLDR!\"\n# #define BOOTLDR_MAGIC_SIZE 8\n#\n# typedef struct {\n# \tchar name[64];\n# \tuint32_t size;\n# }img_info_s;\n#\n# typedef struct {\n# \tchar magic[BOOTLDR_MAGIC_SIZE];\n# \tuint32_t num_images;\n# \tuint32_t start_offset;\n# \tuint32_t bootldr_size;\n# \timg_info_s img_info[];\n# }bootloader_images_header;\n\ncomplete_bootloader = open('binaries/bootloader-mako-makoz30f.img','rb').read()\nmagic = complete_bootloader[0:8]\n\nif (magic != bytes(\"BOOTLDR!\",'ascii')):\n print(\"magic was supposed to be BOOTLDR!, got {0} instead\".format(magic))\n sys.exit(1)\n\nnumber_of_images = int.from_bytes(complete_bootloader[8:12], byteorder='little')\nstart_offset = int.from_bytes(complete_bootloader[12:16], byteorder='little')\nbootloader_size = int.from_bytes(complete_bootloader[16:20], byteorder='little')\n\nprint(\"Read BOOTLDR header\")\nprint(\"File size: {0}, bootloader size: {1}\".format(len(complete_bootloader),bootloader_size))\nprint(\"Calculated offset: {0}\".format(len(complete_bootloader)-bootloader_size))\nprint(\"Expecting {0} images\".format(number_of_images))\nprint(\"Start offset: {0}\".format(start_offset))\n\nposition=start_offset\nfor i in range(0,number_of_images):\n info_size = 68 #probably shouldn't hardcode this\n info_start = 20+info_size*i\n info_end = info_start+info_size\n info = complete_bootloader[info_start:info_end]\n image_name = info[0:64].decode('utf-8').rstrip('\\0')\n image_size = int.from_bytes(info[64:68],byteorder='little')\n print(\"{0}: {1} ({2}) at offset {3}\".format(i, image_name,image_size, position))\n bin = open('binaries/'+image_name + '.bin','wb').write(complete_bootloader[position:position+image_size])\n position+=image_size\n\n","sub_path":"Python/nexus4-bootloader-tool/unbootldr.py","file_name":"unbootldr.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"436725587","text":"__author__ = 'vincent'\r\nclass Solution(object):\r\n def removeDuplicates(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n Another typical two index problem.\r\n \"\"\"\r\n\r\n if not nums: return 0;\r\n if len(nums) < 3: return len(nums)\r\n\r\n dupFlag = False\r\n count = 1\r\n\r\n for i in xrange(1, len(nums)):\r\n if nums[i] == nums[i-1]:\r\n if not dupFlag:\r\n nums[count] = nums[i]\r\n count+=1\r\n dupFlag = True\r\n else:\r\n dupFlag = False\r\n nums[count] = nums[i]\r\n count += 1\r\n return 
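The bootloader unpacker in the record above slices bytes by hand even though its comments quote the C struct layout, and its own comment concedes that "struct kinda sucks". For comparison, the same header reads expressed with `struct`, which keeps the offsets implicit in the format string (variable names mirror the record's):

```python
import struct

# 8-byte magic followed by three little-endian uint32s, per the quoted
# bootloader_images_header layout (offsets 0..20 in the record above).
magic, num_images, start_offset, bootloader_size = struct.unpack_from(
    '<8s3I', complete_bootloader, 0)

# Each img_info_s entry is a 64-byte name plus a uint32 size (68 bytes),
# starting right after the 20-byte header.
name_raw, size = struct.unpack_from('<64sI', complete_bootloader, 20)
name = name_raw.decode('utf-8').rstrip('\x00')
```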
count\r\n\r\n\r\n\r\n","sub_path":"80_Remove Duplicates from Sorted Array II.py","file_name":"80_Remove Duplicates from Sorted Array II.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"344379259","text":"import bpy\nimport numpy as np\nimport sys\nsys.path.append(\".\")\n\n#デスクトップ型パソコンのパスの設定\nsys.path.append(\"/Users/hiroshi19/Documents/research_git/calibration\")\nimport cv2 as cv\nimport glob\nimport calibration as cb\n\n#print (np(モジュール名).__file__)でモジュールの場所を調べる\n\n\n\n#ビューに選択されているものを選択\nbpy.ops.object.select_all(action='SELECT')\n\n#すべて選択された状態になったら削除\nbpy.ops.object.delete(True)\n\n#カメラを追加\n#locationはメートル単位\n#rotationは3.14(円周率)でちょうど半周\nbpy.ops.object.camera_add(\n location = (7,0, 0.5),\n rotation = (1.65,0,1.9)\n)\n\n\n#イメージセンサの大きさを求める\ndef get_sensor_size(pint, pint_35, pix_x, pix_y):\n\n #センサーの対角線の長さを求める\n sensor_diagonal = 2 * pint * np.tan(np.arctan2(21.6335,pint_35))\n print(sensor_diagonal)\n\n #センサーの横の長さを求める→引数の解像度から、タテヨコ比を計算して、算出\n width = pix_x * (sensor_diagonal / np.sqrt((pix_x*pix_x) + (pix_y*pix_y)))\n #センサーの縦の長さを求める→引数の解像度から、タテヨコ比を計算して、算出\n height = pix_y * (sensor_diagonal / np.sqrt((pix_x*pix_x) + (pix_y*pix_y)))\n\n return width, height\n\n\n\n\n\n#内部パラメータ行列について\n#行列の変換関数(blenderカメラデータ→内部パラメータ行列)\ndef get_intrinsicMatrix(camera_data):\n #焦点距離\n focus_mm = camera_data.lens\n\n scene = bpy.context.scene\n resolution_ratio = scene.render.resolution_percentage / 100\n\n #センサーの物理的大きさ(mm)\n sensor_width_mm = camera_data.sensor_width\n sensor_height_mm = camera_data.sensor_height\n #ピクセルのタテヨコ比\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n\n if (camera_data.sensor_fit == 'VERTICAL'):\n #verticalは鉛直方向という意味で、sensor_heightが固定\n #uが横方向、vが縦方向\n scale_u = scene.render.resolution_x * resolution_ratio / sensor_width_mm\n scale_v = scene.render.resolution_y * resolution_ratio * pixel_aspect_ratio / sensor_height_mm\n else:\n #'HORIZONTAL' and 'AUTO'\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n scale_u = scene.render.resolution_x * resolution_ratio / sensor_width_mm\n scale_v = scene.render.resolution_y * resolution_ratio * pixel_aspect_ratio / sensor_height_mm\n\n alpha_u = focus_mm * scale_u\n alpha_v = focus_mm * scale_v\n u_0 = scene.render.resolution_x * resolution_ratio / 2\n v_0 = scene.render.resolution_y * resolution_ratio / 2\n skew = 0\n\n kmat = np.zeros((3,3))\n kmat[0,0] = alpha_u\n kmat[1,1] = alpha_v\n kmat[0,2] = u_0\n kmat[1,2] = v_0\n return kmat\n\n\n#blenderカメラデータの代入\ndef change_cameraData(RES_X, RES_Y, RES_PERCENT, PIX_ASPECT_X, PIX_ASPECT_Y, SENSOR_W_mm, SENSOR_H_mm):\n\n #レンダリングパラメータの設定\n bpy.context.scene.render.resolution_x = RES_X\n bpy.context.scene.render.resolution_y = RES_Y\n bpy.context.scene.render.resolution_percentage = RES_PERCENT\n bpy.context.scene.render.pixel_aspect_x = PIX_ASPECT_X\n bpy.context.scene.render.pixel_aspect_y = PIX_ASPECT_Y\n\n #カメラデータの設定\n bpy.data.objects[\"Camera\"].data.sensor_width = SENSOR_W_mm\n bpy.data.objects[\"Camera\"].data.sensor_height = SENSOR_H_mm\n\n\n\n\n#行列の変換関数(内部パラメータ行列→blenderカメラデータ)\n#行列から解像度を設定\ndef get_cameraData(kmat, sensor_size_x, sensor_size_y):\n\n #行列の値を変数に代入\n alpha_u = kmat[0,0]\n alpha_v = kmat[1,1]\n u_0 = kmat[0,2]\n v_0 = kmat[1,2]\n\n\n scene = bpy.context.scene\n camera_data = bpy.data.objects['Camera'].data\n resolution_ratio = scene.render.resolution_percentage / 100\n\n res_x_inBlender = u_0 * 2 / resolution_ratio\n 
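The "Remove Duplicates from Sorted Array II" record above tracks a `dupFlag` to allow at most two copies of each value. A common flag-free variant compares each value against the element two slots behind the write cursor, which works because the kept prefix stays sorted:

```python
# Alternative to the dupFlag bookkeeping above: keep x only if it differs
# from the element two positions behind the write index. O(n), in place.
def remove_duplicates(nums):
    write = 0
    for x in nums:
        if write < 2 or x != nums[write - 2]:
            nums[write] = x
            write += 1
    return write

# remove_duplicates([1, 1, 1, 2, 2, 3]) -> 5, prefix [1, 1, 2, 2, 3]
```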
res_y_inBlender = v_0 * 2 / resolution_ratio\n\n #素子(イメージセンサ)サイズ(mm)\n camera_data.sensor_width = sensor_size_x\n camera_data.sensor_height = sensor_size_y\n #ピクセルのタテヨコ比\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n\n #センサーフィットの設定\n camera_data.sensor_fit = 'AUTO'\n\n #スケールパラメータ(センサー上での1mmあたりのピクセル数)の設定\n scale_u = u_0 * 2 / camera_data.sensor_width\n scale_v = v_0 * 2 / camera_data.sensor_height\n\n #ピクセル単位の焦点距離をmm単位の焦点距離に変換\n focus_1 = alpha_u / scale_u\n focus_2 = alpha_v / scale_v\n\n print('focus_length1', focus_1)\n print('focus_length2', focus_2)\n\n\nif __name__ == '__main__':\n\n\n #センサーサイズの取得(解像度の入力はセンサーのタテヨコ比を計算するため)\n sensor_size_x, sensor_size_y = get_sensor_size(4.15,29,4030,3058)\n\n print(\"イメージセンサ サイズ\")\n print(\"横:\",sensor_size_x)\n print(\"縦:\",sensor_size_y)\n\n #キャリブレーション\n PATTERN_WIDTH = 10\n PATTERN_HEIGHT = 7\n checker_size = 23\n pic_location = './data_3/*.JPG'\n images, image_numbers, kmat, dist, rvecs, tvecs, objectpoints, imagepoints, rets = cb.cameraCalibration(PATTERN_WIDTH,PATTERN_HEIGHT,checker_size,pic_location)\n\n #キャリブレーション結果からblenderデータの導出\n print('blenderデータの導出')\n get_cameraData(kmat, sensor_size_x, sensor_size_y)\n","sub_path":"camera_parTobpy_data.py","file_name":"camera_parTobpy_data.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"256971773","text":"\n# Standard library imports\nimport unittest\n\nimport os\n\n# Local imports\nfrom webasic.models.database import DataBase, SQLITE\nfrom webasic.models.person import Person\n\n\nclass DataBaseTest(unittest.TestCase):\n db = None\n\n @classmethod\n def setUpClass(cls):\n if os.path.exists('test.sqlite'):\n try:\n os.remove('test.sqlite')\n except:\n pass\n DataBaseTest.db = DataBase(SQLITE, dbname='test.sqlite')\n DataBaseTest.db.create_db_tables()\n\n @classmethod\n def tearDownClass(cls):\n try:\n os.remove('test.sqlite')\n except:\n pass\n\n # Test insert data base\n def test_insert_database_correct(self):\n\n dict_address = {'address': 'Test', 'city': 'Test', 'postal_code': 111, 'country': 'Test'}\n\n person_to_test = Person(\"PersonTest\", \"SurnameTest\", 111, dict_address, 'test@test.com', 'www.test.es')\n\n result = DataBaseTest.db.person_insert(person_to_test)\n\n self.assertTrue(result)\n\n query_to_execute = '''SELECT Name, Surname FROM Person WHERE Name =\"{}\"'''.format(\n person_to_test.name)\n\n result = DataBaseTest.db.select_data(query_to_execute)\n\n first_result = result[0]\n self.assertEqual(first_result[0], \"PersonTest\")\n self.assertEqual(first_result[1], \"SurnameTest\")\n\n # Test insert data base\n\n def test_insert_database_incorrect(self):\n dict_address = {'address': 'Test', 'city': 'Test', 'postal_code': 111, 'country': 'Test'}\n\n person_to_test = Person(\"PersonTest\", \"SurnameTest\", \"a\", dict_address, 'test@test.com', 'www.test.es')\n\n result = DataBaseTest.db.person_insert(person_to_test)\n\n self.assertIsNone(result)\n\n # Test update data base\n def test_update_database_correct(self):\n\n dict_address = {'address': 'Amazonas 2', 'city': 'Alcorcon', 'postal_code': 28922, 'country': 'spain'}\n\n person_to_find = Person(\"PersonTest\", \"SurnameTest\", 111, dict_address, 'test@test.com', 'www.test.es')\n person = Person(\"NameTest\", \"SurnameTest\", 222, dict_address, 'test@test.com', 'www.test.es')\n\n query_to_execute = '''SELECT Id, Name, Surname FROM Person WHERE Name =\"{}\"'''.format(\n person_to_find.name)\n\n result = 
DataBase.select_data(self.db, query_to_execute)\n\n record = result[0]\n person.identifier = record[0]\n\n result = DataBaseTest.db.person_update(person)\n\n self.assertTrue(result)\n\n query_to_execute = '''SELECT Name, Surname FROM Person WHERE Name =\"{}\"'''.format(person.name)\n\n result = DataBase.select_data(self.db, query_to_execute)\n\n first_result = result[0]\n self.assertEqual(first_result[0], \"NameTest\")\n self.assertEqual(first_result[1], \"SurnameTest\")\n\n","sub_path":"test/database_test.py","file_name":"database_test.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"561688825","text":"#from urllib2 import parse_http_list as _parse_list_header\nfrom flask import Flask\nfrom flask import request\nimport urllib\n\napp = Flask(__name__)\n\n\n#http://127.0.0.1:5000/showRandArticle?lang=tel\n@app.route('/showRandArticle', methods=['GET', 'POST'])\ndef getByLangAndRandId():\n\n language = request.args.get('lang')\n return 'Hello ' + language + ' usr'\n\nif __name__ == \"__main__\":\n app.run('0.0.0.0')\n\n","sub_path":"python_prgrams/flask.py","file_name":"flask.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"121457680","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\nfrom random import randint\n\n# Initializing Lists\ngods = []\nlevels = []\nkdas = []\ngold = []\ngpms = []\ndamage_dealt = []\ndamage_taken = []\ndamage_mitigated = []\ndamage_inhand = []\nteam_healing = []\nself_healing = []\nstructure_damage = []\nwards = []\ndistance_traveled = []\ngod_name = ' '\ngpm_check = ' '\n\n# Monitor Scraping Efficiency\nstart_time = time.time()\nreq = 0\n\n# read in all match #'s for Incon matches\nmatch_number = pd.read_csv('smite_incon_matches.csv', dtype=str)\nmatch_number.columns = ['Match']\nmatchcol = match_number['Match']\nlist1 = pd.Series.tolist(matchcol)\n\nfor match in list1:\n r = requests.get('https://smite.guru/match/' + match)\n time.sleep(randint(1, 3))\n\n req += 1\n now = time.time()\n time_lapse = now - start_time\n print('Request #: {}; Frequency: {} requests per second'.format(req, req / time_lapse))\n\n soup = BeautifulSoup(r.text, 'html.parser')\n # Grabbing Information from the Match Stats Table\n if soup.find('section', attrs={'id': 'match-stats'}) is not None:\n matches = soup.find('section', attrs={'id': 'match-stats'})\n if matches.findAll('div', attrs={'class': 'row match-table__row'}) is not None:\n players = matches.findAll('div', attrs={'class': 'row match-table__row'})\n for i in players:\n if i.find('a').text == 'Incon':\n # Grabbing the name of the god\n god = i.div.div.div.text\n god_name = god\n gods.append(god)\n # Grab the level, K/D/A, Gold Per Minute, Damage Dealt, Damage Taken, Damage Mitigated\n first_table_info = i.findAll('div', attrs={'class': 'row__item'})\n level = first_table_info[0].text\n levels.append(level)\n\n kda = first_table_info[1].text\n kdas.append(kda)\n\n gold1 = first_table_info[2].text\n gold.append(gold1)\n\n gpm = first_table_info[3].text\n gpm_check = gpm\n gpms.append(gpm)\n\n dd = first_table_info[4].text\n damage_dealt.append(dd)\n\n dt = first_table_info[5].text\n damage_taken.append(dt)\n\n dm = first_table_info[6].text\n damage_mitigated.append(dm)\n else:\n level = 'NA'\n levels.append(level)\n\n kda = 'NA'\n kdas.append(kda)\n\n gold1 = 'NA'\n gold.append(gold1)\n\n gpm = 
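The small Flask record above returns `'Hello ' + language + ' usr'`, which raises a TypeError whenever the `lang` query parameter is missing, because `request.args.get('lang')` returns None. A defensive variant of the same route, assuming the record's `app` and imports, with a default language and the "usr" typo fixed:

```python
# Sketch of a hardened version of the route above; 'en' default is assumed.
@app.route('/showRandArticle', methods=['GET', 'POST'])
def get_by_lang_and_rand_id():
    language = request.args.get('lang', 'en')
    return 'Hello ' + language + ' user'
```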
'NA'\n gpms.append(gpm)\n\n dd = 'NA'\n damage_dealt.append(dd)\n\n dt = 'NA'\n damage_taken.append(dt)\n\n dm = 'NA'\n damage_mitigated.append(dm)\n else:\n level = 'NA'\n levels.append(level)\n\n kda = 'NA'\n kdas.append(kda)\n\n gold1 = 'NA'\n gold.append(gold1)\n\n gpm = 'NA'\n gpms.append(gpm)\n\n dd = 'NA'\n damage_dealt.append(dd)\n\n dt = 'NA'\n damage_taken.append(dt)\n\n dm = 'NA'\n damage_mitigated.append(dm)\n\n if soup.findAll('div', attrs={'class': 'match-table'}) is not None:\n d_insights = soup.findAll('div', attrs={'class': 'match-table'})[3]\n if d_insights.findAll('div', attrs={'class': 'row match-table__row'}) is not None:\n players_damage = d_insights.findAll('div', attrs={'class': 'row match-table__row'})\n for i in players_damage:\n if i.find('a').text == 'Incon':\n second_table_info = i.findAll('div', attrs={'class': 'row__item'})\n\n # In Hand Damage\n ihd = second_table_info[2].text\n damage_inhand.append(ihd)\n\n # Team Healing\n th = second_table_info[3].text\n team_healing.append(th)\n\n # Self Healing\n sh = second_table_info[4].text\n self_healing.append(sh)\n\n # Structure Damage\n st = second_table_info[7].text\n structure_damage.append(st)\n else:\n # In Hand Damage\n ihd = 'NA'\n damage_inhand.append(ihd)\n\n # Team Healing\n th = 'NA'\n team_healing.append(th)\n\n # Self Healing\n sh = 'NA'\n self_healing.append(sh)\n\n # Structure Damage\n st = 'NA'\n structure_damage.append(st)\n else:\n # In Hand Damage\n ihd = 'NA'\n damage_inhand.append(ihd)\n\n # Team Healing\n th = 'NA'\n team_healing.append(th)\n\n # Self Healing\n sh = 'NA'\n self_healing.append(sh)\n\n # Structure Damage\n st = 'NA'\n structure_damage.append(st)\n\n if soup.findAll('div', attrs={'class': 'match-table'}) is not None:\n farm_insights = soup.findAll('div', attrs={'class': 'match-table'})[4]\n if farm_insights.findAll('div', attrs={'class': 'row match-table__row'}) is not None:\n player_farm = farm_insights.findAll('div', attrs={'class': 'row match-table__row'})\n for i in player_farm:\n if god_name == i.find('img')['alt'] \\\n and gpm_check == i.findAll('div', attrs={'class': 'row__item'})[3].text:\n if len(player_farm) >= 6:\n index = player_farm.index(i)\n third_table_info = player_farm[index].findAll('div', attrs={'class': 'row__item'})\n # Wards Placed\n ward = third_table_info[8].text\n wards.append(ward)\n\n # Distance Traveled\n dist = third_table_info[7].text\n distance_traveled.append(dist)\n else:\n ward = 'NA'\n wards.append(ward)\n\n dist = 'NA'\n distance_traveled.append(dist)\n else:\n ward = 'NA'\n wards.append(ward)\n\n dist = 'NA'\n distance_traveled.append(dist)\n else:\n ward = 'NA'\n wards.append(ward)\n\n dist = 'NA'\n distance_traveled.append(dist)\ndamage_data = pd.DataFrame({'God': gods, 'Level': levels, 'KDA': kdas, 'Gold Per Minute': gpms,\n 'Damage Dealt': damage_dealt, 'In Hand Damage Dealt': damage_inhand,\n 'Damage Taken': damage_taken, 'Damage Mitigated': damage_mitigated,\n 'Team Healing': team_healing, 'Self Healing': self_healing,\n 'Structure Damage': structure_damage, 'Wards': wards, 'Distance Traveled': distance_traveled\n })\ndamage_data.info()\ndamage_data.head(5)\n\n#Send to CSV\ndamage_data.to_csv('C:/Users/donav/Documents/Projects/Smite/Damage.csv')\n","sub_path":"Web Scraper/RealScraper2.py","file_name":"RealScraper2.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"34771077","text":"# 
https://www.tensorflow.org/get_started/mnist/beginners\r\n\r\nfrom PIL import Image\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\n\r\nimport tensorflow as tf\r\n\r\nx = tf.placeholder(tf.float32, [None, 784])\r\n\r\nW = tf.Variable(tf.zeros([784, 10])) # Weights\r\nb = tf.Variable(tf.zeros([10])) # Biases\r\ny = tf.nn.softmax(tf.matmul(x, W) + b) # the learning model\r\n\r\ny_ = tf.placeholder(tf.float32, [None, 10]) # base truth\r\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\r\n\r\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\r\n\r\nsess = tf.InteractiveSession()\r\ntf.global_variables_initializer().run()\r\n\r\nfor _ in range(3000):\r\n    batch_xs, batch_ys = mnist.train.next_batch(100)\r\n    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n# read-out\r\nprint(sess.run(accuracy, feed_dict={x:mnist.test.images, y_:mnist.test.labels}))\r\n\r\n# Very basic visualisation of the learned weights\r\n# http://pillow.readthedocs.io/en/4.0.x/\r\nimage_node = 127 + (W * 100) # calculation node that gets our image into a 0..255 range centred on 127\r\nimg_size = (280,28) # MNIST image sizes\r\nimg = Image.frombuffer('F', img_size, sess.run(image_node)).convert('RGB')\r\nimg.save('./my.png')\r\n#img.show()\r\n","sub_path":"04_mnist_softmax.py","file_name":"04_mnist_softmax.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"542871086","text":"# 1. In the range of natural numbers from 2 to 99, determine how many of them are divisible by each of the numbers from 2 to 9.\n# Note: 8 different answers.\n\n# init result list\nresult = [0 for _ in range(2, 10)]\n\nfor num in range(2, 100):\n    for comp_num in range(2, 10):\n        if num % comp_num == 0:\n            result[comp_num - 2] += 1\n\nfor comp_num in range(2, 10):\n    print(f\"Number {comp_num} - {result[comp_num - 2]}\")\n","sub_path":"lesson3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"332057973","text":"from Course import *\nimport itertools\nfrom scrape_websoc import *\n\n\ndef permute_schedules(courses: {str: {Course: {str: [Course]}}}, only: {int} = None):\n    '''\n    input:\n    ' ': {\n        : {\n            'Dis': [],\n            'Lab': []\n        }, {...}\n    }\n    '''\n    d = []\n    for x in courses.values():\n        group = set()\n        for lec, children in x.items():\n            if not only or lec.code in only:\n                incld_children = ((x for x in c if not only or x.code in only) for c in children.values())\n                for i in itertools.product([lec], *incld_children):\n                    group.add(i)\n        # print('----------')\n        # for y in x:\n        #     print('{} {} {} ({})'.format(y.num, y.c_type, y.section, y.code))\n        # print('----------')\n\n        d.append(group)\n    return exclude_conflicts(itertools.product(*d))\n\n\nhm = 0\n\n\ndef exclude_conflicts(cs: iter):\n    for i in cs: # course group combo\n        global hm\n        hm += 1\n        valid = True\n        for x in itertools.combinations(i, 2): # pick 2 groups from combo\n            if valid:\n                for (a, b) in itertools.product(*x): # cartesian product\n                    if a.conflicts_with(b):\n                        valid = False\n                        break\n            else:\n                break\n        if valid:\n            yield i\n\n\nif __name__ == '__main__':\n    courses = get_department('I&C Sci')\n    d = 
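The MNIST softmax record above computes `-sum(y_ * log(softmax(...)))` by hand, which is numerically unstable: `tf.log(y)` blows up when the softmax underflows to zero. The usual TF1 fix, noted in the tutorial the record links, is to hand raw logits to the fused op:

```python
# Numerically stable replacement for the manual cross-entropy above:
# keep the logits un-normalized and let the fused op do log-softmax.
logits = tf.matmul(x, W) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
```

The rest of the script (optimizer, accuracy read-out) is unchanged by this swap; only the loss node differs.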
parse_sections(courses)\n sub = {k: d[k] for k in ('I&C Sci 51', 'I&C Sci 53')}\n courses = get_department('CHINESE')\n d = parse_sections(courses)\n sub2 = {k: d[k] for k in ('Chinese 1B',)}\n sub.update(sub2)\n\n i = 0\n start = datetime.now()\n for x in permute_schedules(sub):\n i += 1\n print(['{} {} {} ({})'.format(y.num, y.c_type, y.section, y.code) for y in itertools.chain.from_iterable(x)])\n end = datetime.now()\n print('generated {}/{} schedules (discarded {} due to conflicts) in {}s'.format(i, hm, hm - i,\n (end - start).total_seconds()))\n","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"27475990","text":"'''\nWrite a program that takes as input a set sorted sequences and computes the union of these\nsequences as a sorted sequences. For example, if the input is [3, 5, 7], [0, 6], [0, 6, 28]\nthen the output is: [0, 0, 3, 5, 6, 6, 7, 28]\n\nBrute-force approach is to concatenate these sequences into a single array and sort\nTime-complexity: O(nlogn)\n'''\n\n'''\nApproach: A min-heap is ideal for maintaining a collection of elements when we need to add\narbitrary values and extract the smallest element. As a concrete example, suppose there are \nthree sorted arrays to be merged: (3, 5, 7), (0, 6), and (0, 6, 28)\nThe min-heap is initialized to the first entry of each array, i.e., it is [0, 0, 3]. \nWe extract the smallest entry, 0, and add it to the output which is (0). Then we add 6 to the\nmin-heap which is {3,0,6} now (We chose the 0 entry corresponding to the third array \narbitrarily, it would be perfectly acceptable to choose from the second array.) \nNext, extract 0, and add it to the output which is [0,0]; then add 6 to the min-heap which\nis [3,6,6]. Next, extract 3, and add it to the output which is (0,0,3); then add 5 to the \nmin-heap which is [5,6,6]. Next, extract 5, and add it to the output which is [0,0,3,5]; \nthen add 7 to the min-heap which is [7,6,6]. Next, extract 6, and add it to the output which \nis [0,0,3,5,6]; assuming 5 is selected from the second array, which has no remaining elements, \nthe min-heap is [7,6]. Next, extract 6, and add it to the output which is (0,0,3,5,6,6); \nthen add 28 to the min-heap which is [7,28]. Next, extract 7, and add it to the output which\nis (0,0,3,5,6,6,7); the min-heap is {28}. Next, extract 28, and add it to the output which is\n[0,0,3,5,6,6,7,28]; now, all elements are processed and the output stores the sorted \nelements.\n\nlet k be the number of input sequences. Then there are no more than k elements in the min-heap.\nBoth extract-min and insert take O(logk) time. Hence, we can do the merge in O(nlogk) time.\nThe space complexity is O(k) beyond the space needed to write the final result. In particular,\nif the data comes from files and is written to a file, instead of arrays, we would need only\nO(k) additional storage\n'''\nimport heapq\ndef merge_sorted_arrays(sorted_arrays):\n min_heap = []\n # Builds a list of iterators for each array in sorted_arrays.\n sorted_arrays_iters = [iter(x) for x in sorted_arrays]\n\n # Puts first element from each iterator in min_heap. 
NOTE: enumerate returns a tuple\n for i, it in enumerate(sorted_arrays_iters):\n first_element = next(it, None)\n if first_element is not None:\n heapq.heappush(min_heap, (first_element, i))\n \n result = []\n while min_heap:\n smallest_entry, smallest_array_i = heapq.heappop(min_heap)\n smallest_array_iter = sorted_arrays_iters[smallest_array_i]\n result.append(smallest_entry)\n next_element = next(smallest_array_iter, None)\n if next_element is not None:\n heapq.heappush(min_heap, (next_element, smallest_array_i))\n\n return result\n\nsorted_arrays = [[3, 5, 7], [0, 6], [0, 6, 28]]\nresult = merge_sorted_arrays(sorted_arrays)\nprint(result)","sub_path":"Python/EPI/Heaps/merge_sorted_arrays.py","file_name":"merge_sorted_arrays.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"37618598","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .serializers import *\nfrom .models import *\n\n\nclass IndexView(APIView):\n \"\"\"\n View class for index page\n \"\"\"\n @staticmethod\n def get(request, format=None):\n \"\"\"\n GET method. Lists all created polls\n :param request: Http request\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: JSON data of all polls.\n \"\"\"\n polls = PollModel.objects.all()\n serializer = PollListSerializer(polls, many=True)\n return Response(serializer.data)\n\n\nclass CreateView(APIView):\n \"\"\"\n View class for creating poll\n \"\"\"\n @staticmethod\n def post(request, format=None):\n \"\"\"\n POST method. Creates a new poll by given request. Returns http response as type application/json\n :param request: JSON containing poll title and selections\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: http 201 if created successfully, 400 if creation failed.\n \"\"\"\n body = request.data\n\n try:\n poll = PollModel(title=body['title'])\n poll.save()\n except KeyError:\n response = dict(error='KeyError')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n response = dict(poll=body['title'], selections=list(), count=0)\n\n try:\n for choice in body['choices']:\n selection = SelectionModel(poll=poll, body=choice)\n selection.save()\n response['selections'].append(choice)\n response['count'] += 1\n return Response(response, status=status.HTTP_201_CREATED)\n\n except KeyError:\n response = dict(error='KeyError')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UpdateView(APIView):\n \"\"\"\n View class for updating poll\n \"\"\"\n @staticmethod\n def post(request, format=None):\n \"\"\"\n POST method. Updates poll status and returns http response as type application/json\n :param request: JSON data, including poll title and to-be-modified selections\n :param format: Either json or html. Returns response as given format. 
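The min-heap merge record above builds the k-way merge by hand to illustrate the O(n log k) technique. The standard library already ships the same lazily evaluated merge, which is handy as a cross-check:

```python
import heapq

# Standard-library equivalent of merge_sorted_arrays() above:
# heapq.merge lazily k-way merges already-sorted iterables.
result = list(heapq.merge([3, 5, 7], [0, 6], [0, 6, 28]))
# [0, 0, 3, 5, 6, 6, 7, 28]
```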
Default is json.\n :return: Http 202 if valid, Http 400 if invalid request.\n \"\"\"\n body = request.data\n try:\n poll = PollModel.objects.get(title=body['poll'])\n except PollModel.DoesNotExist:\n response = dict(error='Poll does not exist')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n response = dict(poll=body['poll'], updated=list())\n\n try:\n for choice in body['choices']:\n selection = SelectionModel.objects.filter(poll=poll).filter(body=choice['name']).get()\n if choice['selected'] is True:\n selection.num_people += 1\n response['updated'].append({'name': choice['name'], 'update': 'increased'})\n elif selection.num_people > 0:\n selection.num_people -= 1\n response['updated'].append({'name': choice['name'], 'update': 'decreased'})\n selection.save()\n return Response(response, status=status.HTTP_202_ACCEPTED)\n\n except SelectionModel.DoesNotExist:\n response = dict(error='Selection does not exist')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ResultView(APIView):\n \"\"\"\n View class for returning poll result\n \"\"\"\n @staticmethod\n def get(request, format=None):\n \"\"\"\n Returns result of all poll\n :param request: http request\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: JSON data, containing result of all polls\n \"\"\"\n polls = PollModel.objects.all()\n serializer = PollSerializer(polls, many=True)\n return Response(serializer.data)\n\n @staticmethod\n def post(request, format=None):\n \"\"\"\n Returns result of given poll title\n :param request: JSON data, including poll title\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: JSON data, containing result of given poll\n \"\"\"\n print(request.data)\n try:\n poll = PollModel.objects.get(title=request.data['title'])\n serializer = PollSerializer(instance=poll)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n print(e)\n serializer = PollSerializer(data=request.data)\n serializer.is_valid()\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"api/poll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"433325715","text":"from django.shortcuts import render\nfrom django.contrib import messages\nfrom .models import Plant, PlantScrap\n\nfrom django.views.generic import ListView\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse\nimport json\n\n\nclass PlantListView(ListView):\n model = Plant\n paginate_by = 6\n template_name = 'search/main_plant.html'\n context_object_name = 'plant_list'\n\n def get_queryset(self):\n plant_list = Plant.objects.order_by('name')\n return plant_list\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['plants'] = Plant.objects.all()\n paginator = context['paginator']\n page_numbers_range = 5\n max_index = len(paginator.page_range)\n\n page = self.request.GET.get('page')\n current_page = int(page) if page else 1\n\n start_index = int((current_page - 1) /\n page_numbers_range) * page_numbers_range\n end_index = start_index + page_numbers_range\n if end_index >= max_index:\n end_index = max_index\n\n page_range = paginator.page_range[start_index:end_index]\n context['page_range'] = page_range\n\n search_keyword = 
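Example payloads for the DRF poll API in the record above, inferred directly from the view code (`CreateView` reads `title` and a list of `choices` strings; `UpdateView` reads `poll` plus `choices` dicts with `name` and `selected`; `ResultView.post` reads `title`). The values themselves are made up:

```python
# Illustrative request bodies matching the handlers above.
create_body = {"title": "Favourite language?", "choices": ["Python", "Go"]}
update_body = {"poll": "Favourite language?",
               "choices": [{"name": "Python", "selected": True}]}
result_body = {"title": "Favourite language?"}
```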
self.request.GET.get('q', '')\n search_type = self.request.GET.get('type', '')\n scrap_plant_list = []\n if self.request.user.is_authenticated:\n user = self.request.user\n scrap_list = PlantScrap.objects.filter(user=user)\n for scrap in scrap_list:\n plant = Plant.objects.get(pk=scrap.plant.pk)\n scrap_plant_list.append(plant)\n\n else:\n scrap_plant_list = []\n\n context['scrap_plant_list'] = scrap_plant_list\n if len(search_keyword) > 1:\n context['q'] = search_keyword\n context['type'] = search_type\n\n return context\n\n def get_queryset(self):\n search_keyword = self.request.GET.get('q', '')\n search_type = self.request.GET.get('type', '')\n plant_list = Plant.objects.order_by('name')\n\n if search_keyword:\n if len(search_keyword) > 1:\n if search_type == 'all':\n search_plant_list = plant_list.filter(\n Q(name__icontains=search_keyword))\n elif search_type == 'name':\n search_plant_list = plant_list.filter(\n Q(name__icontains=search_keyword))\n elif search_type == 'content':\n search_plant_list = plant_list.filter(\n Q(content__icontains=search_keyword))\n elif search_type == 'managelevel':\n search_plant_list = plant_list.filter(\n Q(management_level__icontains=search_keyword))\n return search_plant_list\n else:\n messages.error(self.request, '검색어는 2글자 이상 입력해주세요.')\n return plant_list\n\n\ndef main_plant(request):\n return render(request, 'search/main_plant.html')\n\n\ndef plant_detail(request, pk):\n plant = Plant.objects.get(pk=pk)\n ctx = {\n \"plant\": plant\n }\n return render(request, \"search/plant_detail.html\", ctx)\n\n\n@csrf_exempt\ndef scrap_ajax(request):\n req = json.loads(request.body)\n plant_id = req['Id']\n plant = Plant.objects.get(pk=plant_id)\n user = request.user\n\n if PlantScrap.objects.filter(user=user).filter(plant=plant):\n scrap = PlantScrap.objects.filter(user=user).get(plant=plant)\n scrap.delete()\n button_type = 'del_scrap'\n else:\n scrap = PlantScrap(user=user, plant=plant)\n scrap.save()\n button_type = 'scrap'\n print(button_type)\n return JsonResponse({'id': plant.pk, 'type': button_type})\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"436070063","text":"import numpy as np\nfrom utils.models.vae import VAE\nfrom utils.models.rnn import RNN\nfrom pathlib import Path\nimport cv2\nfrom utils import utils, model_config\n\nvideo_frame_folder_path = Path(\"../data/my_video/frames\")\n\nframe_paths = np.sort(list(video_frame_folder_path.glob(\"*.jpg\")))\nframes = np.array([utils.frame_preprocessor(cv2.imread(str(p))) for p in frame_paths])\n\n# Train VAE\nvae = VAE.init_default()\nvae.train(frames, 25, include_callbacks=False)\n\n# Create data for RNN\nencoded_images = vae.encoder.predict(frames)\ndecoded_images = (vae.decoder.predict(encoded_images) * 255).astype(np.uint8)\nx_rnn_data, y_rnn_data = utils.create_rnn_data(encoded_images, model_config.GRU_TIME_STEPS)\n\n# Train RNN\nrnn = RNN.init_default()\nrnn.train(x_rnn_data, y_rnn_data, 100, include_callbacks=False)\n\n# Generate frames\nn_images_to_generate = 100\nstarter_frames = frames[0:model_config.GRU_TIME_STEPS]\nstarter_frames = np.array([utils.frame_preprocessor(x) for x in starter_frames])\ngenerated_encoded_frames = vae.encoder.predict(starter_frames)\n\nfor i in range(n_images_to_generate):\n next_frame = rnn.model.predict(np.expand_dims(generated_encoded_frames[i:i + model_config.GRU_TIME_STEPS], axis=0))\n generated_encoded_frames 
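One quirk of the Django search views record above: `PlantListView` defines `get_queryset` twice in the same class body, so Python silently keeps only the second (search-aware) definition, and that version returns None when `search_type` matches no branch. A sketch of a single merged method (message text translated from the Korean original; behavior otherwise follows the record):

```python
# Hypothetical merged get_queryset for PlantListView above.
def get_queryset(self):
    queryset = Plant.objects.order_by('name')
    search_keyword = self.request.GET.get('q', '')
    search_type = self.request.GET.get('type', '')
    if not search_keyword:
        return queryset
    if len(search_keyword) < 2:
        messages.error(self.request, 'Please enter at least 2 characters.')
        return queryset
    if search_type in ('all', 'name'):
        return queryset.filter(Q(name__icontains=search_keyword))
    if search_type == 'content':
        return queryset.filter(Q(content__icontains=search_keyword))
    if search_type == 'managelevel':
        return queryset.filter(Q(management_level__icontains=search_keyword))
    return queryset
```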
= np.vstack((generated_encoded_frames, next_frame))\n\n# Remove the manually created \"starter\" frames\ngenerated_encoded_frames = generated_encoded_frames[model_config.GRU_TIME_STEPS:, :]\n\n# Decode the predicted images\ngenerated_decoded_frames = vae.decoder.predict(generated_encoded_frames)\ngenerated_decoded_frames = (generated_decoded_frames * 255).astype(np.uint8)\n\n# Save the generated frames\ngenerated_frames_folder = Path(\"./generated_frames\")\ngenerated_frames_folder.mkdir(exist_ok=True)\n\nfor i, generated_frame in enumerate(generated_decoded_frames):\n    image_name = \"{0}.jpg\".format(str(i).zfill(6))\n    cv2.imwrite(str(generated_frames_folder / image_name), generated_frame)\n","sub_path":"video_generation.py","file_name":"video_generation.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"496705608","text":"\"\"\"Code to output web source code.\n\"\"\"\nimport os\nfrom subprocess import call\nimport json\nimport shutil\nfrom termcolor import cprint\nfrom .utils import WorkingDirectory\n\nclass Web(object):\n    \"\"\"Class to handle web source code.\n    \"\"\"\n    def __init__(self, project):\n        self.project = project\n\n    def initialise(self):\n        \"\"\"Initialise web project.\n        \"\"\"\n        cprint('Creating react web project if it doesn\\'t exist', color='yellow')\n        fresh_project = False\n        if os.path.exists('web'):\n            with WorkingDirectory('web'):\n                call(['yarn', 'install'])\n        else:\n            call(['create-react-app', self.project.project_name_lowercased])\n            call(['mv', self.project.project_name_lowercased, 'web'])\n            fresh_project = True\n        with WorkingDirectory('web'):\n            if self.project.run_yarn or fresh_project:\n                cprint('Running yarn add for additional dependencies')\n                packages_to_add = [\"aphrodite\", \"concurrently\", \"enzyme\", \"react-dom\", \"rimraf\",\n                \"react-redux\", \"react-router-dom\", \"@types/react\", \"redux-logger\", \"redux-thunk\", \"redux\"]\n\n                dev_packages_to_add = [\"react-addons-test-utils\", \"react-scripts-ts\", \"ts-jest\", \"ts-node\", \"typescript\"]\n                call(['yarn', 'add'] + packages_to_add)\n                call(['yarn', 'add', '--dev'] + dev_packages_to_add)\n            core_script = \"mkdir -p node_modules\" + os.sep + self.project.project_name_lowercased + \"core \" + \\\n                \"&& cp ../core/package.json node_modules\" + os.sep + self.project.project_name_lowercased + \"core\" + os.sep + \"package.json \" + \\\n                \"&& cp -r ../core/dist node_modules\" + os.sep + self.project.project_name_lowercased + \"core/dist\"\n            cprint('Updating package.json scripts', color='yellow')\n            scripts = {\n                \"link:core\": core_script,\n                \"test\": \"jest\",\n                \"tsc\": \"tsc\",\n                \"clean\": \"rimraf artifacts\",\n                \"build\": \"yarn run clean && yarn run link:core && yarn run tsc --\",\n                \"watch\": \"yarn run build -- -w\",\n                \"start\": \"yarn run link:core && PORT=3002 react-scripts-ts start\",\n            }\n            package_json = json.loads(open('web' + os.sep + 'package.json', 'r').read())\n            package_json['scripts'] = scripts\n            package_json_file = open('web' + os.sep + 'package.json', 'w')\n            package_json_file.write(json.dumps(package_json, sort_keys=True, indent=4, separators=(',', ': ')))\n\n        if os.path.exists('web' + os.sep + 'src'):\n            shutil.rmtree('web' + os.sep + 'src')\n        os.mkdir('web' + os.sep + 'src')\n\n    def output(self):\n        \"\"\"Output web source code.\n        \"\"\"\n        cprint('\\tSetting up typescript code', color='yellow')\n        self.project.write_asset_template('web/src', 'index.tsx.j2', 'index.tsx')\n        
self.project.write_asset_template('web/src/containers', 'app.tsx.j2', 'app.tsx')\n self.project.write_asset_template('web/src/containers', 'login.tsx.j2', 'login.tsx')\n","sub_path":"react_bootstrap/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"18709920","text":"import math\nimport random\n\n\ndef play_guess(x, y):\n num_list = []\n\n for i in range(10):\n num_list.append(random.randint(x, y))\n\n while True:\n tip = int(input(\"Enter an integer from {0} to {1}: \".format(x, y)))\n\n if tip < num_list[i]:\n print(\"Guess is low.\")\n elif tip > num_list[i]:\n print(\"Guess is high.\")\n else:\n break\n\n print(\"You guessed it!\")\n\n\nplay_guess(1, 99)\n\nplay_guess(1, 49)\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"569024897","text":"\"\"\"\nAuthor: Le Tuan Luc\nDate: 2021/07/19\nProgram: exercise_02_page_72.py\nProblem:\n Write a code segment that displays the values of the integers x, y, and z on a single line, such that each value is right-justified with a field width of 6.\nSolution:\n print(\"%6s\" % )\n >>>\n\"\"\"\nx = 123\ny = 43\nz = 56789\nprint(\"|%6s\" % x, \"|%6s\" % y, \"|%6s\" % z)","sub_path":"chapter03/page_72/exercise_02_page_72.py","file_name":"exercise_02_page_72.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"71783719","text":"\"\"\"充值接口测试\"\"\"\nimport json\nimport os\nimport decimal\nfrom decimal import Decimal\n\nimport pytest\nimport requests\nfrom common.excel_handler import ExcelHandler\nfrom config.path import data_path\nfrom common.yaml_handler import yaml_config,user_config\nfrom common.logger_hander import logger\nfrom common.helper import generate_new_phone\nfrom common.db_handler import DBHandler\n\nexcel_file = os.path.join(data_path,'demo.xlsx')\ndata = ExcelHandler(excel_file).read_dict('recharge')\nprint(data)\n\n#db.db_colse()\n@pytest.mark.parametrize('info',data)\ndef test_recharge(info,login):\n \"\"\"充值\"\"\"\n \"\"\"先要替换\"\"\"\n if \"#member_id#\" in info['json']:\n info[\"json\"] = info[\"json\"].replace('#member_id#',str(login['id']))\n if \"#wrong_member_id#\" in info['json']:\n info[\"json\"] = info[\"json\"].replace('#wrong_member_id#', str(login['id'] + 1))\n\n # # token组装方式1:通过excel替换\n # if \"#token#\" in info['headers']:\n # info[\"headers\"] = info[\"headers\"].replace(\"#token#\",login['token'])\n\n # token 组装2:通过headers 添加,excel 表格里面不需要Authorization\n headers = json.loads(info[\"headers\"])\n headers['Authorization'] = login['token']\n\n # 数据库访问,充值之前的余额\n db = DBHandler()\n sql = 'select leave_amount from member where id={}'.format(login['id'])\n result = db.query(sql)\n before_recharge_money =result['leave_amount']\n db.db_colse()\n\n data = json.loads(info['json'])\n res= requests.request(url= yaml_config['host'] + info['url'],\n method=info['method'],\n headers=headers,\n json= data)\n res_body = res.json()\n print(res_body)\n try:\n assert res_body['code'] == info[\"expected\"]\n except AssertionError as e:\n logger.error(\"用例失败:{}\".format(e))\n raise e\n finally:\n excel = ExcelHandler(excel_file)\n excel.write('recharge',str(res_body),row=int(info['case_id']+1),column=9)\n if res_body['code'] == 0:\n db = DBHandler()\n sql = 'select leave_amount from member 
where id={}'.format(login['id'])\n result = db.query(sql)\n after_recharge_money = result['leave_amount']\n db.db_colse()\n money = Decimal(str(data['amount']))\n assert before_recharge_money + money == after_recharge_money\n if res_body['code'] == info[\"expected\"]:\n excel.write('recharge',True,row=int(info['case_id']+1),column=8)\n else:\n excel.write('recharge',False,row=int(info['case_id']+1),column=8)\n","sub_path":"lesson27_api_v6/test/test_recharge_db.py","file_name":"test_recharge_db.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"163558698","text":"#!/usr/bin/env python3\n'''\nThis program uses naive Bayes to guess the language of a text.\n'''\n\nimport argparse\nimport operator\nimport functools\nimport csv\nfrom decimal import Decimal, getcontext\n\n# Increase the precision of our calculations; the numbers involved are very small.\ngetcontext().prec = 100\n\n# Some constants\nPI = Decimal('3.141592653589793238462643383')\nLETTER_PROBABILITIES_FILE = 'letter_probabilities.csv'\nLANGUAGE_PROBABILITIES_FILE = 'language_probabilities.csv'\n\n# Probability of the language and the letter in language from A to Z.\n# Example:\n# 'English': {\n# 'probability': 0.25,\n# 'A': { 'mean': 0.12345, 'variance': 0.0000001 }\n# 'B'...\n# }\nLETTER_FREQUENCY_IN_LANGUAGES = {}\n\ndef main(files_to_analyze, language_file, letter_file):\n '''Main function that analyzes the text and produces the output'''\n load_data_from_disk(language_file, letter_file)\n for file_to_analyze in files_to_analyze:\n letter_frequency = letter_frequency_from_file(file_to_analyze)\n posterior = calculate_posterior(letter_frequency)\n print(language_with_highest_posterior(posterior))\n\ndef load_data_from_disk(language_file, letter_file):\n '''Load the data from the csv files with the probabilities.'''\n with open(language_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n LETTER_FREQUENCY_IN_LANGUAGES[row['Language']] = \\\n {'probability': Decimal(row['Probability'])}\n\n with open(letter_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n LETTER_FREQUENCY_IN_LANGUAGES[row['Language']][row['Letter']] = {\n 'mean': Decimal(row['Probability']),\n 'variance': Decimal(row['Variance'])}\n\ndef letter_frequency_from_file(file_to_analyze):\n '''Calculate the frequency of each letter in the file and return\n them as a dictionary, having each letter as the key and the\n frequency as the value'''\n letters = count_letters_from_file(file_to_analyze)\n total_letters = sum(letters.values())\n return {char: count/total_letters for char, count in letters.items()}\n\ndef count_letters_from_file(file_to_analyze):\n '''Count the valid letters in a file'''\n letters = {} # Frequency of each letter\n # Let's read the file and count the letters\n with open(file_to_analyze,'r') as f:\n for char in iter(lambda: f.read(1).upper(), ''):\n if is_valid_character(char):\n letters[char] = letters.get(char, 0) + 1\n return letters\n\ndef is_valid_character(char):\n '''Check if the character is a valid character for analysis'''\n return len(char) == 1 and \\\n (ord('A') <= ord(char) <= ord('Z'))\n\ndef calculate_posterior(letter_frequency):\n posterior = {}\n for lang in LETTER_FREQUENCY_IN_LANGUAGES.keys():\n probs = [probability_of_letter(frequency, letter, lang) \\\n for letter, frequency in letter_frequency.items()]\n probs.append(probability_of_language(lang))\n posterior[lang] = functools.reduce(operator.mul, 
probs)\n return posterior\n\ndef probability_of_language(language):\n '''Probability of each language occurring'''\n return LETTER_FREQUENCY_IN_LANGUAGES[language]['probability']\n\ndef mean_probability_of_letter(letter, language):\n '''Return the mean of the probability of a letter of a given language'''\n # FIXME the division by 100 is there because values are fixed in %\n # Must create a training program to generate those values and normalize between 0..1\n return LETTER_FREQUENCY_IN_LANGUAGES[language][letter]['mean']\n\ndef variance(letter, language):\n '''Return the variance for a letter in a given language'''\n return LETTER_FREQUENCY_IN_LANGUAGES[language][letter]['variance']\n\ndef probability_of_letter(frequency, letter, language):\n '''Calculate the probability that a given letter frequency belongs to the language'''\n mean = mean_probability_of_letter(letter, language)\n var = variance(letter, language)\n return Decimal(-((Decimal(frequency) - mean)**2/(2*var))).exp()/Decimal(2*PI*var).sqrt()\n\ndef language_with_highest_posterior(posterior):\n values = list(posterior.values())\n keys = list(posterior.keys())\n return keys[values.index(max(values))]\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='This program uses \\\n naive Bayes to find out the language of a text.')\n parser.add_argument(\n 'file',\n nargs='+',\n help='File to be analyzed.'\n )\n parser.add_argument(\n '--letter',\n default='letter_probabilities.csv',\n help='CSV file with the probabilities of each letter in each language'\n )\n parser.add_argument(\n '--language',\n default='language_probabilities.csv',\n help='CSV file with the probabilities of each language'\n )\n args = parser.parse_args()\n main(args.file, args.language, args.letter)\n","sub_path":"naive_lang_classifier.py","file_name":"naive_lang_classifier.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"180820477","text":"from tkinter import filedialog as fd\nfrom tkinter.messagebox import showinfo\nfrom tkinter import *\n\n\ndef selectFile(action, target):\n \"\"\"Open a file dialog to select a csv file, then use the selected\n file path with the given action to update the target widget\n\n Args:\n action (string): the action to perform with the file\n target (variable): the widget to update with the selected file path\n \"\"\"\n filetypes = (\n ('csv', '*.csv'),\n )\n filename = fd.askopenfilename(\n title='Select csv datasource',\n initialdir='./data-sample',\n filetypes=filetypes)\n\n if action == \"upload\":\n target.insert(END, filename)\n","sub_path":"controller/menu/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"105611339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(verbose_name='国名', max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Team',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, 
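# Django's auto-created integer primary key\n 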
verbose_name='ID')),\n ('name', models.CharField(verbose_name='チーム名', max_length=100)),\n ('country', models.ForeignKey(verbose_name='所属国', to='raul.Country')),\n ],\n ),\n migrations.CreateModel(\n name='TeamUser',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('position_type', models.CharField(verbose_name='ポジション', choices=[('0', 'FW'), ('1', 'MF'), ('2', 'DF'), ('3', 'GK'), ('9', '監督')], max_length=1)),\n ('belong_to', models.ForeignKey(verbose_name='所属チーム', to='raul.Team')),\n ('user', models.ForeignKey(verbose_name='ユーザ', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"raul/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"368027551","text":"import json, sys\n\ndef load_token(path):\n try:\n with open(path) as file:\n token = json.load(file)\n return token['token']\n except Exception as e:\n print('[ERR] config.load_token: {0}'.format(e))\n sys.exit(1)\n","sub_path":"dist-packages/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"265361138","text":"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle.fluid.core as core\nfrom op_test import OpTest\n\n\ndef rpn_target_assign(iou, rpn_batch_size_per_im, rpn_positive_overlap,\n rpn_negative_overlap, fg_fraction):\n iou = np.transpose(iou)\n anchor_to_gt_max = iou.max(axis=1)\n gt_to_anchor_argmax = iou.argmax(axis=0)\n gt_to_anchor_max = iou[gt_to_anchor_argmax, np.arange(iou.shape[1])]\n anchors_with_max_overlap = np.where(iou == gt_to_anchor_max)[0]\n\n tgt_lbl = np.ones((iou.shape[0], ), dtype=np.int32) * -1\n tgt_lbl[anchors_with_max_overlap] = 1\n tgt_lbl[anchor_to_gt_max >= rpn_positive_overlap] = 1\n\n num_fg = int(fg_fraction * rpn_batch_size_per_im)\n fg_inds = np.where(tgt_lbl == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = np.random.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n tgt_lbl[disable_inds] = -1\n fg_inds = np.where(tgt_lbl == 1)[0]\n\n num_bg = rpn_batch_size_per_im - np.sum(tgt_lbl == 1)\n bg_inds = np.where(anchor_to_gt_max < rpn_negative_overlap)[0]\n if len(bg_inds) > num_bg:\n enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)]\n tgt_lbl[enable_inds] = 0\n bg_inds = np.where(tgt_lbl == 0)[0]\n\n loc_index = fg_inds\n score_index = np.hstack((fg_inds, bg_inds))\n tgt_lbl = np.expand_dims(tgt_lbl, axis=1)\n return loc_index, score_index, tgt_lbl\n\n\nclass TestRpnTargetAssignOp(OpTest):\n def setUp(self):\n iou = np.random.random((10, 8)).astype(\"float32\")\n self.op_type = \"rpn_target_assign\"\n self.inputs = {'DistMat': iou}\n self.attrs = {\n 
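# fix_seed pins the op's random fg/bg sampling so the test output stays reproducible\n 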
'rpn_batch_size_per_im': 256,\n 'rpn_positive_overlap': 0.95,\n 'rpn_negative_overlap': 0.3,\n 'fg_fraction': 0.25,\n 'fix_seed': True\n }\n loc_index, score_index, tgt_lbl = rpn_target_assign(iou, 256, 0.95, 0.3,\n 0.25)\n self.outputs = {\n 'LocationIndex': loc_index,\n 'ScoreIndex': score_index,\n 'TargetLabel': tgt_lbl,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestRpnTargetAssignOp2(OpTest):\n def setUp(self):\n iou = np.random.random((10, 20)).astype(\"float32\")\n self.op_type = \"rpn_target_assign\"\n self.inputs = {'DistMat': iou}\n self.attrs = {\n 'rpn_batch_size_per_im': 128,\n 'rpn_positive_overlap': 0.5,\n 'rpn_negative_overlap': 0.5,\n 'fg_fraction': 0.5,\n 'fix_seed': True\n }\n loc_index, score_index, tgt_lbl = rpn_target_assign(iou, 128, 0.5, 0.5,\n 0.5)\n self.outputs = {\n 'LocationIndex': loc_index,\n 'ScoreIndex': score_index,\n 'TargetLabel': tgt_lbl,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py","file_name":"test_rpn_target_assign_op.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"47381015","text":"\n# coding: utf-8\n\n# In[51]:\n\n\nimport cv2\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport os\nimport pickle\nimport numpy.linalg as linalg\nfrom sklearn.model_selection import train_test_split\nfrom random import shuffle\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.naive_bayes import GaussianNB\nimport seaborn\nfrom numpy.random import choice\nfrom sklearn.tree import DecisionTreeClassifier\nimport math\nfrom sklearn import svm\nimport seaborn as sns\nimport matplotlib.patheffects as PathEffects\nimport torchvision.models as models\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.utils.data as utils\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom PIL import Image\nimport matplotlib.cm as cm\nimport imghdr\nimport tensorflow as tf\n\n\n# In[52]:\n\n\nfrom sklearn.svm import SVC\n\n\n# In[53]:\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\n\n\n# In[54]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[55]:\n\n\ndef mean_cal(data):\n main_mean=[]\n for i in range(len(data[0])):\n temp=[]\n for j in range(len(data)):\n temp.append(data[j][i])\n mean1=np.mean(np.array(temp))\n main_mean.append(mean1)\n return main_mean\n \n\n\n# In[56]:\n\n\ndef cal_eigenenergy(arr,k):\n sum=0\n for i in range(len(arr)):\n sum+=arr[i]\n initial=arr[0]\n counter=0\n while initial<(sum*k/float(100)):\n counter+=1\n initial+=arr[counter]\n return counter+1\n \n\n\n# In[57]:\n\n\ndef pca(data,k): # k is eigen energy\n data=np.array(data)\n mean=np.array(mean_cal(data))\n normal_data=[]\n for i in range(len(data)):\n normal_data.append(np.subtract(data[i],mean))\n normal_data=np.array(normal_data)\n cov_mat=np.cov(np.transpose(normal_data))\n eig_val, eig_vect = linalg.eigh(cov_mat)\n eig_vect=np.transpose(eig_vect)\n# eig_val=abs(eig_val)\n eig_val1=copy.deepcopy(eig_val)\n eig_vect1=copy.deepcopy(eig_vect)\n for i in range(len(eig_val)):\n if eig_val[i]<0:\n eig_val1[i]=eig_val[i]*(-1)\n \n eig_vect_set=sort_list(eig_vect1.tolist(),eig_val1.tolist())\n eig_vect_set.reverse()\n \n 
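# rank the eigenvalues by magnitude and keep enough of them to reach the requested eigen-energy k (given in percent)\n 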
eig_val2=sorted(eig_val1, reverse=True)\n k=cal_eigenenergy(eig_val2,k)\n# print(k)\n eig_vect_set=eig_vect_set[:k]\n# print(\"Eig Vl:\",eig_val)\n# print(\"Eig Vect : \",eig_vect_set)\n \n# print(eig_vect_set)\n print(\"Features it took are : \",k)\n eig_vect=np.transpose(eig_vect_set)\n# dot_result=np.dot(normal_data,eig_vect)\n return eig_vect\n\n\n# In[58]:\n\n\n#geeks for geeks \ndef sort_list(list1, list2): \n \n zipped_pairs = zip(list2, list1) \n \n z = [x for _, x in sorted(zipped_pairs)] \n \n return z \n\n\n# In[59]:\n\n\ndef labelling(predict,true):\n h=[]\n for i in range(len(predict)):\n if predict[i]==true[i]:\n h.append(1)\n else:\n h.append(0)\n return h\n\n\n# In[60]:\n\n\ndef accuracy(predict,true):\n count=0\n for i in range(len(predict)):\n if predict[i]==true[i]:\n count+=1\n return count/float(len(predict))\n\n\n# In[61]:\n\n\ndef find_tpr_fpr(predict,real,checker):\n tp=0\n tn=0\n fp=0\n# print(\"find_tpr_fpr\")\n fn=0\n voc=copy.deepcopy([0,1,2,3,4,5,6,7,8,9])\n v=voc.index(checker)\n del voc[v]\n for i in range(len(predict)):\n if predict[i]==checker and real[i]==checker:\n tp=tp+1\n if (predict[i] in voc ) and real[i]==checker:\n fn=fn+1\n if predict[i]==checker and (real[i] in voc):\n fp=fp+1\n if (predict[i] in voc) and (real[i] in voc):\n tn=tn+1\n tpr2=0\n fpr2=0\n# print(\"Total :\",(tp+fp+tn+fn))\n tpr2=float(tp/float(tp+fn)) \n fpr2=float(fp/float(fp+tn))\n \n return tpr2,fpr2\n\n\n# In[62]:\n\n\ndef adaboost(n,train_data1,train_label1,test_data,test_label,weights,d):\n \n alpha_k=[]\n Ck=[]\n nat=[i for i in range(len(train_data1))]\n main_data=copy.deepcopy(train_data1)\n main_label=copy.deepcopy(train_label1)\n \n for i in range(n):\n print(i)\n# print(\"Hello : \",i)\n sample = choice(nat, d,p=weights,replace=False)\n# sample = choice(nat,d,weights,replace=False)\n train_data=[]\n train_label=[]\n for j in range(len(sample)):\n train_data.append(train_data1[sample[j]])\n train_label.append(train_label1[sample[j]])\n \n clf=DecisionTreeClassifier(max_depth=3,max_leaf_nodes=10)\n clf.fit(np.array(train_data),np.array(train_label))\n predict1=clf.predict(np.array(main_data))\n h=labelling(predict1.tolist(),main_label)\n train_err=clf.score(np.array(train_data),np.array(train_label))\n train_err=1-train_err\n alpha=0.5*np.log((1-train_err)/float(train_err))+np.log(25)\n alpha_k.append(alpha)\n Ck.append(clf)\n# print(\"Hello1 : \",i)\n for j in range(len(weights)):\n \n if h[j]==1:\n \n weights[j]=weights[j]*math.exp((-1)*alpha)\n else:\n weights[j]=weights[j]*math.exp(alpha)\n w=copy.deepcopy(weights)\n total=np.sum(w)\n for j in range(len(weights)):\n weights[j]=weights[j]/float(total)\n# print(\"Hello2 : \",i)\n #For test set\n test_predict=[]\n for i in range(len(test_data)):\n disc_func=[[] for i in class_label]\n for j in range(k_max):\n index=Ck[j].predict(np.array(test_data[i]).reshape(1,-1)).tolist()[0]\n if disc_func[index]==[]:\n disc_func[index].append(alpha_k[j])\n else:\n disc_func[index][0]+=alpha_k[j]\n test_predict.append(disc_func.index(max(disc_func)))\n\n test_acc=accuracy(test_predict,test_label) \n# For train set\n train_predict=[]\n# for i in range(len(train_data1)):\n# disc_func1=[[] for i in class_label]\n# for j in range(k_max):\n# index=Ck[j].predict(np.array(train_data1[i]).reshape(1,-1)).tolist()[0]\n# if disc_func1[index]==[]:\n# disc_func1[index].append(alpha_k[j])\n# else:\n# disc_func1[index][0]+=alpha_k[j]\n# train_predict.append(disc_func1.index(max(disc_func1)))\n\n# train_acc=accuracy(train_predict,train_label1) \n 
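# computing the training accuracy is skipped (commented out above) to save time; a placeholder of 0 is returned\n 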
train_acc=0\n return Ck,alpha_k,train_predict,test_predict,train_acc,test_acc\n \n\n\n# In[63]:\n\n\ndef bagging(n,train_data1,train_label1,test_data,test_label,d):\n \n alpha_k=[]\n Ck=[]\n nat=[i for i in range(len(train_data1))]\n\n for i in range(n):\n sample = choice(nat, d,replace=True)\n train_data=[]\n train_label=[]\n for j in range(len(sample)):\n train_data.append(train_data1[sample[j]])\n train_label.append(train_label1[sample[j]])\n\n clf=DecisionTreeClassifier(max_depth=2,max_leaf_nodes=5)\n clf.fit(np.array(train_data),np.array(train_label))\n Ck.append(clf)\n #For test set\n test_predict=[]\n\n for i in range(len(test_data)):\n disc_func=[[] for i in class_label]\n for j in range(k_max):\n index=Ck[j].predict(np.array(test_data[i]).reshape(1,-1)).tolist()[0]\n if disc_func[index]==[]:\n disc_func[index].append(1)\n else:\n disc_func[index][0]+=1\n test_predict.append(disc_func.index(max(disc_func)))\n\n test_acc=accuracy(test_predict,test_label) \n train_acc=0\n# print(\"Accuracy in test data : \",acc)\n# #For training set\n train_predict=[]\n\n# for i in range(len(train_data1)):\n# disc_func=[[] for i in class_label]\n# for j in range(k_max):\n# index=Ck[j].predict(np.array(train_data1[i]).reshape(1,-1)).tolist()[0]\n# if disc_func[index]==[]:\n# disc_func[index].append(1)\n# else:\n# disc_func[index][0]+=1\n# train_predict.append(disc_func.index(max(disc_func)))\n\n# train_acc=accuracy(train_predict,train_label1) \n \n\n return Ck,train_predict,test_predict,train_acc,test_acc\n \n\n\n# In[64]:\n\n\ndef roc_design(prob_dist,testdata,checker):\n aux1=[]\n aux2=[]\n testdata1=copy.deepcopy(testdata)\n for i in range(len(testdata)):\n \n aux1.append(prob_dist[i])\n aux2.append(testdata[i])\n main1=sort_list(aux2, aux1)\n# print(\"Probability in incresing order : \",main1)\n \n tpr=[]\n fpr=[]\n #aux1 has prob_distribution and main1 has testlabel in sorted order\n \n main2=[]\n j=0\n for j in range(len(prob_dist)):\n main2.append(checker)\n i=0\n #Logic \n if (checker+1)==10:\n flag=checker-1\n else:\n flag=checker+1\n \n while i None:\n super(JobOutput, self).__init__(**kwargs)\n self.error = None\n self.state = None\n self.progress = None\n self.odatatype = None\n","sub_path":"azure-mgmt-media/azure/mgmt/media/models/job_output_py3.py","file_name":"job_output_py3.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"584645283","text":"from abstract_module import abstract\nfrom abstract_module.constants import *\nfrom math import cos, pi, sin, sqrt, atan2\nimport math_utils\nimport numpy as np\nfrom typing import List, Tuple\n\ndef closestObstacle(x: int, y: int, obstacles: List[Tuple[int, int]]) -> Tuple[int, int]:\n '''\n Given an obstacles list, return the obstacle closest to the robot\n '''\n last_ro = 0\n count = 0\n for obstacle in obstacles:\n obs_x, obs_y = obstacle\n delta_x, delta_y = math_utils.delta_axis(x, y, obs_x, obs_y)\n if not count:\n last_ro = math_utils.norm(delta_x, delta_y)\n if (math_utils.norm(delta_x, delta_y) <= last_ro):\n closer_obs = obstacle\n last_ro = math_utils.norm(delta_x, delta_y)\n count += 1\n \n return closer_obs\n\n\ndef Nh(phi: float) -> List[float]:\n return np.array([cos(phi), sin(phi)])\n\n\ndef phiAuf(obs_x: int, obs_y: int, r_x: int, r_y: int, r_o_dist:float, v_obs: list = abstract.v_obstacle(), v_rob: list = abstract.v_robot(), ko: float = ko) -> float: # Avoid Obstacles\n '''\n Returns an avoidance coefficient, relative to the 
obstacle, considering the obstacle's position and velocity, \n as well as the robot's position and velocity\n '''\n obstacle_position = np.array([obs_x, obs_y])\n\n s_vec = ko * (v_obs - v_rob)\n s_norm = math_utils.norm(s_vec[0], s_vec[1])\n obs_robot_dist = r_o_dist\n\n if obs_robot_dist >= s_norm:\n p_line_obs = obstacle_position + s_vec\n else:\n p_line_obs = obstacle_position + obs_robot_dist * s_vec / s_norm\n\n delta_x, delta_y = math_utils.delta_axis(p_line_obs[0], p_line_obs[1], r_x, r_y)\n phi_auf = phiR(delta_x, delta_y)\n \n return math_utils.wrapToPi(phi_auf)\n\n\ndef phiComposed(phi_tuf: float, phi_auf: float, R: float, obstacles: List[Tuple], delta: float = delta, d_min: float = d_min) -> float: # Composition\n '''\n Merges the avoidance and movement coefficients and returns a coefficient of movement, considering the obstacles and robot's position\n '''\n if obstacles is None:\n phi_composed = math_utils.wrapToPi(phi_tuf)\n else:\n gauss = math_utils.gaussian(R - d_min, delta)\n \n if R <= d_min:\n phi_composed = phi_auf\n else:\n # phi_composed = phi_auf * G(R - d_min, delta_const) + phi_tuf * (1 - G(R - d_min, delta_const))\n diff = math_utils.wrapToPi(phi_auf - phi_tuf)\n phi_composed = math_utils.wrapToPi(gauss * diff + phi_tuf) \n\n return math_utils.wrapToPi(phi_composed) \n\n\ndef phiH(rho: float, theta: float, cw: bool = False, radius: float = de, kr: float = kr) -> float: # Hyperbolic\n '''\n Returns a coefficient of a hyperbolic spiral that guides the robot to the ball\n '''\n '''\n The direction of rotation of the spiral has been inverted here: when it is passed as in the article, \n the clockwise direction becomes counterclockwise and vice versa\n '''\n\n if rho > radius:\n angle = (pi / 2) * (2 - ((radius + kr) / (rho + kr)))\n elif 0 <= rho <= radius:\n angle = (pi / 2) * sqrt(rho / radius)\n\n if cw:\n return math_utils.wrapToPi(theta + angle)\n else:\n return math_utils.wrapToPi(theta - angle)\n\n\ndef phiR(d_x: float, d_y: float) -> float: # Repulsive\n '''\n Returns an avoidance coefficient, relative to the obstacle, considering nothing but the obstacle's \n position and the robot's position \n '''\n return atan2(d_y, d_x)\n\n\ndef phiTuf(theta: float, d_x: float, d_y: float, radius: float = de) -> float: # Move to Goal\n '''\n Merges a clockwise and a counterclockwise hyperbolic spiral and returns a coefficient of \n movement that guides the robot to the ball, following the shortest path \n '''\n y_l = d_y + radius\n y_r = d_y - radius\n\n ro_l = math_utils.norm(d_x, d_y - radius)\n ro_r = math_utils.norm(d_x, d_y + radius)\n\n phi_ccw = phiH(ro_l, theta, cw=True)\n phi_cw = phiH(ro_r, theta, cw=False)\n\n nh_ccw = Nh(phi_ccw)\n nh_cw = Nh(phi_cw)\n # The absolute value of y_l and y_r was not specified in the article, but the results obtained \n # with this trick are closer to the article's images\n spiral_merge = (abs(y_l) * nh_ccw + abs(y_r) * nh_cw) / (2 * radius) \n\n if -radius <= d_y < radius:\n phi_tuf = atan2(spiral_merge[1], spiral_merge[0])\n elif d_y < -radius:\n phi_tuf = phiH(ro_l, theta, cw=False)\n else:\n phi_tuf = phiH(ro_r, theta, cw=True)\n\n return math_utils.wrapToPi(phi_tuf)\n\n\ndef generateUnivectorField(r_x: int, r_y: int, ball_pos: Tuple[int, int], obs_pos: List[Tuple[int, int]], de: float = de, v_obs: list = abstract.v_obstacle(), v_rob: list = abstract.v_robot(), ko: float = ko, delta: float = delta, d_min: float = d_min) -> float:\n\n ball_x, ball_y = ball_pos\n d_ball_x, d_ball_y = math_utils.delta_axis(ball_x, ball_y, r_x, r_y)\n 
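# angle of the robot-to-ball vector, used as the base direction for the spiral field\n 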
theta = phiR(d_ball_x, d_ball_y)\n phi_tuf = phiTuf(theta, d_ball_x, d_ball_y, de)\n\n obstacle = closestObstacle(r_x, r_y, obs_pos)\n obs_x, obs_y = obstacle\n\n robot_obs_x, robot_obs_y = math_utils.delta_axis(obs_x, obs_y, r_x, r_y)\n R = math_utils.norm(robot_obs_x, robot_obs_y)\n robot_obs_dist = math_utils.norm(robot_obs_x, robot_obs_y)\n \n phi_auf = phiAuf(obs_x, obs_y, r_x, r_y, robot_obs_dist, v_obs, v_rob, ko)\n phi_composed = phiComposed(phi_tuf, phi_auf, R, obstacle, delta, d_min)\n\n return Nh(phi_composed)","sub_path":"univector.py","file_name":"univector.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"596047892","text":"from scipy import optimize\nimport numpy as np\nimport pylab as pl\n\npl.rc('axes', linewidth=2)\n\n# set up your read_array to use later to read in your file\ndef read_array(filename, dtype, separator='\\t'):\n \"\"\" Read a file with an arbitrary number of columns.\n The type of data in each column is arbitrary\n It will be cast to the given dtype at runtime\n \"\"\"\n cast = np.cast\n data = [[] for dummy in range(len(dtype))]\n for line in open(filename, 'r'):\n fields = line.strip().split(separator)\n for i, number in enumerate(fields):\n data[i].append(number)\n for i in range(len(dtype)):\n data[i] = cast[dtype[i]](data[i])\n return np.rec.array(data, dtype=dtype)\n\n# now read in your file -- the line below gives examples of datatypes\n#mydescr = np.dtype([('column1', 'int32'), ('column2Name', 'uint32'), ('col3', 'uint64'), ('c4', 'float32')])\nmydescr = np.dtype([('xpos', 'float32'), ('ypos', 'float32'),('yerr', 'float32')])\nmyrecarray = read_array('CalibAfterChvsEn.txt', mydescr)\n\n# put in a small error on the x measurement\nxerr = 0.001\n\n# here we plot the data with error bars\npl.errorbar(myrecarray.xpos,myrecarray.ypos,myrecarray.yerr,xerr)\n\n# now we want to do a least squares fit to the data -- a straight line\n# here is our function that we fit \ndef func(x, a, b):\n return a + b*x\n\n\n# Initial guess for a and b, the parameters of the fit\nx0 = np.array([1.0, 0.1])\nsigma = myrecarray.yerr\n\n\nprint (optimize.curve_fit(func, myrecarray.xpos, myrecarray.ypos, x0, sigma))\n\n# Change size and font of tick labels\n# Again, this doesn't work in interactive mode.\nfontsize = 14\nax = pl.gca()\nfor tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\nfor tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\n\npl.xlabel('energy', fontsize=16, fontweight='bold')\npl.ylabel('channel', fontsize=16, fontweight='bold')\n\n# save the plot to a file\npl.savefig('HEP.png', bbox_inches='tight')\n# display the plot so you can see it\npl.show()\n\n\n\n\n\n\n\n\n\n","sub_path":"Compton Effect/Lab Data/2/fitHEP_e1.py","file_name":"fitHEP_e1.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"187104419","text":"\"\"\"tracker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom webapp.views import IndexView, \\\n ArticleView, ArticleDeleteView, \\\n ArticleCreateView, ArticleUpdateView, \\\n StatusListView, StatusView, StatusDeleteView, \\\n StatusCreateView, StatusUpdateView, TypeListView, \\\n TypeView, TypeCreateView, TypeUpdateView, TypeDeleteView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', IndexView.as_view(), name='index'),\n path('article/<int:pk>', ArticleView.as_view(), name='form_view'),\n path('article/add/', ArticleCreateView.as_view(), name='article_create_view'),\n path('article/<int:pk>/edit/', ArticleUpdateView.as_view(), name='form_update'),\n path('article/<int:pk>/delete/', ArticleDeleteView.as_view(url='http://localhost:8000/'), name='delete_form'),\n path('status/', StatusListView.as_view(), name='status_index'),\n path('status/<int:pk>', StatusView.as_view(), name='status_view'),\n path('status/<int:pk>/delete/', StatusDeleteView.as_view(url='http://localhost:8000/status/#'),\n name='delete_status'),\n path('status/add/', StatusCreateView.as_view(), name='status_create_view'),\n path('status/<int:pk>/edit/', StatusUpdateView.as_view(), name='status_update'),\n path('type/', TypeListView.as_view(), name='type_index'),\n path('type/<int:pk>', TypeView.as_view(), name='type_view'),\n path('type/add/', TypeCreateView.as_view(), name='type_create_view'),\n path('type/<int:pk>/edit/', TypeUpdateView.as_view(), name='type_update'),\n path('type/<int:pk>/delete/',\n TypeDeleteView.as_view(url='http://localhost:8000/type/#'),\n name='delete_type'),\n]\n","sub_path":"tracker_src/tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"376627314","text":"Import('env', 'plugins', 'os', 'sys')\n \nlibGtest = env.StaticLibrary('gtest', [ \n 'gtest-death-test.cc',\n 'gtest-filepath.cc',\n 'gtest-port.cc',\n 'gtest-printers.cc',\n 'gtest-test-part.cc',\n 'gtest-typed-test.cc',\n 'gtest.cc',\n 'gtest_main.cc'\n])\n\ngtestEnv = env.Clone()\ngtestEnv.Append(LIBS=['gtest']) \ngtestEnv.Append(LIBPATH=[os.path.join(env['BUILDDIR'], 'gtest')])\nExport('gtestEnv')","sub_path":"src/gtest/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"397325268","text":"import json\nimport requests\nfrom datetime import datetime\nfrom logging import Handler, Formatter\n\nPATH_TO_TELEGRAM_SECRET = \"/home/analytics/.credentials/Telegram_Bot/credentials.json\"\ncredentials = json.loads(open(PATH_TO_TELEGRAM_SECRET).read())\n\n\nclass TelegramHandler(Handler):\n \"\"\"\n Fork of the standard handler for working with the Telegram Bot API\n \"\"\"\n\n def emit(self, record):\n log_entry = self.format(record)\n payload = {\n 'chat_id': credentials['TELEGRAM_CHAT_ID'],\n 'text': log_entry,\n 'parse_mode': 'HTML'\n }\n return requests.post(\"https://api.telegram.org/bot{token}/sendMessage\" \\\n .format(token=credentials['TELEGRAM_TOKEN']), data=payload).content\n\n\nclass TelegramFormatter(Formatter):\n \"\"\"\n Format for telegram messages\n \"\"\"\n\n def __init__(self):\n super(TelegramFormatter, self).__init__()\n\n def format(self, record):\n return \"[{levelname}|{file_name}]\" 
\\\n \"
\\n{message}
\" \\\n \"\\n\\n{datetime}\".format(message=record.msg,\n datetime=datetime.now().strftime('%b-%d-%Y %H:%M:%S'),\n levelname=record.__dict__['levelname'],\n file_name=record.__dict__['pathname']\n )\n","sub_path":"modules/handler/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"155182067","text":"from src.utils.tk import TKUtils\n\nfrom src.view.home.filtro import Filtro\nfrom src.view.home.actions import Actions\nfrom src.view.home.listagem import ListaDeElementos\nfrom src.view.home.cadastro import (FormularioApresentacao, FormularioTarefa,\n FormularioEvento)\n\n\nclass Home(TKUtils.obter_container()):\n\n def __init__(self):\n super().__init__()\n\n self.defs.pack['side'] = 'bottom'\n\n self.filtro = Filtro()\n self.actions = Actions()\n self.listagem = ListaDeElementos()\n self.cadastro_evento = FormularioEvento()\n self.cadastro_tarefa = FormularioTarefa()\n self.cadastro_apresentacao = FormularioApresentacao()\n\n def iniciar(self, master):\n super().iniciar(master=master)\n\n self.filtro.iniciar(master=self)\n self.actions.iniciar(master=self)\n self.listagem.iniciar(master=self)\n\n self.ocultar()\n","sub_path":"src/view/home/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"38029974","text":"fin = open('paint.in', 'r')\nfout = open('paint.out', 'w')\n\na, b = map(int, fin.readline().split())\nc, d = map(int, fin.readline().split())\n\nif c < a: # cases are symmetric\n\ta, c = c, a\n\tb, d = d, b\n\nanswer = (b-a) + (d-c)\n\nif b > c: # overlap\n\tanswer = max(b,d) - a\n\nfout.write(str(answer) + '\\n')\nfout.close()\n\n# Solution 2\n#for i in range(100):\n#\tif i >= a and i+1 <= b:\n#\t\tanswer += 1\n#\telif i >= c and i+1 <= d:\n#\t\tanswer += 1\n#fout.write(str(answer) + '\\n')\n#fout.close()\n","sub_path":"bronze/paint/paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"38244152","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Use text editor to edit the script and type in valid Instagram username/password\n\nfrom InstagramAPI import InstagramAPI\n\napi = InstagramAPI(\"\", \"\")\n\ndef sendDM(api,user_id,message):\n # api.searchUsername(username)\n # response = api.LastJson\n # user_id = response['user']['pk']\n mediaId = '1469246128228859784_1520786701' #i dont know what this is but was on sourse code\n recipients = [user_id]\n api.direct_message(message, user_id)\n\ndef getTotalFollowers(api, username):\n \"\"\"\n Returns the list of followers of the user.\n It should be equivalent of calling api.getTotalFollowers from InstagramAPI\n \"\"\"\n api.searchUsername(username)\n response = api.LastJson\n user_id = response['user']['pk']\n\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowers(user_id, maxid=next_max_id)\n followers.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers\n\ndef getTotalFollowing(api, username):\n \"\"\"\n Returns the list of followers of the user.\n It should be equivalent of calling api.getTotalFollowers from InstagramAPI\n \"\"\"\n api.searchUsername(username)\n response = api.LastJson\n 
user_id = response['user']['pk']\n\n following = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowings(user_id, maxid=next_max_id)\n following.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return following\n\nif (api.login()):\n #usersList = [\"lewis_boughtflower\",\"sofiaxsharp_\",\"spencermensah\"]\n # usersList = [\"spencermensah\"]\n #\n # for names in usersList:\n # sendDM(api,names)\n\n followers = getTotalFollowing(api, \"pechee.__\")\n\n message = \"hey xx\"\n\n for follower in followers:\n userId = follower['pk']\n sendDM(api,userId,message)\n print(follower['username'])\n\n # usersList = [\"spencermensah\",\"lewis_boughtflower\"]\n #\n # for names in usersList:\n # sendDM(api,names,message)\n\nelse:\n print(\"Can't login!\")\n","sub_path":"forex.py","file_name":"forex.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"548147689","text":"#!/usr/bin/env python3\n\n\"\"\"\nPython script to downgrade a double precision Fortran routine to single\n(real4) precision without changing its external signature, for example\nto study the effect of reduced precision arithmetic within only one\nsubroutine that is part of a larger piece of software.\n\nThe script takes the following arguments:\n 1. The file name to be read. A new file will be created with _real4 appended\n to the name.\n 2. An optional argument specifying the name of the subroutine that is\n to be treated. If the argument is not given, all subroutines in\n the file will be modified.\n\nLimitations:\n - Currently only F77 files are supported. This could be easily fixed by\n using the fortran.two parser for F90 files and adjusting some of the\n node types, e.g. not only test for fparser.one.statements.Assignment\n but also for f2003.Assignment_Stmt\n - Currently only files with one subroutine and nothing else are\n supported. No modules, no classes, nothing. This could also easily be\n fixed, by discovering subroutine nodes in a recursive AST search just\n like what is already happening to discover assignments\n - Whether a variable is first written or first read is determined\n lexicographically, not by building the flow graph. This means that\n branches or even goto statements can trick this analysis. It should\n still be enough for cases where variables are either read-only or\n write-only. Fixing this would be more difficult.\n - Read and write access to variables is only detected in assignments,\n for example in 'foo(i) = bar + z(g)' we would detect 'foo' as being\n written, and 'bar', 'z' as being read. 
No other access is detected,\n for example a subroutine or function call would not result in the\n arguments being added to the read and write lists.\n\"\"\"\n\nimport fparser.one.parsefortran\nimport fparser.common.readfortran\nimport sys, re\nimport textwrap\n\n# Read arguments\nfilename = sys.argv[1]\nverbose = False\nif(sys.argv[1] == '-verbose'):\n filename = sys.argv[2]\n verbose = True\ndef printv(arg):\n if verbose:\n print(arg)\nfilename_preduced = \"%s_preduced.f\"%(filename[0:-2])\nunitname = None\nif(len(sys.argv)>3):\n unitname = sys.argv[2]\n if(sys.argv[1] == '-verbose'):\n unitname = sys.argv[3]\nif(unitname == None):\n printv(\"preducer downgrading the precision of all subroutines in file %s.\"%(filename))\nelse:\n printv(\"preducer downgrading the precision of subroutine \\\"%s\\\" in file %s.\"%(unitname,filename))\n\ndef cleanVariableName(var):\n \"\"\"\n A reference to a variable can be something messy like \"BaR(3,foo),\", where\n all we want is the variable name \"bar\". This function removes array\n indices, trailing commas etc, and makes everything lowercase, to get only\n a clean variable name and nothing else.\n \"\"\"\n return re.split('[,(]',var)[0].lower()\n\ndef find_vars(varstring):\n current_varname = ''\n varlist = list()\n parentheses_depth = 0\n for i, c in enumerate(varstring):\n if c == '(':\n parentheses_depth += 1\n elif c == ')':\n parentheses_depth -= 1\n elif parentheses_depth == 0:\n if c != ' ' and c != '\\t':\n if(c == ','):\n if(len(current_varname.strip())>0):\n varlist.append(current_varname)\n current_varname = ''\n else:\n current_varname += c\n varlist.append(current_varname)\n if(varlist[0].lower()=='doubleprecision'):\n del(varlist[0])\n else:\n varlist[0] = varlist[0][15:]\n return varlist\n\ndef visitDoublePrecisionStmt(node):\n \"\"\"\n The f77 parser treats a line containing a double precision variable\n declaration as a Line, which is a string of characters. We need to extract\n the variable names from that string, and not get confused by arrays. For\n example, \"double precision foo(a,3), bar\" should give us the variables\n \"foo\" and \"bar\", and nothing else.\n \"\"\"\n if(type(node)!=fparser.one.typedecl_statements.DoublePrecision):\n raise Exception(\"visitDoublePrecisionStmt called on wrong node type\")\n slist = find_vars(node.item.line)\n varset = set()\n for s in slist:\n varname = cleanVariableName(s)\n varset.add(varname) # add this variable name to set\n return varset\n\ndef visitNode(node,doublevars,doublevars_modified):\n \"\"\"\n Recursively go through the AST and find all assignments.\n This is needed to find variables that are read before modified, and\n variables that are modified at all.\n \"\"\"\n children = []\n doublevars_predefined = set()\n if hasattr(node, \"content\"):\n children = node.content\n elif hasattr(node, \"items\"):\n children = node.items\n elif type(node) in (tuple, list):\n children = node\n for child in children:\n if(type(child)==fparser.one.statements.Assignment):\n lhs = cleanVariableName(child.variable)\n # Visit an assignment statement, e.g. 
\"a = b + c\"\n if(lhs in doublevars):\n doublevars_modified.add(lhs)\n rhs = child.expr\n readDoubleVars = set(filter(lambda x: x in rhs, doublevars))\n doublevars_predefined = doublevars_predefined.union(readDoubleVars.difference(doublevars_modified))\n else:\n newmodified, newpredefined = visitNode(child, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n return doublevars_modified, doublevars_predefined\n\ndef f77linebreaks(instr):\n \"\"\"\n Takes a string as an input, and breaks all lines after at most 72\n characters, using F77 line continuation markers.\n \"\"\"\n outstr = ''\n for l in instr.splitlines():\n if(len(l.strip())==0): # empty line\n outstr += l+'\\n'\n elif(l[0]!=' ' or l.lstrip()[0]=='!'): # comment line, never touch those\n outstr += l+'\\n'\n else:\n if(len(l) > 7 and l[0:7].strip().isnumeric()): # workaround for parser bug: numeric line labels are printed with an incorrect blank space in column 1. Remove this.\n l = l[0:7].strip().ljust(7) + l[7:]\n while(len(l) > 72):\n outstr += l[0:71]+'\\n'\n l = ' *'+l[71:]\n outstr += l+'\\n'\n return outstr\n\ndef real4subroutine(unit, file, allunits):\n # Analysis part: Find the subroutine that needs to be modified,\n # and for that subroutine, find the double precision arguments\n # and for each of those, find out whether they are in/outputs.\n args = unit.args.copy()\n if(unit.blocktype == 'function'):\n args.append(unit.name)\n printv(args)\n doublevars = set() # all double precision variables declared within subroutine\n doublevars_predefined = set() # all double precision variables read before being modified\n doublevars_modified = set() # all double precision variables modified within subroutine\n decls = list()\n for c in unit.content:\n decltypes = [fparser.one.typedecl_statements.Byte,\n fparser.one.typedecl_statements.Character,\n fparser.one.typedecl_statements.Complex,\n fparser.one.typedecl_statements.DoubleComplex,\n fparser.one.typedecl_statements.DoublePrecision,\n fparser.one.typedecl_statements.Integer,\n fparser.one.typedecl_statements.Logical,\n fparser.one.typedecl_statements.Real,\n fparser.one.statements.Parameter]\n if(type(c) in decltypes):\n decls.append(c)\n if(type(c) == fparser.one.typedecl_statements.DoublePrecision):\n doublevars = doublevars.union(visitDoublePrecisionStmt(c))\n else:\n newmodified, newpredefined = visitNode(c, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n doubleargs_modified = doublevars_modified.intersection(args)\n doubleargs_predefined = doublevars_predefined.intersection(args)\n printv(\"local double precision variables: %s\"%doublevars.difference(args).__str__())\n printv(\"double precision arguments: %s\"%doublevars.intersection(args).__str__())\n printv(\" - modified: %s\"%(doubleargs_modified.__str__()))\n printv(\" - input: %s\"%(doubleargs_predefined.__str__()))\n printv(\" - unused: %s\"%(doublevars.intersection(args).difference(doubleargs_predefined.union(doubleargs_modified)).__str__()))\n\n # Cloning part: Create a subroutine that has the same body as the original\n # one, but uses the new precision throughout and append _sp to its name\n fclone = unit.tofortran()\n fclone = fclone.replace('DOUBLEPRECISION','REAL')\n if(unit.blocktype == 'function'):\n fclone = re.sub('FUNCTION %s'%unit.name,'FUNCTION %s_sp'%unit.name, fclone, 
flags=re.IGNORECASE)\n else:\n fclone = re.sub('SUBROUTINE %s'%unit.name,'SUBROUTINE %s_sp'%unit.name, fclone, flags=re.IGNORECASE)\n for otherunit in allunits:\n fclone = re.sub('CALL %s\\('%otherunit.name, 'CALL %s_sp('%otherunit.name, fclone, flags=re.IGNORECASE)\n fclone = re.sub('1.0d308', '1.0e38', fclone, flags=re.IGNORECASE)\n fclone = f77linebreaks(fclone)\n file.write(fclone)\n file.write('\\n\\n')\n\n # Wrapper part: Create a subroutine that has the signature of the original\n # one, and performs the down-cast/call/up-cast to the reduced precision\n # subroutine.\n args_str = \", \".join(unit.args)\n args_sp = args_str\n for dv in doublevars:\n args_sp = re.sub(r\"\\b%s\\b\" % dv , '%s_sp'%dv, args_sp)\n decls_sp = list()\n for d in decls:\n if(type(d) == fparser.one.typedecl_statements.DoublePrecision):\n varnames = visitDoublePrecisionStmt(d)\n d_sp = d.item.line.replace('DOUBLE PRECISION','REAL').lower()\n for vn in varnames:\n d_sp = re.sub(r\"\\b%s\\b\" % vn , '%s_sp'%vn, d_sp)\n decls_sp.append(d_sp)\n decls_sp.append(d.item.line)\n decls_sp = \"\\n\".join(decls_sp)\n copyin = set()\n for dm in doubleargs_predefined:\n copyin.add(\"%s_sp = %s\"%(dm,dm))\n copyin = \"\\n\".join(copyin)\n copyout = set()\n for dm in doubleargs_modified:\n copyout.add(\"%s = %s_sp\"%(dm,dm))\n copyout = \"\\n\".join(copyout)\n if(unit.blocktype == 'function'):\n wrapper = \"double precision function %s(%s)\\n%s\\n%s\\n%s = %s_sp(%s)\\n%s\\nreturn\\nend function\"%(unit.name,args_str,decls_sp,copyin,unit.name,unit.name,args_sp,copyout)\n else:\n wrapper = \"subroutine %s(%s)\\n%s\\n%s\\ncall %s_sp(%s)\\n%s\\nend subroutine\"%(unit.name,args_str,decls_sp,copyin,unit.name,args_sp,copyout)\n wrapper = f77linebreaks(textwrap.indent(wrapper,7*' '))\n file.write(wrapper)\n\n# Parse Fortran file\nreader = fparser.common.readfortran.FortranFileReader(filename)\nfp = fparser.one.parsefortran.FortranParser(reader)\nfp.parse()\n\nif(len(fp.block.content) == 0):\n print(\"Warning: Preducer called on empty file %s\"%(filename))\n from shutil import copyfile\n copyfile(filename, filename_preduced)\n exit()\nwith open(filename_preduced,'w') as file:\n for unit in fp.block.content:\n if(unit.blocktype != 'subroutine' and unit.blocktype != 'function'):\n raise Exception(\"Top Unit is neither subroutine nor function\")\n if(unitname == None or unit.name == unitname):\n real4subroutine(unit, file, fp.block.content)\n","sub_path":"preducer.py","file_name":"preducer.py","file_ext":"py","file_size_in_byte":11646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"128197635","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision import models\n#from matplotlib import pyplot as plt\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset, TensorDataset\nBatchSize = 1\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndef recreate_image(im_as_var):\n \"\"\"\n Recreates images from a torch variable, sort of reverse preprocessing\n\n Args:\n im_as_var (torch variable): Image to recreate\n\n returns:\n recreated_im (numpy arr): Recreated image in array\n \"\"\"\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n recreated_im = torch.clone(im_as_var)\n recreated_im = recreated_im.cpu().numpy()[0]\n for c in range(3):\n recreated_im[c] /= reverse_std[c]\n recreated_im[c] -= reverse_mean[c]\n 
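# clip to the valid [0, 1] range before scaling back to 8-bit pixel values\n 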
recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im).transpose(1, 2, 0)\n return recreated_im\n\n\"\"\"\ndef load_imagenet(PATH = \"./data/\"):\n transform = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n dataset = torchvision.datasets.ImageNet(root=PATH, split='val',\n transform=transform)\n return dataset\n\"\"\"\n\ndef predict_torch(model, img):\n \"\"\"\n with torch.no_grad():\n torch_img = torch.from_numpy(img)\n torch_output = model(torch_img)\n \"\"\"\n torch_output = model(img)\n return torch_output\n\n\ndef thundernna_attack(img, target, model, epsilon):\n model.eval()\n img.requires_grad = True\n output = predict_torch(model, img)\n loss = F.nll_loss(output, target)\n model.zero_grad()\n loss.backward()\n data_grad = img.grad.data\n tub = torch.clamp(torch.ones_like(img) / data_grad, -epsilon, epsilon)\n tub = torch.nan_to_num(tub)\n return img + tub\n\n\"\"\"\npretrained_model = models.resnet18(pretrained=True, progress = True).to(device)\npretrained_model.eval()\n\ndataset = load_imagenet()\n\ntest_loader = torch.utils.data.DataLoader(dataset, batch_size=BatchSize, shuffle=False)\nepsilon = 0.3\ncorrect = 0\nfor idx, (img, target) in tqdm(enumerate(test_loader)):\n perturbed_img =thundernna_attack(img, target, pretrained_model, epsilon)\n out = predict_torch(pretrained_model, perturbed_img)\n final_pred = out.data.max(1, keepdim=True)[1]\n correct += final_pred.eq(target.data.view_as(final_pred)).sum()\nfinal_acc = correct/float(len(test_loader.dataset))\nprint(\"Epsilon: {}\\tTest Accuracy = {} / {} = {}\".format(epsilon, correct, len(test_loader.dataset), final_acc))\n\"\"\"\n\n","sub_path":"thundernna.py","file_name":"thundernna.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"561749318","text":"import numpy as np\nimport matplotlib\n\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nfrom featureNormalize import featureNormalize\nfrom computeCostMulti import computeCostMulti\nfrom gradientDescentMulti import gradientDescentMulti\nfrom normalEqn import normalEqn\n\ndef main():\n # ======================= Part 1: Feature Normalization ===============\n print('Loading data ...')\n # Load Data\n data = np.loadtxt('ex1data2.txt', delimiter=',')\n X = data[:, 0:2]\n y = data[:, 2:]\n m = len(y)\n print('First 10 examples from the dataset:')\n for i in range(10):\n print('x =', X[i], ', y =', y[i])\n\n # Scale features and set them to zero mean\n print('Normalizing Features ...')\n X, mu, sigma = featureNormalize(X)\n\n # Add intercept term to X\n X = np.hstack((np.ones((m, 1)), X)) # Add a column of ones to x\n\n # ======================= Part 2: Gradient Descent ========================\n print('Running gradient descent ...')\n # Choose some alpha value\n alpha = 0.01\n num_iters = 400\n\n # Init Theta and Run Gradient Descent\n theta = np.zeros((3, 1))\n theta, J_history = gradientDescentMulti(X, y, theta, alpha, num_iters)\n\n # Plot the convergence graph\n plt.plot(np.arange(1, len(J_history) + 1), J_history, label='Linear regression')\n plt.xlabel('Number of iterations')\n plt.ylabel('Cost J')\n plt.ion()\n plt.show()\n input('Program paused. 
Press enter to continue.')\n\n # Display gradient descent's result\n print('Theta computed from gradient descent:', theta, sep='\\n')\n\n # Estimate the price of a 1650 sq-ft, 3 br house\n x = np.append(np.array([1]), np.array([1650, 3] - mu) / sigma)\n price = np.dot(x, theta)\n print('Predicted price of a 1650 sq-ft, 3 br house (using gradient descent): $', price, sep='')\n\n # ======================= Part 3: Normal Equations ========================\n print('Solving with normal equations...')\n # Load Data\n data = np.loadtxt('ex1data2.txt', delimiter=',')\n X = data[:, 0:2]\n y = data[:, 2:]\n m = len(y)\n\n # Add intercept term to X\n X = np.hstack((np.ones((m, 1)), X)) # Add a column of ones to x\n\n # Calculate the parameters from the normal equation\n theta = normalEqn(X, y)\n\n # Display normal equation's result\n print('Theta computed from the normal equations:', theta, sep='\\n')\n\n # Estimate the price of a 1650 sq-ft, 3 br house\n price = np.dot(np.array([1, 1650, 3]), theta)\n print('Predicted price of a 1650 sq-ft, 3 br house (using normal equations): $', price, sep='')\n\nif __name__ == '__main__':\n main()\n","sub_path":"ml/ex1/ex1_multi.py","file_name":"ex1_multi.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"3121394","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 204. Count Primes\n# Count the number of prime numbers less than a non-negative number, n.\n\n\n# Summary:\n# https://discuss.leetcode.com/topic/14036/fast-python-solution\n\n\nclass Solution(object):\n\n def countPrimes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 2:\n return 0\n\n prime = [True] * n\n prime[:2] = [False, False]\n for base in xrange(2, int((n - 1) ** 0.5) + 1):\n if prime[base]:\n prime[base ** 2::base] = [False] * len(prime[base ** 2::base])\n return sum(prime)\n\nimport unittest\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_demo(self):\n self.assertEqual(1, 1)\n self.assertTrue(True)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestSolution)\nunittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == '__main__':\n print('ok')\n","sub_path":"py/p204.py","file_name":"p204.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"409983206","text":"from multiprocessing import Pool\nimport os, time, random\n\n\ndef worker(msg):\n t_start = time.time()\n print(\"Task %s started, process id %d\" % (msg, os.getpid()))\n time.sleep(random.random()*2)\n t_stop = time.time()\n print(\"Task\", msg, \"finished, took %0.2f seconds\" % (t_stop-t_start))\n\n\ndef main():\n pool = Pool(2) # create a process pool with at most 2 worker processes\n for i in range(5):\n # Pool().apply_async(target to call, (arguments passed to the target))\n # each iteration dispatches the target to an idle worker process\n pool.apply_async(worker, (i,)) # add a task to the pool\n print(\"-----start----\")\n pool.close() # after close(), the pool accepts no new tasks\n # wait for all worker processes to finish; must come after close()\n # without join(), the main process would exit early and the children would die\n pool.join() # the main process does not wait by itself, so block manually\n print(\"-----end-----\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"MultipleTask/process_pool.py","file_name":"process_pool.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"508064754","text":"\ndef add_username_segment():\n if powerline.args.shell == 'bash':\n user_prompt = ' \\\\u '\n elif powerline.args.shell == 'zsh':\n user_prompt = ' %n '\n else:\n import os\n user_prompt = ' %s ' % 
os.getenv('USER')\n\n powerline.append(user_prompt, Color.USERNAME_FG, Color.USERNAME_BG)\n\nadd_username_segment()\n","sub_path":"segments/username.py","file_name":"username.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"471021347","text":"from epi import epimanager as EpiManager\nimport json\nimport sys\n\n#N4D es python2 y LlX Remote Installer es Python3, no puedo trabajar con librerias de EPI. Utilizo este script a modo de puente.\ntry:\n\tepi_operation=sys.argv[1]\n\n\tepi=EpiManager.EpiManager()\n\tepi_to_exec=\"epi.\"+epi_operation\n\n\tepi_solved={}\n\texec(\"epi_solved['val']=%s\"%epi_to_exec)\n\n\tdata=json.dumps(epi_solved['val'])\n\tprint (data)\n\nexcept Exception as e:\n\tprint('False')\n","sub_path":"lliurex-remote-installer-gui.install/usr/share/lliurex-remote-installer/helper_epi.py","file_name":"helper_epi.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"50596179","text":"#!/usr/bin/python2.7\n# Copyright 2012 JatLeGo Inc. All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\n\nfrom solurank.configs import config\nfrom solurank.db.db import db\nfrom solurank.models import model\n\n\nOWNER = 1\nCOMMENT = 2\nFAVORITE = 4\n\n\nclass UserParticipateModel(model.Model):\n\n\n ModelPath = \"user.participate\"\n\n\n @classmethod\n def UpdateUserParticipate(cls, uid=None, pid=None, aid=None, view_time=None,\n update_time=None, reason=None, prevent_uid=None,\n prevent_spread=False, problem_scope=False):\n query = { \"pid\": pid and int(pid) }\n if uid is not None: query[\"uid\"] = int(uid)\n if not problem_scope: query[\"aid\"] = aid and int(aid)\n\n processed = False\n for doc in db[cls.ModelPath].find(query):\n processed = True\n if doc[\"uid\"] == prevent_uid:\n continue\n if view_time is not None:\n doc[\"view_time\"] = view_time\n if update_time is not None:\n doc[\"update_time\"] = update_time\n doc[\"news\"] = (doc[\"view_time\"] and doc[\"update_time\"] and\n doc[\"view_time\"] < doc[\"update_time\"])\n if reason is not None:\n doc[\"reason\"] |= reason\n db[cls.ModelPath].save(doc)\n if not processed:\n doc = query\n doc[\"view_time\"] = view_time or update_time\n doc[\"update_time\"] = update_time\n doc[\"reason\"] = reason\n db[cls.ModelPath].save(doc)\n elif uid is not None and update_time is not None and not prevent_spread:\n cls.UpdateUserParticipate(pid=pid, aid=aid, update_time=update_time,\n prevent_uid=prevent_uid)\n\n\n @classmethod\n def RemoveUserParticipate(cls, uid=None, pid=None, aid=None,\n problem_scope=False, reason=-1):\n query = { \"pid\": pid and int(pid) }\n if uid is not None: query[\"uid\"] = int(uid)\n if not problem_scope: query[\"aid\"] = aid and int(aid)\n\n for doc in db[cls.ModelPath].find(query):\n doc[\"reason\"] &= ~reason\n if not doc[\"reason\"]:\n db[cls.ModelPath].remove({\"_id\": doc[\"_id\"]})\n else:\n db[cls.ModelPath].save(doc)\n\n\n @classmethod\n def GetUserParticipates(cls, uid, news_only=True, page=1):\n from solurank.models import answer\n from solurank.models import problem\n query = {\"uid\": int(uid)}\n if news_only:\n query[\"news\"] = True\n sort = [(\"update_time\", -1), (\"view_time\", -1)]\n else:\n sort = [(\"_id\", -1)]\n messages = []\n for doc in db[cls.ModelPath].find(\n query, sort=sort, skip=(page - 1) * config.SinglePageMessageNumber,\n limit=config.SinglePageMessageNumber):\n if \"pid\" in doc:\n 
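# a participation entry references either a problem (pid) or an answer (aid)\n 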
messages.append(problem.ProblemModel.GetProblem(doc[\"pid\"]))\n else:\n messages.append(answer.AnswerModel.GetAnswer(doc[\"aid\"]))\n return messages\n\n\n @classmethod\n def GetUserParticipateCount(cls, uid, news_only=True):\n query = {\"uid\": int(uid)}\n if news_only:\n query[\"news\"] = True\n return cls.Count(query)\n","sub_path":"solurank/models/user_participate.py","file_name":"user_participate.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"377587653","text":"class Solution:\n def highestRankedKItems(self, grid: List[List[int]], pricing: List[int], start: List[int], k: int) -> List[List[int]]:\n n = len(grid)\n m = len(grid[0])\n arr = []\n b = [[-1] * m for i in range(n)]\n b[start[0]][start[1]] = 0\n q = Deque([(0, start)])\n d = ((0,1), (1,0), (0,-1), (-1,0))\n while q:\n h, (x, y) = q.popleft()\n for dx, dy in d:\n xx = x + dx\n yy = y + dy\n if 0<=xx 0:\n storeProduct = Product.objects.get(id=int(form.cleaned_data['product_id']))\n needAmount = form.cleaned_data['amount']\n\n if storeProduct.stock >= needAmount:\n orderProduct = ProductOrder.objects.create(\n product=storeProduct,\n amount=needAmount\n )\n storeProduct.change_stock(needAmount)\n storeProduct.save()\n orderProduct.save()\n productsArr.append(orderProduct)\n else:\n return redirect(reverse('new_order') + '?error=1&name=' + storeProduct.name + '&amount=' + str(storeProduct.stock))\n\n order = Order.objects.create(\n user=user\n )\n\n price = 0\n for p in productsArr:\n price += (p.amount * p.product.price_brutto)\n order.products.add(p)\n\n order.full_brutto_price = price\n order.save()\n return redirect(reverse('new_order') + '#list')\n\n products = Product.objects.all()\n data_formset = []\n\n orders = Order.objects.filter(user=user)\n\n notRealizedOrders = False\n if user.is_staff:\n notRealizedOrders = Order.objects.filter(realized=False)\n\n for product in products:\n data_formset.append({'product_name': product.name, 'product_id': product.id, 'amount': 0, 'price_per_item': product.price_brutto})\n\n formset = OrderFormSet(initial=data_formset)\n\n errorName = request.GET.get('name', False)\n errorAmount = request.GET.get('amount', 0)\n\n return render(request, 'admin_panel/client/client-form.html', {\n 'user': user,\n 'products': products,\n 'formset': formset,\n 'orders': orders,\n 'notRealizedOrders': notRealizedOrders,\n 'errorName': errorName,\n 'errorAmount': errorAmount\n })\n\n\n@login_required(login_url='/admin/login/?next=/admin-panel/')\ndef accept_order(request, num=\"0\"):\n\n order = Order.objects.get(id=num)\n\n for productOrder in order.products.all():\n productOrder.product.freeze_stock -= productOrder.amount\n productOrder.product.save()\n\n order.realized = True\n order.save()\n\n return redirect(reverse('new_order') + '#list-no-realize')\n","sub_path":"warehouse/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"113629926","text":"#!/usr/bin/env python3\n#\n# Copyright 2019 ROBOTIS CO., LTD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" 
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors: Darby Lim, Pyo\n\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.actions import LogInfo\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.actions import Node\n\n\ndef generate_launch_description():\n # [RECOMMENDED] If you want to handle arguments outside of this launch file,\n # you have to set LaunchConfiguration\n # If you set a plain value below instead, you can't override topic_name from the CLI or other launch files\n # qos_profile = 0\n\n # 0: Default QoSProfile(depth=10)\n qos_profile = LaunchConfiguration('qos_profile', default=0)\n namespace = LaunchConfiguration('ns', default='example')\n\n return LaunchDescription([\n LogInfo(msg=['Executing two publishers with different node names!!']),\n\n # [RECOMMENDED] This func allows you to expose the arguments\n DeclareLaunchArgument(\n 'topic_name',\n default_value='count',\n description='Specifying topic name to publisher'),\n\n DeclareLaunchArgument(\n 'qos_profile',\n default_value=qos_profile,\n description='Specifying qos_profile to publisher. Default QoSProfile(depth=10)'),\n\n DeclareLaunchArgument(\n 'namespace',\n default_value='ns',\n description='Specifying namespace to node'),\n\n Node(\n node_namespace=namespace,\n package='examples_rclcpp',\n node_executable='publisher',\n node_name='first_pub',\n parameters=[{'message': 'First Pub'}],\n arguments=['-q', qos_profile],\n output='screen'),\n\n Node(\n node_namespace=namespace,\n package='examples_rclcpp',\n node_executable='publisher',\n node_name='second_pub',\n parameters=[{'message': 'Second Pub'}],\n arguments=['-q', qos_profile],\n output='screen'),\n ])\n","sub_path":"examples_rclcpp/launch/multiple_node.launch.py","file_name":"multiple_node.launch.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"557416970","text":"import time\nimport datetime\nimport requests\nimport csv\nimport platform\nimport os\nfrom os.path import expanduser\nimport traceback\nimport json\nimport pytz\nimport zlib\nimport numpy as np\nimport tushare as ts\nfrom pytdx.hq import TdxHq_API\nfrom pytdx.params import TDXParams\n\napi = TdxHq_API()\n\nbaseUrl = \"http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=*******&TYPE=k&js=(x)&rtntype=5&isCR=false&authorityType=fa&fsData1513514933723=fsData1513514933723\" #\"http://hq2fls.eastmoney.com/EM_Quote2010PictureApplication/Flash.aspx?Type=CHD&ID=*******&lastnum=300&r=0.6714464421384037\"\n\n\ndef cal_vol(code):\n #int_date = int(dates)\n result = []\n datas = api.get_transaction_data(TDXParams.MARKET_SH if code.startswith('6') else TDXParams.MARKET_SZ,\n code, 0, 2000)\n\n while len(datas) > 0:\n result += datas\n datas = api.get_transaction_data(TDXParams.MARKET_SH if code.startswith('6') else TDXParams.MARKET_SZ,\n code, len(result), 2000)\n\n if len(result) > 0:\n with open('test.txt', mode='a', encoding='utf-8') as f:\n for it in result:\n f.write(\"{0},{1},{2},{3}\\n\".format(code, it['time'], it['price'], it['vol']))\n\n\ndef cal(code):\n print(code)\n cal_vol(code)\n\ndef run():\n while True:\n try:\n print(\"try\")\n codes = ts.get_stock_basics().index.values\n for code in codes:\n try:\n cal(code)\n except Exception as error:\n traceback.print_exc()\n break\n except Exception as error:\n 
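# any uncaught failure (e.g. a network error from tushare/pytdx) lands here: dump the trace, then sleep and retry\n 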
traceback.print_exc()\n\n time.sleep(10)\n\n\ndef main():\n with api.connect('119.147.212.81', 7709):\n run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"xueqiu/jihejinjia.py","file_name":"jihejinjia.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"616419965","text":"from random import randrange\n\nfrom FindMostProbableKmerUsingProfileMatrix import find_most_probable_kmer_using_profile_matrix\nfrom MotifMatrixCount import motif_matrix_count\nfrom MotifMatrixProfile import motif_matrix_profile\nfrom ScoreMotif import score_motif\n\ndnas = ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']\nk = 3\n\nmotifs = []\nfor dna in dnas:\n start = randrange(len(dna) - k)\n motif = dna[start:start+k]\n motifs.append(motif)\n\nbest_motifs = motifs\n\nwhile True:\n counts_matrix = motif_matrix_count(motifs)\n for elem, counts in counts_matrix.items(): # add in pseudocounts\n counts_matrix[elem] = [c + 1 for c in counts]\n profile_matrix = motif_matrix_profile(counts_matrix)\n\n motifs = [find_most_probable_kmer_using_profile_matrix(profile_matrix, dna)[0] for dna in dnas]\n if score_motif(motifs) < score_motif(best_motifs):\n best_motifs = motifs\n else:\n break\n\nfor m in best_motifs:\n    print(m)","sub_path":"docs/data/learn/Bioinformatics/output/ch2_code/src/Stepik.2.7.ExerciseBreak1.py","file_name":"Stepik.2.7.ExerciseBreak1.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"477908227","text":"# -*- coding:utf-8 -*-\n# Copyright (c) 2015, Galaxy Authors. All Rights Reserved\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n#\n# Author: wangtaize@baidu.com\nimport os\nimport time\nimport logging\nfrom SOAPpy import WSDL\nfrom SOAPpy import headerType\nfrom django import shortcuts\nfrom bootstrap import settings\nfrom common import http\nfrom django.contrib import auth\nfrom django.contrib.auth import models\nimport xml.etree.ElementTree as ET\nLOG = logging.getLogger(\"console\")\n\ndef auto_login_required(func):\n def auto_login_wrapper(request, *args, **kwds):\n if request.user.is_authenticated():\n return func(request, *args, **kwds)\n ticket = request.GET.get('ticket',None)\n cas_url = \"%s?service=%s\"%(settings.UUAP_CAS_SERVER, settings.MY_HOST)\n if not ticket:\n LOG.info(\"redirect to %s\"%cas_url)\n return shortcuts.redirect(cas_url)\n else:\n user = auth.authenticate(ticket = ticket, service = settings.MY_HOST)\n if not user:\n return shortcuts.redirect(cas_url)\n else:\n auth.login(request, user)\n return func(request, *args, **kwds)\n return auto_login_wrapper\n\n\n\ndef auth_ticket(ticket, my_url):\n \"\"\"Validate a CAS ticket and return the username on success.\n\n A successful CAS validation response has the form:\n <cas:serviceResponse xmlns:cas=\"http://www.yale.edu/tp/cas\">\n <cas:authenticationSuccess>\n <cas:user>wangtaize</cas:user>\n </cas:authenticationSuccess>\n </cas:serviceResponse>\n \"\"\"\n client = http.HttpClient()\n auth_url = settings.UUAP_VALIDATE_URL\n response = client.do_post(auth_url,\n [('service',my_url),('ticket',ticket)],\n content_to_file = False)\n if response['error'] is None:\n root = ET.fromstring(response['content'])\n success = root.find('{http://www.yale.edu/tp/cas}authenticationSuccess')\n if success is not None:\n user = success.find('{http://www.yale.edu/tp/cas}user')\n if user is not None:\n return user.text\n return None\n\n\nclass UUAPBackend(object):\n def __init__(self):\n server = WSDL.Proxy(settings.UIC_SERVICE)\n hd = headerType(data={\"appKey\":settings.UIC_KEY})\n server.soapproxy.header = hd\n self.server = 
server\n\n def authenticate(self, ticket=None, service=None):\n username = auth_ticket(ticket, service)\n if not username:\n return None\n i_user = list(models.User.objects.filter(username=username))\n if not i_user:\n uic_user = self.server.getUserByUsername(arg0 = username)\n if hasattr(uic_user,'username'):\n new_user = models.User(username = uic_user.username,\n email = uic_user.email)\n new_user.save()\n return new_user\n return None\n return i_user[0]\n def get_user(self, user_id):\n try:\n return models.User.objects.get(pk=user_id)\n except models.User.DoesNotExist:\n return None\n","sub_path":"console/backend/src/common/cas.py","file_name":"cas.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"277273109","text":"from django.test import TestCase\n\nfrom database.models import LaundryShop, Rating\n\n# Create your tests here.\nclass ModelTestCase(TestCase):\n def test_simple_location_property(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n barangay='barangay1', contact_number='12345',\n hours_open='24 hours', days_open='mon - sat')\n expected = 'barangay1, province1'\n self.assertEqual(shop.location, expected)\n\n def test_complete_location_property(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n city='city1', barangay='barangay1', street='street1',\n building='building1', contact_number='12345612',\n hours_open='12hours', days_open='never')\n expected = 'building1, street1, barangay1, city1, province1'\n self.assertEqual(shop.location, expected)\n\n def test_average_rating(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n barangay='barangay1', contact_number='12345',\n hours_open='24 hours', days_open='mon - sat')\n Rating.objects.create(laundry_shop=shop, paws=4)\n Rating.objects.create(laundry_shop=shop, paws=5)\n expected = (4 + 5) / 2.0\n self.assertEqual(shop.average_rating, expected)\n\n def test_average_rating_no_rating(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n barangay='barangay1', contact_number='12345',\n hours_open='24 hours', days_open='mon - sat')\n expected = 0\n self.assertEqual(shop.average_rating, expected)\n","sub_path":"LaundryBear/database/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"521703288","text":"'''\n# Constructs Programming Model\n\n> Software-defined state\n\n![Release](https://github.com/aws/constructs/workflows/Release/badge.svg)\n[![npm version](https://badge.fury.io/js/constructs.svg)](https://badge.fury.io/js/constructs)\n[![PyPI version](https://badge.fury.io/py/constructs.svg)](https://badge.fury.io/py/constructs)\n[![NuGet version](https://badge.fury.io/nu/Constructs.svg)](https://badge.fury.io/nu/Constructs)\n[![Maven Central](https://maven-badges.herokuapp.com/maven-central/software.constructs/constructs/badge.svg?style=plastic)](https://maven-badges.herokuapp.com/maven-central/software.constructs/constructs)\n\n## What are constructs?\n\nConstructs are classes which define a \"piece of system state\". Constructs can be composed together to form higher-level building blocks which represent more complex state.\n\nConstructs are often used to represent the *desired state* of cloud applications. 
For example, in the AWS CDK, which is used to define the desired state for AWS infrastructure using CloudFormation, the lowest-level construct represents a *resource definition* in a CloudFormation template. These resources are composed to represent higher-level logical units of a cloud application, etc.\n\n## Contributing\n\nThis project has adopted the [Amazon Open Source Code of\nConduct](https://aws.github.io/code-of-conduct).\n\nWe welcome community contributions and pull requests. See our [contribution\nguide](./CONTRIBUTING.md) for more information on how to report issues, set up a\ndevelopment environment and submit code.\n\n## License\n\nThis project is distributed under the [Apache License, Version 2.0](./LICENSE).\n'''\nimport abc\nimport builtins\nimport datetime\nimport enum\nimport typing\n\nimport jsii\nimport publication\nimport typing_extensions\n\nfrom ._jsii import *\n\n\nclass ConstructMetadata(\n metaclass=jsii.JSIIMeta,\n jsii_type=\"constructs.ConstructMetadata\",\n):\n '''Metadata keys used by constructs.'''\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"DISABLE_STACK_TRACE_IN_METADATA\")\n def DISABLE_STACK_TRACE_IN_METADATA(cls) -> builtins.str:\n '''If set in the construct's context, omits stack traces from metadata entries.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"DISABLE_STACK_TRACE_IN_METADATA\"))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"ERROR_METADATA_KEY\")\n def ERROR_METADATA_KEY(cls) -> builtins.str:\n '''Context type for error level messages.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"ERROR_METADATA_KEY\"))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"INFO_METADATA_KEY\")\n def INFO_METADATA_KEY(cls) -> builtins.str:\n '''Context type for info level messages.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"INFO_METADATA_KEY\"))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"WARNING_METADATA_KEY\")\n def WARNING_METADATA_KEY(cls) -> builtins.str:\n '''Context type for warning level messages.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"WARNING_METADATA_KEY\"))\n\n\n@jsii.data_type(\n jsii_type=\"constructs.ConstructOptions\",\n jsii_struct_bases=[],\n name_mapping={\"node_factory\": \"nodeFactory\"},\n)\nclass ConstructOptions:\n def __init__(self, *, node_factory: typing.Optional[\"INodeFactory\"] = None) -> None:\n '''Options for creating constructs.\n\n :param node_factory: A factory for attaching ``Node``s to the construct. 
Default: - the default ``Node`` is associated\n '''\n self._values: typing.Dict[str, typing.Any] = {}\n if node_factory is not None:\n self._values[\"node_factory\"] = node_factory\n\n @builtins.property\n def node_factory(self) -> typing.Optional[\"INodeFactory\"]:\n '''A factory for attaching ``Node``s to the construct.\n\n :default: - the default ``Node`` is associated\n '''\n result = self._values.get(\"node_factory\")\n return typing.cast(typing.Optional[\"INodeFactory\"], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"ConstructOptions(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.enum(jsii_type=\"constructs.ConstructOrder\")\nclass ConstructOrder(enum.Enum):\n '''In what order to return constructs.'''\n\n PREORDER = \"PREORDER\"\n '''Depth-first, pre-order.'''\n POSTORDER = \"POSTORDER\"\n '''Depth-first, post-order (leaf nodes first).'''\n\n\n@jsii.data_type(\n jsii_type=\"constructs.Dependency\",\n jsii_struct_bases=[],\n name_mapping={\"source\": \"source\", \"target\": \"target\"},\n)\nclass Dependency:\n def __init__(self, *, source: \"IConstruct\", target: \"IConstruct\") -> None:\n '''A single dependency.\n\n :param source: Source of the dependency.\n :param target: Target of the dependency.\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"source\": source,\n \"target\": target,\n }\n\n @builtins.property\n def source(self) -> \"IConstruct\":\n '''Source of the dependency.'''\n result = self._values.get(\"source\")\n assert result is not None, \"Required property 'source' is missing\"\n return typing.cast(\"IConstruct\", result)\n\n @builtins.property\n def target(self) -> \"IConstruct\":\n '''Target of the dependency.'''\n result = self._values.get(\"target\")\n assert result is not None, \"Required property 'target' is missing\"\n return typing.cast(\"IConstruct\", result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"Dependency(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.interface(jsii_type=\"constructs.IAspect\")\nclass IAspect(typing_extensions.Protocol):\n '''Represents an Aspect.'''\n\n @jsii.member(jsii_name=\"visit\")\n def visit(self, node: \"IConstruct\") -> None:\n '''All aspects can visit an IConstruct.\n\n :param node: -\n '''\n ...\n\n\nclass _IAspectProxy:\n '''Represents an Aspect.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.IAspect\"\n\n @jsii.member(jsii_name=\"visit\")\n def visit(self, node: \"IConstruct\") -> None:\n '''All aspects can visit an IConstruct.\n\n :param node: -\n '''\n return typing.cast(None, jsii.invoke(self, \"visit\", [node]))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, IAspect).__jsii_proxy_class__ = lambda : _IAspectProxy\n\n\n@jsii.interface(jsii_type=\"constructs.IConstruct\")\nclass IConstruct(typing_extensions.Protocol):\n '''Represents a construct.'''\n\n pass\n\n\nclass _IConstructProxy:\n '''Represents a construct.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.IConstruct\"\n pass\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" 
function to the interface\ntyping.cast(typing.Any, IConstruct).__jsii_proxy_class__ = lambda : _IConstructProxy\n\n\n@jsii.interface(jsii_type=\"constructs.INodeFactory\")\nclass INodeFactory(typing_extensions.Protocol):\n '''A factory for attaching ``Node``s to the construct.'''\n\n @jsii.member(jsii_name=\"createNode\")\n def create_node(\n self,\n host: \"Construct\",\n scope: IConstruct,\n id: builtins.str,\n ) -> \"Node\":\n '''Returns a new ``Node`` associated with ``host``.\n\n :param host: the associated construct.\n :param scope: the construct's scope (parent).\n :param id: the construct id.\n '''\n ...\n\n\nclass _INodeFactoryProxy:\n '''A factory for attaching ``Node``s to the construct.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.INodeFactory\"\n\n @jsii.member(jsii_name=\"createNode\")\n def create_node(\n self,\n host: \"Construct\",\n scope: IConstruct,\n id: builtins.str,\n ) -> \"Node\":\n '''Returns a new ``Node`` associated with ``host``.\n\n :param host: the associated construct.\n :param scope: the construct's scope (parent).\n :param id: the construct id.\n '''\n return typing.cast(\"Node\", jsii.invoke(self, \"createNode\", [host, scope, id]))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, INodeFactory).__jsii_proxy_class__ = lambda : _INodeFactoryProxy\n\n\n@jsii.interface(jsii_type=\"constructs.ISynthesisSession\")\nclass ISynthesisSession(typing_extensions.Protocol):\n '''Represents a single session of synthesis.\n\n Passed into ``construct.onSynthesize()`` methods.\n '''\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"outdir\")\n def outdir(self) -> builtins.str:\n '''The output directory for this synthesis session.'''\n ...\n\n\nclass _ISynthesisSessionProxy:\n '''Represents a single session of synthesis.\n\n Passed into ``construct.onSynthesize()`` methods.\n '''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.ISynthesisSession\"\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"outdir\")\n def outdir(self) -> builtins.str:\n '''The output directory for this synthesis session.'''\n return typing.cast(builtins.str, jsii.get(self, \"outdir\"))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, ISynthesisSession).__jsii_proxy_class__ = lambda : _ISynthesisSessionProxy\n\n\n@jsii.interface(jsii_type=\"constructs.IValidation\")\nclass IValidation(typing_extensions.Protocol):\n '''Implement this interface in order for the construct to be able to validate itself.'''\n\n @jsii.member(jsii_name=\"validate\")\n def validate(self) -> typing.List[builtins.str]:\n '''Validate the current construct.\n\n This method can be implemented by derived constructs in order to perform\n validation logic. It is called on all constructs before synthesis.\n\n :return: An array of validation error messages, or an empty array if the construct is valid.\n '''\n ...\n\n\nclass _IValidationProxy:\n '''Implement this interface in order for the construct to be able to validate itself.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.IValidation\"\n\n @jsii.member(jsii_name=\"validate\")\n def validate(self) -> typing.List[builtins.str]:\n '''Validate the current construct.\n\n This method can be implemented by derived constructs in order to perform\n validation logic. 
It is called on all constructs before synthesis.\n\n :return: An array of validation error messages, or an empty array if the construct is valid.\n '''\n return typing.cast(typing.List[builtins.str], jsii.invoke(self, \"validate\", []))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, IValidation).__jsii_proxy_class__ = lambda : _IValidationProxy\n\n\n@jsii.data_type(\n jsii_type=\"constructs.MetadataEntry\",\n jsii_struct_bases=[],\n name_mapping={\"data\": \"data\", \"type\": \"type\", \"trace\": \"trace\"},\n)\nclass MetadataEntry:\n def __init__(\n self,\n *,\n data: typing.Any,\n type: builtins.str,\n trace: typing.Optional[typing.Sequence[builtins.str]] = None,\n ) -> None:\n '''An entry in the construct metadata table.\n\n :param data: The data.\n :param type: The metadata entry type.\n :param trace: Stack trace. Can be omitted by setting the context key ``ConstructMetadata.DISABLE_STACK_TRACE_IN_METADATA`` to 1. Default: - no trace information\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"data\": data,\n \"type\": type,\n }\n if trace is not None:\n self._values[\"trace\"] = trace\n\n @builtins.property\n def data(self) -> typing.Any:\n '''The data.'''\n result = self._values.get(\"data\")\n assert result is not None, \"Required property 'data' is missing\"\n return typing.cast(typing.Any, result)\n\n @builtins.property\n def type(self) -> builtins.str:\n '''The metadata entry type.'''\n result = self._values.get(\"type\")\n assert result is not None, \"Required property 'type' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def trace(self) -> typing.Optional[typing.List[builtins.str]]:\n '''Stack trace.\n\n Can be omitted by setting the context key\n ``ConstructMetadata.DISABLE_STACK_TRACE_IN_METADATA`` to 1.\n\n :default: - no trace information\n '''\n result = self._values.get(\"trace\")\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"MetadataEntry(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\nclass Node(metaclass=jsii.JSIIMeta, jsii_type=\"constructs.Node\"):\n '''Represents the construct node in the scope tree.'''\n\n def __init__(self, host: \"Construct\", scope: IConstruct, id: builtins.str) -> None:\n '''\n :param host: -\n :param scope: -\n :param id: -\n '''\n jsii.create(Node, self, [host, scope, id])\n\n @jsii.member(jsii_name=\"of\") # type: ignore[misc]\n @builtins.classmethod\n def of(cls, construct: IConstruct) -> \"Node\":\n '''Returns the node associated with a construct.\n\n :param construct: the construct.\n '''\n return typing.cast(\"Node\", jsii.sinvoke(cls, \"of\", [construct]))\n\n @jsii.member(jsii_name=\"addDependency\")\n def add_dependency(self, *dependencies: IConstruct) -> None:\n '''Add an ordering dependency on another Construct.\n\n All constructs in the dependency's scope will be deployed before any\n construct in this construct's scope.\n\n :param dependencies: -\n '''\n return typing.cast(None, jsii.invoke(self, \"addDependency\", [*dependencies]))\n\n @jsii.member(jsii_name=\"addError\")\n def add_error(self, message: builtins.str) -> None:\n '''Adds an { \"error\": <message> } metadata entry to this construct.\n\n The toolkit will fail 
synthesis when errors are reported.\n\n :param message: The error message.\n '''\n return typing.cast(None, jsii.invoke(self, \"addError\", [message]))\n\n @jsii.member(jsii_name=\"addInfo\")\n def add_info(self, message: builtins.str) -> None:\n '''Adds an { \"info\": <message> } metadata entry to this construct.\n\n The toolkit will display the info message when apps are synthesized.\n\n :param message: The info message.\n '''\n return typing.cast(None, jsii.invoke(self, \"addInfo\", [message]))\n\n @jsii.member(jsii_name=\"addMetadata\")\n def add_metadata(\n self,\n type: builtins.str,\n data: typing.Any,\n from_function: typing.Any = None,\n ) -> None:\n '''Adds a metadata entry to this construct.\n\n Entries are arbitrary values and will also include a stack trace to allow tracing back to\n the code location for when the entry was added. It can be used, for example, to include source\n mapping in CloudFormation templates to improve diagnostics.\n\n :param type: a string denoting the type of metadata.\n :param data: the value of the metadata (can be a Token). If null/undefined, metadata will not be added.\n :param from_function: a function under which to restrict the metadata entry's stack trace (defaults to this.addMetadata).\n '''\n return typing.cast(None, jsii.invoke(self, \"addMetadata\", [type, data, from_function]))\n\n @jsii.member(jsii_name=\"addValidation\")\n def add_validation(self, validation: IValidation) -> None:\n '''Adds a validation to this construct.\n\n When ``node.validate()`` is called, the ``validate()`` method will be called on\n all validations and all errors will be returned.\n\n :param validation: -\n '''\n return typing.cast(None, jsii.invoke(self, \"addValidation\", [validation]))\n\n @jsii.member(jsii_name=\"addWarning\")\n def add_warning(self, message: builtins.str) -> None:\n '''Adds a { \"warning\": <message> } metadata entry to this construct.\n\n The toolkit will display the warning when an app is synthesized, or fail\n if run in --strict mode.\n\n :param message: The warning message.\n '''\n return typing.cast(None, jsii.invoke(self, \"addWarning\", [message]))\n\n @jsii.member(jsii_name=\"applyAspect\")\n def apply_aspect(self, aspect: IAspect) -> None:\n '''Applies the aspect to this Constructs node.\n\n :param aspect: -\n '''\n return typing.cast(None, jsii.invoke(self, \"applyAspect\", [aspect]))\n\n @jsii.member(jsii_name=\"findAll\")\n def find_all(\n self,\n order: typing.Optional[ConstructOrder] = None,\n ) -> typing.List[IConstruct]:\n '''Return this construct and all of its children in the given order.\n\n :param order: -\n '''\n return typing.cast(typing.List[IConstruct], jsii.invoke(self, \"findAll\", [order]))\n\n @jsii.member(jsii_name=\"findChild\")\n def find_child(self, id: builtins.str) -> IConstruct:\n '''Return a direct child by id.\n\n Throws an error if the child is not found.\n\n :param id: Identifier of direct child.\n\n :return: Child with the given id.\n '''\n return typing.cast(IConstruct, jsii.invoke(self, \"findChild\", [id]))\n\n @jsii.member(jsii_name=\"prepare\")\n def prepare(self) -> None:\n '''Invokes \"prepare\" on all constructs (depth-first, post-order) in the tree under ``node``.'''\n return typing.cast(None, jsii.invoke(self, \"prepare\", []))\n\n @jsii.member(jsii_name=\"setContext\")\n def set_context(self, key: builtins.str, value: typing.Any) -> None:\n '''This can be used to set contextual values.\n\n Context must be set before any children are added, since children may consult context info during construction.\n If the key 
already exists, it will be overridden.\n\n :param key: The context key.\n :param value: The context value.\n '''\n return typing.cast(None, jsii.invoke(self, \"setContext\", [key, value]))\n\n @jsii.member(jsii_name=\"synthesize\")\n def synthesize(\n self,\n *,\n outdir: builtins.str,\n session_context: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,\n skip_validation: typing.Optional[builtins.bool] = None,\n ) -> None:\n '''Synthesizes a CloudAssembly from a construct tree.\n\n :param outdir: The output directory into which to synthesize the cloud assembly. Default: - creates a temporary directory\n :param session_context: Additional context passed into the synthesis session object when ``construct.synth`` is called. Default: - no additional context is passed to ``onSynthesize``\n :param skip_validation: Whether synthesis should skip the validation phase. Default: false\n '''\n options = SynthesisOptions(\n outdir=outdir,\n session_context=session_context,\n skip_validation=skip_validation,\n )\n\n return typing.cast(None, jsii.invoke(self, \"synthesize\", [options]))\n\n @jsii.member(jsii_name=\"tryFindChild\")\n def try_find_child(self, id: builtins.str) -> typing.Optional[IConstruct]:\n '''Return a direct child by id, or undefined.\n\n :param id: Identifier of direct child.\n\n :return: the child if found, or undefined\n '''\n return typing.cast(typing.Optional[IConstruct], jsii.invoke(self, \"tryFindChild\", [id]))\n\n @jsii.member(jsii_name=\"tryGetContext\")\n def try_get_context(self, key: builtins.str) -> typing.Any:\n '''Retrieves a value from tree context.\n\n Context is usually initialized at the root, but can be overridden at any point in the tree.\n\n :param key: The context key.\n\n :return: The context value or ``undefined`` if there is no context value for the key.\n '''\n return typing.cast(typing.Any, jsii.invoke(self, \"tryGetContext\", [key]))\n\n @jsii.member(jsii_name=\"tryRemoveChild\")\n def try_remove_child(self, child_name: builtins.str) -> builtins.bool:\n '''(experimental) Remove the child with the given name, if present.\n\n :param child_name: -\n\n :return: Whether a child with the given name was deleted.\n\n :stability: experimental\n '''\n return typing.cast(builtins.bool, jsii.invoke(self, \"tryRemoveChild\", [child_name]))\n\n @jsii.member(jsii_name=\"validate\")\n def validate(self) -> typing.List[\"ValidationError\"]:\n '''Validates tree (depth-first, pre-order) and returns the list of all errors.\n\n An empty list indicates that there are no errors.\n '''\n return typing.cast(typing.List[\"ValidationError\"], jsii.invoke(self, \"validate\", []))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"PATH_SEP\")\n def PATH_SEP(cls) -> builtins.str:\n '''Separator used to delimit construct path components.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"PATH_SEP\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"addr\")\n def addr(self) -> builtins.str:\n '''Returns an opaque tree-unique address for this construct.\n\n Addresses are 42-character hexadecimal strings. They begin with \"c8\"\n followed by 40 lowercase hexadecimal characters (0-9a-f).\n\n Addresses are calculated using a SHA-1 of the components of the construct\n path.\n\n To enable refactorings of construct trees, constructs with the ID ``Default``\n will be excluded from the calculation. 
In those cases constructs in the\n same tree may have the same address.\n\n Example::\n\n # Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826\n c83a2846e506bcc5f10682b564084bca2d275709ee\n '''\n return typing.cast(builtins.str, jsii.get(self, \"addr\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"children\")\n def children(self) -> typing.List[IConstruct]:\n '''All direct children of this construct.'''\n return typing.cast(typing.List[IConstruct], jsii.get(self, \"children\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"dependencies\")\n def dependencies(self) -> typing.List[Dependency]:\n '''Return all dependencies registered on this node or any of its children.'''\n return typing.cast(typing.List[Dependency], jsii.get(self, \"dependencies\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"id\")\n def id(self) -> builtins.str:\n '''The id of this construct within the current scope.\n\n This is a scope-unique id. To obtain an app-unique id for this construct, use ``uniqueId``.\n '''\n return typing.cast(builtins.str, jsii.get(self, \"id\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"locked\")\n def locked(self) -> builtins.bool:\n '''Returns true if this construct or the scopes in which it is defined are locked.'''\n return typing.cast(builtins.bool, jsii.get(self, \"locked\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"metadata\")\n def metadata(self) -> typing.List[MetadataEntry]:\n '''An immutable array of metadata objects associated with this construct.\n\n This can be used, for example, to implement support for deprecation notices, source mapping, etc.\n '''\n return typing.cast(typing.List[MetadataEntry], jsii.get(self, \"metadata\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"path\")\n def path(self) -> builtins.str:\n '''The full, absolute path of this construct in the tree.\n\n Components are separated by '/'.\n '''\n return typing.cast(builtins.str, jsii.get(self, \"path\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"root\")\n def root(self) -> IConstruct:\n '''Returns the root of the construct tree.\n\n :return: The root of the construct tree.\n '''\n return typing.cast(IConstruct, jsii.get(self, \"root\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"scopes\")\n def scopes(self) -> typing.List[IConstruct]:\n '''All parent scopes of this construct.\n\n :return:\n\n a list of parent scopes. 
The last element in the list will always\n be the current construct and the first element will be the root of the\n tree.\n '''\n return typing.cast(typing.List[IConstruct], jsii.get(self, \"scopes\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"uniqueId\")\n def unique_id(self) -> builtins.str:\n '''(deprecated) A tree-global unique alphanumeric identifier for this construct.\n\n Includes\n all components of the tree.\n\n :deprecated:\n\n please avoid using this property and use ``addr`` to form unique names.\n This algorithm uses MD5, which is not FIPS-compliant and also excludes the\n identity of the root construct from the calculation.\n\n :stability: deprecated\n '''\n return typing.cast(builtins.str, jsii.get(self, \"uniqueId\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"scope\")\n def scope(self) -> typing.Optional[IConstruct]:\n '''Returns the scope in which this construct is defined.\n\n The value is ``undefined`` at the root of the construct scope tree.\n '''\n return typing.cast(typing.Optional[IConstruct], jsii.get(self, \"scope\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"defaultChild\")\n def default_child(self) -> typing.Optional[IConstruct]:\n '''Returns the child construct that has the id ``Default`` or ``Resource``.\n\n This is usually the construct that provides the bulk of the underlying functionality.\n Useful for modifications of the underlying construct that are not available at the higher levels.\n Override the defaultChild property.\n\n This should only be used in the cases where the correct\n default child is not named 'Resource' or 'Default' as it\n should be.\n\n If you set this to undefined, the default behavior of finding\n the child named 'Resource' or 'Default' will be used.\n\n :return: a construct or undefined if there is no default child\n\n :throws: if there is more than one child\n '''\n return typing.cast(typing.Optional[IConstruct], jsii.get(self, \"defaultChild\"))\n\n @default_child.setter\n def default_child(self, value: typing.Optional[IConstruct]) -> None:\n jsii.set(self, \"defaultChild\", value)\n\n\n@jsii.data_type(\n jsii_type=\"constructs.SynthesisOptions\",\n jsii_struct_bases=[],\n name_mapping={\n \"outdir\": \"outdir\",\n \"session_context\": \"sessionContext\",\n \"skip_validation\": \"skipValidation\",\n },\n)\nclass SynthesisOptions:\n def __init__(\n self,\n *,\n outdir: builtins.str,\n session_context: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,\n skip_validation: typing.Optional[builtins.bool] = None,\n ) -> None:\n '''Options for synthesis.\n\n :param outdir: The output directory into which to synthesize the cloud assembly. Default: - creates a temporary directory\n :param session_context: Additional context passed into the synthesis session object when ``construct.synth`` is called. Default: - no additional context is passed to ``onSynthesize``\n :param skip_validation: Whether synthesis should skip the validation phase. 
Default: false\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"outdir\": outdir,\n }\n if session_context is not None:\n self._values[\"session_context\"] = session_context\n if skip_validation is not None:\n self._values[\"skip_validation\"] = skip_validation\n\n @builtins.property\n def outdir(self) -> builtins.str:\n '''The output directory into which to synthesize the cloud assembly.\n\n :default: - creates a temporary directory\n '''\n result = self._values.get(\"outdir\")\n assert result is not None, \"Required property 'outdir' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def session_context(\n self,\n ) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:\n '''Additional context passed into the synthesis session object when ``construct.synth`` is called.\n\n :default: - no additional context is passed to ``onSynthesize``\n '''\n result = self._values.get(\"session_context\")\n return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)\n\n @builtins.property\n def skip_validation(self) -> typing.Optional[builtins.bool]:\n '''Whether synthesis should skip the validation phase.\n\n :default: false\n '''\n result = self._values.get(\"skip_validation\")\n return typing.cast(typing.Optional[builtins.bool], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"SynthesisOptions(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.data_type(\n jsii_type=\"constructs.ValidationError\",\n jsii_struct_bases=[],\n name_mapping={\"message\": \"message\", \"source\": \"source\"},\n)\nclass ValidationError:\n def __init__(self, *, message: builtins.str, source: \"Construct\") -> None:\n '''An error returned during the validation phase.\n\n :param message: The error message.\n :param source: The construct which emitted the error.\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"message\": message,\n \"source\": source,\n }\n\n @builtins.property\n def message(self) -> builtins.str:\n '''The error message.'''\n result = self._values.get(\"message\")\n assert result is not None, \"Required property 'message' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def source(self) -> \"Construct\":\n '''The construct which emitted the error.'''\n result = self._values.get(\"source\")\n assert result is not None, \"Required property 'source' is missing\"\n return typing.cast(\"Construct\", result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"ValidationError(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.implements(IConstruct)\nclass Construct(metaclass=jsii.JSIIMeta, jsii_type=\"constructs.Construct\"):\n '''Represents the building block of the construct graph.\n\n All constructs besides the root construct must be created within the scope of\n another construct.\n '''\n\n def __init__(\n self,\n scope: \"Construct\",\n id: builtins.str,\n *,\n node_factory: typing.Optional[INodeFactory] = None,\n ) -> None:\n '''Creates a new construct node.\n\n :param scope: The scope in which to define this 
construct.\n :param id: The scoped construct ID. Must be unique amongst siblings. If the ID includes a path separator (``/``), then it will be replaced by double dash ``--``.\n :param node_factory: A factory for attaching ``Node``s to the construct. Default: - the default ``Node`` is associated\n '''\n options = ConstructOptions(node_factory=node_factory)\n\n jsii.create(Construct, self, [scope, id, options])\n\n @jsii.member(jsii_name=\"onPrepare\")\n def _on_prepare(self) -> None:\n '''Perform final modifications before synthesis.\n\n This method can be implemented by derived constructs in order to perform\n final changes before synthesis. prepare() will be called after child\n constructs have been prepared.\n\n This is an advanced framework feature. Only use this if you\n understand the implications.\n '''\n return typing.cast(None, jsii.invoke(self, \"onPrepare\", []))\n\n @jsii.member(jsii_name=\"onSynthesize\")\n def _on_synthesize(self, session: ISynthesisSession) -> None:\n '''Allows this construct to emit artifacts into the cloud assembly during synthesis.\n\n This method is usually implemented by framework-level constructs such as ``Stack`` and ``Asset``\n as they participate in synthesizing the cloud assembly.\n\n :param session: The synthesis session.\n '''\n return typing.cast(None, jsii.invoke(self, \"onSynthesize\", [session]))\n\n @jsii.member(jsii_name=\"onValidate\")\n def _on_validate(self) -> typing.List[builtins.str]:\n '''(deprecated) Validate the current construct.\n\n This method can be implemented by derived constructs in order to perform\n validation logic. It is called on all constructs before synthesis.\n\n :return: An array of validation error messages, or an empty array if the construct is valid.\n\n :deprecated:\n\n use ``Node.addValidation()`` to subscribe validation functions on this construct\n instead of overriding this method.\n\n :stability: deprecated\n '''\n return typing.cast(typing.List[builtins.str], jsii.invoke(self, \"onValidate\", []))\n\n @jsii.member(jsii_name=\"toString\")\n def to_string(self) -> builtins.str:\n '''Returns a string representation of this construct.'''\n return typing.cast(builtins.str, jsii.invoke(self, \"toString\", []))\n\n\n__all__ = [\n \"Construct\",\n \"ConstructMetadata\",\n \"ConstructOptions\",\n \"ConstructOrder\",\n \"Dependency\",\n \"IAspect\",\n \"IConstruct\",\n \"INodeFactory\",\n \"ISynthesisSession\",\n \"IValidation\",\n \"MetadataEntry\",\n \"Node\",\n \"SynthesisOptions\",\n \"ValidationError\",\n]\n\npublication.publish()\n","sub_path":".env/lib/python3.8/site-packages/constructs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":35031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"81149720","text":"\"\"\"\nAuthor - Noah Kruss\n\nFile that contains the analysis class with the functions for analyzing the\naural metamaterial system\n\"\"\"\n\n#---------------IMPORT STATEMENTS------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport time\nimport os\nimport shutil\nimport statistics as stats\nimport scipy.signal\nimport scipy.special\nfrom scipy.stats import norm\nimport pandas as pd\nimport pywt\n\nimport gsd.pygsd as GSD_pygsd\nimport gsd.hoomd as GSD_hoomd\n\nfrom scipy.optimize import curve_fit\nfrom astropy.modeling import models, fitting\n\n#---------------Helper Function------------------------------\ndef gauss_function(x, a, x0, sigma):\n return 
a*np.exp(-(x-x0)**2/(2*sigma**2))\n\n#---------------Analysis Class-------------------------------\nclass Aural_Analysis():\n\n def __init__(self):\n\n self.particle_data = None\n\n self.dt = None\n self.m = None\n self.N = None\n\n def read_data(self, fname: str):\n \"\"\"\n Function to read the recorded data from a simulation from a gsd file\n\n Inputs:\n fname - (str) name of gsd file containing simulation data\n \"\"\"\n\n #open data file\n f = GSD_pygsd.GSDFile(open(fname, 'rb'))\n t = GSD_hoomd.HOOMDTrajectory(f)\n\n self.particle_data = t\n self.N = len(t[0].particles.position)\n\n def fourier_plot(self, x_data: list, y_data: list, store_loc = None, plot_title = \"Fourier_plot\"):\n \"\"\"\n Function for plotting and returning the Fourier data of a given input data set\n \"\"\"\n\n x_data = np.array(x_data)\n y_data = np.array(y_data)\n\n fft = np.fft.fft(y_data)\n fft[0] = 0\n\n N = len(x_data)\n T = x_data[1] - x_data[0] # sampling interval\n freq = np.fft.fftfreq(N, d=T)\n\n plt.plot(abs(freq), abs(fft.real))\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Frequency [1 / wave-length]\")\n\n #find peak of graph (compare the real-magnitude values that are actually plotted)\n decreasing = True\n peak = None\n for value_i in range(len(x_data)):\n amp = abs(fft.real[value_i])\n frequency = abs(freq[value_i])\n\n if peak == None:\n peak = (frequency, amp)\n elif decreasing == True and peak[1] < amp:\n decreasing = False\n elif decreasing == True:\n peak = (frequency, amp)\n elif decreasing == False and amp > peak[1]:\n peak = (frequency, amp)\n\n plt.title(f\"{plot_title}\\nPeak at {peak[0]} Hz\")\n\n plt.savefig(plot_title)\n if store_loc != None:\n shutil.move(f\"{plot_title}.png\", store_loc)\n plt.clf()\n\n return (abs(freq), abs(fft.real))\n\n def wave_packet(self, dt, store_loc = None, plot_title = \"Waterfall plot\", num_samples = 8, target_times = None):\n \"\"\"\n Function for generating and saving a waterfall plot of the system\n standing wave over the course of the simulation along with performing\n a Fourier transform at each targeted time snapshot\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated plot (if left\n as None then waterfall plot will be shown not saved)\n plot_title - (str) name for the waterfall plot\n num_samples - (int) option for the number of time snapshots to display\n in the waterfall plot\n target_times - (list) list of specific timesteps to display in the\n waterfall plot. 
Will override the num_samples property\n if set to non-None\n \"\"\"\n\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if time_step % sample_period == 0:\n target_index.append(time_step_i)\n\n target_index.append(len(self.particle_data) - 1)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n\n #-----------------------------------------------------\n #create and save waterfall plot of wave packet\n g1 = plt.figure(1)\n plt.xlabel('particle position')\n plt.ylabel('time')\n plt.yticks([])\n plt.title(plot_title, fontsize = 7)\n\n shift = 0\n packet_amplitude_list = []\n fit_amp_list = []\n fit_std_list = []\n for i in target_index:\n time_step = i * 500\n #print(f\"---{time_step}---\")\n p_list = []\n offsets = []\n abs_offsets = []\n amplitude = 0\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n\n #check for particle 0 looping around due to boundary conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n position += shift\n abs_position += shift\n\n #update amplitude\n if abs(position - shift) > amplitude:\n amplitude = abs(position - shift)\n\n p_list.append(p_index)\n offsets.append(position)\n abs_offsets.append(abs_position)\n packet_amplitude_list.append(amplitude)\n\n plt.plot(p_list, offsets, color = \"b\")\n # plt.plot(p_list, abs_offsets, color = \"g\")\n\n #shift -= .5\n shift -= .175\n\n if store_loc != None:\n plt.savefig(\"Gausian_plot\")\n shutil.move(\"Gausian_plot.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n df = pd.DataFrame(packet_amplitude_list)\n df.to_excel(\"amplitudes.xlsx\")\n shutil.move(\"amplitudes.xlsx\", store_loc)\n\n #get Fourier plot and data for each sampled time\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gausian = []\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n #check for particle 0 looping around due to boundary conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gausian.append(abs(position))\n\n #plt.plot(p_list, gausian)\n fourier_data = self.fourier_plot(p_list, offsets, store_loc = store_loc, plot_title = f\"Fourier_plot_time={int(time_step * .0001)}\")\n fourier_data_list.append(fourier_data)\n\n\n def gaussian_fitting(self, dt, store_loc = None, num_samples = 8, target_times = None):\n \"\"\"\n Function for generating and saving an xlsx file of the Gaussian fit\n parameters for the system standing wave over the course of the simulation\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated data file\n (if left as None then the plot will be shown not saved)\n num_samples - (int) option for the number of time snapshots to analyse\n target_times 
- (list) list of specific timesteps to analyse. Will\n override the num_samples property if set to non-None\n \"\"\"\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if (time_step % sample_period == 0) and (time_step * dt * w > 200):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * dt * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w * dt)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w * dt)\n\n #-----------------------------------------------------\n g1 = plt.figure(1)\n\n shift = 0\n packet_amplitude_list = []\n fit_amp_list = []\n fit_std_list = []\n fit_cent_list = []\n unwrap_counter = 0\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n abs_offsets = []\n amplitude = 0\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n\n #check for particle 0 looping around due to boundary conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n position += shift\n abs_position += shift\n\n #update amplitude\n if abs(position - shift) > amplitude:\n amplitude = abs(position - shift)\n\n p_list.append(p_index)\n offsets.append(position)\n abs_offsets.append(abs_position)\n packet_amplitude_list.append(amplitude)\n\n\n #set up the Gaussian fit\n gausian_left_index = 0\n gausian_right_index = 0\n if abs_offsets[0] == shift:\n for i in range(0, len(abs_offsets)):\n if abs_offsets[i] - shift != 0:\n gausian_left_index = i\n break\n for i in range(len(abs_offsets) - 1, 0, -1):\n if abs_offsets[i] - shift != 0:\n gausian_right_index = i\n break\n else:\n count = 0\n for i in range(0, len(abs_offsets)):\n if abs_offsets[i] - shift == 0:\n count += 1\n else:\n gausian_right_index += 1\n count = 0\n if count >= 20:\n break\n count = 0\n gausian_left_index = len(abs_offsets) - 1\n for i in range(len(abs_offsets) - 1, 0, -1):\n if abs_offsets[i] - shift == 0:\n count += 1\n else:\n gausian_left_index -= 1\n count = 0\n if count >= 20:\n break\n\n #create a list of the amplitudes of the Gaussian without the periodic boundary conditions\n gausian_removed_periodic = []\n if gausian_left_index < gausian_right_index:\n for i in range(gausian_left_index, gausian_right_index, 1):\n gausian_removed_periodic.append(abs_offsets[i] - shift)\n sigma1 = len(gausian_removed_periodic) / 4\n else:\n for i in range(gausian_left_index, len(abs_offsets), 1):\n gausian_removed_periodic.append(abs_offsets[i] - shift)\n for i in range(gausian_right_index):\n gausian_removed_periodic.append(abs_offsets[i] - shift)\n sigma1 = len(gausian_removed_periodic) / 4\n\n while(len(gausian_removed_periodic) != len(abs_offsets)):\n gausian_removed_periodic.append(0)\n\n amp1 = max(gausian_removed_periodic)\n cen1 = gausian_removed_periodic.index(amp1)\n\n popt_gauss, pcov_gauss = scipy.optimize.curve_fit(gauss_function, p_list, 
gausian_removed_periodic, p0=[amp1, cen1, sigma1])\n fit_amp_list.append(popt_gauss[0])\n fit_std_list.append(popt_gauss[2])\n\n fit_center = abs_offsets.index(amp1 + shift) + (unwrap_counter * self.N)\n if len(fit_cent_list) > 4:\n if(fit_cent_list[-4] > fit_center and\n fit_cent_list[-3] > fit_center and\n fit_cent_list[-2] > fit_center and\n fit_cent_list[-1] > fit_center):\n #print(f\"wrapping on {abs_offsets.index(amp1 + shift) + (unwrap_counter * self.N)}, prev = {fit_cent_list[-2]}, unwrap_counter = {unwrap_counter}\")\n unwrap_counter += 1;\n fit_cent_list.append(abs_offsets.index(amp1 + shift) + (unwrap_counter * self.N))\n\n shift -= .175\n\n if store_loc != None:\n df = pd.DataFrame({\"Dimensionless Time\": dimensionless_time, \"Amplitude\": fit_amp_list, \"STD\": fit_std_list, \"Center\": fit_cent_list})\n df.to_excel(\"gaussian_fit_parameters.xlsx\")\n shutil.move(\"gaussian_fit_parameters.xlsx\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n #-------------Collect Fit Error------------------------\n\n #get fit parameter errors\n amp_perc_error_list = []\n std_perc_error_list = []\n for i in range(len(fit_amp_list)):\n amp_perc_error_list.append((fit_amp_list[i] - fit_amp_list[0]) / fit_amp_list[0])\n std_perc_error_list.append((fit_std_list[i] - fit_std_list[0]) / fit_std_list[0] * 100)\n\n #create error plots of fit parameters\n g1 = plt.figure(1)\n plt.xlabel('Time')\n plt.ylabel('Fit Amplitude Factor')\n plt.title(\"Gaussian Fit Amplitude Factor\")\n plt.plot(dimensionless_time, amp_perc_error_list)\n if store_loc != None:\n plt.savefig(\"Gaussian_Fit_Amp\")\n shutil.move(\"Gaussian_Fit_Amp.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n g1 = plt.figure(1)\n plt.xlabel('Time')\n plt.ylabel('Fit STD Percent Error')\n plt.title(\"Gaussian Fit STD Error\")\n plt.plot(dimensionless_time, std_perc_error_list)\n if store_loc != None:\n plt.savefig(\"Gaussian_Fit_STD\")\n shutil.move(\"Gaussian_Fit_STD.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n g1 = plt.figure(1)\n plt.xlabel('Time')\n plt.ylabel('Fit Center Position')\n plt.title(\"Gaussian Fit Center Position\")\n plt.plot(dimensionless_time, fit_cent_list)\n if store_loc != None:\n plt.savefig(\"Gaussian_Fit_Center\")\n shutil.move(\"Gaussian_Fit_Center.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n m, b = np.polyfit(dimensionless_time, fit_cent_list, 1)\n return(dimensionless_time, fit_cent_list, m, b, amp_perc_error_list)\n\n def peak_error(self, dt, store_loc = None, plot_title = \"Mean Peak Error\", num_samples = 10, target_times = None):\n \"\"\"\n Function for calculating the error in the position of the peaks of the\n fourier transform of the system standing wave over the course of the\n simulation\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated data file\n (if left as None then the plot will be shown not saved)\n plot_title - (str) name for the generated plot\n num_samples - (int) option for the number of time shapshots to analyse\n target_times - (list) list of specific timesteps to analyse. 
\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n target_index = []\n if target_times is None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if time_step % sample_period == 0:\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * dt * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w * dt)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w * dt)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gaussian = []\n for p_index in range(self.N):\n equilibrium_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibrium_pos\n\n #check for particle 0 looping around due to boundary conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibrium_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gaussian.append(abs(position))\n\n #plt.plot(p_list, gaussian)\n fourier_data = self.fourier_plot(p_list, offsets)\n fourier_data_list.append(fourier_data)\n\n #-----------------------------------------------------------------------\n mean_peak_error_list = []\n peaks_initial = scipy.signal.find_peaks(fourier_data_list[0][1], height=.01)\n peak_pos_initial = fourier_data_list[0][0][peaks_initial[0]]\n shift = 15\n for fourier_data in fourier_data_list:\n diff_sum = 0\n\n for i in range(len(peak_pos_initial)):\n target_peak_index = peaks_initial[0][i]\n target_zone = fourier_data[1][target_peak_index - shift: target_peak_index + shift]\n #print(\"target zone = \", target_zone)\n peak = np.amax(target_zone)\n #print(\"peak = \", peak)\n peak_index = np.where(target_zone == peak)\n target_freq_zone = fourier_data[0][target_peak_index - shift:]\n peak_pos = target_freq_zone[peak_index[0][0]]\n #print(peak_pos_initial[i], peak_pos)\n\n diff_sum += (peak_pos_initial[i] - peak_pos) ** 2\n #normalise by the number of peaks, not the length of the find_peaks tuple\n mean_peak_error_list.append(math.sqrt(diff_sum) / len(peak_pos_initial))\n #print()\n\n error_plot_title = \"Fourier Peaks Mean Error - (over course of Simulation)\"\n plt.plot(dimensionless_time, mean_peak_error_list)\n plt.title(error_plot_title)\n plt.xlabel(\"Dimensionless Time\")\n plt.ylabel(\"Error\")\n plt.savefig(error_plot_title)\n if store_loc is not None:\n shutil.move(f\"{error_plot_title}.png\", store_loc)\n plt.clf()\n\n #save peak error to spreadsheet (only move it when a destination was given)\n df = pd.DataFrame(mean_peak_error_list)\n df.to_excel(\"Peak_pos.xlsx\")\n if store_loc is not None:\n shutil.move(\"Peak_pos.xlsx\", store_loc)\n\n def normalized_error(self, dt, store_loc = None, plot_title = \"Normalized Amplitude Error\", num_samples = 1000, target_times = None):\n \"\"\"\n Function for calculating the error in the amplitude of the system\n standing wave over the course of the simulation\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated data file\n (if left as None then the plot will be shown not saved)\n plot_title - (str) name for the generated plot\n num_samples - (int) option for the number of time snapshots to analyse\n target_times - (list) list of specific timesteps to analyse. Will\n override the num_samples property if set to non-None\n \"\"\"
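(Aside: `peak_error` above locates peaks once on the initial spectrum with `scipy.signal.find_peaks` and then re-locates each one inside a window of ±15 samples in later spectra. A self-contained sketch of that windowed re-location on synthetic data:)

```python
import numpy as np
import scipy.signal

freqs = np.linspace(0.0, 1.0, 500)
initial = np.exp(-(freqs - 0.30) ** 2 / 1e-4)   # synthetic spectrum, peak near 0.30
later = np.exp(-(freqs - 0.31) ** 2 / 1e-4)     # same peak, drifted to 0.31

peaks, _ = scipy.signal.find_peaks(initial, height=0.01)
shift = 15
for idx in peaks:
    window = later[idx - shift: idx + shift]     # search near the original index
    new_idx = idx - shift + int(np.argmax(window))
    print(freqs[idx], '->', freqs[new_idx])      # prints roughly 0.30 -> 0.31
```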
\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n target_index = []\n if target_times is None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n #if time_step % sample_period == 0:\n if (time_step % sample_period == 0) and (time_step * dt * w > 200):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * dt * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w * dt)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w * dt)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gaussian = []\n for p_index in range(self.N):\n equilibrium_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibrium_pos\n\n #check for particle 0 looping around due to boundary conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibrium_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gaussian.append(abs(position))\n\n #plt.plot(p_list, gaussian)\n fourier_data = self.fourier_plot(p_list, offsets)\n fourier_data_list.append(fourier_data)\n\n #-----------------------------------------------------------------------\n fractional_error_list = []\n initial_data = fourier_data_list[0]\n #initial_data = fourier_data_list[16]\n max_amp = max(initial_data[1])\n for fourier_data_i in range(len(fourier_data_list)):\n fourier_data = fourier_data_list[fourier_data_i]\n diff_sum = 0\n timestep = fourier_data_i * 500\n for i in range(len(fourier_data[0])):\n diff_sum += abs(initial_data[1][i] - fourier_data[1][i]) / max_amp\n\n # amp = fourier_data[1][i] / (math.e ** (0.0025 * timestep))\n # diff_sum += abs(initial_data[1][i] - amp) / max_amp\n\n fractional_error_list.append(diff_sum / len(fourier_data[0]))\n\n m, b = np.polyfit(dimensionless_time, fractional_error_list, 1)\n fit = []\n for time in dimensionless_time:\n fit.append(m*time + b)\n print(m)\n plt.plot(dimensionless_time, fit)\n plt.legend([f\"m = {m}, b = {b}\"])\n\n error_plot_title = \"Fourier Amplitude Normalized Difference - (over course of Simulation)\"\n plt.plot(dimensionless_time, fractional_error_list)\n plt.title(plot_title, fontsize = 7)\n plt.savefig(error_plot_title)\n if store_loc is not None:\n shutil.move(f\"{error_plot_title}.png\", store_loc)\n\n df = pd.DataFrame(fractional_error_list)\n df.to_excel(\"normalized_RMSE.xlsx\")\n if store_loc is not None:\n shutil.move(\"normalized_RMSE.xlsx\", store_loc)\n plt.clf()\n\n\n return (fractional_error_list, dimensionless_time)\n\n def integrety_test(self, dt, num_samples = 10, target_times = None):\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n #pull out the indexes of the target times\n target_index = []\n if target_times is None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if time_step % sample_period == 0:\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gaussian = []\n for p_index in range(self.N):\n equilibrium_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibrium_pos\n\n #check for particle 0 looping around due to boundary conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibrium_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gaussian.append(abs(position))\n\n fourier_data = self.fourier_plot(p_list, offsets)\n fourier_data_list.append(fourier_data)\n\n #-----------------------------------------------------------------------\n #set up for peak analysis\n mean_peak_error_list = []\n peaks_initial = scipy.signal.find_peaks(fourier_data_list[0][1], height=.01)\n peak_pos_initial = fourier_data_list[0][0][peaks_initial[0]]\n shift = 15\n\n #set up for amplitude analysis\n mean_amplitude_error_list = []\n initial_data = fourier_data_list[0]\n\n for fourier_data in fourier_data_list:\n peak_diff_sum_sqrd = 0\n amp_diff_sum_sqrd = 0\n\n #sum sqrd errors on peak positions for current timeframe\n for i in range(len(peak_pos_initial)):\n target_peak_index = peaks_initial[0][i]\n target_zone = fourier_data[1][target_peak_index - shift: target_peak_index + shift]\n peak = np.amax(target_zone)\n peak_index = np.where(target_zone == peak)\n target_freq_zone = fourier_data[0][target_peak_index - shift:]\n peak_pos = target_freq_zone[peak_index[0][0]]\n\n peak_diff_sum_sqrd += (peak_pos_initial[i] - peak_pos) ** 2\n\n #sum sqrd errors on fourier amplitude for current timeframe\n for i in range(len(fourier_data[0])):\n amp_diff_sum_sqrd += ((initial_data[1][i] - fourier_data[1][i]) ** 2)\n\n #add difference values to appropriate lists\n mean_amplitude_error_list.append(math.sqrt(amp_diff_sum_sqrd / len(fourier_data[0])))\n mean_peak_error_list.append(math.sqrt(peak_diff_sum_sqrd) / len(peak_pos_initial))\n\n return (mean_amplitude_error_list, mean_peak_error_list, dimensionless_time)\n","sub_path":"analysis_files/aural_analysis.py","file_name":"aural_analysis.py","file_ext":"py","file_size_in_byte":28798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"374005518","text":"STR2NUM = dict(\n zero=0,\n one=1,\n two=2,\n three=3,\n four=4,\n five=5,\n six=6,\n seven=7,\n eight=8,\n nine=9\n )\n\ndef solution(s):\n s = s.lower()\n for literal, num in STR2NUM.items():\n s = s.replace(literal, str(num))\n return int(s)\n\nif __name__ == '__main__':\n print(solution(\"one4seveneight\"))","sub_path":"kakao-2021-internship/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"84123380","text":"OFFRE_CHOICES = (\r\n ('neuf', 'Neuf'),\r\n ('louer', 'A louer'),\r\n ('vente', 'En vente'),\r\n)\r\n\r\nTYPE_CHOICES = (\r\n ('appartement', 
'Appartement'),\r\n ('studio', 'Studio'),\r\n ('villas', 'Villas'),\r\n ('duplex', 'Duplex'),\r\n)\r\n\r\nNATURE_CHOICES = (\r\n ('ACD', 'ACD'),\r\n ('pas ACD', 'Non ACD'),\r\n)\r\n\r\nVILLE_CHOICES = (\r\n (\"Abengourou\", \"Abengourou\"),\r\n (\"Abidjan\", \"Abidjan\"),\r\n (\"Aboisso\", \"Aboisso\"),\r\n (\"Abongoua\", \"Abongoua\"),\r\n (\"Adaou\", \"Adaou\"),\r\n (\"Adiaké\", \"Adiaké\"),\r\n (\"Adjouan\", \"Adjouan\"),\r\n (\"Adzopé\", \"Adzopé\"),\r\n (\"Agbaou\", \"Agbaou\"),\r\n (\"Agboville\", \"Agboville\"),\r\n (\"Agnibilékrou\", \"Agnibilékrou\"),\r\n (\"Ahouanou\", \"Ahouanou\"),\r\n (\"Ahoutoué\", \"Ahoutoué\"),\r\n (\"Akouédo\", \"Akouédo\"),\r\n (\"Akoupé\", \"Akoupé\"),\r\n (\"Alépé\", \"Alépé\"),\r\n (\"Alounamouénou\", \"Alounamouénou\"),\r\n (\"Ananda (Daoukro)\", \"Ananda (Daoukro)\"), \r\n (\"Ananda (Daoukro)\", \"Ananda (Daoukro)\"), \r\n (\"Annépé\", \"Annépé\"), \r\n (\"Anyama\", \"Anyama\"), \r\n (\"Arrah (Côte-d'Ivoire)\", \"Arrah (Côte-d'Ivoire)\"),\r\n (\"Assaoufoué\", \"Assaoufoué\"), \r\n (\"Attiégouakro\", \"Attiégouakro\"),\r\n (\"Attoutou A\", \"Attoutou A\"),\r\n (\"Azaguié\", \"Azaguié\"),\r\n (\"Bacanda\", \"Bacanda\"),\r\n (\"Badikaha\", \"Badikaha\"),\r\n (\"Bako (Côte d'Ivoire)\", \"Bako (Côte d'Ivoire)\"), \r\n (\"Baléko\", \"Baléko\"),\r\n (\"Bambalouma\", \"Bambalouma\"), \r\n (\"Bandakagni-Sokoura\", \"Bandakagni-Sokoura\"),\r\n (\"Bangolo\", \"Bangolo\"),\r\n (\"Bangoua (Côte d'Ivoire)\", \"Bangoua (Côte d'Ivoire)\"),\r\n (\"Banneu\", \"Banneu\"), \r\n (\"Batéguédia II\", \"Batéguédia II\"),\r\n (\"Bazra-Nattis\", \"Bazra-Nattis\"),\r\n (\"Béoumi\", \"Béoumi\"), \r\n (\"Biankouma\", \"Biankouma\"),\r\n (\"Bingerville\", \"Bingerville\"), \r\n (\"Bongouanou\", \"Bongouanou\"), \r\n (\"Bonoua (Côte d'Ivoire)\", \"Bonoua (Côte d'Ivoire)\"),\r\n (\"Bouaflé\", \"Bouaflé\"), \r\n (\"Bouandougou\", \"Bouandougou\"), \r\n (\"Bouna (Côte d'Ivoire)\", \"Bouna (Côte d'Ivoire)\"),\r\n (\"Boundiali (ville)\", \"Boundiali (ville)\"), \r\n (\"Céchi\", \"Céchi\"), \r\n (\"Dabéko\", \"Dabéko\"), \r\n (\"Dabou\", \"Dabou\"),\r\n (\"Dabouyo\", \"Dabouyo\"), \r\n (\"Dah-Zagna\", \"Dah-Zagna\"), \r\n (\"Dakpadou\", \"Dakpadou\"), \r\n (\"Daleu\", \"Daleu\"),\r\n (\"Daloa\", \"Daloa\"),\r\n (\"Danané\", \"Danané\"),\r\n (\"Danguira\", \"Danguira\"),\r\n (\"Daoukro\", \"Daoukro\"),\r\n (\"Diabo (Côte d'Ivoire)\", \"Diabo (Côte d'Ivoire)\"),\r\n)\r\n\r\n\"\"\"\r\n\"Diamarakro\", \r\n\"Diangobo (Yakassé-Attobrou)\", \r\n\"Diawala\", \r\n\"Diboké\", \r\n\"Didiévi\", \r\n\"Diéouzon\", \r\n\"Digbeugnoa\", \r\n\"Dignago\", \r\n\"Dikouehipalegnoa\", \r\n\"Dimbokro\", \r\n\"Diogo (Boundiali)\", \r\n\"Dioulatiédougou\", \r\n\"Divo\", \r\n\"Djouroutou\", \r\n\"Doba (Côte d'Ivoire)\", \r\n\"Dogbo\", \r\n\"Doké\", \r\n\"Domaboué\", \r\n\"Domangbeu\", \r\n\"Douasso\", \r\n\"Doubé\", \r\n\"Doudoukou\", \r\n\"Duékoué\", \r\n\"Ebounou\", \r\n\"Elima (Côte d'Ivoire)\", \r\n\"Ery-Macouguié\", \r\n\"Fadiadougou\", \r\n\"Fahandougou\", \r\n\"Fahani\", \r\n\"Fakaha\", \r\n\"Fala (Côte d'Ivoire)\", \r\n\"Famienkro\", \r\n\"Faradiani\", \r\n\"Farandougou\", \r\n\"Ferkessédougou\", \r\n\"Fodio\", \r\n\"Fonondara\", \r\n\"Fresco (Côte d'Ivoire)\", \r\n\"Gabiadji\", \r\n\"Gagnoa\", \r\n\"Ganaoni\", \r\n\"Gbambiasso\", \r\n\"Gbangbégouiné\", \r\n\"Gbangbégouiné-Yati\", \r\n\"Gbékékro\", \r\n\"Gbéléban\", \r\n\"Gbémou\", \r\n\"Gbogui\", \r\n\"Gbon (Côte d'Ivoire)\", \r\n\"Gbongaha\", \r\n\"Gnaliepa\", \r\n\"Gnangnon\", \r\n\"Gohouo-Zagna\", \r\n\"Gomon\", \r\n\"Gonaté\", \r\n\"Gouessesso\", 
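(A note on the `*_CHOICES` tuples above: they follow Django's `(stored_value, human_label)` convention. A hedged sketch of how a model in this `immobilier` app might consume them; the model and field names are hypothetical, not from the record:)

```python
from django.db import models

class Annonce(models.Model):  # hypothetical model name
    offre = models.CharField(max_length=10, choices=OFFRE_CHOICES)
    type_bien = models.CharField(max_length=20, choices=TYPE_CHOICES)
    nature = models.CharField(max_length=10, choices=NATURE_CHOICES)
    ville = models.CharField(max_length=50, choices=VILLE_CHOICES)

# Django then provides label lookups for free:
# annonce.get_offre_display()  -> 'A louer' when offre == 'louer'
```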
\r\n\"Gouiné\", \r\n\"Goulia\", \r\n\"Grabo (Côte d'Ivoire)\", \r\n\"Grand-Bassam\", \r\n\"Grand-Béréby\", \r\n\"Grand-Lahou\", \r\n\"Grand-Morié\", \r\n\"Grand-Zattry\", \r\n\"Guessabo\", \r\n\"Guéyo\", \r\n\"Guiendé\", \r\n\"Guiglo\", \r\n\"Guinglo-Tahouaké\", \r\n\"Issia\", \r\n\"Jacqueville (Côte d'Ivoire)\", \r\n\"Kahin-Zarabaon\", \r\n\"Kanakono\", \r\n\"Kaniéné\", \r\n\"Kanitélégué\", \r\n\"Kanoroba\", \r\n\"Kantélégué\", \r\n\"Kanzra\", \r\n\"Kaouara\", \r\n\"Karakoro (Côte d’Ivoire)\", \r\n\"Karakpo\", \r\n\"Kasséré\", \r\n\"Katiéré\", \r\n\"Katiola\", \r\n\"Kimbirila-Sud\", \r\n\"Koboko\", \r\n\"Kodiokofi\", \r\n\"Kofiplé\", \r\n\"Kolia\", \r\n\"Kong (Côte d'Ivoire)\", \r\n\"Kongasso\", \r\n\"Koni (Côte d'Ivoire)\", \r\n\"Konolo\", \r\n\"Korhogo\", \r\n\"Koro (Côte d'Ivoire)\", \r\n\"Kossou\", \r\n\"Kouakro\", \r\n\"Kouan-Houle\", \r\n\"Kouassi-Blékro\", \r\n\"Koukourandoumi\", \r\n\"Kounoumon\", \r\n\"Kouto\", \r\n\"Kpata\", \r\n\"Lahou-Kpanda\", \r\n\"Lakota (Côte d'Ivoire)\", \r\n\"Languibonou\", \r\n\"Lataha\", \r\n\"Liliyo\", \r\n\"Lodala\", \r\n\"Logoualé\", \r\n\"Logouhi\", \r\n\"Lohouré\", \r\n\"Lokoligou\", \r\n\"Lolobo (Yamoussoukro)\", \r\n\"Lomokankro\", \r\n\"Loplé\", \r\n\"Lossingué\", \r\n\"Lotono\", \r\n\"Loupala\", \r\n\"Loupougo\", \r\n\"Loviguié\", \r\n\"M'bahiakro\",\r\n\"\"\"","sub_path":"immobilier/ville.py","file_name":"ville.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"99739001","text":"import numpy as np\nfrom numpy import genfromtxt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport sys\n\nfig = plt.figure()\ncsv_data = genfromtxt('./data/converted/{0}'.format(sys.argv[1]), delimiter=',')\n\n# data = csv_data[1:, 2:17]\ndata = csv_data\n\n# data is a n x 15 array\nrows, cols = data.shape\n\nvis_rows = cols * 3\nvis_data = np.random.rand(vis_rows, cols)\n\nim = plt.imshow(vis_data, animated=True)\ni = 0\n\nani = None\npause = False\n\n# allows you to pause/play by clicking on figure\ndef onClick(event):\n global pause\n pause ^= True\n\n if pause:\n ani.event_source.stop()\n else:\n ani.event_source.start()\n\n# animation update function\ndef updatefig(*args):\n global vis_data, data, i, vis_rows, im, rows, ani, pause\n try:\n vis_data[:,:] = data[i:i + vis_rows, :]\n except:\n plt.close(fig)\n sys.exit()\n\n im.set_array(vis_data)\n i += 1\n return im,\n\nfig.canvas.mpl_connect('button_press_event', onClick)\nani = animation.FuncAnimation(fig, updatefig, interval=10, blit=True)\nplt.show()","sub_path":"visualize_threads.py","file_name":"visualize_threads.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"340751311","text":"from copy import copy\n\ndef printarray(x):\n print(\" \".join([str(v) for v in x]))\n\ndef bubblesort(x):\n flag = True\n while flag:\n flag = False\n for j in range(len(x)-1, 0, -1):\n if x[j][1] < x[j-1][1]:\n x[j], x[j-1] = x[j-1], x[j]\n flag = True\n\ndef selectsort(x):\n for i in range(0, len(x)):\n minj = i\n for j in range(i, len(x)):\n if x[j][1] < x[minj][1]:\n minj = j\n if minj != i:\n x[minj], x[i] = x[i], x[minj]\n\n# ALDS1_2_C: 安定ソート\ndef main():\n input() # skip scan n\n x = [v for v in input().split(\" \")]\n y = copy(x)\n bubblesort(x)\n selectsort(y)\n printarray(x)\n print(\"Stable\")\n printarray(y)\n if x == y:\n print(\"Stable\")\n else:\n print(\"Not stable\")\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"aoj/src/ALDS1_2_C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"235602113","text":"'''\nWrite a program that prints the series of even numbers in reverse\norder, starting from the limit entered by the user.\n'''\n\n\nnum=int(input(\"Enter the number:\"))\n\nfor x in range(num,-1,-1):\n\n if(x % 2 == 0):\n\n print(x,end=\" \")\n\n\nprint()\n\n","sub_path":"Day 17/17-DailyFlash_Solutions/01_Feb_Solutions_Four/Python/Program2.py","file_name":"Program2.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"32012871","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import JsonResponse\nfrom models.models import EntradaCC, Camara, Compra, Tienda, SalidaCC\nfrom django.db import models,connection\nfrom django.db.models import Count,Sum\nimport datetime\nimport psycopg2 as psy, pandas as pd\n# Create your views here.\n\nclass Graph(View):\n\n def get(self,request, *args, **kwargs):\n conn=psy.connect(host = 'localhost', user= 'postgres', password ='postgres', dbname= 'Sambil')\n cur = conn.cursor()\n template_name = 'graph.html'\n number =[0,1,2,3]\n result = []\n sql='''SELECT count(*) FROM public.models_entradacc WHERE fkcamara_id=1;'''\n df = pd.read_sql_query(sql, conn)\n for index, row in df.iterrows():\n result.append(row['count'])\n print(result)\n return render(request,template_name, {} )\n\ndef get_data(request, *args, **kwargs):\n #variables :\n conn=psy.connect(host = 'localhost', user= 'postgres', password ='postgres', dbname= 'Sambil')\n cur = conn.cursor()\n cuenta = []\n camara = []\n SumVentas = []\n tienda = []\n horaEntrada1 = []\n horaEntrada2 = []\n horaEntrada3 = []\n horaSalida1 = []\n horaSalida2 = []\n horaSalida3 = []\n cantPersonas = []\n\n #queries to count people per camera\n\n q1 = EntradaCC.objects.values('fkcamara__id').annotate(cuenta=Count('id'))\n for l in q1:\n cuenta.append(l['cuenta'])\n camara.append(l['fkcamara__id'])\n print(camara)\n\n # query to show how many people entered the mall with and without a phone\n\n q2 = EntradaCC.objects.values('id').filter(macadd__isnull=True).count()\n q3 = EntradaCC.objects.values('id').filter(macadd__isnull=False).count()\n\n # queries to bucket the ages of people entering the mall, split by whether or not they have a MAC address\n \n edad1 = EntradaCC.objects.values('edad').filter(edad__range=(0,9), macadd__isnull=True).count()\n edad3 = EntradaCC.objects.values('edad').filter(edad__range=(10,20), macadd__isnull=True).count()\n edad5 = EntradaCC.objects.values('edad').filter(edad__range=(21,30), macadd__isnull=True).count()\n edad7 = EntradaCC.objects.values('edad').filter(edad__range=(31,40), macadd__isnull=True).count()\n edad9 = EntradaCC.objects.values('edad').filter(edad__range=(41,50), macadd__isnull=True).count()\n edad11 = EntradaCC.objects.values('edad').filter(edad__range=(51,60), macadd__isnull=True).count()\n edad13 = EntradaCC.objects.values('edad').filter(edad__range=(61,70), macadd__isnull=True).count()\n edad15 = EntradaCC.objects.values('edad').filter(edad__range=(71,80), macadd__isnull=True).count()\n edad17 = EntradaCC.objects.values('edad').filter(edad__range=(81,90), macadd__isnull=True).count()\n \n edad2 = EntradaCC.objects.values('edad').filter(edad__range=(0,9), macadd__isnull=False).count()\n edad4 = EntradaCC.objects.values('edad').filter(edad__range=(10,20), macadd__isnull=False).count()\n edad6 = EntradaCC.objects.values('edad').filter(edad__range=(21,30), macadd__isnull=False).count()\n edad8 = EntradaCC.objects.values('edad').filter(edad__range=(31,40), macadd__isnull=False).count()\n edad10 = EntradaCC.objects.values('edad').filter(edad__range=(41,50), macadd__isnull=False).count()\n edad12 = EntradaCC.objects.values('edad').filter(edad__range=(51,60), macadd__isnull=False).count()\n edad14 = EntradaCC.objects.values('edad').filter(edad__range=(61,70), macadd__isnull=False).count()\n edad16 = EntradaCC.objects.values('edad').filter(edad__range=(71,80), macadd__isnull=False).count()\n edad18 = EntradaCC.objects.values('edad').filter(edad__range=(81,90), macadd__isnull=False).count()\n \n # queries that show the stores that sell the most [top 5]\n qventas = Compra.objects.values('fktienda_id__nombre').annotate(ventas=Sum('total'))[:5]\n for l in qventas:\n tienda.append(l['fktienda_id__nombre'])\n SumVentas.append(l['ventas'])\n \n # queries to visualize people entering, per entrance and hour\n\n sql='''SELECT count(id) FROM public.models_entradacc WHERE fkcamara_id=1 AND date_part('month',registroe)=date_part('month',current_date)\n GROUP BY date_part('hour',registroe);'''\n df = pd.read_sql_query(sql, conn)\n for index, row in df.iterrows():\n horaEntrada1.append(int(row['count']))\n\n sql1='''SELECT count(id) FROM public.models_entradacc WHERE fkcamara_id=2 AND date_part('month',registroe)=date_part('month',current_date)\n GROUP BY date_part('hour',registroe);'''\n df = pd.read_sql_query(sql1, conn)\n for index, row in df.iterrows():\n horaEntrada2.append(int(row['count']))\n\n sql2='''SELECT count(id) FROM public.models_entradacc WHERE fkcamara_id=3 AND date_part('month',registroe)=date_part('month',current_date)\n GROUP BY date_part('hour',registroe);'''\n df = pd.read_sql_query(sql2, conn)\n for index, row in df.iterrows():\n horaEntrada3.append(int(row['count']))\n \n # queries to see the outflow of people per hour\n\n sql3='''SELECT count(id) FROM public.models_salidacc WHERE fkcamara_id=1 AND date_part('month',registros)=date_part('month',current_date)\n GROUP BY date_part('hour',registros);'''\n df = pd.read_sql_query(sql3, conn)\n for index, row in df.iterrows():\n horaSalida1.append(int(row['count']))\n\n sql4='''SELECT count(id) FROM public.models_salidacc WHERE fkcamara_id=2 AND date_part('month',registros)=date_part('month',current_date)\n GROUP BY date_part('hour',registros);'''\n df = pd.read_sql_query(sql4, conn)\n for index, row in df.iterrows():\n horaSalida2.append(int(row['count']))\n\n sql5='''SELECT count(id) FROM public.models_salidacc WHERE fkcamara_id=3 AND date_part('month',registros)=date_part('month',current_date)\n GROUP BY date_part('hour',registros);'''\n df = pd.read_sql_query(sql5, conn)\n for index, row in df.iterrows():\n horaSalida3.append(int(row['count']))\n\n\n\n data = {\n \"labels\": camara,\n \"default\":cuenta,\n \"default2\":[q2,q3],\n \"labels2\":['Personas sin telefono','Personas con telefono'],\n \"default3\":[edad1,edad3,edad5, edad7, edad9,edad11,edad13,edad15,edad17],\n \"default4\":[edad2,edad4,edad6,edad8,edad10,edad12,edad14,edad16,edad18],\n \"labels4\": tienda,\n \"default5\": SumVentas,\n \"labelsHora\":['8:00 am','9:00 am','10:00 am','11:00 am', '12:00 pm', '13:00 pm','14:00 pm','15:00 pm','16:00 
pm','17:00 pm','18:00 pm', '19:00 pm','20:00 pm', '21:00 pm', '22:00 pm', '23:00 pm'],\n \"default6\":horaEntrada1,\n \"default7\":horaEntrada2,\n \"default8\":horaEntrada3,\n \"default9\":horaSalida1,\n \"default10\":horaSalida2,\n \"default11\":horaSalida3,\n\n }\n return JsonResponse(data)","sub_path":"models/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"23497491","text":"from keyword_relation.models import Keyword_Pages\n\nfrom django.shortcuts import render\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.conf.urls.static import static\nimport os\nfrom django.conf import settings\nimport numpy as np\nimport random\nfrom gensim.models import Word2Vec\nimport wikipedia\nimport networkx as nx\nimport random\nimport matplotlib.pyplot as plt\nfrom gensim.models import Word2Vec\nfrom nltk.corpus import wordnet\nimport pandas\n\nimport wikipediaapi\n\nwiki_wiki = wikipediaapi.Wikipedia('en')\n\ndef run():\n\n print(\"START\")\n\n\n # Get all keywords\n all_keywords = Keyword_Pages.objects.all()\n\n # count\n count = 0\n\n # iterate over all keywords\n for keyword in all_keywords:\n\n count += 1\n if count % 1000 == 0:\n print(count)\n\n # get model datamodels\n if keyword.google_graph_embedding == \"\":\n model_path = os.path.join(settings.STATIC_ROOT,'../models/related_keywords_graph_embedding.model')\n model = Word2Vec.load(model_path)\n\n related_main = find_similar_keywords(model, keyword.keyword)\n\n keyword.google_graph_embedding = related_main\n keyword.save()\n\n # get wiki path\n if keyword.wiki_path == \"\":\n print(\"Finding path for\", keyword.keyword)\n visited = set()\n wiki_paths = wiki_bfs(keyword.keyword, \"Glossary of computer science\", visited, 0, [], 100)\n wiki_path = get_probability_score(wiki_paths)\n\n\n if wiki_path == \"N/A\":\n wiki_path_str = wiki_path\n else:\n first = True\n wiki_path_str = \"\"\n for val in wiki_path:\n if first:\n wiki_path_str += val\n first = False\n else:\n wiki_path_str += \" --> \" + val\n \n print(wiki_path_str)\n\n keyword.wiki_path = wiki_path_str\n keyword.save()\n\n\ndef wiki_bfs(source, target, visited, num_found, found_paths, iter_limit):\n queue = []\n visited.add(source)\n queue.append([source])\n iter_count = 0\n output = []\n while len(queue) > 0 and iter_count <= iter_limit:\n iter_count += 1\n path_attempt = queue.pop(0)\n v = path_attempt[-1]\n if v == target.lower():\n if path_attempt not in output:\n output.append(path_attempt)\n# print(output)\n# for val in path_attempt:\n# try:\n# visited.remove(val)\n# except:\n# pass\n visited.remove(target.lower())\n iter_count = 0\n if len(output) == 3:\n # print(\"hit\")\n return output\n try:\n v = wiki_wiki.page(v)\n except:\n continue\n edges = [x.lower() for x in v.links]\n index_push = 0\n for edge in edges:\n if (edge in target.lower() or target.lower() in edge) and edge not in visited:\n visited.add(edge)\n new_path_attempt = path_attempt[:]\n new_path_attempt.append(edge)\n if edge == target.lower():\n queue.insert(0, new_path_attempt)\n index_push += 1\n queue.insert(index_push, new_path_attempt)\n# print(queue)\n \n for edge in edges:\n if edge not in visited:\n visited.add(edge)\n new_path_attempt = path_attempt[:]\n new_path_attempt.append(edge)\n queue.append(new_path_attempt)\n # print(\"out\", iter_count)\n # print(len(queue))\n return output\n\n\ndef get_probability_score(path):\n\n if path == []:\n 
return \"N/A\"\n\n all_probs = []\n for i in range(len(path)):\n probabilities_path = []\n for val in path[i]:\n probabilities = 1/(len(wiki_wiki.page(val).links))\n probabilities_path.append(probabilities)\n all_probs.append((sum(probabilities_path), path[i]))\n\n all_probs.sort(key = lambda x: x[0]) \n return all_probs[0][1]\n\n# function to get related keywords\ndef find_similar_keywords(model, x):\n output = \"\"\n first = True\n try:\n count = 0\n for node, _ in model.wv.most_similar(x):\n if first:\n output += node\n first = False \n else:\n output += \"|\" + node\n count += 1\n if count >=5:\n break\n except:\n # print(x, \"not in graph\")\n output=\"NA\"\n return output","sub_path":"scripts/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"489511400","text":"import win32api\nimport win32gui\nimport win32con\nimport time\nimport random\n\nfrom control.base_control import BaseControl\n\nimport common.screen as screen\n\nRIGHT = 0\nDOWN = 1\nLEFT = 2\n\n\nclass ReplyMapCommon(BaseControl):\n\n _scranDirection = 0 # 0 → 1 ↓ 2←\n _nextScranDirection = 0\n _isScranMap = False\n\n team1BattleMaxCount = 5\n team2BattleMaxCount = 0\n\n def __init__(self, handle, interval):\n self.handle = handle\n self.interval = interval\n\n def getEnemyLocation(self):\n\n imgs = [\"enemy\\\\ship_p1_45_45_55_55.png\",\n \"enemy\\\\ship_p2_45_45_55_55.png\",\n \"enemy\\\\ship_p3_45_45_55_55.png\",\n \"enemy\\\\ship_p4_45_45_55_55.png\",\n \"enemy\\\\ship_z1_45_45_55_55.png\",\n \"enemy\\\\ship_z2_45_45_55_55.png\",\n \"enemy\\\\ship_z3_45_45_55_55.png\",\n \"enemy\\\\ship_h1_45_45_55_55.png\",\n \"enemy\\\\ship_h1_45_45_55_55.png\",\n \"enemy\\\\ship_h2_45_45_55_55.png\",\n \"enemy\\\\ship_q1_45_45_55_55.png\",\n \"enemy\\\\ship_q2_45_45_55_55.png\",\n ]\n\n # random.shuffle(imgs)\n for i in range(len(imgs)):\n xylist = screen.matchResImgInWindow(\n self.handle, imgs[i],0.7)\n if len(xylist) > 0:\n return xylist\n \n \n\n return []\n\n def getBossLocation(self):\n imgs = [\"enemy\\\\d1_4_boss_45_45_55_55.png\",\n \"enemy\\\\d1_2_boss_45_45_55_55.png\",\n \"enemy\\\\d1_3_boss_45_45_55_55.png\",\n \"enemy\\\\boss_48_45_52_55.png\",\n ]\n\n random.shuffle(imgs)\n for i in range(len(imgs)):\n xylist = screen.matchResImgInWindow(\n self.handle, imgs[i],0.7)\n if len(xylist) > 0:\n return xylist\n \n\n return []\n\n\n def dragPerLeft(self):\n self.dragPer(10, 50, 80, 50)\n\n def dragPerRight(self):\n self.dragPer(80, 50, 10, 50)\n\n def dragPerUp(self):\n self.dragPer(50, 20, 50, 70)\n\n def dragPerDown(self):\n self.dragPer(50, 70, 50, 20)\n\n \n def resetMapPosition(self):\n if not self._isScranMap:\n winHash = \"\"\n while not screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n winHash = screen.winScreenHash(self.handle )\n self.dragPerUp()\n \n winHash = \"\" \n while not screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n winHash = screen.winScreenHash(self.handle )\n self.dragPerLeft()\n \n \n self._needResetMap = False\n self._scranMapEnd = False\n self._scranDirection = 0\n\n def scranDragMap(self): # 全图扫描\n winHash = screen.winScreenHash(self.handle )\n self._isScranMap = True\n if self._scranDirection == RIGHT:\n self.dragPerRight()\n \n if screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n self._nextScranDirection = LEFT\n self._scranDirection = DOWN\n return\n if self._scranDirection == DOWN:\n 
self.dragPerDown()\n # after a downward step, alternate the horizontal sweep direction\n \n if screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n self._isScranMap = False # the whole map has been scanned\n return\n\n self._scranDirection = self._nextScranDirection\n if self._scranDirection == LEFT:\n self.dragPerLeft()\n \n if screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n self._nextScranDirection = RIGHT # reached the left edge; after moving down, sweep right\n self._scranDirection = DOWN\n return\n\n def findAndBattle(self):\n\n if self._teamNum == 1:\n if self._team1BattleCount < self.team1BattleMaxCount:\n xylist = self.getEnemyLocation()\n minX=self.getPosX(15)\n # maxY=self.getPosY(80)\n resList=[]\n for point in xylist:\n if point[0]>=minX:\n resList.append(point)\n if len(resList) > 0:\n x, y = resList[0]\n # self.leftClick(x, y)\n cx=self.getPosX(50)\n cy=self.getPosY(50)\n self.drag(x,y,cx,cy) # the drag is not 1:1; it moves roughly half the distance\n time.sleep(2)\n self.drag(x,y,cx,cy) \n self.leftClick(cx, cy)\n time.sleep(5)\n else:\n self.resetMapPosition()\n self.scranDragMap()\n\n else:\n time.sleep(10)\n self.switchTeam()\n self._teamNum = 2\n\n if self._teamNum == 2:\n if self._team2BattleCount < self.team2BattleMaxCount:\n xylist = self.getEnemyLocation()\n if len(xylist) > 0:\n x, y = xylist[0]\n # self.leftClick(x, y)\n cx=self.getPosX(50)\n cy=self.getPosY(50)\n self.drag(x,y,cx,cy) # the drag is not 1:1; it moves roughly half the distance\n time.sleep(2)\n self.drag(x,y,cx,cy) \n self.leftClick(cx, cy)\n time.sleep(5)\n else:\n self.resetMapPosition()\n self.scranDragMap()\n else:\n xylist = self.getBossLocation()\n if len(xylist) > 0:\n x, y = xylist[0]\n self.leftClick(x, y)\n time.sleep(5)\n else:\n self.resetMapPosition()\n self.scranDragMap()\n","sub_path":"core/control/reply_map_common.py","file_name":"reply_map_common.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"333482694","text":"\"\"\"\n========================================\n13. Writing modified files with MNE-BIDS\n========================================\n\nMNE-BIDS is designed such that it enforces good practices when working with\nBIDS data. One of the principles of creating BIDS datasets from raw data is\nthat the raw data should ideally be written unmodified, as-is. To enforce\nthis, :func:`mne_bids.write_raw_bids` performs some basic checks and will\nthrow an exception if it believes you're doing something that you really\nshouldn't be doing (i.e., trying to store modified \"raw\" data as a BIDS\nraw data set.)\n\nThere might be some – rare! – situations, however, when working around this\nintentional limitation in MNE-BIDS can be warranted. For example, you might\nencounter data that has manually been split across multiple files during\nrecording, even though it belongs to a single experimental run. In this case,\nyou might want to concatenate the data before storing them in BIDS. This\ntutorial will give you an example of how to use :func:`mne_bids.write_raw_bids`\nto store such data, despite it being modified before writing.\n\n.. warning:: Please be aware that the situations in which you would need\n to apply the following solution are **extremely** rare. If you\n ever find yourself wanting to apply this solution, please take a\n step back, take a deep breath and re-consider whether this is\n **absolutely** necessary. 
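(Stepping back to the map-scanning record above, seq_id 489511400, before the MNE-BIDS tutorial continues: `scranDragMap` implements a boustrophedon sweep. It drags right until the screen hash stops changing, steps down, sweeps left, and finishes when a downward drag changes nothing. A sketch of that direction logic in isolation, with the screen hashing stubbed out as an `at_edge` flag:)

```python
RIGHT, DOWN, LEFT = 0, 1, 2

def step(direction, next_direction, at_edge):
    """One sweep step; returns (direction, next_direction, scan_finished)."""
    if direction == RIGHT and at_edge:
        return DOWN, LEFT, False      # hit right edge: go down, then sweep left
    if direction == LEFT and at_edge:
        return DOWN, RIGHT, False     # hit left edge: go down, then sweep right
    if direction == DOWN:
        if at_edge:
            return direction, next_direction, True  # bottom reached: map scanned
        return next_direction, next_direction, False
    return direction, next_direction, False  # keep sweeping horizontally
```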
If even a slight doubt remains,\n reach out to the MNE-BIDS developers.\n\n\"\"\"\n\n# Authors: Richard Höchenberger \n# License: BSD-3-Clause\n\n# %%\n# Load the ``sample`` dataset, and create a concatenated raw data object.\n\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport mne\nfrom mne.datasets import sample\n\nfrom mne_bids import write_raw_bids, BIDSPath\n\n\ndata_path = Path(sample.data_path())\nraw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif'\noutput_path = data_path / '..' / 'MNE-sample-data-bids'\nbids_path = BIDSPath(subject='01', task='audiovisual', root=output_path)\n\nraw = mne.io.read_raw_fif(raw_fname)\nraw.info['line_freq'] = 60\nraw_concat = mne.concatenate_raws([raw.copy(), raw])\n\n# %%\n# Trying to write these data will fail.\n\ntry:\n write_raw_bids(raw=raw_concat, bids_path=bids_path, overwrite=True)\nexcept ValueError as e:\n print(f'Data cannot be written. Exception message was: {e}')\n\n# %%\n# We can work around this limitation by first writing the modified data to\n# a temporary file, reading it back in, and then writing it via MNE-BIDS.\n\nwith NamedTemporaryFile(suffix='_raw.fif') as f:\n fname = f.name\n raw_concat.save(fname, overwrite=True)\n raw_concat = mne.io.read_raw_fif(fname, preload=False)\n write_raw_bids(raw=raw_concat, bids_path=bids_path, overwrite=True)\n\n# %%\n# That's it!\n#\n# .. warning:: **Remember, this should only ever be a last resort!**\n#\n","sub_path":"examples/write_modified_files.py","file_name":"write_modified_files.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"327939080","text":"class Account(object):\n def __init__(self, owner, balance):\n self.owner = owner\n self.balance = balance\n\n def deposit(self, amount):\n if amount < 0:\n raise AssertionError\n self.balance += amount\n print('Deposit accepted')\n\n def withdraw(self, amount):\n if self.balance < amount:\n print('Funds Unavailable!')\n raise AssertionError\n self.balance -= amount\n print('Withdrawal accepted')\n\n def __str__(self):\n return f'Account owner: {self.owner}\\nAccount balance: {self.balance}'\n\n\nacct1 = Account('Jose', 100)\nprint(acct1)\nprint(acct1.balance)\n\nacct1.deposit(50)\nacct1.withdraw(75)\nacct1.withdraw(500)\n\n","sub_path":"05-Object Oriented Programming/Account.py","file_name":"Account.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"408561844","text":"#import math\n\n#x = ''' 1427 0 \n\n #876652098643267843 \n#5276538'''\n#xs = x.split()\n#xs.reverse()\n#for i in xs:\n\t#i = math.sqrt(int(i))\n\t#if str(i) == '0.0':\n\t\t#print('0.0000')\n\t#else:\n\t\t#print(str(round(i,4)))\n\n\nimport math\nimport sys\n\nx = sys.stdin.readlines()\nxs = x.split()\nxs.reverse()\nfor i in xs:\n\ti = math.sqrt(int(i))\n\tprint('%.4f' % i)\n\n","sub_path":"timus1001.py","file_name":"timus1001.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"646758528","text":"motorcycles = ['honda', 'yamaha', 'sazuki', 'honda', 'honda']\n\n#motorcycles = [motorcycle for motorcycle in motorcycles if motorcycle != 'honda']\n\nmotorcycles = [i for i in motorcycles if i != 'honda']\n\nfor i in range(10):\n print(f\"Hello, {i}\")\n\nwhile None:\n #motorcycles.remove('honda')\n print('test')\n\na = [\"Hello\", \"World\"]\nb = [\"Hello\", 
\"World\"]\n\nif a is b:\n print(\"a is b\")\nif a == b:\n print(\"a == b\")","sub_path":"basics/chapter_3/programs/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"418174116","text":"from django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\n\nfrom rest_framework.exceptions import ValidationError\n\n\nclass EmailService:\n @classmethod\n def send_email(cls, data):\n # is_debug = getattr(settings, \"DEBUG\", False)\n # if is_debug:\n # print(\n # \"--- Email not sent because DEBUG is TRUE. Email data below. ---\" # noqa\n # )\n # print(data)\n # return None\n\n to_email = data.get(\"to_email\")\n subject = data.get(\"subject\")\n template_data = data.get(\"meta\") or {}\n template_data[\"subject\"] = subject\n if not data.get(\"templates\"):\n raise ValidationError(\"templates is required\")\n html = render_to_string(data.get(\"templates\"), template_data)\n\n message = EmailMultiAlternatives(\n subject,\n html,\n settings.DEFAULT_FROM_EMAIL,\n [to_email],\n )\n try:\n message.attach_alternative(html, \"text/html\")\n message.send()\n except Exception as e:\n print(f\"Email exception: {e}\")\n\n return None\n\n @classmethod\n def send_verification_email(cls, email, first_name, token):\n data = {\n \"to_name\": first_name,\n \"to_email\": email,\n \"subject\": \"Test\",\n \"templates\": \"email/reset_password.html\",\n \"text\": \"Your code {}\",\n \"meta\": {\n \"first_name\": first_name or \"No First Name\",\n \"token\": token.key,\n },\n }\n from pprint import pprint\n pprint(data)\n return cls.send_email(data)\n","sub_path":"base/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"206813233","text":"# A Representation for an \"Action4\" in a game of Evolution\nfrom gainPopulation import *\nfrom gainBodySize import *\nfrom buySpeciesBoard import *\nfrom replaceTrait import *\n\nclass Action4:\n\n\t\"\"\"\n\t\tConstruct a new Action4\n\t\t@param cardIdx: the index of the traitCard donated by the Player in their hand\n\t\t@param GP: a list of zero or more GainPopulation \n\t\t@param GB: a list of zero or more GainBodySize \n\t\t@param BT: a list of zero or more BuySpeciesBoard \n\t\t@param RT: a list of zero or more ReplaceTrait \n\t\tNat, [GainPopulation, ...], [GainBodySize, ...], [BuySpeciesBoard, ...], [ReplaceTrait, ...] -> Void\n\t\"\"\"\n\tdef __init__(self, cardIdx, GP, GB, BT, RT):\n\t\tself.tribute = cardIdx\n\t\tself.GP = GP\n\t\tself.GB = GB\n\t\tself.BT = BT\n\t\tself.RT = RT\n\n\t\"\"\"\n\t\tGets all card indexes referenced in this action. Used for cheating checks\n\t\t@return a list of every card index within this action\n\t\tVoid -> ListOf(Nat)\n\t\"\"\"\n\tdef getAllCardIdcs(self):\n\t\tidcs = [self.tribute]\n\t\tidcs += [p.cardIdx for p in self.GP]\n\t\tidcs += [b.cardIdx for b in self.GB]\n\t\tidcs += [bt.payment for bt in self.BT]\n\t\tfor bt in self.BT:\n\t\t\tidcs += bt.traitList\n\t\tidcs += [rt.newTraitIdx for rt in self.RT]\n\t\treturn idcs\n\n\t\"\"\"\n\t\tGets all species indexes referenced in this action. 
Used for cheating checks\n\t\t@return a list of every species index within this action\n\t\tVoid -> ListOf(Nat)\n\t\"\"\"\n\tdef getAllSpecIdcs(self):\n\t\tidcs = [p.specIdx for p in self.GP]\n\t\tidcs += [b.specIdx for b in self.GB]\n\t\tidcs += [rt.specIdx for rt in self.RT]\n\t\treturn idcs\n\n\t\"\"\"\n\t\tCreates a json representation of this action\n\t\t@return a JsonArray representing an action\n\t\tVoid -> JsonArray\n\t\"\"\"\n\tdef actionToJson(self):\n\t\treturn [self.tribute, \n\t\t\t\t[p.toJson() for p in self.GP],\n\t\t\t\t[b.toJson() for b in self.GB],\n\t\t\t\t[bt.toJson() for bt in self.BT],\n\t\t\t\t[rt.toJson() for rt in self.RT]]\n\n\t\"\"\"\n\tConstruct an Action4 from the given JSON input\n\tEFFECT: if the input is invalid, quit\n\t@param action4: JSON representation of an Action4\n\t@param player: PlayerState that this action corresponds \n\t@return an Action4 equivalent to the JSON\n\tJSON -> Action4\n\t\"\"\"\n\t@staticmethod\n\tdef actionFromJson(action4):\n\t\tAction4.validate(action4)\n\t\tcardIdx, GP, GB, BT, RT = action4\n\n\t\treturn Action4(cardIdx, [GainPopulation.fromJson(p) for p in GP], \n\t\t\t\t\t\t\t\t[GainBodySize.fromJson(b) for b in GB], \n\t\t\t\t\t\t\t\t[BuySpeciesBoard.fromJson(buyt) for buyt in BT], \n\t\t\t\t\t\t\t\t[ReplaceTrait.fromJson(rept) for rept in RT])\n\n\t\"\"\"\n\t\tValidate a JSON Action4\n\t\tEFFECT: if not valid, quit\n\t\t@param action4: JSON representation of an Action4\n\t\t@param player: PlayerState that this action corresponds \n\t\tJSON -> Void\n\t\"\"\"\n\t@staticmethod\n\tdef validate(action4):\n\t\tcardIdx, GP, GB, BT, RT = action4\n\t\tif not (len(action4) == 5 and type(cardIdx) == int):\n\t\t\tquit()\n\t\telse:\n\t\t\t[GainPopulation.validate(p) for p in GP]\n\t\t\t[GainBodySize.validate(b) for b in GB]\n\t\t\t[BuySpeciesBoard.validate(buyt) for buyt in BT]\n\t\t\t[ReplaceTrait.validate(rept) for rept in RT]\n","sub_path":"12/dealer/action4.py","file_name":"action4.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"310432194","text":"import numpy as np\nimport pandas as pd\nimport random as rn\nimport time\nimport jgraph as ig\nimport random as rn\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(style=\"whitegrid\")\nsns.set_color_codes()\n\n\n#custome libaries\nfrom data_preprocessing_IoT import IoT_data_common\n#from Autoencoder_IoT_model import build_iot_AE\nfrom encoder import EncoderForest\n\nimport utils\n\nparams = {'dataset': 'IoT-23'}\n\n###calling IoT-23 dataset####\nprint(\"Loading dataset IoT-23.....\\n\")\ntrain_data, train_labels, test_data, test_labels = IoT_data_common(params)\nprint(\"train shape: \", train_data.shape)\nprint(\"test shape: \", test_data.shape)\nprint(\"train_label shape: \", train_labels.shape)\nprint(\"test_label shape: \", test_labels.shape)\n\ntest_label_original = np.argmax(test_labels, axis=1)\n\n# scikit learn solution\nss_500 = time.time()\nencoder = EncoderForest(500)\nencoder.fit(train_data, max_depth=10)\nprint(\"end fit\")\nencoded = encoder.encode(train_data)\nprint(\"end encode\")\nee_500 = time.time()\nprint('{:.3f} sec, Scikit Learn Normal'.format(ee_500-ss_500))\n\n# scikit learn solution\nss_1000 = time.time()\nencoder_1k = EncoderForest(1000)\nencoder_1k.fit(train_data, max_depth=10)\nprint(\"end fit\")\nencoded_1k = encoder_1k.encode(train_data)\nprint(\"end encode\")\nee_1000 = time.time()\nprint('{:.3f} sec, Scikit Learn 
Normal'.format(ee_1000-ss_1000))\n\n# scikit learn solution\nss_2000 = time.time()\nencoder_2k = EncoderForest(2000)\nencoder_2k.fit(train_data, max_depth=10)\nprint(\"end fit\")\nencoded_2k = encoder_2k.encode(train_data)\nprint(\"end encode\")\nee_2000 = time.time()\nprint('{:.3f} sec, Scikit Learn Normal'.format(ee_2000-ss_2000))\n\nimg_prime_1k = encoder_1k.decode(encoded_1k[100000])#.reshape(10, 10)\nprint(\"end decode\",img_prime_1k)\n\nimg_prime_2k = encoder_2k.decode(encoded_2k[10000])#.reshape(10, 10)\nprint(\"end decode\",img_prime_2k)\n\nimg_prime = encoder.decode(encoded[100078])#.reshape(10, 10)\nprint(\"end decode\",img_prime)\n\nss1=np.argsort(img_prime)\nprint(\"\\n decoded result \\n:\", ss1)\n\nss2=np.argsort(img_prime_1k)\nprint(\"\\n decoded result \\n:\", ss2)\n\nss3=np.argsort(img_prime_2k)\nprint(\"\\n decoded result \\n:\", ss3)\n\nf = plt.figure(figsize=(20,10))\nplt.subplot(1,3,1)\nsns.distplot(img_prime, kde=True, color=\"r\")\nplt.title('EncoderForest with 500 trees and depth 20')\nplt.xlabel('Reconstruction Error')\n\nplt.subplot(1,3,2)\nsns.distplot(img_prime_1k, kde=True, color=\"k\")\nplt.title('EncoderForest with 1k trees and depth 20')\nplt.xlabel('Reconstruction Error')\n\nplt.subplot(1,3,3)\nsns.distplot(img_prime_2k, kde=True, color=\"b\")\nplt.title('EncoderForest with 2k trees and depth 20')\nplt.xlabel('Reconstruction Error')\nplt.show()\n\n#sns.countplot(encoder.decode(encoded[100000]))\n\n'''\ndf = pd.DataFrame(data=train_data)\nf = plt.figure(figsize=(20,10))\nplt.subplot(1,1,1)\nsns.pairplot(df, diag_kind=\"kde\")\nplt.title('Distribution plot')\nplt.show()\n'''\n\nSorted=True\nfig = plt.figure(figsize=(20,10))\nax1 = plt.subplot(122, projection='polar')\nrn, thetan = utils.getVals(encoded,np.array([0.,0.]),sorted=Sorted)\nfor j in range(len(rn)):\n ax1.plot([thetan[j],thetan[j]], [1,rn[j]], color='b',alpha=1,lw=1)\n\nra, thetaa = utils.getVals(encoded,np.array([3.3,3.3]),sorted=Sorted)\nfor j in range(len(ra)):\n ax1.plot([thetaa[j],thetaa[j]], [1,ra[j]], color='r',alpha=0.9,lw=1.3)\n \nax1.set_title(\"Normal Isolation Forest\\nNormal: Mean={0:.3f}, Var={1:.3f}\\nAnomaly: Mean={2:.3f}, Var={3:.3f}\".format(np.mean(rn),np.var(rn),np.mean(ra),np.var(ra)))\n\nax1.set_xticklabels([])\nax1.set_xlabel(\"Anomaly\")\nax1.set_ylim(0,encoded.limit)\n\nax1.axes.get_xaxis().set_visible(False)\nax1.axes.get_yaxis().set_visible(False)\nplt.show()\n","sub_path":"Encoder_Forest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"111496463","text":"\"\"\"Defines functions and storage for trading stocks\"\"\"\n\nfrom collections import namedtuple\nimport datetime\nfrom decimal import Decimal\nfrom stocks import Stock, tradable_stocks\n\n\n# Using a factory function to create the storage objects for individual trades and\n# collections of trades lets us replace the current basic and in-memory\n# implementations with a Trade object and genuine persistence but keep the API intact\n# at some point in the future.\ndef persistence_factory(mode='throwaway'):\n if mode == 'throwaway':\n Trade = namedtuple('Trade', ['stock', 'time', 'quantity', 'direction', 'traded_price'])\n trades = []\n return (Trade, trades)\n\n\nTrade, trade_collection = persistence_factory()\n\n\n# Please see the assumptions and interpretations in the README\n# for some background on the running_product and number_of_trades\naccumulating_values = {\n 'number_of_trades': 0,\n 
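(On the Encoder_Forest script above: the three stopwatch blocks differ only in the tree count. A small context manager, offered as an alternative sketch rather than code from the record, factors out the `ss_*`/`ee_*` bookkeeping:)

```python
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Prints the elapsed wall-clock time for the enclosed block.
    start = time.time()
    yield
    print('{:.3f} sec, {}'.format(time.time() - start, label))

# Hypothetical usage against the repo's EncoderForest class:
# for trees in (500, 1000, 2000):
#     with timed(f'EncoderForest({trees} trees)'):
#         enc = EncoderForest(trees)
#         enc.fit(train_data, max_depth=10)
#         enc.encode(train_data)
```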
'running_product': 1\n}\n\n\ndef record_trade(stock_symbol, time=None, quantity=0, direction='', traded_price=0):\n \"\"\"\n Expected arguments as follows:\n stock_symbol - string representing an instance of any class derived from Stock\n time - datetime defaults to now in reality past times would presumably be disallowed\n quantity - int the number of shares traded in the trade\n direction - string only 'buy' and 'sell' acceptable values\n traded_price - int unit price in pennies for the trade\n\n \"\"\"\n if isinstance(tradable_stocks[stock_symbol], Stock):\n if direction not in ('buy', 'sell'):\n raise ValueError(\"Direction argument must be either 'buy' or 'sell'\")\n if time is None:\n time = datetime.datetime.now()\n trade_collection.append(Trade(tradable_stocks[stock_symbol], time, quantity, direction, traded_price))\n accumulating_values['running_product'] = traded_price * accumulating_values['running_product']\n accumulating_values['number_of_trades'] += 1\n else:\n raise KeyError(\"No stock available with that symbol\")\n\n\ndef volume_weighted_stock_price(stock_symbol):\n \"\"\"\n Takes a string representing a tradable stock and returns the\n volume weighted stock price based on the last 15 minutes' activity.\n\n \"\"\"\n if not isinstance(tradable_stocks[stock_symbol], Stock):\n raise KeyError(\"No stock available with that symbol\")\n relevant_trades = [trade\n for trade in trade_collection\n if trade.stock.stock_symbol == stock_symbol\n and trade.time >= (datetime.datetime.now() - datetime.timedelta(minutes=15))]\n numerator = sum([trade.traded_price * trade.quantity\n for trade in relevant_trades])\n denominator = sum([trade.quantity\n for trade in relevant_trades])\n div_zero_message = \"No stocks of this type have been traded in the last 15 minutes.\"\n return Decimal(numerator / denominator) if denominator else div_zero_message\n\n\ndef all_share_index(results=trade_collection):\n \"\"\"\n Takes a container of trades, with our results list as the default\n and returns the nth root of the product of each trade unit price\n from 0 to n. 
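(The geometric mean described here, the n-th root of the product of every traded price, is computed just below as `running_product ** (1/number_of_trades)`; for long trade histories that running product overflows or loses precision. A log-domain sketch, offered as an alternative rather than as the record's implementation:)

```python
import math

prices = [105, 110, 98, 120, 101]  # illustrative traded prices in pennies

# exp(mean of logs) equals the n-th root of the product, but stays stable.
index = math.exp(sum(math.log(p) for p in prices) / len(prices))

direct = math.prod(prices) ** (1 / len(prices))
assert abs(index - direct) < 1e-9
```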
Passing in the container lets us use a test container.\n\n \"\"\"\n div_zero_message = \"No trades have taken place yet.\"\n running_product = accumulating_values['running_product']\n number_of_trades = accumulating_values['number_of_trades']\n return Decimal(\n running_product ** (1/number_of_trades)\n ) if number_of_trades else div_zero_message\n","sub_path":"exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"46602324","text":"from .auth import APIAuth\nimport requests\nimport json\nfrom .utils import Utils as AUtils\n\n\nclass APIClient:\n \"\"\"Various methods for Client BO stuff\"\"\"\n def __init__(self, api_auth: APIAuth):\n self.api_auth = api_auth\n self._client_location_view = None\n\n def get_client_location_view(self):\n \"\"\"Get a list of all the locations, sensorlocations, buildingmaps and macs for this account\n :return:\n \"\"\"\n headers = {\"Authorization\": \"Bearer \" + self.api_auth.get_token()}\n url = self.api_auth.api_config.get_api_url() + \"client/locationview\"\n response = requests.get(url, headers=headers)\n\n if response.status_code == 200:\n self._client_location_view = json.loads(response.content.decode())\n return self._client_location_view\n else:\n print(\"Bad response code: \" + str(response.status_code))\n return None\n\n def get_active_locations(self):\n devices_and_locations = self._client_location_view['locationSensorViews']\n return [location for location in devices_and_locations if location['lastSensorReportTime'] != -1]\n\n def get_active_devices(self, duration: int = (24 * 60 * 60 * 1000)):\n active_locs = self.get_active_locations()\n now = AUtils.now_ms()\n\n active_devices = []\n for loc in active_locs:\n for device in loc['sensorList']:\n if(now - device['lastReportTime']) < duration:\n active_devices.append(device)\n\n return active_devices\n\n def get_locations(self):\n devices_and_locations = self._client_location_view['locationSensorViews']\n return [location['location'] for location in devices_and_locations]\n\n def get_location_by_id(self, location_id: str):\n devices_and_locations = self._client_location_view['locationSensorViews']\n for location in devices_and_locations:\n if location['location']['id'] == location_id:\n return location\n return None\n\n def get_device_by_id(self, device_id: str):\n devices_and_locations = self._client_location_view['locationSensorViews']\n for location in devices_and_locations:\n for device in location['sensorList']:\n if device['id'] == device_id:\n return device\n\n return None\n\n","sub_path":"python/aretasapiclient/aretas_client.py","file_name":"aretas_client.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"113912568","text":"# f = open('secrets.txt', mode='w')\n# f.write('This is a new line')\n# f.close()\n# print(secret_data)\n\n# reading the data from the file and saving it to data argument\n# with open('secrets.txt', 'r') as f:\n# data = f.read()\n\n\n# adds another line to the data argument\n# with open('secrets.txt', 'w') as f:\n# f.write(data + '\\nThis is a new line 22222')\n#\n# # reads the content of the file\n# with open('secrets.txt', 'r') as f:\n# data2 = f.read()\n#\n# print(data2)\n\nimport json\n\n# with open('json_test.txt', 'r') as f:\n# # load is like read from json file as a python data type\n# json_dict = json.load(f)\n# # json.loads(f.read()) 
would be a string\n#\n# print(json_dict)\n# # reads it as a dictionary\n# print(type(json_dict))\n\n\n# my_family = {\n# \"parents\": ['Beth', 'Jerry'],\n# \"children\": ['Summer', 'Morty']\n# }\n#\n#\n# def write_to_json(data):\n# with open('new_json.json', 'w') as f:\n# json.dump(data, f)\n#\n#\n# def read_from_json():\n# with open('new_json.json', 'r') as f:\n# data = json.load(f)\n# return data\n#\n#\n# write_to_json(my_family)\n# new_fam = read_from_json()\n# print(new_fam)\n#\n# new_fam['JSONTest'] = 'Hello world'\n# write_to_json(new_fam)\n# print(read_from_json())\n\n\n# def add_item_to_menu(name, price):\n# menu.append({'name': name,\n# 'price': price})\n#\n#\n# def read_from_menu():\n# with open('menu.json', 'r') as f:\n# data = json.load(f)\n# return data\n#\n#\n# try:\n# menu = read_from_menu()\n# except FileNotFoundError and json.decoder.JSONDecodeError:\n# menu = []\n#\n#\n# def write_to_menu(data):\n# with open('menu.json', 'w') as f:\n# json.dump(data, f)\n#\n#\n# print(menu)\n# while True:\n# name = input('Enter a name of a dish: ')\n# if name == 'quit':\n# break\n# price = input('Enter a price: ')\n# add_item_to_menu(name, price)\n#\n# print(menu)\n# write_to_menu(menu)\nimport random\n\nnew_fam = {\n \"firstName\": \"Jane\",\n \"lastName\": \"Doe\",\n \"hobbies\": [\"running\", \"sky diving\", \"singing\"],\n \"age\": 35,\n \"children\": [\n {\n \"firstName\": \"Alice\",\n \"age\": 6\n },\n {\n \"firstName\": \"Bob\",\n \"age\": 8\n }\n ]\n}\n\nwith open('json2.json', 'w') as f:\n json.dump(new_fam, f)\n\nwith open('json2.json', 'r') as f:\n family = json.load(f)\n\nfor child in family['children']:\n print(f'{family[\"firstName\"]}\\'s child {child[\"firstName\"]} is {child[\"age\"]}')\n child['fav_color'] = random.choice(['blue', 'yellow', 'green'])\n\nwith open('json2.json', 'w') as f:\n json.dump(family, f, indent=2)\n","sub_path":"Week5/Day4/CourseNotes/CN.py","file_name":"CN.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"277023689","text":"import Accountant\nimport Strategy\nimport DataFeed\nimport matplotlib.pyplot as plt\n\ndef runTask():\n data = DataFeed.TextDataFeed('spx_prices.csv')\n params = {'signal_window' : 40, 'max_position' : 100, \n 'wait_period' : 100}\n trader = Strategy.SNPStrategy(params)\n booky = Accountant.Accountant(data, trader)\n booky.runBacktest()\n return booky\n\ndef showResults(resu):\n plt.plot(resu.cumPnl)\n plt.show()\n","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"305942057","text":"class Apple:\n price = 5\n created_apples = 0\n\n def __init__(self, color):\n Apple.created_apples += 1\n\n if Apple.created_apples % 12 == 0:\n self.color = \"brurple\"\n elif Apple.created_apples == 3:\n self.color = \"purple\"\n elif Apple.created_apples == 4:\n self.color = \"brown\"\n else:\n self.color = color\n self.price = Apple.price\n\n @classmethod\n def change_price(cls, new_price):\n cls.price = new_price\n\n def __repr__(self):\n return \"This is a {:.0f} cedi {} Apple\".format(self.price, self.color)\n\nif __name__ == \"__main__\":\n # first = Apple(\"red\")\n # print(first)\n # second = Apple(\"blue\")\n # print(second)\n # Apple.change_price(6.0)\n # third = Apple(\"green\")\n # print(third)\n # print(first)\n for _ in range(30):\n 
print(Apple(\"red\"))\n","sub_path":"apple.py","file_name":"apple.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"571036407","text":"##Text extractor.\n\n\n##THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n##SOFTWARE.\n\nimport argparse\nimport struct\nimport string\n\njumps = 12 ## 8/12\noffset = 48 ## 44/48\ntenc = 'cp1251' ##cp932 or 1251\n\nparser = argparse.ArgumentParser(description='Text extractor for Spyro 3.')\nparser.add_argument('filepath', type=str, help = 'Path to level file.')\nparser.add_argument('--output', type=str, default = 's3_text.txt', help = 'Output file name.')\n\n\nargs = parser.parse_args()\n\ndef isUppercase(tSym):\n\tretVal = False\n\tcompStr = string.ascii_uppercase\n\tfor symbol in compStr:\n\t\tif tSym == symbol:\n\t\t\t\n\t\t\tretVal = True\n\treturn retVal\n\ndef getSubfileInfo(filepath, subfile):\n\tinfo_list = list()\n\tifile = open(filepath, 'rb')\n\tifile.seek((subfile-1)*8)\n\tinfo_list.append(struct.unpack(' varaddr and lstart < sf_size:\n\t\t\twad.seek(filestart+lstart)\n\t\t\tidbyte = wad.read(1)[0]\n\t\t\tetest = wad.read(1)\n\t\t\twad.seek(filestart+lstart+1)\n\t\t\tif not idbyte == 255 and len(etest) == 1:\n\t\t\t\tlTrig = True\n\t\t\t\tlsize = 0\n\t\t\t\twhile lTrig:\n\t\t\t\t\tif etest[0] == 0:\n\t\t\t\t\t\tlTrig = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tlsize += 1\n\t\t\t\t\tetest = wad.read(1)\n\t\t\t\t\t\n\t\t\t\tif lsize > 2:\n\t\t\t\t\tptr_list.append(varaddr+12)\n\t\t\t\t\tptr_list.append(0)\n\n\t\twad.seek(filestart+varaddr)\n\t\tllist = list()\n\t\tplist = list()\n\t\terrcount = 0\n\n\t\tvarlen = varlen_list[x]\n\t\t\n\t\tfor v in range(int(varlen/4)-4):\n\t\t\tlstart = struct.unpack(' varaddr:\n\t\t\t\tbreak\n\t\t\telif not lstart < sf_size:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tplist.append(varaddr+16+v*4)\n\t\t\t\tllist.append(lstart)\t\n\n\t\tfor tl in range(len(llist)):\n\t\t\twad.seek(filestart+llist[tl])\n\t\t\tidbyte = wad.read(1)[0]\n\t\t\tif not idbyte == 255:\n\t\t\t\ttxt = filestart+llist[tl]+idbyte\n\t\t\t\twad.seek(txt)\n\n\t\t\t\tlTrig = True\n\t\t\t\tlsize = 0\n\t\t\t\twhile lTrig:\n\t\t\t\t\tif wad.read(1)[0] == 0:\n\t\t\t\t\t\tlTrig = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tlsize += 1\n\n\t\t\t\twad.seek(txt)\n\t\t\t\ttbuf = wad.read(lsize)\n\t\t\t\tif (lsize > 2):\n\t\t\t\t\tptr_list.append(plist[tl])\n\t\t\t\t\tptr_list.append(1)\n\twad.close()\n\treturn ptr_list\n\ndef getTexts(filepath, subfile):\n\twtmp = getSubfileInfo(filepath, subfile)\t\n\tfilestart = wtmp[0]\n\tsf_size = wtmp[1]\n\n\ttxt_list = list()\n\tpointers = getPointersAddr(filepath, subfile)\t\n\n\twad = open(filepath, 'rb')\n\tfor t in range(int(len(pointers)/2)):\n\t\twad.seek(filestart+pointers[t*2])\n\t\ttxtbuf = wad.read(4)\n\t\tlstart = struct.unpack(' total_in: #total in not less than total out -- moved to miner checking\n # return False\n return True\n\n#################### testing ######################\n\n #prevents it from running if loaded the module... 
invoking it directly as a python script will run the tests\nif __name__ == \"__main__\":\n    pr1, pu1 = signature.generate_keys()\n    pr2, pu2 = signature.generate_keys()\n    pr3, pu3 = signature.generate_keys()\n    pr4, pu4 = signature.generate_keys()\n\n    Tx1 = Tx()\n    Tx1.add_input(pu1,1)\n    Tx1.add_output(pu2,1) #sending coin to pu2\n    Tx1.sign(pr1) # signing with private key so valid\n\n    if Tx1.is_valid():\n        print(\"Success! Tx is valid!\")\n    else:\n        print(\"Error! Tx is invalid!\")\n\n    Tx2 = Tx()\n    Tx2.add_input(pu1,2)\n    Tx2.add_output(pu2,1)\n    Tx2.add_output(pu3,1)\n    Tx2.sign(pr1) # using the private key of #1 since it is the input\n\n    # Tx3 is a test for escrow transaction\n    Tx3 = Tx()\n    Tx3.add_input(pu3,1.2) # shouldn't have more output than input.\n    Tx3.add_output(pu1,1.1) # mining rewards and transaction fees usually go to the miner.\n    Tx3.add_required(pu4)\n    Tx3.sign(pr3)\n    Tx3.sign(pr4)\n\n\n    for t in [Tx1,Tx2,Tx3]:\n        if t.is_valid():\n            print(f\"Success! Tx is valid!\")\n        else:\n            print(f\"Error! Tx is invalid!\")\n\n    # wrong signatures, should be pu1 signing\n    Tx4 = Tx()\n    Tx4.add_input(pu1,1) #user 1 is sending 1 count to Tx4\n    Tx4.add_output(pu2,1)\n    Tx4.sign(pr2) #but here signed with the wrong private_key\n\n    # Escrow transaction not signed by the arbiter\n    Tx5 = Tx()\n    Tx5.add_input(pu3,1.2)\n    Tx5.add_output(pu1,1.1)\n    Tx5.add_required(pu4)\n    Tx5.sign(pr3)\n    # Tx5.sign(pr4)\n\n    # Two input addresses, signed by only one\n    Tx6 = Tx()\n    Tx6.add_input(pu3,1)\n    Tx6.add_input(pu4,0.1)\n    Tx6.add_output(pu1,1.1)\n    Tx6.sign(pr3) #Tx6.sign(pr4) # is missing so should be invalid\n\n    # Outputs exceed the Inputs\n    Tx7 = Tx()\n    Tx7.add_input(pu4, 1.2)\n    Tx7.add_output(pu1,1)\n    Tx7.add_output(pu2,2)\n    Tx7.sign(pr4)\n\n    #negative value tests\n    Tx8 = Tx()\n    Tx8.add_input(pu2, -1)\n    Tx8.add_output(pu1,-1)\n    Tx8.sign(pr2)\n\n    # Modified after Transaction signed\n    Tx9 = Tx()\n    Tx9.add_input(pu1,1)\n    Tx9.add_output(pu2,1) #sending coin to pu2\n    Tx9.sign(pr1) # signing with private key so valid\n    Tx9.outputs[0]=(pu3,1) #instead of pu2 which was [(pu2,1)]\n\n\n    for t in [Tx4, Tx5, Tx6, Tx7, Tx8,Tx9]:\n        if t.is_valid():\n            print(f\"Error! Tx is valid!\")\n        else:\n            print(f\"Success! Tx is invalid!\")\n\n####################### resources ########################\n","sub_path":"transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"468180536","text":"\"\"\"exp_to_source: Reformat Level2b MSA data to be source-based.\n\"\"\"\nfrom collections import defaultdict\n\nfrom jwst.datamodels import MultiExposureModel, ModelContainer, ImageModel\nfrom jwst.datamodels.properties import merge_tree\n\n__all__ = ['exp_to_source', 'multislit_to_container']\n\n\ndef exp_to_source(inputs):\n    \"\"\"Reformat exposure-based MSA data to source-based.\n\n    Parameters\n    ----------\n    inputs: [MultiSlitModel, ...]\n        List of MultiSlitModel instances to reformat.\n\n    Returns\n    -------\n    {str: MultiExposureModel, }\n        Returns a dict of MultiExposureModel instances wherein each\n        instance contains slits belonging to the same source.\n        The key is the name of each source.\n    \"\"\"\n    result = defaultdict(MultiExposureModel)\n    for exposure in inputs:\n        for slit in exposure.slits:\n            result[slit.name].exposures.append(slit)\n            result[slit.name].exposures[-1].meta = exposure.meta\n    return result\n\n\ndef multislit_to_container(inputs):\n    \"\"\"Reformat exposure-based MSA data to source-based containers.\n\n    Parameters\n    ----------\n    inputs: [MultiSlitModel, ...]\n        List of MultiSlitModel instances to reformat, or just a \n        ModelContainer full of MultiSlitModels.\n\n    Returns\n    -------\n    {str: ModelContainer, }\n        Returns a dict of ModelContainer instances wherein each\n        instance contains ImageModels of slits belonging to the same source.\n        The key is the name of each slit.\n    \"\"\"\n    result = defaultdict(ModelContainer)\n    for exposure in inputs:\n        for slit in exposure.slits:\n            result[slit.name].append(ImageModel(slit.instance))\n            merge_tree(result[slit.name][-1].meta.instance, \n                       exposure.meta.instance)\n    return result\n","sub_path":"jwst/exp_to_source/exp_to_source.py","file_name":"exp_to_source.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"94816610","text":"import subprocess\nimport sys\n\n\nif __name__ == \"__main__\":\n\n    def return_sub_call():\n\n        print(\"Calling 'ls -l'...\")\n        p = subprocess.call((\"ls\", \"-l\"), stdout=subprocess.PIPE)\n        print(\"DONE! The command {}\\r\\n\".format(\"succeeded\" if not p else \"failed\"))\n\n    return_sub_call()\n","sub_path":"rax_university/pythonII/derp.py","file_name":"derp.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"23980690","text":"import re\nfrom operator import itemgetter\nfrom pathlib import Path\n\n\"\"\"\n\nTHIS FILE IS NOT USED BY finetune OR inference SCRIPT \n\nTHESE UTILS WERE USED FOR CONVERTING DATASETS IN \nOTHER FORMATS OR TO EXTRACT SENTENCES WITH MANY ENTITIES INSIDE\n\n\"\"\"\n\n\ndef main_method():\n    # copy this in real -main- file\n    \"\"\"\n    result = extract_rich_data(data_args.data_dir + 'prova.txt', tag_style=\"bio\")\n    sents = list()\n    for s in result[:40]:\n        sents.append([s[0]])\n\n    with open(os.path.join(training_args.output_dir, 'selected_sents_conll.csv'), 'w', newline='',\n              encoding='utf8') as out:\n        writer = csv.writer(out)\n        writer.writerows(sents)\n    \"\"\"\n\n\ndef extract_rich_data(path, tag_style=\"bio\"):\n    \"\"\"\n    FOR TEST ONLY\n    \"\"\"\n    text, labels = read_dataset_conll(path)\n    flat_text = [' '.join(seq) for seq in text]\n    flat_labels = [' '.join(lab_seq) for lab_seq in labels]\n    seq_scores = [0] * len(flat_text)\n\n    # compute scores\n    if tag_style == \"bio\":\n        for i, seq in enumerate(flat_text):\n            labels_found = set()\n            for j, word in enumerate(text[i]):\n                lab = labels[i][j]\n                if 'B-' in lab:\n                    if lab not in labels_found:\n                        seq_scores[i] += 2\n                        labels_found.add(lab)\n                    else:\n                        seq_scores[i] -= 1\n\n            #if len(seq.split()) < 30:\n            #    seq_scores[i] = 0\n    else: #\"IO\"\n        for i, seq in enumerate(flat_text):\n            labels_found = set()\n            last_label_found = \"\"\n            for j, word in enumerate(text[i]):\n                lab = labels[i][j]\n                if 'I-' in lab:\n                    if lab not in labels_found:\n                        seq_scores[i] += 2\n                        labels_found.add(lab)\n                    elif lab != last_label_found:\n                        seq_scores[i] += 1\n                last_label_found = lab\n\n    result = sorted([list(x) for x in zip(flat_text, flat_labels, seq_scores)], key=itemgetter(2))[::-1]\n\n    return result\n\n\ndef read_dataset_conll(path):\n    \"\"\"\n    TEST ONLY: reads the dataset and returns sentences and labels\n    - text: list of lists of strings (words)\n    - labels: list of lists of labels\n    \"\"\"\n    file_path = Path(path)\n    raw_text = file_path.read_text().strip()\n    raw_sequences = re.split(r'\\n\\t?\\n', raw_text)\n    text = []\n    labels = []\n    for raw_seq in raw_sequences:\n        tmp_sent = []\n        tmp_tags = []\n        for raw_line in raw_seq.split('\\n'):\n            splits = raw_line.rsplit(sep=' ', maxsplit=4)\n            tmp_tag = splits[-1]\n            tmp_word = splits[0]\n            tmp_sent.append(tmp_word)\n            tmp_tags.append(tmp_tag)\n        text.append(tmp_sent)\n        labels.append(tmp_tags)\n\n    return text, labels\n\n\ndef read_dataset_gmb(path):\n    \"\"\"\n    TEST ONLY: reads the dataset and returns sentences and labels\n    - text: list of lists of strings (words)\n    - labels: list of lists of labels\n    \"\"\"\n    file_path = Path(path)\n    raw_text = file_path.read_text(encoding=\"utf8\").strip()\n\n    raw_lines = re.split(r'\\n', raw_text)\n    lines = list()\n    for line in raw_lines:\n        if line[0] == ',':\n            lines.append(line[1:])\n        else:\n            lines.append(line)\n\n    text = []\n    labels = []\n\n    tmp_sent_words = []\n    tmp_sent_labels = []\n    for line in lines:\n        if line.startswith('Sentence: '):\n            if tmp_sent_labels and tmp_sent_words:\n                text.append(tmp_sent_words)\n                labels.append(tmp_sent_labels)\n                tmp_sent_words = []\n                tmp_sent_labels = []\n            _, word, pos, ne_label = line.split(',')\n            tmp_sent_words.append(word)\n            tmp_sent_labels.append(ne_label)\n\n        else:\n            try:\n                splits = line.rsplit(sep=',', maxsplit=4)\n                ne_label = splits[-1]\n                word = splits[-3]\n                tmp_sent_words.append(word)\n                tmp_sent_labels.append(ne_label)\n            except:\n                pass\n\n    return text, labels\n\n\ndef read_dataset_wikigold(path):\n    \"\"\"\n    TEST ONLY: reads the dataset and returns sentences and labels\n    - text: list of lists of strings (words)\n    - labels: list of lists of labels\n    \"\"\"\n    file_path = Path(path)\n    raw_text = file_path.read_text(encoding=\"utf8\").strip()\n    raw_sequences = re.split(r'\\n\\n', raw_text)\n    text = []\n    labels = []\n    for raw_seq in raw_sequences:\n        tmp_sent = []\n        tmp_tags = []\n        for raw_line in raw_seq.split('\\n'):\n            tmp_word, tmp_tag = raw_line.split(' ')\n            tmp_sent.append(tmp_word)\n            tmp_tags.append(tmp_tag)\n        text.append(tmp_sent)\n        labels.append(tmp_tags)\n\n    return text, labels\n\n\ndef read_dataset_wnut(path):\n    \"\"\"\n    TEST ONLY: reads the dataset and returns sentences and labels\n    - text: list of lists of strings (words)\n    - labels: list of lists of labels\n    \"\"\"\n    file_path = Path(path)\n    raw_text = file_path.read_text(encoding=\"utf8\").strip()\n    raw_sequences = re.split(r'\\n\\t?\\n', raw_text)\n    text = []\n    labels = []\n\n    for raw_seq in raw_sequences:\n        tmp_sent = []\n        tmp_tags = []\n        for raw_line in raw_seq.split('\\n'):\n            try:\n                tmp_word, tmp_tag = raw_line.split('\\t')\n                tmp_sent.append(tmp_word)\n                tmp_tags.append(tmp_tag)\n            except:\n                pass\n        text.append(tmp_sent)\n        labels.append(tmp_tags)\n\n    return text, labels\n\n\ndef read_dataset_secfiling(path):\n    \"\"\"\n    TEST ONLY: reads the dataset and returns sentences and labels\n    - text: list of lists of strings (words)\n    - labels: list of lists of labels\n    \"\"\"\n    file_path = Path(path)\n    raw_text = file_path.read_text(encoding=\"utf8\").strip()\n    raw_sequences = re.split(r'\\n\\t?\\n', raw_text)\n    text = []\n    labels = []\n\n    for raw_seq in raw_sequences:\n        tmp_sent = []\n        tmp_tags = []\n        for raw_line in raw_seq.split('\\n'):\n            try:\n                splits = raw_line.rsplit(sep=' ')\n                tmp_word = splits[-4]\n                tmp_tag = splits[-1]\n                tmp_sent.append(tmp_word)\n                tmp_tags.append(tmp_tag)\n            except:\n                print(raw_line)\n                pass\n        text.append(tmp_sent)\n        labels.append(tmp_tags)\n\n    return text, labels","sub_path":"utils/datasets_utils.py","file_name":"datasets_utils.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"237458509","text":"# 9095 / count the ways to make N with 1, 2 and 3\ndp = {1: 1, 2: 2, 3: 4} # DP base cases\n\n\ndef find(n):\n    global dp\n\n    if n not in dp:\n        temp = 0\n        # subtract i from N and recurse to count the ways to make N - i\n        for i in [1, 2, 3]:\n            if n - i >= 0:\n                temp += find(n - i)\n        dp[n] = temp\n\n    return dp[n]\n\n\nfor T in range(int(input())):\n    N = int(input())\n\n    print(find(N))\n","sub_path":"dp/9095.py","file_name":"9095.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"11622972","text":"import sys\nsys.path.append(\"..\")\nimport os\nimport javabridge\nimport bioformats\nimport SimpleITK as sitk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nrrd\nfrom tools import image_io as bfio\nfrom tools import image_processing as impro\n\n# Start the Java VM\njavabridge.start_vm(class_path=bioformats.JARS)\n\n#path_to_data = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'Daten', '24h', 'untreated'))\npath_to_data = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 
'..', '..', '..', 'Daten2'))\ninterpolator = 'bspline'\n\nfor directory in os.listdir(path_to_data):\n data_dir = os.path.join(path_to_data, directory)\n if os.path.exists(data_dir):\n for filename in os.listdir(data_dir):\n if filename.endswith('.tif'):\n path_to_tif = os.path.join(data_dir, filename)\n print('Processing image: ', path_to_tif)\n \n # Get a numpy array from the tif stack with the dimension\n meta_data, raw_data = bfio.get_tif_stack(filepath=path_to_tif, series=0, depth='z', return_dim_order='XYZC') # XYZC\n \n # Transpose the numpy array from XYZC to CZYX for the use with SimpleITK\n raw_data = np.transpose(raw_data, axes=[3,2,1,0]) # CZYX\n \n # Extract the channel -> make for each channel\n raw_data = raw_data[0,:,:,:]\n \n # Make a SimpleITK out of the numpy array and set its metadata\n image = sitk.GetImageFromArray(raw_data, isVector=False) # XYZ\n image.SetOrigin([0.0, 0.0, 0.0])\n image.SetDirection(np.identity(3, dtype=np.double).flatten().tolist())\n image.SetSpacing((meta_data.get('physical_size_x'), \n meta_data.get('physical_size_y'), \n meta_data.get('physical_size_z')))\n #print(image.GetOrigin())\n #print(image.GetDirection())\n #print(image.GetSpacing())\n \n # Make isotropic voxels. Distinction needed, so that \n # 48h->untreated_3 and 72h->untreated_1 have the same z-size as\n # the corresponding OpenSegSPIM-data\n \n resampled_image = impro.make_image_isotropic(image, interpolator, 0)\n \n #print(resampled_image.GetOrigin())\n #print(resampled_image.GetDirection())\n #print(resampled_image.GetSpacing())\n \n # Get a numpy array from the resampled simpleITK image\n np_image = sitk.GetArrayFromImage(resampled_image)\n \n # Transpose the numpy array from ZYX back to to XYZ\n np_image = np.transpose(np_image, axes=[2,1,0]) # XYZ\n np_image = np_image.astype('uint8')\n \n \n \n new_spacing = resampled_image.GetSpacing()\n header = {\"spacings\": [new_spacing[0], new_spacing[1], new_spacing[2]], \n \"dimension\": np_image.ndim,\n \"type\": \"uchar\", \n \"sizes\": [resampled_image.GetWidth(), resampled_image.GetHeight(), \n resampled_image.GetDepth()],\n \"units\": ['\"microns\"', '\"microns\"', '\"microns\"']}\n name = os.path.splitext(filename)[0]\n new_filename = os.path.join(data_dir, name)\n new_filename = new_filename+'_8_bit'+'.nrrd'\n nrrd.write(new_filename, data=np_image, header=header, index_order='F')","sub_path":"01-data_preparation/01-ds2-generate_isotropic_voxels.py","file_name":"01-ds2-generate_isotropic_voxels.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"269763780","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','ProTwo.settings')\n# configuring the settings for the project\n\nimport django\ndjango.setup()\n\n## Fake POP script\nimport random\nfrom AppTwo.models import User\nfrom faker import Faker\nfakegeneration = Faker()\ndef populate(N=5):\n for entry in range(N):\n # get the topic for the entry\n # create the fake data for that entry\n fake_firstName = fakegeneration.first_name()\n fake_lastName = fakegeneration.last_name()\n fake_email = fakegeneration.email()\n\n # create the new User entry\n user = User.objects.get_or_create(first_name=fake_firstName,last_name=fake_lastName,email=fake_email)[0]\n\nif __name__ == '__main__':\n print(\"populating script\")\n populate(20)\n print(\"populating 
complete\")\n","sub_path":"ProTwo/populate_AppTwo.py","file_name":"populate_AppTwo.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"408342869","text":"import json\nimport traceback\n\nimport requests\n\nfrom datetime import datetime, timedelta\nfrom threading import Thread\n\nfrom sqlalchemy import text\n\nfrom webapp import app, db, steem\nfrom models import Post\nfrom utilities import log, seconds_from_youtube_duration, get_valid_video, markdown_to_safe_html\n\nfrom utilities import DBConnection\n\n\nclass PostUpdateThread(Thread):\n    def __init__(self, db, app):\n        Thread.__init__(self)\n        self.app = app\n        self.db = db\n\n        self.posts_to_delete = []\n\n    # update scores of posts created within last week, set older post scores to 0\n    def updatePostScores(self):\n        try:\n            # Shouldn't this use SQLAlchemy?\n            # Todo: Test performance against raw SQLAlchemy updates\n            with DBConnection() as db:\n                q = '''\n                update posts set\n                trending_score =\n                (pow(pending_payout_value, 0.4) * 1000000) / pow(EXTRACT(EPOCH FROM current_timestamp - created) + 300, 0.2),\n                hot_score =\n                (sqrt(pending_payout_value - least(9.99, pending_payout_value)) * 1000000) / (EXTRACT(EPOCH FROM current_timestamp - created) + 60)\n                where EXTRACT(EPOCH FROM current_timestamp - created) > 600\n                and EXTRACT(EPOCH FROM current_timestamp - created) < 604800\n                '''\n                db.engine.execute(text(q).execution_options(autocommit=True))\n                q = '''\n                update posts set\n                trending_score = 0, hot_score = 0\n                where EXTRACT(EPOCH FROM current_timestamp - created) >= 604800\n                and trending_score > 0\n                '''\n                db.engine.execute(text(q).execution_options(autocommit=True))\n\n        except Exception as ex:\n            log('Failed to update scores...')\n            log(f\"{ex}\\n{traceback.format_exc()}\")\n\n    # query Steem API node for up to date content, and add to post\n    def update_steem_info(self, post):\n        with DBConnection() as db:\n            try:\n                # trap http type errors and retry fetch\n                content = {}\n                while not content:\n                    try:\n                        content = steem.get_content(post.author, post.permlink)\n                    except Exception as ex:\n                        log('Problem getting Steem info from API for: @' + post.author + '/' + post.permlink + '!')\n                        log(f\"{ex}\\n{traceback.format_exc()}\")\n\n                post.created = datetime.strptime(content['created'], '%Y-%m-%dT%H:%M:%S')\n                post.category = content['category']\n\n                js = content.get('json_metadata', '[]')\n                metadata = json.loads(js)\n                tags = metadata.get('tags', [])\n\n                post.tags = ' '.join(tags)\n                post.is_nsfw = True if post.tags.lower().find('nsfw') >= 0 else False\n                post.title = content['title']\n                post.has_declined_payout = False if float(content['max_accepted_payout'].split(' ')[0]) > 0 else True\n                post.pending_payout_value = float(content['pending_payout_value'].split(' ')[0])\n                post.total_payout_value = float(content['total_payout_value'].split(' ')[0])\n                post.has_paidout = True if post.total_payout_value > 0 else False\n                post.steem_json = content # todo - decide what of this should be stored\n                post.steem_thumbnail_image_url = ''\n\n                new_type, new_video_id, new_category = get_valid_video(content)\n\n                # if valid on update, use new values, otherwise assume old values remain\n                # this check is applied so dtube posts, edited in steemit are still retained\n                if new_type and new_video_id and new_category:\n                    post.video_type, post.video_id, post.category = new_type, new_video_id, new_category\n                post.description = markdown_to_safe_html(content['body'])\n\n                return post\n            except Exception as ex:\n                
log(f'Problem updating Steem info for: @{post.author }/{post.permlink }!')\n log(f\"{ex}\\n {traceback.format_exc()}\")\n return \"delete\"\n\n # query youtube/dtube/vimeo for up to date content, and add to post\n def update_video_info(self, post):\n\n try:\n if post.video_type == 'youtube':\n video_id = post.video_id\n video_api_key = app.config['YOUTUBE_API_KEY']\n # url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics%2Cstatus%2Cplayer&id=' + video_id + '&key=' + video_api_key\n url = f'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails&id={video_id}&key={video_api_key}'\n try:\n js = requests.get(url).json()\n except Exception as ex:\n log(url)\n log('Problem accessing YouTube Video info for: @' + post.author + '/' + post.permlink + '!')\n return \"delete\"\n\n if \"items\" in js and len(js[\"items\"]) == 1:\n item = js[\"items\"][0]\n post.video_thumbnail_image_url = item['snippet']['thumbnails']['medium']['url']\n post.video_duration_seconds = seconds_from_youtube_duration(item['contentDetails']['duration'])\n post.video_provider_channel_id = item['snippet']['channelId']\n video_published = datetime.strptime(item['snippet']['publishedAt'][:-5], '%Y-%m-%dT%H:%M:%S')\n if post.created > video_published:\n post.video_post_publish_delay_seconds = (post.created - video_published).total_seconds()\n else:\n post.video_post_publish_delay_seconds = 0\n # todo - decide which metadata to store in DB\n # post.video_info = {'snippet': item['snippet'], 'contentDetails': item['contentDetails']}\n\n elif post.video_type == 'dtube':\n try:\n url = 'https://steemit.com/dtube/@' + post.author + '/' + post.permlink + '.json'\n try:\n js = requests.get(url).json()['post']\n except Exception as ex:\n log(url)\n log('Problem accessing DTube Video info for: @' + post.author + '/' + post.permlink + '!')\n return\n metadata = js.get('json_metadata', '[]')\n post.video_thumbnail_image_url = 'https://ipfs.io/ipfs/' + metadata['video']['info']['snaphash']\n post.video_duration_seconds = metadata['video']['info']['duration']\n post.video_provider_channel_id = ''\n post.video_post_publish_delay_seconds = 0\n # todo - decide which metadata to store in DB\n # post.video_info = metadata\n except Exception as ex:\n # todo - fix regex so invalid dtubes don't reach here, then remove\n log('Problem updating updating dtube video info: ' + f\"{ex}\\n{traceback.format_exc()}\")\n log('Assumed Invalid, and Deleted post! : @' + post.author + '/' + post.permlink)\n return \"delete\"\n\n elif post.video_type == 'dlive':\n try:\n url = 'https://steemit.com/dlive/@' + post.author + '/' + post.permlink + '.json'\n try:\n js = requests.get(url).json()['post']\n except Exception as e:\n log(url)\n log('Problem accessing DLive Video info for: @' + post.author + '/' + post.permlink + '!')\n return \"delete\"\n metadata = js.get('json_metadata', '[]')\n post.video_thumbnail_image_url = metadata.get('thumbnail', '')\n post.video_duration_seconds = -1\n post.video_provider_channel_id = ''\n post.video_post_publish_delay_seconds = 0\n # todo - decide which metadata to store in DB\n # post.video_info = metadata\n except Exception as ex:\n # todo - fix intake filter regex so invalid dlives don't reach here, then remove\n log('Problem updating updating dlive video info: ' + f\"{ex}\\n{traceback.format_exc()}\")\n log('Assumed Invalid, and Deleted post! 
: @' + post.author + '/' + post.permlink)\n return \"delete\"\n\n # todo - implement support\n elif post.video_type == 'vimeo':\n pass\n\n except Exception as ex:\n log('Updating video info failed for: @' + post.author + '@' + post.permlink + '!')\n log(f\"{ex}\\n{traceback.format_exc()}\")\n return \"delete\"\n\n return post\n\n # query thread to update posts with pending update, and perform them\n # also update trending/hot scores every 5 minutes\n def run(self):\n last_updated_post_scores = datetime.now() - timedelta(seconds=240)\n while True:\n # update post scores every 5 minutes\n if (datetime.now() - last_updated_post_scores).seconds > 300:\n log('Updating post scores...')\n self.updatePostScores()\n last_updated_post_scores = datetime.now()\n log('Updated post scores!')\n\n with DBConnection() as db:\n post = db.session.query(\n Post\n ).filter(\n Post.pending_video_info_update\n ).order_by(\n Post.video_info_update_requested\n ).first()\n\n if post:\n new_post = self.update_steem_info(post)\n\n if new_post == \"delete\":\n db.session.delete(post)\n db.session.commit()\n continue\n\n post = new_post\n post.pending_steem_info_update = False\n\n new_post = self.update_video_info(post)\n\n if new_post == \"delete\":\n db.session.delete(post)\n db.session.commit()\n continue\n\n post = new_post\n\n post.pending_video_info_update = False\n db.session.commit()\n\n\nlog('Started Post Info Updater')\n\n# start thread for updating post info\nthread_1 = PostUpdateThread(db, app)\nthread_1.start()\n","sub_path":"web/app/post-info-updater.py","file_name":"post-info-updater.py","file_ext":"py","file_size_in_byte":10889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"409690473","text":"# @copyright@\n# Copyright (c) 2006 - 2017 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n# @rocks@\n# Copyright (c) 2000 - 2010 The Regents of the University of California\n# All rights reserved. 
Rocks(r) v5.4 www.rocksclusters.org\n# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt\n# @rocks@\n\nimport stack.commands\nimport threading\nimport subprocess\nimport time\nimport os\n\nmax_threading = 512\ntimeout\t= 30\n\n\nclass command(stack.commands.HostArgumentProcessor,\n\tstack.commands.sync.command):\n\tpass\n\n\nclass Parallel(threading.Thread):\n\tdef __init__(self, cmd, out=None):\n\t\tself.cmd = cmd\n\t\tif not out:\n\t\t\tself.out = {\"output\": \"\", \"error\": \"\", \"rc\": 0}\n\t\telse:\n\t\t\tself.out = out\n\t\twhile threading.activeCount() > max_threading:\n\t\t\ttime.sleep(0.001)\n\t\tthreading.Thread.__init__(self)\n\n\tdef run(self):\n\t\tp = subprocess.Popen(self.cmd,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT,\n\t\t\tshell=True)\n\t\t(o, e) = p.communicate()\n\t\trc = p.wait()\n\t\tself.out['output'] = o\n\t\tself.out['error'] = e\n\t\tself.out['rc'] = rc\n\n\nclass Command(command):\n\t\"\"\"\n\tWrites the /etc/hosts file based on the configuration database\n\t\"\"\"\n\n\tdef run(self, params, args):\n\n\t\tself.notify('Sync Host\\n')\n\n\t\toutput = self.command('report.host')\n\t\tf = open('/etc/hosts', 'w')\n\t\tf.write(\"%s\\n\" % output)\n\t\tf.close()\n\n\t\tif os.path.exists('/srv/salt/rocks'):\n\t\t\tf = open('/srv/salt/rocks/hosts', 'w')\n\t\t\tf.write(\"%s\\n\" % output)\n\t\t\tf.close()\n\n\n\n\n\n","sub_path":"common/src/stack/command/stack/commands/sync/host/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"606948938","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 17:51:25 2018\n\n@author: github.com/sahandv\n\"\"\"\nfrom __future__ import print_function, division\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\n# =============================================================================\n# Split / Partition data with easy to understand proportions for your network \n# to 3 part.\n# =============================================================================\ndef snd_data_split_3(X,Y,train_proportion,test_proportion,validation_proportion):\n size_1 = 1 - train_proportion\n size_2 = validation_proportion/(test_proportion+validation_proportion)\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=size_1)\n x_test, x_valid, y_test, y_valid = train_test_split(x_test, y_test, test_size=size_2)\n return x_train,x_test,x_valid,y_train,y_test,y_valid\n\n# =============================================================================\n# Get actual and predicted Y values and return percentage of accuracy for each \n# class.\n# =============================================================================\ndef class_accuracy_percentage_calc(Y_actual,Y_prediction):\n df_confusion = pd.crosstab(Y_actual, Y_prediction, rownames=['Actual'], colnames=['Predicted'], margins=True)\n count_of_classess = df_confusion.All.size\n class_accuracy_percentage = []\n for i in range(count_of_classess-1):\n class_accuracy_percentage.append(float(df_confusion[i][i])/float(df_confusion.All[i]))\n return class_accuracy_percentage\n","sub_path":"snd_base_additions.py","file_name":"snd_base_additions.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"379003017","text":"import base64\nimport json\nimport 
os\nimport re\nimport time\nfrom typing import List, Dict, Optional\n\nimport configargparse\nimport requests\nfrom loguru import logger\n\nURL = \"\"\nSESSION_KEY = \"\"\nUSERNAME = \"\"\nPASSWORD = \"\"\n#\n\n\ndef levenshtein_distance(s1: str, s2: str) -> int:\n if s1 == s2:\n return 0\n # https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python\n if len(s1) < len(s2):\n return levenshtein_distance(s2, s1)\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n return previous_row[-1]\n\n\ndef call_method(method: str, params: list, id_: int = 1) -> dict:\n assert len(URL) > 0\n response = requests.post(URL, json={'method': method, 'params': params, 'id': id_})\n # TODO: Add limit\n while not response.text.startswith(('[', '{')):\n logger.warning(\"Response is not valid json, trying again. Actual response: {}\", response.text)\n time.sleep(1)\n response = requests.post(URL, json={'method': method, 'params': params, 'id': id_})\n return response.json()\n\n\ndef call_method_with_session_key(method: str, *params, id_: int = 1, max_retries: int = 3):\n global SESSION_KEY\n for retry_counter in range(1, max_retries + 1):\n try:\n response = call_method(method, [SESSION_KEY] + list(params), id_)['result']\n except Exception:\n logger.exception(\"Error getting method result ({} / {})\", retry_counter, max_retries, diagnose=True)\n continue\n if 'status' in response and response['status'].lower().startswith('invalid') and response['status'].lower().endswith('key'):\n logger.error(\"Invalid session key, getting new session key ({} / {})\", retry_counter, max_retries)\n SESSION_KEY = get_session_key(True)\n continue\n return response\n logger.error(\"Failed getting valid response within specified number of retries\")\n return None\n\n\ndef get_session_key(force: bool = False) -> str:\n if force:\n key = call_method('get_session_key', [USERNAME, PASSWORD]).get('result')\n # TODO: Handle wrong username password combination\n with open('SESSION_KEY', 'w') as file:\n file.write(key)\n return key\n try:\n with open('SESSION_KEY') as file:\n key = file.read()\n except FileNotFoundError:\n key = get_session_key(True)\n return key\n\n\ndef list_surveys(username: str = None) -> list:\n if username:\n return call_method_with_session_key('list_surveys', username)\n else:\n return call_method_with_session_key('list_surveys')\n\n\ndef get_survey_properties(survey_id: int):\n return call_method_with_session_key('get_survey_properties', survey_id)\n\n\n@logger.catch(reraise=True)\ndef export_responses(survey_id: int, document_type: str):\n result = call_method_with_session_key('export_responses', survey_id, document_type, None, 'complete')\n if isinstance(result, dict) and 'status' in result.keys():\n logger.error(\"response: {}, returning empty data for now\", result)\n return {'responses': []}\n return json.loads(base64.b64decode(result).decode())\n\n\ndef list_users():\n return call_method_with_session_key('list_users')\n\n\ndef release_session_key():\n return call_method_with_session_key('release_session_key')\n\n\ndef list_groups(survey_id: int) -> 
List[Dict[str, str]]:\n return call_method_with_session_key('list_groups', survey_id)\n\n\ndef list_questions(survey_id: int, group_id: Optional[int] = None) -> List[Dict[str, str]]:\n return call_method_with_session_key('list_questions', survey_id, group_id)\n\n\ndef get_question_properties(question_id: int, question_settings: Optional[List[str]] = None) -> Dict[str, str]:\n return call_method_with_session_key('get_question_properties', question_id, question_settings)\n\n\ndef remove_html(s: str) -> str:\n result = re.sub(r'<[^>]*>', '', s)\n for old, new in [('\\r\\n', ' '), ('&', '&'), ('<', '<'), ('>', '>')]:\n result = result.replace(old, new)\n return result\n\n\ndef get_questions_with_answers(survey_id):\n result = []\n groups = sorted(list_groups(survey_id), key=lambda g: int(g.get('group_order')))\n for group in groups:\n group_id = int(group['gid'])\n if group_id == 52:\n continue\n current_group = {'group_name': group['group_name'], 'questions': []}\n questions = sorted(list_questions(survey_id, group_id), key=lambda q: int(q.get('question_order')))\n question_properties = {qid: get_question_properties(qid) for qid in (int(q['qid']) for q in questions)}\n main_qids = sorted(filter(lambda qid: not any(str(qid) in q.get('subquestions', []) for q in question_properties.values() if q.get('subquestions') != 'No available answers')\n and question_properties[qid].get('type') != 'X', question_properties.keys()), key=lambda qid: int(question_properties[qid]['question_order']))\n for qid in main_qids:\n question = question_properties[qid]\n current_question = {'code': question['title'], 'text': remove_html(question['question'])}\n answeroptions = question.get('answeroptions')\n if isinstance(answeroptions, dict):\n answer_codes = sorted(answeroptions.keys(), key=lambda a: int(answeroptions[a]['order']))\n current_question['answeroptions'] = []\n for a in answer_codes:\n current_question['answeroptions'].append({'code': a, 'text': remove_html(answeroptions[a]['answer'])})\n subquestions = question.get('subquestions')\n if isinstance(subquestions, dict):\n sub_qids = sorted(subquestions.keys(), key=lambda sq: int(question_properties[int(sq)].get('question_order')))\n current_question['subquestions'] = []\n for sqid in sub_qids:\n current_question['subquestions'].append({'code': subquestions[sqid]['title'], 'text': remove_html(subquestions[sqid]['question'])})\n current_group['questions'].append(current_question)\n result.append(current_group)\n return result\n\n\n@logger.catch(reraise=True)\ndef convert_limesurvey(data: dict, survey_id: int):\n try:\n with open('question_and_answers.json') as file:\n question_and_answers = json.load(file)\n except:\n with open('question_and_answers.json', 'w') as file:\n question_and_answers = get_questions_with_answers(survey_id)\n json.dump(question_and_answers, file)\n questions = []\n for qa in question_and_answers:\n questions.extend(qa['questions'])\n answeroptions = {}\n for question in questions:\n if 'subquestions' in question.keys():\n for sq in question['subquestions']:\n answeroptions[f'{question[\"code\"]}[{sq[\"code\"]}]'] = question.get('answeroptions', [{'text': 'not quoted', 'code': ''}, {'text': 'quoted', 'code': 'Y'}])\n else:\n answeroptions[question['code']] = question.get('answeroptions', [{'text': 'not quoted', 'code': ''}, {'text': 'quoted', 'code': 'Y'}])\n result = []\n with open('javascript_structure.json') as input_file:\n structure = json.load(input_file)\n for answer in data['responses']:\n current_answer = next(a for a in 
answer.values())\n # skip sample answer\n if current_answer['id'] == '1':\n continue\n current_result = {'categories': [], 'questions': []}\n for code in structure['categories']:\n answercode = current_answer[code]\n index, opt_code = min(enumerate(answeroptions[code]), key=lambda t: levenshtein_distance(t[1]['code'], answercode))\n if levenshtein_distance(opt_code['code'], answercode) < 2:\n current_result['categories'].append(index)\n else:\n current_result['categories'].append(-1)\n for question in structure['questions']:\n current_question = []\n for code in question:\n answercode = current_answer[code]\n index, opt_code = min(enumerate(answeroptions[code]), key=lambda t: levenshtein_distance(t[1]['code'], answercode))\n if levenshtein_distance(opt_code['code'], answercode) < 2:\n current_question.append(index)\n else:\n current_question.append(-1)\n current_result['questions'].append(current_question)\n\n result.append(current_result)\n return result\n\n\ndef print_questions():\n with open('question_and_answers.json') as input_file:\n question_and_answers = json.load(input_file)\n questions = []\n for qa in question_and_answers:\n questions.extend(qa['questions'])\n for question in questions:\n print(f'\"{question[\"text\"]}\"')\n if 'subquestions' in question.keys():\n print(', '.join(f'\"{sq[\"text\"]}\"' for sq in question['subquestions']))\n print(', '.join(f'\"{question[\"code\"]}[{sq[\"code\"]}]\"' for sq in question['subquestions']))\n else:\n print(f'\"{question[\"code\"]}\"')\n if 'answeroptions' in question.keys():\n print(', '.join(f'\"{ao[\"text\"]}\"' for ao in question['answeroptions']))\n print(', '.join(f'\"{ao[\"code\"]}\"' for ao in question['answeroptions']))\n print()\n\n\ndef setup_args():\n global SESSION_KEY, URL, USERNAME, PASSWORD\n p = configargparse.ArgumentParser(default_config_files=['config.ini'])\n p.add_argument('--api_url', required=True, env_var='LIMESURVEY_URL')\n p.add_argument('--username', required=True, env_var='LIMESURVEY_USERNAME')\n p.add_argument('--password', required=True, env_var='LIMESURVEY_PASSWORD')\n\n options = p.parse_args()\n\n URL = options.api_url\n USERNAME = options.username\n PASSWORD = options.password\n # logger.debug(\"Using URL {}\", URL)\n SESSION_KEY = get_session_key()\n\n\ndef main():\n setup_args()\n survey_id = 197925\n with open('question_and_answers.json', 'w') as file:\n json.dump(get_questions_with_answers(survey_id), file)\n\n\ndef new_main():\n setup_args()\n survey_id = 197925\n data = export_responses(survey_id, 'json')\n answers_string = str(convert_limesurvey(data, survey_id)).replace(\"'categories'\", \"categories\").replace(\"'questions'\", \"questions\")\n with open('limesurvey_data.js', 'w') as file:\n file.write(f\"export const limesurvey_answers = {answers_string};\")\n\n\nif __name__ == '__main__':\n new_main()\n","sub_path":"python/get_limesurvey_results.py","file_name":"get_limesurvey_results.py","file_ext":"py","file_size_in_byte":11002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"619107372","text":"#!/usr/bin/python3\r\nfrom socket import *\r\nfrom threading import Thread\r\nfrom chat import User\r\nfrom datetime import datetime\r\n\r\n\r\nclass ChatServer(Thread):\r\n def __init__(self, port, host=gethostname()):\r\n Thread.__init__(self)\r\n self.port, self.host = port, host\r\n self.s = socket(AF_INET, SOCK_STREAM)\r\n self.users = []\r\n\r\n self.s.bind((self.host, self.port))\r\n self.s.listen()\r\n\r\n 
self.users.append(User(self.host, self.port, 'server', 'no'))\r\n        print('Server is online on port: ', self.port)\r\n\r\n        # test\r\n        self.connections = []\r\n\r\n    def exit(self):\r\n        self.s.close()\r\n\r\n    def get_users(self):\r\n        list_users = 'Users: ['\r\n        for i in self.users:\r\n            list_users += i.__str__() + '\\n'\r\n        list_users += ']'\r\n        return list_users\r\n\r\n    def run_thread(self, conn, address):\r\n        user = self.add_user(conn, address)\r\n        print('User :', user, datetime.strftime(datetime.now(), '%H:%M:%S'))\r\n        while True:\r\n            data = conn.recv(4096).decode('utf-8')\r\n            print(data)\r\n            if data == '-get_users':\r\n                conn.sendall(self.get_users().encode())\r\n            msg = user.name + datetime.strftime(datetime.now(), '%H:%M: ') + data\r\n            self.send_to_other_clients(msg, conn)\r\n            conn.sendall(msg.encode())\r\n        conn.close()\r\n\r\n    def send_to_other_clients(self, msg, conn):\r\n        for c in self.connections:\r\n            if c != conn:\r\n                c.sendall(msg.encode())\r\n                print('Sent for', str(c))\r\n\r\n    def add_user(self, conn, address):\r\n        conn.send(b'user:')\r\n        name = conn.recv(1024).decode('utf-8')\r\n        conn.send(b'password:')\r\n        password = conn.recv(1024).decode('utf-8')\r\n\r\n        user = User(address[0], address[1], name, password)\r\n        self.users.append(user)\r\n\r\n        print(self.get_users())\r\n\r\n        return user\r\n\r\n    def run(self):\r\n        print('Waiting for connections...')\r\n        while True:\r\n            conn, address = self.s.accept()\r\n            print('Connected to login with: ', address[0], ':', address[1])\r\n            self.connections.append(conn)\r\n            Thread(target=self.run_thread, args=(conn, address)).start()\r\n\r\n\r\ndef main():\r\n    server = ChatServer(8080)\r\n    server.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"chat/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"359770833","text":"import os\nimport pathlib\nfrom datetime import datetime, timezone\nfrom os import getcwd\n\nimport requests\nimport yaml\n\nfrom prowler.lib.logger import logger\n\ntimestamp = datetime.today()\ntimestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)\nprowler_version = \"3.4.0\"\nhtml_logo_url = \"https://github.com/prowler-cloud/prowler/\"\nhtml_logo_img = \"https://user-images.githubusercontent.com/3985464/113734260-7ba06900-96fb-11eb-82bc-d4f68a1e2710.png\"\n\norange_color = \"\\033[38;5;208m\"\nbanner_color = \"\\033[1;92m\"\n\n# Compliance\nactual_directory = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))\ncompliance_aws_dir = f\"{actual_directory}/../compliance/aws\"\navailable_compliance_frameworks = []\nwith os.scandir(compliance_aws_dir) as files:\n    files = [\n        file.name\n        for file in files\n        if file.is_file()\n        and file.name.endswith(\".json\")\n        and available_compliance_frameworks.append(file.name.removesuffix(\".json\"))\n    ]\n\n# AWS services-regions matrix json\naws_services_json_file = \"aws_regions_by_service.json\"\n\n# gcp_zones_json_file = \"gcp_zones.json\"\n\ndefault_output_directory = getcwd() + \"/output\"\n\noutput_file_timestamp = timestamp.strftime(\"%Y%m%d%H%M%S\")\ntimestamp_iso = timestamp.isoformat(sep=\" \", timespec=\"seconds\")\ncsv_file_suffix = \".csv\"\njson_file_suffix = \".json\"\njson_asff_file_suffix = \".asff.json\"\nhtml_file_suffix = \".html\"\nconfig_yaml = f\"{pathlib.Path(os.path.dirname(os.path.realpath(__file__)))}/config.yaml\"\n\n\ndef check_current_version():\n    try:\n        prowler_version_string = f\"Prowler {prowler_version}\"\n        release_response = requests.get(\n            \"https://api.github.com/repos/prowler-cloud/prowler/tags\"\n        )\n        latest_version = release_response.json()[0][\"name\"]\n        if latest_version != prowler_version:\n            return f\"{prowler_version_string} (latest is {latest_version}, upgrade for the latest features)\"\n        else:\n            return f\"{prowler_version_string} (it is the latest version, yay!)\"\n    except Exception as error:\n        logger.error(f\"{error.__class__.__name__}: {error}\")\n        return f\"{prowler_version_string}\"\n\n\ndef change_config_var(variable, value):\n    try:\n        with open(config_yaml) as f:\n            doc = yaml.safe_load(f)\n\n        doc[variable] = value\n\n        with open(config_yaml, \"w\") as f:\n            yaml.dump(doc, f)\n    except Exception as error:\n        logger.error(f\"{error.__class__.__name__}: {error}\")\n\n\ndef get_config_var(variable):\n    try:\n        with open(config_yaml) as f:\n            doc = yaml.safe_load(f)\n\n        return doc[variable]\n    except Exception as error:\n        logger.error(f\"{error.__class__.__name__}: {error}\")\n        return \"\"\n","sub_path":"prowler/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"477365790","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nComparison of FP16 and FP32 for the Marmousi-II model\n\"\"\"\nfrom urllib.request import urlretrieve\nimport tarfile\nimport numpy as np\nimport os\nimport sys\n\nimport segyio\nimport shutil\nfrom compare_accuracy import compare_accuracy\nfrom plot_comparison import plot_comparison\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nmpl.rcParams.update({'font.size': 7})\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\n\"\"\"\n _______________________Constants for modeling_____________________________\n\"\"\"\nf0 = 40 # Center frequency of the source\nsrcx = 7000 # x position of the source\ntmax = 5\n\n\"\"\"\n_______________________Download the velocity model_____________________________\n\"\"\"\nurl = \"https://s3.amazonaws.com/open.source.geoscience/open_data/elastic-marmousi/elastic-marmousi-model.tar.gz\"\nif not os.path.isfile(\"elastic-marmousi-model.tar.gz\"):\n    urlretrieve(url, filename=\"elastic-marmousi-model.tar.gz\")\n    tar = tarfile.open(\"elastic-marmousi-model.tar.gz\", \"r:gz\")\n    tar.extractall()\n    tar.close()\n\nmodels_segy = {\n    'vp': './elastic-marmousi-model/model/MODEL_P-WAVE_VELOCITY_1.25m.segy',\n    'vs': './elastic-marmousi-model/model/MODEL_S-WAVE_VELOCITY_1.25m.segy',\n    'rho': './elastic-marmousi-model/model/MODEL_DENSITY_1.25m.segy'}\n\nmodels_tar = {\n    'vp': './elastic-marmousi-model/model/MODEL_P-WAVE_VELOCITY_1.25m.segy.tar.gz',\n    'vs': './elastic-marmousi-model/model/MODEL_S-WAVE_VELOCITY_1.25m.segy.tar.gz',\n    'rho': './elastic-marmousi-model/model/MODEL_DENSITY_1.25m.segy.tar.gz'}\n\nmodels = {\n    'vp': None,\n    'vs': None,\n    'rho': None}\n\n\nfor par in models:\n    if not os.path.isfile(models_segy[par]):\n        tar = tarfile.open(models_tar[par], \"r:gz\")\n        tar.extractall(path=\"./elastic-marmousi-model/model\")\n        tar.close()\n    with segyio.open(models_segy[par], \"r\", ignore_geometry=True) as segy:\n        models[par] = [segy.trace[trid] for trid in range(segy.tracecount)]\n        models[par] = np.transpose(np.array(models[par])[:, :])\n\n(NZ, NX) = models['rho'].shape\nNZ = int(NZ / 2) * 2\nNX = int(NX / 2) * 2\nfor par in models:\n    models[par] = models[par][:NZ, :NX]\nmodels[\"rho\"] = models[\"rho\"] * 1000\ndh = 1.25\n\n\"\"\"\n_____________________________Plot models______________________________________\n\"\"\"\nfig, axs = plt.subplots(3, 1, figsize=(9 / 2.54, 13 / 2.54))\nims = {}\nunits = {'vp': 'm/s', 'vs': 'm/s', 'rho': 'kg/m$^3$'}\ntitles = {'vp': 'a)', 'vs': 'b)', 'rho': 'c)'}\nparams = ['vp', 'vs', 'rho']\nfor ii, par in enumerate(params):\n    ims[par] = axs[ii].imshow(models[par] / 1000, interpolation='bilinear',\n                              extent=[0, (NX + 1) * dh / 1000 / 2,\n                                      (NZ + 1) * dh / 1000, 0])\n    axs[ii].set_xlabel('x (km)')\n    axs[ii].set_ylabel('z (km)')\n    axs[ii].set_title(titles[par])\n    axs[ii].set_xticks(np.arange(0, 9, 0.5))\n    axs[ii].set_yticks(np.arange(0, 4, 0.5))\n    axs[ii].set_xticklabels([str(el) for el in np.arange(0, 18)])\n    divider = make_axes_locatable(axs[ii])\n    cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n    clr = plt.colorbar(ims[par], cax=cax)\n    cax.xaxis.tick_top()\n    cax.set_xlabel(units[par], labelpad=8)\n    cax.xaxis.set_label_position('top')\nplt.tight_layout()\nplt.savefig('marmousiII.eps', dpi=300)\n\n\"\"\"\n_____________________Perform the comparison ___________________________\n\"\"\"\ncompare_accuracy(\"marm2\", models, f0, srcx, tmax, dh)\n\n\"\"\"\n _____________________Plot the figure ___________________________\n\"\"\"\nplot_comparison(\"marm2\", 10, 20, 1, 16, 0.2)\n\n","sub_path":"Fig3_4_6_error/marmousi2.py","file_name":"marmousi2.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"415064733","text":"from bottle import *\nimport base64\nimport json\nimport os\nimport sys\nimport mysql.connector\n\npagePort = 18155\n\nmydb = mysql.connector.connect(\n    host=\"localhost\",\n    user=\"student\",\n    passwd=\"fredfredburger\",\n    database=\"chickadees\"\n\n)\nprint(\"**mydb is: \",mydb)\nprint(\"**database: \", mydb.database)\n\n@post('/api/visits')\ndef getJson():\n    data = request.json\n    payload = base64.b64decode(data['payload_raw'])\n    #payloadTimestamp = data.metadata.gateways['timestamp']\n    payloadTimestamp = data['metadata']['time']\n    #payloadTimestamp = 'seven'\n    dataParseTesting = data['metadata']\n    '''\n    # ** Testing Purposes **\n    print(\"*** METADATA ***\")\n    print(dataParseTesting)\n    print(\"*** END-METADATA ***\")\n    print(\"------------------------------------------\")\n    print(\"*** TIME ***\")\n    print(payloadTimestamp)\n    print(\"*** END-TIME ***\")\n    print(\"------------------------------------------\")\n    print(\"*** DATA ***\")\n    print(data) #print entire data packet. 
Including meta data\n print(\"*** END-DATA ***\")\n print(\"------------------------------------------\")\n '''\n \n print(\"**payload: \",payload)\n print(\"**Count: \",data['counter'])\n print(\"timestamp: \", payloadTimestamp)\n\n timestamp = (payload[10:20]).decode(\"utf-8\")\n rfid = (payload[0:10]).decode(\"utf-8\")\n print(\"*TIMESTAMP: \",timestamp)\n print(\"*RFID: \", rfid)\n\n insert_tuple = (rfid,timestamp)\n\n mycursor = mydb.cursor()\n\n #tmpInsert = \" INSERT INTO visits (rfid, visitTimestamp) VALUES (\" + rfid + \" , \" + timestamp + \") \"\n tmpSearch = \"Select * from visits where rfid = %s and visitTimestamp = %s\"\n #sqlInsert = \"\"\" INSERT INTO visits (rfid, feederID, visitTimestamp, temperature, mass, bandCombo, isSynced) VALUES ('011016C1B6', 'SHRM', '%s', '24', '24', 'g0/Y#', '0') \"\"\"\n \n searchRes = mycursor.execute(tmpSearch,(rfid,int(timestamp)))\n rowCount = mycursor.fetchone()\n if not rowCount:\n tmpInsert = \"INSERT INTO visits (rfid,feederID,visitTimestamp,temperature, mass, bandCombo, isSynced) VALUES (%s, 'CLIF', %s, 0, 0, '', 0)\"\n insertRes = mycursor.execute(tmpInsert,(rfid,int(timestamp)))\n mydb.commit() #uncomment to actually commit INSERT into DB.\n print(\"RowCount: \",rowCount)\n \n\n return data\n\n\nrun(host='euclid.nmu.edu', port=pagePort, debug=True)\n\n\n\n\"\"\"\n ** Test Tags **\n0700EDFC4A\n011016A32F\n\n\n ** JSON data **\n \n{\n 'app_id': 'production2019jan', \n 'dev_id': 'node1', \n 'hardware_serial': '0099DF663BAB7B4B', \n 'port': 1, \n 'counter': 0, \n 'confirmed': True, \n 'is_retry': True, \n 'payload_raw': 'MDExMDE2QTMyRg==', \n 'metadata':{\n 'time': '2019-03-20T00:45:16.959721306Z', \n 'frequency': 904.5, \n 'modulation': 'LORA', \n 'data_rate': 'SF10BW125', \n 'coding_rate': '4/5', \n 'gateways': [{\n 'gtw_id': 'eui-b827ebfffe11f166', \n 'timestamp': 176353300, # NEED THIS VALUE!!\n 'time': '2019-03-20T00:45:16.926417Z', # or this I guess...\n 'channel': 3, \n 'rssi': -114, \n 'snr': -15.2, \n 'rf_chain': 0, \n 'latitude': 46.54527, \n 'longitude': -87.40362, \n 'location_source': 'registry'\n }]\n },\n 'downlink_url': 'https://integrations.thethingsnetwork.org/ttn-us-west/api/v2/down/production2019jan/euclid?key=ttn-account-v2.qnXQCj7ir6DDJ7-YwbF5qbnRQTWB4CG1RcqvQOSsmKM'\n}\n\n\n\n{'app_id': 'production2019jan', 'dev_id': 'node1', 'hardware_serial': '0099DF663BAB7B4B', 'port': 1, 'counter': 0, 'confirmed': True, 'is_retry': True, 'payload_raw': 'MDExMDE2QTMyRg==', 'metadata': {'time': '2019-03-20T00:45:16.959721306Z', 'frequency': 904.5, 'modulation': 'LORA', 'data_rate': 'SF10BW125', 'coding_rate': '4/5', 'gateways': [{'gtw_id': 'eui-b827ebfffe11f166', 'timestamp': 176353300, 'time': '2019-03-20T00:45:16.926417Z', 'channel': 3, 'rssi': -114, 'snr': -15.2, 'rf_chain': 0, 'latitude': 46.54527, 'longitude': -87.40362, 'location_source': 'registry'}]}, 'downlink_url': 'https://integrations.thethingsnetwork.org/ttn-us-west/api/v2/down/production2019jan/euclid?key=ttn-account-v2.qnXQCj7ir6DDJ7-YwbF5qbnRQTWB4CG1RcqvQOSsmKM'}\n\n\n\"\"\"\n","sub_path":"oldAPI.py","file_name":"oldAPI.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"11471918","text":"import operator\nimport pdb\nfrom copy import deepcopy\nfrom functools import reduce\nfrom typing import List, Dict, Any\n\nfrom mockfirestore import NotFound\nfrom mockfirestore._helpers import (\n Timestamp, Document, Store, get_by_path, set_by_path, delete_by_path, 
get_document_iterator,\n parse_field_path\n)\n\n\nclass DocumentSnapshot:\n def __init__(self, reference: 'DocumentReference', data: Document) -> None:\n self.reference = reference\n self._doc = deepcopy(data)\n\n @property\n def id(self):\n return self.reference.id\n\n @property\n def exists(self) -> bool:\n return self._doc != {}\n\n def to_dict(self) -> Document:\n return self._doc\n\n @property\n def create_time(self) -> Timestamp:\n timestamp = Timestamp.from_now()\n return timestamp\n\n @property\n def update_time(self) -> Timestamp:\n return self.create_time\n\n @property\n def read_time(self) -> Timestamp:\n timestamp = Timestamp.from_now()\n return timestamp\n\n def get(self, field_path: str) -> Any:\n if not self.exists:\n return None\n else:\n return reduce(operator.getitem, field_path.split('.'), self._doc)\n\n def _get_by_field_path(self, field_path: str) -> Any:\n try:\n return self.get(field_path)\n except KeyError:\n return None\n\n\nclass DocumentReference:\n def __init__(self, data: Store, path: List[str],\n parent: 'CollectionReference') -> None:\n self._data = data\n self._path = path\n self.parent = parent\n\n @property\n def id(self):\n return self._path[-1]\n\n def get(self) -> DocumentSnapshot:\n return DocumentSnapshot(self, get_by_path(self._data, self._path))\n\n def delete(self):\n delete_by_path(self._data, self._path)\n\n def set(self, data: Dict, merge=False):\n if merge:\n try:\n self.update(deepcopy(data))\n except NotFound:\n self.set(data)\n else:\n set_by_path(self._data, self._path, deepcopy(data))\n\n def update(self, data: Dict[str, Any]):\n document = get_by_path(self._data, self._path)\n if document == {}:\n raise NotFound('No document to update: {}'.format(self._path))\n\n _apply_transformations(document, deepcopy(data))\n\n def collection(self, name) -> 'CollectionReference':\n from mockfirestore.collection import CollectionReference\n document = get_by_path(self._data, self._path)\n new_path = self._path + [name]\n if name not in document:\n set_by_path(self._data, new_path, {})\n return CollectionReference(self._data, new_path, parent=self)\n\n\ndef _apply_transformations(document: Dict[str, Any], data: Dict[str, Any]):\n \"\"\"Handles special fields like INCREMENT.\"\"\"\n increments = {}\n arr_unions = {}\n arr_deletes = {}\n deletes = []\n\n for key, value in list(get_document_iterator(data)):\n if not value.__class__.__module__.startswith('google.cloud.firestore'):\n # Unfortunately, we can't use `isinstance` here because that would require\n # us to declare google-cloud-firestore as a dependency for this library.\n # However, it's somewhat strange that the mocked version of the library\n # requires the library itself, so we'll just leverage this heuristic as a\n # means of identifying it.\n #\n # Furthermore, we don't hardcode the full module name, since the original\n # library seems to use a thin shim to perform versioning. e.g. 
at the time\n # of writing, the full module name is `google.cloud.firestore_v1.transforms`,\n # and it can evolve to `firestore_v2` in the future.\n continue\n\n transformer = value.__class__.__name__\n if transformer == 'Increment':\n increments[key] = value.value\n elif transformer == 'ArrayUnion':\n arr_unions[key] = value.values\n elif transformer == 'ArrayRemove':\n arr_deletes[key] = value.values\n del data[key]\n elif transformer == 'Sentinel':\n if value.description == \"Value used to delete a field in a document.\":\n deletes.append(key)\n del data[key]\n\n # All other transformations can be applied as needed.\n # See #29 for tracking.\n \n def _update_data(new_values: dict, default: Any):\n for key, value in new_values.items():\n path = key.split('.')\n\n try:\n item = get_by_path(document, path)\n except (TypeError, KeyError):\n item = default\n\n set_by_path(data, path, item + value, create_nested=True)\n\n _update_data(increments, 0)\n _update_data(arr_unions, [])\n\n _apply_updates(document, data)\n _apply_deletes(document, deletes)\n _apply_arr_deletes(document, arr_deletes)\n\n\ndef _apply_deletes(document: Dict[str, Any], data: List[str]):\n for key in data:\n path = parse_field_path(str(key))\n delete_by_path(document, path)\n\n\ndef _apply_arr_deletes(document: Dict[str, Any], data: Dict[str, Any]):\n for key, values_to_delete in data.items():\n path = parse_field_path(str(key))\n try:\n value = get_by_path(document, path)\n except KeyError:\n continue\n for value_to_delete in values_to_delete:\n try:\n value.remove(value_to_delete)\n except ValueError:\n pass\n set_by_path(document, path, value)\n\n\ndef _apply_updates(document: Dict[str, Any], data: Dict[str, Any]):\n for key in list(data.keys()):\n path = parse_field_path(str(key))\n set_by_path(document, path, data[key], create_nested=True)\n","sub_path":"mockfirestore/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"267643857","text":"from messageSender import *\n\nsender = messageSender()\n\nsender.loadDefinedSurfaces(\"DEFAULT\")\ncur = sender.newCursor(0, 0.5, 0.5, \"prop\")\nlist = {}\nfor x in range(1,5):\n win = sender.newCanvas(x, 0, 1, 1, 1,\"prop\", \"mywin\")\n list[x] = win\n sender.newRectangle(win, 0, 1, 1, 1, \"prop\", (1,0,0,1), 10, (0,0,1,1))\n sender.newCircle(win, 0.5, 0.5, 0.25, \"prop\", (1,1,1,1), 10, (1,1,1,1), 10)\nfor x in range(1, 5):\n sender.newTexRectangle(list[x], 0.5, 1, 0.5, 0.5, \"prop\", \"checks.jpg\")\nlTor = True\ntTob = True\nsurfaceWidth = sender.getSurfacePixelWidth(0)\nsurfaceHeight = sender.getSurfacePixelWidth(0)\nwhile (True):\n if lTor == True:\n if tTob == True:\n sender.shiftCursor(cur, 5, -3)\n else:\n sender.shiftCursor(cur, 5, 3)\n else:\n if tTob == True:\n sender.shiftCursor(cur, -5, -3)\n else:\n sender.shiftCursor(cur, -5, 3)\n loc = sender.getCursorPosition(cur)\n if float(loc[0]) < 0:\n lTor = True\n elif float(loc[0]) > surfaceWidth:\n lTor = False\n if float(loc[1]) < 0:\n tTob = False\n elif float(loc[1]) > surfaceHeight:\n tTob = True","sub_path":"src/UnityTesting.py","file_name":"UnityTesting.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"131929484","text":"# -*- coding:utf-8 -*-\nimport argparse\nimport os\nimport cv2 as cv\nimport numpy as np\nfrom PIL import Image\n\nimport chainer\nfrom chainer import cuda\nimport 
chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer import reporter\n\n\ndef get_movie_filename(path, extention=\"mpg\"):\n \"\"\"\n get file_name which has specified extention (ex. mp4 )\n\n :param path: relative path\n :param extention:\n :return: file_names(full path)\n \"\"\"\n\n file_names = list(filter(lambda file_name: extention in file_name, os.listdir(path)))\n file_names = map(lambda file_name: os.getcwd() + \"/\" + path[2:] + \"/\" + file_name, file_names)\n\n return file_names\n\n\ndef stack(source, dist, module=np):\n \"\"\"\n\n :param source: source stacked nparray(or Variable)\n :param dist:\n :param module:\n :return: stacked nparray\n \"\"\"\n\n if source is None:\n source = dist\n else:\n source = module.vstack((source, dist))\n\n return source\n\n\ndef get_movies(full_paths, frame_count=300, size=(240, 320), dtype=np.float32):\n \"\"\"\n\n :param full_paths:\n :param frame_count:\n :return: nparray , shape = (movie_file , frame_count , RGB , width , height)\n \"\"\"\n\n movie_batches = None\n\n for file_name in full_paths:\n\n movie = cv.VideoCapture(file_name)\n movie_stack = None\n\n for frame in range(frame_count):\n try:\n ret, image = movie.read()\n image = cv.resize(image, size)\n image = np.asarray(image, dtype=dtype)\n image = image.transpose(2, 0, 1)\n movie_frame = np.expand_dims(image, axis=0)\n\n movie_stack = stack(movie_stack, movie_frame)\n\n except:\n print(\"something wrong (in get_movie)\")\n exit(0)\n\n movie_stack = np.expand_dims(movie_stack, axis=0)\n movie_batches = stack(movie_batches, movie_stack)\n\n # ( length , movie , RGB , height , width )\n movie_batches = movie_batches.transpose(1, 0, 2, 3, 4).astype(np.float32)\n\n return movie_batches\n\n\ndef make_movie(nparray_movie, file_name, fps):\n \"\"\"\n :param nparray: shape = (length ,3,width , height)\n :return:\n \"\"\"\n\n # fps = movie.get(cv.CAP_PROP_FPS)\n width = nparray_movie.shape[2]\n height = nparray_movie.shape[3]\n fourcc = cv.VideoWriter_fourcc('m', 'p', '4', 'v')\n file = open(file_name, \"wb\")\n out = cv.VideoWriter(file_name, int(fourcc), 24, (int(width), int(height)))\n for _image in nparray_movie:\n pil_image = Image.fromarray(_image.transpose(1, 2, 0).astype(np.uint8)[:, :, ::-1].copy())\n cv_image = cv.cvtColor(np.array(pil_image), cv.COLOR_RGB2BGR)\n\n out.write(cv.resize(cv_image, (width, height)))\n\n\ndef make_teacher_signal(nparray_movies):\n \"\"\"\n :param nparray_movie: ( length , movie , RGB , height , width )\n :return: TupleDataset ( t_frame , t+1_frame)\n \"\"\"\n\n input_movie = nparray_movies[:-1]\n teacher_movie = nparray_movies[1:]\n dataset = chainer.datasets.TupleDataset(input_movie, teacher_movie)\n\n return dataset\n\n\n# (batch , len , RGB , height , width )\n# (len , batch , RGB , height , width ) -> こっちの方が妥当っぽい\n\n# numpy を綺麗に画像化する\n# image_RGB = Image.fromarray(_image.transpose(1,2,0).astype(np.uint8)[: , : , ::-1].copy())\n\n\n\nclass MovieGen(chainer.Chain):\n def __init__(self, channels, shape):\n self.shape = shape[0] * shape[1] * 3 # RGB\n self.image_width = shape[0]\n self.image_height = shape[1]\n self.layers = len(channels)\n super(MovieGen, self).__init__()\n\n for nth in range(len(channels)):\n self.add_link('Linear_' + str(nth), L.Linear(out_size=channels[nth], in_size=None))\n\n self.accfun = F.accuracy\n self.lossfun = F.mean_squared_error\n self.compute_accuracy = None\n\n def __forward(self, h):\n\n \"\"\"\n inner function\n :param h: input image (3, 
width , height)\n :return: y\n \"\"\"\n\n for nth in range(self.layers - 1):\n h = F.tanh(getattr(self, 'Linear_' + str(nth))(h))\n self.y = getattr(self, 'Linear_' + str(self.layers - 1))(h)\n\n return self.y\n\n def __call__(self, *args):\n data = args[0]\n teacher = args[1]\n # (movie_len , batchsize[movie_] , 3, 20, 20)\n batchsize = data.shape[1]\n\n self.y = None\n self.loss = 0\n self.accuracy = None\n\n for i, (x, t) in enumerate(zip(data, teacher)):\n\n # expected x.data.shape = ( batch , RGB , height, width )\n\n # for 1 dim ( Linear )\n h = F.reshape(x, (batchsize, self.shape))\n t = F.reshape(t, (batchsize, self.shape))\n # for layers\n for nth in range(self.layers - 1):\n h = F.tanh(getattr(self, 'Linear_' + str(nth))(h))\n self.y = getattr(self, 'Linear_' + str(self.layers - 1))(h)\n\n # self.loss = self.lossfun(self.y, t)\n self.loss += F.mean_squared_error(self.y, t)\n\n reporter.report({'loss': self.loss}, self)\n # if self.compute_accuracy:\n # self.accuracy = self.accfun(self.y, t)\n # reporter.report({'accuracy': self.accuracy}, self)\n return self.loss\n\n def generate_movie(self, first_image, generate_len, movie_name=\"/Users/g329/deep_learning/trend/generated.mp4\"):\n \"\"\"\n if model has lstm , do reset_state !\n\n :param first_image: expeceted shape : 3,width , height\n :param generate_len:\n :param movie_name: this need full path\n :return:\n \"\"\"\n\n generated_image = np.zeros((generate_len, 3, self.image_width, self.image_height))\n previous = first_image\n for frame in range(generate_len - 1):\n # for layers\n h = F.reshape(previous, (1, self.shape))\n for nth in range(self.layers - 1):\n h = F.tanh(getattr(self, 'Linear_' + str(nth))(h))\n y = getattr(self, 'Linear_' + str(self.layers - 1))(h)\n generated_image[frame] = F.reshape(y, (3, self.image_width, self.image_height)).data\n\n print(generated_image.shape)\n make_movie(generated_image, file_name=movie_name, fps=24)\n print(\"generate end\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', '-g', default=-1, type=int, help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--epoch', '-e', default=10, type=int, help='epochs (default 50)')\n parser.add_argument('--channels', '-c', default=\"100 100 100\", type=str, help='epochs (default 50')\n parser.add_argument('--width', '-w', default=120, type=int, help='')\n parser.add_argument('--height', default=160, type=int, help='')\n parser.add_argument('--batchsize', default=10, type=int, help='batchsize of train and test')\n parser.add_argument('--movie_len', default=60, type=int, help='movie len (all)')\n parser.add_argument('--split_at', default=40, type=int, help='split point for train/test')\n\n args = parser.parse_args()\n size = (args.width, args.height)\n out_size = [size[0] * size[1] * 3]\n\n # parse channels\n args.channels = [int(channel) for channel in args.channels.split()] + out_size\n\n xp = cuda.cupy if args.gpu >= 0 else np\n\n size = (args.width, args.height)\n size = (40, 30)\n out_size = [size[0] * size[1] * 3]\n\n # list -> list\n model = MovieGen(channels=args.channels + out_size, shape=size)\n optimizer = chainer.optimizers.RMSprop()\n optimizer.setup(model)\n\n movie_len = 30 + 1 # want to predict(frame)\n args.split_at = 20\n file_names = get_movie_filename(\"./data\")\n # print(\"used movies : \" + \" \".join(file_names))\n print(\"movie loading...\")\n movies = get_movies(file_names, frame_count=movie_len, size=size)\n # movie = make_movie(movies[0], 
\"0_movie.mp4\",fps=24)\n\n\n data = make_teacher_signal(movies)\n\n train, test = chainer.datasets.split_dataset(dataset=data, split_at=args.split_at)\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)\n\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=\"result\")\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss']))\n trainer.extend(extensions.ProgressBar())\n\n print(\"train start\")\n trainer.run()\n print(\"train end\")\n\n print(\"generate start\")\n\n model.generate_movie(first_image=train[0][0][0], generate_len=60,\n movie_name=\"/Users/g329/deep_learning/trend/train_generated.mp4\")\n model.generate_movie(first_image=test[0][0][0], generate_len=60,\n movie_name=\"/Users/g329/deep_learning/trend/test_generated.mp4\")\n","sub_path":"tools/movie_utils.py","file_name":"movie_utils.py","file_ext":"py","file_size_in_byte":9111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"342090362","text":"# __author__: Stanley\n# date: 2018/3/13\n\n\n# 参考连接\n# http://www.runoob.com/python/python-built-in-functions.html\n# https://docs.python.org/3.5/library/functions.html\n\n# abs返回绝对值\n# print(abs(-333))\n\n\n# filter,过滤器的功能(可迭代对象)\ns = [1,2,3,4]\ndef func(c):\n if c != 3:\n return c\n\nprint(\"正常调用返回\",func(s))\nret = filter(func,s)\nprint(ret) # 返回的是一个迭代器\nprint(\"通过filter调用\",list(ret))\n\n# map 修改(可迭代对象)\ns = ['a1','b1','c1']\n\ndef func_map(s):\n return s + 'alvin'\n\nret = map(func_map,s)\nprint(ret)\nprint(list(ret))\n\n\n# reduce\nfrom functools import reduce\n\ndef add_reduce(x,y):\n return x+y\n\nprint(reduce(add_reduce,range(1,5))) # 结果就是一个数值。\n# 实现原理\n# [1,2,3,4,]\n# [3,3,4,]\n# [6,4]\n# 10\n\n# lambda 匿名函数\nnum = lambda a,b:a+b\nprint(num(1,2))\n\n# lambda配合reduce实现阶乘\nprint(reduce(lambda x,y:x*y,range(1,6)))","sub_path":"day15/内置函数.py","file_name":"内置函数.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"69254958","text":"# -*- coding: utf-8 -*-\nfrom postprocess.control_vars import *\nfrom postprocess import limit_state_data as lsd\nfrom postprocess.xcVtk import vtk_graphic_base\nfrom postprocess.xcVtk.FE_model import vtk_FE_graphic\nfrom postprocess.xcVtk.diagrams import control_var_diagram as cvd\n\nmodel_path=\"../\"\n#Project directory structure\nexec(open(model_path+'env_config.py').read())\n\nmodelDataInputFile=model_path+\"model_gen.py\" #data for FE model generation\nexec(open(modelDataInputFile).read())\n\n\n#Load properties to display:\nfName= cfg.projectDirTree.getVerifShearFile()\nexec(open(fName).read())\n\n\n\nlimitStateLabel= lsd.shearResistance.label\n\n\n#Available arguments: 'CF', 'N', 'My', 'Mz', 'Mu', 'Vy', 'Vz', 'theta', 'Vcu', 'Vsu', 'CF'\nargument= 'Vcu'\n\nsetDispRes=beamX #set of linear elements to which display results \n\n#setDisp=overallSet #set of elements (any type) to be displayed\nsetDisp=beamX\n\ndiagram= cvd.ControlVarDiagram(scaleFactor= 1,fUnitConv= 1,sets=[setDispRes],attributeName= limitStateLabel,component= argument)\ndiagram.addDiagram()\n\n\ndisplaySettings= vtk_FE_graphic.DisplaySettingsFE()\n #predefined view names: 
'XYZPos','XNeg','XPos','YNeg','YPos',\n # 'ZNeg','ZPos' (defaults to 'XYZPos')\ndisplaySettings.cameraParameters= vtk_graphic_base.CameraParameters('YPos') #Point of view.\ndisplaySettings.setupGrid(setDisp)\ndisplaySettings.defineMeshScene(None,defFScale=0.0)\ndisplaySettings.appendDiagram(diagram) #Append diagram to the scene.\n\ncaption= cfg.capTexts[limitStateLabel] + ', ' + cfg.capTexts[argument] + '. '+ setDispRes.description.capitalize() + ', ' + 'Dir. 1'\ndisplaySettings.displayScene(caption)\n\n\n\n","sub_path":"ave_SR/voided/display/display_shearULS_beamEl.py","file_name":"display_shearULS_beamEl.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"194615539","text":"from Projector import Projector\nimport numpy as np\n\nclass RBF(Projector):\n \n def __init__(self,num_centers=np.array([5]),\n stdev=0.1,limits=np.array([[0,1],[0,1]]),\n randomize=False,normalize = True,bias=True):\n \n super(RBF,self).__init__(self,normalize,limits,bias) \n if not (type(num_centers) is np.ndarray):\n num_centers = np.array(num_centers)\n if num_centers.size == 1:\n num_centers = np.ones(limits.shape[0])*num_centers[0]\n self.stdev = stdev\n dim = []\n if randomize:\n #randomly spaced centers\n for d in range(limits.shape[0]):\n dim.append(np.sort(np.random.rand(num_centers[d])))\n else: \n #equally spaced centers\n for d in range(limits.shape[0]):\n dim.append(np.linspace(0,1,num_centers[d]))\n if len(dim) == 1:\n self.centers=dim[0].flatten()\n else:\n grid = np.meshgrid(*dim)\n self.centers=grid[0].flatten()\n for d in range(1,len(grid)):\n self.centers = np.c_[self.centers,grid[d].flatten()]\n\n \n def num_features(self):\n return (self.centers.shape[0]+int(self.bias))\n \n def phi(self,state):\n if len(self.centers.shape)==1:\n dists = self.centers-self.normalize_state(state)\n else:\n dists = np.linalg.norm(self.centers-\n self.normalize_state(state),axis=1)\n res = np.exp(-0.5 * dists**2 / self.stdev**2)\n \n return res","sub_path":"src/features/projectors/RBF.py","file_name":"RBF.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"83309801","text":"import openpyxl\nimport sys\nimport os\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilename\nimport csv\n\n\nroot = Tk( )\nnamelabels = []\nnames = []\n\n#This is where we lauch the file manager bar.\ndef OpenFile():\n name = askopenfilename(initialdir=\"C:/Users/Batman/Documents/Programming/tkinter/\",\n filetypes =((\"Excel File\", \"*.xlsx\"),(\"All Files\",\"*.*\")),\n title = \"Choose a file.\"\n )\n print (name)\n \n namelabel = ttk.Label(root, text = name + \" has been added.\",foreground=\"Green\",font=(\"Helvetica\",18))\n names.append(name)\n namelabels.append(namelabel)\n nl_len = len(namelabels)\n namelabels[nl_len-1].pack()\n \n \ndef Generate():\n wb = openpyxl.Workbook()\n sheet = wb.active\n tracker = 0\n tracked = False\n source_list = []\n for name in names:\n wb_source = openpyxl.load_workbook(name)\n ws_source = wb_source.active\n source_list.append(ws_source)\n \n for source in source_list:\n if(tracked):\n for row in range(2,source.max_row+1):\n tracker+=1\n for cell in range(1,source.max_column+1):\n sheet.cell(row=tracker,column=cell).value = source.cell(row=row,column=cell).value\n else:\n for row in range(1,source.max_row+1):\n tracker+=1\n for cell in range(1,source.max_column+1):\n 
sheet.cell(row=tracker,column=cell).value = source.cell(row=row,column=cell).value\n tracked=True\n \n # wb.save(\"C:\\\\Users\\\\cpearson\\\\Documents\\\\python_excel\\\\merged.xlsx\")\n wb.save()\n label = ttk.Label(root, text =\"File Generated!\",foreground=\"Red\",font=(\"Helvetica\", 18))\n label.pack()\n'''\ndef removeItem(namelabels):\n namelabels[len(namelabels)-1].pack_forget()\n if(len(namelabels)>1):\n namelabels = namelabels[:-1]\n\n\n'''\nTitle = root.title( \"Excel File Merger\")\nlabel = ttk.Label(root, text =\"Open excel files by pressing file at the top left\",foreground=\"blue\",font=(\"Helvetica\", 18))\nlabel.pack()\n \n#Menu Bar\n\nmenu = Menu(root)\nroot.config(menu=menu)\n\nfile = Menu(menu)\n\n# lambda: namelabels[len(namelabels)-1].pack_forget()\n\nfile.add_command(label = 'Open', command = OpenFile)\nfile.add_command(label = 'Run', command = Generate)\nfile.add_command(label = 'Reset', command = lambda:os.execl(sys.executable,sys.executable,* sys.argv))\nfile.add_command(label = 'Exit', command = lambda:exit())\n\nmenu.add_cascade(label = 'File', menu = file)\n\nroot.mainloop()\n\n##try:\n## with open(name,'r') as UseFile:\n## print(UseFile.read())\n## except:\n## print(\"No file exists\")\n","sub_path":"excel_merger.py","file_name":"excel_merger.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"163742595","text":"#coding: utf-8\n\nfrom django.conf.urls import patterns, url\nfrom .views import PostDetailView, PostListView\n\n\napp_name = 'blog'\nurlpatterns = [\n url(r'^$', PostListView.as_view(), name='list'),\n url(r'^(?P[0-9]+)/$', PostDetailView.as_view(), name='detail'), #name='details'??\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"276433733","text":"class Database:\n def __init__(self):\n self.data = 1\n\ndatabase = None\n\n\ndef initialize_database():\n # global 声明模块级别变量,防止函数外变量失效\n global database\n database = Database().data\n\n# 单独运行本脚本时启用\nif __name__ == \"__main__\":\n initialize_database()\n print(\"__main__\")\nelse:\n print(__name__)\n\n# 别的模块引用时 __name__ 变量会变为模块名\nif __name__ == \"ecommerce.database\":\n print(\"this module is quoted\")\n","sub_path":"ecommerce/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"227970547","text":"# -*- coding: utf-8 -*-\nimport time\nimport json\nimport requests\nimport socket\nfrom struct import pack\n\nimport urllib, datetime\nfrom xml.etree import ElementTree as ET\n\n\nclass API:\n def __init__(self, **kwargs):\n # Initialized common attributes\n self.variables = kwargs\n self.debug = True\n self.set_variable('offline_count', 0)\n self.set_variable('connection_renew_interval', 6000)\n\n def renewConnection(self):\n pass\n\n def set_variable(self, k, v): # k=key, v=value\n self.variables[k] = v\n\n def get_variable(self, k):\n return self.variables.get(k, None) # default of get_variable is none\n\n '''\n Attributes:\n ------------------------------------------------------------------------------------------\n label GET label in string\n status GET status\n unitTime GET time\n type GET type \n ------------------------------------------------------------------------------------------\n '''\n\n '''\n API3 available methods:\n 1. 
getDeviceStatus() GET\n 2. setDeviceStatus() SET\n '''\n\n # ----------------------------------------------------------------------\n # getDeviceStatus(), printDeviceStatus()\n def getDeviceStatus(self):\n\n getDeviceStatusResult = True\n\n try:\n print(\"Get Status eGauge Power Meter\")\n\n # Get XML from eGauge device\n url = \"http://\" + self.get_variable(\"bearer\") + \".egaug.es/cgi-bin/egauge?noteam\"\n\n # Parse the results\n raw_data = ET.parse(urllib.urlopen(url)).getroot()\n print(raw_data)\n\n self.getDeviceStatusJson(raw_data)\n self.printDeviceStatus()\n\n if getDeviceStatusResult==True:\n self.set_variable('offline_count', 0)\n else:\n self.set_variable('offline_count', self.get_variable('offline_count')+1)\n except Exception as er:\n print (er)\n print('ERROR: classAPI_Egauge_PowerMeter failed to getDeviceStatus')\n\n def getDeviceStatusJson(self, data):\n\n # conve_json = json.loads(data)\n print(data)\n\n # self.set_variable('device_label', str(conve_json[\"label\"]))\n # self.set_variable('device_type', str(conve_json[\"type\"]).upper())\n # self.set_variable('unitTime', str(conve_json[\"unitTime\"]))\n # self.set_variable('status', str(conve_json[\"contact\"]).upper())\n\n def printDeviceStatus(self):\n # now we can access the contents of the JSON like any other Python object\n print(\" the current status is as follows:\")\n # print(\" label = {}\".format(self.get_variable('label')))\n\n # ----------------------------------------------------------------------\n\n\n# This main method will not be executed when this class is used as a module\ndef main():\n # -------------Kittchen----------------\n meter = API(model='eGauge', api='API3', agent_id='05EGA010101', types='powermeter', device='egauge50040',\n ip='192.168.1.8', port=82)\n\n meter.getDeviceStatus()\n time.sleep(3)\n\n\nif __name__ == \"__main__\": main()\n","sub_path":"EgaugeMeterAgent/egaugemeteragent/extension/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"40413488","text":"import tensorflow as tf\nimport os, sys\n\ndn = os.path.dirname\nabs_path = os.path.abspath(__file__)\nsys.path.append(dn(abs_path))\nsys.path.append(dn(dn(abs_path)))\ndel dn, abs_path\n\nfrom tframe import console\nfrom tframe import FLAGS\n\nfrom signals.utils.dataset import load_wiener_hammerstein, DataSet\n\nimport lott_lib\n\n\ndef main(_):\n console.start('Lottery')\n\n # Configurations\n MARK = 'mlp00'\n MEMORY_DEPTH = 80\n coe = 8\n HIDDEN_DIM = MEMORY_DEPTH * coe\n\n EPOCH = 500\n LR = 0.000058\n BATCH_SIZE = 32\n PRINT_CYCLE = 10\n BRANCH_INDEX = 1\n FIX_PRE_WEIGHT = True\n ACTIVATION = 'relu'\n\n # FLAGS.train = False\n FLAGS.overwrite = True and BRANCH_INDEX == 0\n FLAGS.smart_train = True\n FLAGS.save_best = True and BRANCH_INDEX > 0\n FLAGS.summary = True\n # FLAGS.save_model = False\n FLAGS.snapshot = False\n FLAGS.epoch_tol = 50\n\n # Load data\n train_set, val_set, test_set = load_wiener_hammerstein(\n r'../data/wiener_hammerstein/whb.tfd', depth=MEMORY_DEPTH)\n assert isinstance(train_set, DataSet)\n assert isinstance(val_set, DataSet)\n assert isinstance(test_set, DataSet)\n\n # Get model\n model = lott_lib.mlp00(MARK, MEMORY_DEPTH, HIDDEN_DIM, LR, ACTIVATION)\n\n branch_1_weights = 'FeedforwardNet/branch/linear/weights:0'\n branch_1_bias = 'FeedforwardNet/branch/linear/biases:0'\n branch_2_weights = 'FeedforwardNet/branch2/linear/weights:0'\n branch_2_bias = 
'FeedforwardNet/branch2/linear/biases:0'\n # model.nn.variable_assign(branch_1_weights, branch_2_weights)\n # model.nn.variable_assign(branch_1_bias, branch_2_bias)\n with model.nn._graph.as_default():\n variables = tf.trainable_variables()\n b = 1\n # print(model.nn._session.run(variables[2]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[6]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[3]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[7]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[4]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[5]))\n # a = 1\n\n\n # Train or evaluate\n if FLAGS.train:\n model.identify(train_set, val_set, batch_size=BATCH_SIZE,\n print_cycle=PRINT_CYCLE, epoch=EPOCH,\n branch_index=BRANCH_INDEX, freeze=FIX_PRE_WEIGHT)\n else:\n BRANCH_INDEX = 1\n model.evaluate(train_set, start_at=MEMORY_DEPTH, branch_index=BRANCH_INDEX)\n model.evaluate(val_set, start_at=MEMORY_DEPTH, branch_index=BRANCH_INDEX)\n model.evaluate(test_set, start_at=MEMORY_DEPTH, branch_index=BRANCH_INDEX)\n\n console.end()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"lottery/lott_script.py","file_name":"lott_script.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"600601835","text":"#Utilizando listas faça um programa que faça 5 perguntas para uma pessoa\n#sobre um crime. As perguntas são:\n#\"Telefonou para a vítima?\"\n#\"Esteve no local do crime?\"\n#\"Mora perto da vítima?\"\n#\"Devia para a vítima?\"\n#\"Já trabalhou com a vítima?\"\n#O programa deve no final emitir uma classificação sobre a\n#participação da pessoa no crime. Se a pessoa responder\n#positivamente a 2 questões ela deve ser classificada como \"Suspeita\",\n#entre 3 e 4 como \"Cúmplice\" e 5 como \"Assassino\".\n#Caso contrário, ele será classificado como \"Inocente\".\n\nresposta = [\"sim\",\"não\"]\nclassi = 0 \npergunta1 = str(input(\"Telefonou para vitimia? \"))\nif pergunta1 == resposta[0]:\n classi += 1\nelse :\n classi = 0\npergunta2 = str(input(\"Esteve no local do crime? \"))\nif pergunta2 == resposta[0]:\n classi += 1\npergunta3 = str(input(\"Mora perto da vitima? \"))\nif pergunta3 == resposta[0]:\n classi += 1\npergunta4 = str(input(\"Devia para vitima? \"))\nif pergunta3 == resposta[0]:\n classi += 1\npergunta5 = str(input(\"Já trabalhou com a vitima? 
\"))\nif pergunta3 == resposta[0]:\n classi += 1\nif classi == 2 :\n print(\"Você é suspeito\")\nelif classi == 3 or classi == 4 :\n print(\"Você foi cúmplice\")\nelif classi == 5 :\n print(\"Você é o criminoso\")\nelif classi == 0 :\n print(\"Você é inocente\")\n","sub_path":"Python-exercicios/questao14.py","file_name":"questao14.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"410488554","text":"class Solution:\n # Your task is to complete this function\n # Function should return an integer\n def countPS(self,s):\n n = len(s)\n dp = [[0]*n for _ in range(n)]\n \n for i in range(n):\n for j in range(i, -1, -1):\n if i == j:\n dp[j][i] = 1\n else:\n if s[i] == s[j]:\n dp[j][i] = dp[j+1][i] + dp[j][i-1] + 1\n else:\n dp[j][i] = dp[j+1][i] + dp[j][i-1] - dp[j+1][i-1]\n return dp[0][n-1]%(10**9+7)\n\n\n#{ \n # Driver Code Starts\n#Initial template for Python 3\n\nimport sys\nsys.setrecursionlimit(10**6)\n\nif __name__=='__main__':\n t = int(input())\n for i in range(t):\n ob=Solution()\n print(ob.countPS(input().strip()))\n\n# } Driver Code Ends","sub_path":"GeeksForGeeks/Practice/arrays/count_palindrome_subsequence.py","file_name":"count_palindrome_subsequence.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"86222962","text":"from crispy_forms.layout import Div\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext as _\nfrom geotrek.common.forms import CommonForm\nfrom geotrek.outdoor.models import Site, Course, OrderedCourseChild\n\n\nclass SiteForm(CommonForm):\n orientation = forms.MultipleChoiceField(choices=Site.ORIENTATION_CHOICES, required=False)\n wind = forms.MultipleChoiceField(choices=Site.ORIENTATION_CHOICES, required=False)\n\n geomfields = ['geom']\n\n fieldslayout = [\n Div(\n 'structure',\n 'name',\n 'parent',\n 'review',\n 'published',\n 'practice',\n 'type',\n 'description_teaser',\n 'ambiance',\n 'description',\n 'advice',\n 'period',\n 'orientation',\n 'wind',\n 'labels',\n 'themes',\n 'information_desks',\n 'web_links',\n 'portal',\n 'source',\n 'managers',\n 'eid',\n )\n ]\n\n class Meta:\n fields = ['geom', 'structure', 'name', 'review', 'published', 'practice', 'description',\n 'description_teaser', 'ambiance', 'advice', 'period', 'labels', 'themes',\n 'portal', 'source', 'information_desks', 'web_links', 'type', 'parent', 'eid',\n 'orientation', 'wind', 'managers']\n model = Site\n\n def __init__(self, site=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['parent'].initial = site\n if self.instance.pk:\n descendants = self.instance.get_descendants(include_self=True).values_list('pk', flat=True)\n self.fields['parent'].queryset = Site.objects.exclude(pk__in=descendants)\n if self.instance.practice:\n for scale in self.instance.practice.rating_scales.all():\n for bound in ('max', 'min'):\n ratings = getattr(self.instance, 'ratings_' + bound).filter(scale=scale)\n fieldname = 'rating_scale_{}{}'.format(bound, scale.pk)\n self.fields[fieldname] = forms.ModelChoiceField(\n label=\"{} {}\".format(scale.name, bound),\n queryset=scale.ratings.all(),\n required=False,\n initial=ratings[0] if ratings else None\n )\n self.fieldslayout[0].insert(10, fieldname)\n\n def save(self, *args, **kwargs):\n site = super().save(self, *args, **kwargs)\n\n # Save ratings\n if site.practice:\n for bound 
in ('min', 'max'):\n field = getattr(site, 'ratings_' + bound)\n to_remove = list(field.exclude(scale__practice=site.practice).values_list('pk', flat=True))\n to_add = []\n for scale in site.practice.rating_scales.all():\n rating = self.cleaned_data.get('rating_scale_{}{}'.format(bound, scale.pk))\n if rating:\n to_remove += list(field.filter(scale=scale).exclude(pk=rating.pk).values_list('pk', flat=True))\n to_add.append(rating.pk)\n else:\n to_remove += list(field.filter(scale=scale).values_list('pk', flat=True))\n field.remove(*to_remove)\n field.add(*to_add)\n\n return site\n\n\nclass CourseForm(CommonForm):\n children_course = forms.ModelMultipleChoiceField(label=_(\"Children\"),\n queryset=Course.objects.all(), required=False,\n help_text=_(\"Select children in order\"))\n hidden_ordered_children = forms.CharField(label=_(\"Hidden ordered children\"),\n widget=forms.widgets.HiddenInput(),\n required=False)\n\n geomfields = ['geom']\n\n fieldslayout = [\n Div(\n 'structure',\n 'name',\n 'site',\n 'review',\n 'published',\n 'description',\n 'advice',\n 'equipment',\n 'height',\n 'children_course',\n 'eid',\n 'hidden_ordered_children',\n )\n ]\n\n class Meta:\n fields = ['geom', 'structure', 'name', 'site', 'review', 'published', 'description',\n 'advice', 'equipment', 'height', 'eid', 'children_course', 'hidden_ordered_children']\n model = Course\n\n def __init__(self, site=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['site'].initial = site\n if self.instance.pk and self.instance.site and self.instance.site.practice:\n for scale in self.instance.site.practice.rating_scales.all():\n ratings = self.instance.ratings.filter(scale=scale)\n fieldname = 'rating_scale_{}'.format(scale.pk)\n self.fields[fieldname] = forms.ModelChoiceField(\n label=scale.name,\n queryset=scale.ratings.all(),\n required=False,\n initial=ratings[0] if ratings else None\n )\n self.fieldslayout[0].insert(5, fieldname)\n if self.instance:\n queryset_children = OrderedCourseChild.objects.filter(parent__id=self.instance.pk).order_by('order')\n # init multiple children field with data\n self.fields['children_course'].queryset = Course.objects.exclude(pk=self.instance.pk)\n self.fields['children_course'].initial = [c.child.pk for c in self.instance.course_children.all()]\n # init hidden field with children order\n self.fields['hidden_ordered_children'].initial = \",\".join(str(x) for x in queryset_children.values_list('child__id', flat=True))\n\n def clean_children_course(self):\n \"\"\"\n Check the course is not parent and child at the same time\n \"\"\"\n children = self.cleaned_data['children_course']\n if children and self.instance and self.instance.course_parents.exists():\n raise ValidationError(_(\"Cannot add children because this course is itself a child.\"))\n for child in children:\n if child.course_children.exists():\n raise ValidationError(_(\"Cannot use parent course {name} as a child course.\".format(name=child.name)))\n return children\n\n def save(self, *args, **kwargs):\n course = super().save(self, *args, **kwargs)\n\n # Save ratings\n if course.site and course.site.practice:\n to_remove = list(course.ratings.exclude(scale__practice=course.site.practice).values_list('pk', flat=True))\n to_add = []\n for scale in course.site.practice.rating_scales.all():\n rating = self.cleaned_data.get('rating_scale_{}'.format(scale.pk))\n if rating:\n to_remove += list(course.ratings.filter(scale=scale).exclude(pk=rating.pk).values_list('pk', flat=True))\n to_add.append(rating.pk)\n else:\n 
to_remove += list(course.ratings.filter(scale=scale).values_list('pk', flat=True))\n course.ratings.remove(*to_remove)\n course.ratings.add(*to_add)\n\n # Save children\n ordering = []\n if self.cleaned_data['hidden_ordered_children']:\n ordering = self.cleaned_data['hidden_ordered_children'].split(',')\n order = 0\n # add and update\n for value in ordering:\n child, created = OrderedCourseChild.objects.get_or_create(parent=self.instance,\n child=Course.objects.get(pk=value))\n child.order = order\n child.save()\n order += 1\n # delete\n new_list_children = self.cleaned_data['children_course'].values_list('pk', flat=True)\n for child_relation in self.instance.course_children.all():\n # if existant child not in selection, deletion\n if child_relation.child_id not in new_list_children:\n child_relation.delete()\n\n return course\n","sub_path":"geotrek/outdoor/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"302367399","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n#This program is built to simulate the trajectories of atomic/molecular\r\n# beams through irregular channels while having the chance to be \r\n#adsorbed by the side wall and reemitted in a new direction with a new\r\n#velocity\r\n\r\nh=100\r\nw=100\r\nNum=100\r\nT=273\r\ntimesteps=5000\r\n\r\ncoords=np.zeros((timesteps,2))\r\n\r\n\r\nBC=np.ones((h,w))\r\nBC1=np.ones((h,w))\r\nn=np.ones((h,w,2))\r\n\r\n\r\ndef create_path(x_start,x_finish,y_start,y_finish,val):\r\n\tlx=(x_finish-x_start)\r\n\tly=(y_finish-y_start)\r\n\tls=math.sqrt(lx**2+ly**2)\r\n\tslope=ly/(0.000001+lx)\r\n\tif lx>ly:\r\n\t\tfor i in range(lx):\r\n\t\t\tBC[int(y_start+(i*slope)//1)][i+int(x_start)]=val\r\n\t\t\tBC1[int(y_start+(i*slope)//1)][i+int(x_start)]=val\r\n\telse:\r\n\t\tfor j in range(ly):\r\n\t\t\tBC[int(y_start+j)][int(x_start+(j/slope)//1)]=val\r\n\t\t\tBC1[int(y_start+j)][int(x_start+(j/slope)//1)]=val\r\n\r\n\r\nfor j in range(10):\r\n\tcreate_path(5,20,j,j,0)\r\nfor j in range(10):\r\n\tcreate_path(5+j,5+j,10,90,0)\r\nfor j in range(10):\r\n\tcreate_path(5,80,90-j,90-j,0)\r\nfor j in range(10):\r\n\tcreate_path(75+j,75+j,10,90,0)\r\nfor j in range(10):\r\n\tcreate_path(10,75+j,10,10+j,0)\r\n\r\ndef MBDist(T):\r\n\treturn np.random.random()\r\n\r\nclass Particle:\r\n\tdef __init__(self,T):\r\n\t\tself.temp=T\r\n\t\tself.v=np.random.rand()\r\n\t\tself.theta=math.pi/2.8\r\n\t\tself.x=7\r\n\t\tself.y=7\r\n\t\tself.vx=0.11*math.cos(self.theta)\r\n\t\tself.vy=0.11*math.sin(self.theta)\r\n\r\nRb=Particle(10)\r\n\r\n#To propagate a particle into the channel, it is necessary to rescale the \r\n# motion the characteristic cell size i.e. 1. Thus, the larger value of the\r\n#x or y momentum is rescaled to a box and the other is corresponding #sine(theta) or cosine(theta). 
This allows the particle to move in increments #of the BC\r\n\r\ndef Reflect(d):\r\n\tif d==\"y\":\r\n\t\tRb.vy*=-1\r\n\t\tprint(\"reflected y!\")\r\n\tif d==\"x\":\r\n\t\tRb.vx*=-1\r\n\t\tprint(\"reflected x!\")\r\n\t\r\ndef calc_norm_vectors():\r\n\tfor i in range(h):\r\n\t\tfor j in range(w):\r\n\t\t\tif BC[i][j]==0:\r\n\t\t\t\tif BC[i+1][j]==1:\r\n\t\t\t\t\tn[i+1][j]=[0,1]\r\n\t\t\t\tif BC[i-1][j]==1:\r\n\t\t\t\t\tn[i-1][j]=[0,1]\r\n\t\t\t\tif BC[i][j+1]==1:\r\n\t\t\t\t\tn[i][j+1]=[1,0]\r\n\t\t\t\tif BC[i][j-1]==1:\r\n\t\t\t\t\tn[i][j-1]=[1,0]\r\n\t#for i in range(h-1):\r\n\t#\tfor j in range(w-1):\r\n\t#\t\tif BC[i][j]==1:\r\n\t#\t\t\tif (n[i][j][0])**2+(n[i][j][1])**2>0:\r\n\t#\t\t\t\tnavgx=0\r\n\t#\t\t\t\tnavgy=0\r\n\t#\t\t\t\tnorm=0\r\n\t#\t\t\t\tnavgx+=n[i][j-1][0]\r\n\t#\t\t\t\tnavgy+=n[i][j-1][1]\r\n\t#\t\t\t\tnavgx+=n[i][j+1][0]\r\n\t#\t\t\t\tnavgy+=n[i][j+1][1]\t\t\t\t\t\r\n\t#\t\t\t\tnavgx+=n[i+1][j][0]\r\n\t#\t\t\t\tnavgy+=n[i+1][j][1]\r\n\t#\t\t\t\tnavgx+=n[i-1][j][0]\r\n\t#\t\t\t\tnavgy+=n[i-1][j][1]\r\n\t#\t\t\t\tnormn=navgx**2+navgy**2\r\n\t#\t\t\t\tn[i][j]=[navgx/normn,navgy/norm]\r\n\r\ncalc_norm_vectors()\r\n\r\ndef propagator():\r\n\tcount=0\r\n\t#while Rb.x|<|!|\\.|@|#|\\$|\\*|:|%|\\+|…|\\\\\\\\|\\/|«|»|···|\\||\\•|\\?|\\(|\\)|=|-|&|;|\\_|—|~|¯|\\{|\\}|\\[|\\]|£|€|¥|¿|–\", \"\", line)\n line = re.sub(\"\\“|\\”|\\‘|\\’|\\\"|,|'\", \" \", line)\n line = re.sub(\"[0-9]+|http[a-zA-Z0-9]+\", \" \", line)\n line = line.lower()\n line = re.sub(\" [a-z] |aa+[a-z]* | ab | aba | abc | ac | acc | acq | az | ba | baa* | ca | czq | czt | da | daca | ec | ed | rt | co \", \" \", line)\n line = re.sub(\" amp | get | pi | marc | someon | talking | speaking | ever | done | less \", \" \", line)\n \n stop_words = (stopwords.words('english'))\n bad_words = ['if','who', 'would', 'the', 'are', 'said', 'i', 'in', 'it', 'a', 'u', 'm', 're', \n 'them', 'they', 'there', 'should', 'over', 'an', 'via', 'up', 'at', 'is', 'as',\n 'was', 'him', 'he', 'can', 'did', 'go', 'by', 'us', 'our', 'their', 'or', 'how', \n 'now', 'but', 'give', 'my', 'so', 'be', 'out', 'its', 'and', 'any', 'all', 'got', 'then', 'you',\n 'these', 'say', 'on', 'not', 'some', 'me', 'those', 'to', 'of', 'for', 'we', \n 'why', 'like','https','with','from','para','more','this','when','just','about',\n 'that','what','have','will','your', 'must', 'which', 'pathfinder', 'vbtn', 'msfppreload', 'msfpnav',\n 'also', 'united', 'states', 'color','vallejo', 'cyndi', 'service', 'last', 'same', 'mind', 'fl',\n 'msfphover', 'hippo', 'quinny', 'dreami', 'many', 'even', 'cenicola', 'than',\n 'whether', 'office', 'read']\n \n word_tokens = word_tokenize(line)\n word_tokens = [w.lower() for w in word_tokens]\n\n filtered_sentence = [w for w in word_tokens if not w in stop_words] \n filtered_sentence = [w for w in word_tokens if not w in bad_words]\n\n# for word in filtered_sentence:\n# print('%s\\t%s' % (word, \"1\"))\n \n for word in coocc(filtered_sentence):\n print('%s %s\\t%s' % (word[0],word[1], \"1\"))","sub_path":"part3/Twitter/Code/Data/mapper_coocc.py","file_name":"mapper_coocc.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"442160863","text":"# encoding=utf-8\n# coding=utf-8\nimport requests\nimport re\nimport time\nfrom bs4 import BeautifulSoup\nimport random\nimport datetime\nimport os\nimport json\n\nimport sys\n\nheaders = [\n {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/71.0.3578.98 Safari/537.36'\n },\n {\n 'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\n }\n]\n\nday = datetime.datetime.now().strftime('%Y%m%d')\nlog_file = day+\"_log.txt\"\nrpc_url = \"http://localhost:6800/jsonrpc\"\n\n# 网站根目录的url\nroot_url = 'https://18comic.one'\nindex_url = 'https://18comic.one/albums/doujin?o=tf'\nlist_prefix = \"\"\ntotalPageNumber = 0\nimages_pattern = ''\nimage_pattern = ''\n\nscript_tmp_path = os.getcwd()\nscript_tmp_name = \"tmp.html\"\n\n# 保存图片的根路径\nbase_save_dir = 'D:\\\\manhua\\\\'\n\n# 日志处理\ndef log(value,print_flag = True):\n logfile = open(log_file, 'a', encoding='utf-8')\n if logfile.writable():\n now_data = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n log_message = \"时间:{} : log : {}\\n\".format(now_data, value)\n if print_flag:\n print(log_message)\n logfile.write(log_message)\n try:\n logfile.close()\n except IOError:\n print(\"写入日志错误\")\n else:\n return\n\n\ndef addDownloadTask(url,dir,out):\n postdata = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"QXJpYU5nXzE1NDgzODg5MzhfMC4xMTYyODI2OTExMzMxMzczOA==\"\n }\n rpc_request = postdata\n rpc_request[\"method\"] = \"aria2.addUri\"\n # rpc 的选项,去掉--就可以了\n options = {\n \"dir\":dir,\n \"out\":out,\n \"allow-overwrite\":\"true\"\n }\n rpc_request[\"params\"] = [[url],options]\n response = requests.post(url=rpc_url, json=rpc_request)\n if response.status_code == 200:\n result = response.json().get(\"result\", [])\n print(\"gid: {}\".format(result))\n return result\n else:\n log(\"无法调用aria2\")\n\ndef download_status(gid):\n postdata = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"QXJpYU5nXzE1NDgzODg5MzhfMC4xMTYyODI2OTExMzMxMzczOA==\"\n }\n rpc_request = postdata\n rpc_request[\"method\"] = \"aria2.tellStatus\"\n rpc_request[\"params\"] = [gid]\n response = requests.post(url=rpc_url,json=rpc_request)\n if response.status_code == 200:\n result = response.json().get(\"result\",\"\")\n if result != \"\":\n status = result.get(\"status\")\n if status != \"\":\n return status\n return None\n\n\"\"\"\n下载工具\nurl: 下载地址\ndir: 保存路径\nout: 保存名称\n\"\"\"\ndef download(url,dir,out):\n log(\"开始下载:{}\".format(url))\n gid = addDownloadTask(url,dir,out)\n status = download_status(gid)\n error = False\n error_num = 0\n while True and not error:\n if status == \"active\":\n time.sleep(3)\n print(\"下载中.....\\n\")\n status = download_status(gid)\n if status == \"complete\":\n break\n elif status == \"waiting\":\n log(\"下载队列已满\")\n time.sleep(4)\n status = download_status(gid)\n elif status == \"paused\":\n log(\"暂停下载\")\n break\n elif status == \"error\":\n log(\"下载错误\")\n if error_num == 3:\n error = True\n break\n else:\n log(\"重新下载\")\n gid = addDownloadTask(url,dir,out)\n status = download_status(gid)\n error_num = error_num + 1\n \n elif status == \"removed\":\n log(\"已经从下载队列中移除\")\n break\n if error:\n log(\"下载:{}出错\".format(url))\n return -1\n else:\n log(\"下载{}成功\".format(url))\n return 0\n\n# 获得首页内容\n# 返回页面的list\ndef get_index_info(url):\n result = []\n if True:\n response_data = htmlContent(url)\n soup = BeautifulSoup(response_data,'html.parser')\n plist = soup.select('#wrapper > div.container > div > div.col-xs-12.col-md-9.col-sm-8 > div.row > div > div > a')\n for item in plist:\n p_url = root_url + item.get(\"href\")\n result.append(p_url)\n return result\n\n\n# 下载图片\n# url 图片的地址\n# save_dir 保存的路径\n# 保存的名称\ndef download_image(url,save_dir,filename):\n download(url=url,dir=save_dir,out=filename)\n\n\"\"\"\n获得html文件的内容\n\"\"\"\ndef 
htmlContent(url):\n status = download(url=url,dir=script_tmp_path,out=script_tmp_name)\n data = None\n if status == 0:\n # 读取文件\n with open(script_tmp_path+\"\\\\\"+script_tmp_name,\"r\",encoding=\"utf-8\") as f:\n data = f.read()\n f.close()\n return data\n\n# 获得html中的图片的url\ndef get_image_url(url):\n imgurl = None\n try:\n if True:\n response_data = htmlContent(url)\n imgs = re.findall(images_pattern,response_data,re.S)\n # 启用备用解析\n if len(imgs) ==0 :\n bpattern = ''\n imgs = re.findall(bpattern,response_data,re.S)\n # print(imgs)\n if len(imgs) > 0:\n # image_pattern = 'src=\"http://lf.veestyle.cn/uploads/.*?\"'\n urls = re.findall(image_pattern,imgs[0])\n if len(urls) > 0:\n imgurl = urls[0].split(\"src=\")[-1].replace('\\\"',\"\")\n print('图片url: {}'.format(imgurl))\n elif len(urls) == 0:\n # 启用备用解析\n urls = re.findall('src=\"http://lf.mz0731.com/uploads/.*?\"',imgs[0])\n imgurl = urls[0].split(\"src=\")[-1].replace('\\\"',\"\")\n # 替换成当前的地址\n imgurl = imgurl.replace(\"http://lf.mz0731.com/\",root_url)\n print(\"备用解析图片url:{}\".format(imgurl))\n\n except RuntimeError:\n log('请求:{} 异常'.format(url))\n else:\n return imgurl\n return imgurl\n\n# 获得一本漫画的名称和url\n# 第一个名称\ndef get_single_pic(url):\n url = url.replace(\"album\",\"photo\")\n if True:\n print(url)\n response_data = htmlContent(url)\n if response_data is None:\n log(\"!!!获取url:{}失败\".format(url))\n return\n soup = BeautifulSoup(response_data,'html.parser')\n titles = soup.select('#wrapper > div.top-nav.visible-xs > div > ul > span')\n if len(titles) == 0:\n log(\"!!!获取url:{}失败\".format(url))\n return\n title = titles[0].string\n title = title.replace(\":\",\"\")\n title = title.replace(\";\",\"\")\n title = title.replace(\"\\\\\",\"\").replace(\"/\",\"\").replace(\"*\",\"\").replace(\"?\",\"\").replace(\"\\\"\",\"\").replace(\"<\",\"\").replace(\">\",\"\").replace(\"|\",\"\")\n save_dir = base_save_dir + title + \"\\\\\"\n print(\"save dir:{}\".format(save_dir))\n # 创建该漫画的保存的文件夹\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n downloaded_list = []\n waiting_download_list = re.findall(\"https://cdn-msp.18comic.one/media/photos/.*?/.*?.jpg\",response_data,re.S)\n print(waiting_download_list)\n with open(\"imagelist.json\",\"r\") as f:\n downloaded_list = json.loads(f.read())\n f.close()\n for image_url in waiting_download_list:\n if image_url is None:\n continue\n if image_url in downloaded_list:\n print(\"已经下载:{}\".format(image_url))\n continue\n new_image_name = str(image_url).split(\"/\")[-1]\n download_image(image_url,save_dir,new_image_name)\n downloaded_list.append(image_url)\n time.sleep(3 + random.random())\n # 完成一个之后\n with open(\"imagelist.json\",\"w\") as f:\n f.write(json.dumps(downloaded_list))\n f.close()\n\nif __name__ == '__main__':\n # init()\n # 两个核心功能\n # 已经下载的列表\n downloaded_list = []\n with open('list.json','r') as f:\n json_data = f.read()\n f.close()\n downloaded_list = json.loads(json_data)\n\n # 1、根据主页获得漫画的列表\n totalPageNumber = 121\n for i in range(1, totalPageNumber):\n new_list_url = index_url + '&page={}'.format(i)\n print('列表url:{}'.format(new_list_url))\n url_list = get_index_info(new_list_url)\n print(url_list)\n if len(url_list) == 0:\n print(\"列表数据为空\")\n break\n for item in url_list:\n pic_url = item\n if pic_url in downloaded_list:\n print('{} 已经下载'.format(pic_url))\n continue\n else:\n # 未下载,下载\n print(\"未下载:{}\".format(pic_url))\n get_single_pic(pic_url)\n # 下载完成,写入文件\n downloaded_list.append(pic_url)\n #\n print('写入文件')\n with open('list.json','w') as f:\n 
f.write(json.dumps(downloaded_list))\n f.close()\n time.sleep(1+random.random())","sub_path":"Python/manhua/benzi18/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"135430695","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n # context = {'city': '北京'}\n # return render(request, 'index.html', context)\n return render(request, 'index.html', {'city': '北京1'}) # 也可以这样写\n\n\nfrom django.views import View\n\n\nclass HomeView(View):\n def get1(self, request):\n\n return render(request, 'index.html', {\n \"title\": '新的标题',\n 'tuple': (1, 2, 3, 4,),\n 'list1': ['a', 'b', 'c', ],\n 'dict1': {'name': 'alnk', 'age': 18, },\n 'num': 100,\n })\n\n\n def get2(self, request):\n\n return render(request, 'index2.html', {\n 'tuple': (1, 2, 3, 4,),\n 'list1': ['a', 'b', 'c', ],\n 'dict1': {'name': 'alnk', 'age': 18, },\n })\n\n def get3(self, request):\n\n return render(request, 'index3.html', {\n 'book_list': [\n {'name': 'python', 'price': 99},\n {'name': 'go', 'price': 69},\n {'name': 'go', 'price': 69},\n {'name': 'go', 'price': 69},\n {'name': 'go', 'price': 69},\n ]\n })\n\n # def get(self, request):\n # # forloop.parentloop.counter 会继承上一个for循环的计数规则\n # return render(request, 'index4.html', {\n # 'people': [\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # ]\n # })\n\n def get(self, request):\n \"\"\"过滤器\"\"\"\n from datetime import datetime\n return render(request, \"index5.html\", {\n \"title\": \"welcome to django\",\n 'title2': '
大标题
',\n 'title3': \"小可爱\",\n 'date_time': datetime.now(),\n 'str1': 'welcome to django',\n 'str2': '我爱中国 welcome to django'\n })\n\n\n# class IndexView(View):\n# \"\"\"模板继承\"\"\"\n# def get(self, request):\n# return render(request, \"index/index.html\")\n#\n#\n# class List(View):\n# def get(self, request):\n# return render(request, \"index/list.html\")\n\n\nclass IndexView(View):\n \"\"\"模板继承\"\"\"\n def get(self, request):\n return render(request, \"exten/index.html\")\n\n\nclass List(View):\n def get(self, request):\n return render(request, \"exten/list.html\")\n\n\n\"\"\"表单系统\"\"\"\nfrom . forms import LoginForm\n\nclass LoginView(View):\n def get(self, request):\n return render(request, 'form.html', {\n 'forms': LoginForm(),\n })\n\n def post(self, request):\n # print(request.POST)\n # print(request.POST.get(\"user\"))\n # print(request.POST.get(\"pwd\"))\n # 提交表单\n # 使用表单系统提供的验证流程\n form = LoginForm(request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n print('到数据库查询账号密码,进行对比')\n print(form)\n return HttpResponse('OK')\n else:\n print(form)\n # return HttpResponse('NO')\n return render(request, 'form.html', {\n 'forms': form\n })\n\n\n\nfrom .forms import UserForm\nclass FormModelView(View):\n def get(self, request):\n return render(request, 'form_model.html', {\n 'form_content': UserForm(),\n })\n\n def post(self, request):\n forms_mode = UserForm(request.POST)\n if forms_mode.is_valid():\n print(forms_mode.data)\n print('查询数据库账号密码')\n return HttpResponse('OK')\n else:\n print(forms_mode.cleaned_data)\n return render(request, 'form_model.html', {\n 'form_content': forms_mode,\n })","sub_path":"17 day17/03 作业day17/djangodemo/temp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"405431361","text":"\"\"\"\nGiven a set of distinct integers, nums,\nreturn all possible subsets.\nNote: The solution set must not contain duplicate subsets.\nFor example,\nIf nums = [1,2,3], a solution is:\n{\n (1, 2),\n (1, 3),\n (1,),\n (2,),\n (3,),\n (1, 2, 3),\n (),\n (2, 3)\n}\n\"\"\"\nfrom algorithms.bit import subsets\nnums = [1,2,3]\n\nprint(subsets(nums))\n\n\n### combination\n\nfrom itertools import combinations\n\nfor i in range(1,len(nums)+1):\n comb=combinations(nums,i)\n for j in comb:\n print(j)\n\n\"\"\"\nthis explanation is from leet_nik @ leetcode\nThis is an amazing solution. 
Learnt a lot.\nNumber of subsets for {1 , 2 , 3 } = 2^3 .\nwhy ?\ncase possible outcomes for the set of subsets\n 1 -> Take or dont take = 2\n 2 -> Take or dont take = 2\n 3 -> Take or dont take = 2\ntherefore,\ntotal = 2*2*2 = 2^3 = {{}, {1}, {2}, {3}, {1,2}, {1,3}, {2,3}, {1,2,3}}\nLets assign bits to each outcome ->\nFirst bit to 1 , Second bit to 2 and third bit to 3\nTake = 1\nDont take = 0\n0) 0 0 0 -> Dont take 3 , Dont take 2 , Dont take 1 = { }\n1) 0 0 1 -> Dont take 3 , Dont take 2 , take 1 = { 1 }\n2) 0 1 0 -> Dont take 3 , take 2 , Dont take 1 = { 2 }\n3) 0 1 1 -> Dont take 3 , take 2 , take 1 = { 1 , 2 }\n4) 1 0 0 -> take 3 , Dont take 2 , Dont take 1 = { 3 }\n5) 1 0 1 -> take 3 , Dont take 2 , take 1 = { 1 , 3 }\n6) 1 1 0 -> take 3 , take 2 , Dont take 1 = { 2 , 3 }\n7) 1 1 1 -> take 3 , take 2 , take 1 = { 1 , 2 , 3 }\nIn the above logic ,Insert S[i] only if (j>>i)&1 ==true\n{ j E { 0,1,2,3,4,5,6,7 } i = ith element in the input array }\nelement 1 is inserted only into those places where 1st bit of j is 1\nif( j >> 0 &1 ) ==> for above above eg.\nthis is true for sl.no.( j )= 1 , 3 , 5 , 7\nelement 2 is inserted only into those places where 2nd bit of j is 1\nif( j >> 1 &1 ) == for above above eg.\nthis is true for sl.no.( j ) = 2 , 3 , 6 , 7\nelement 3 is inserted only into those places where 3rd bit of j is 1\nif( j >> 2 & 1 ) == for above above eg.\nthis is true for sl.no.( j ) = 4 , 5 , 6 , 7\nTime complexity : O(n*2^n) , for every input element loop traverses\nthe whole solution set length i.e. 2^n\n\"\"\"\n\n","sub_path":"algorithms_practice/4.bit/14.Bit_subsets.py","file_name":"14.Bit_subsets.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"332641985","text":"from collections import Counter\n\ns = input()\nt = input()\n\nsc = Counter(s)\ntc = Counter(t)\n\nFLIP_CASE = ord('a') - ord('A')\n\nyay, whoops = 0, 0\nfor c in s:\n amount = min(sc[c], tc[c])\n if amount > 0:\n yay += amount\n tc[c] -= amount\n sc[c] -= amount\n\nfor c in sc:\n fc = chr(FLIP_CASE ^ ord(c))\n amount = min(sc[c], tc[fc])\n if amount > 0:\n whoops += amount\n tc[fc] -= amount\n\nprint(yay, whoops)\n","sub_path":"codeforces/518/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"479351735","text":"def ceasar_cipher(message, shift):\n the_alphabet = 'abcdefghijklmnopqrstuvwxyz'\n encrypted_message = ''\n for letter in message:\n letter_position = the_alphabet.find(letter.lower())\n index = (letter_position + shift) % len(the_alphabet)\n if letter.lower() not in the_alphabet:\n encrypted_message += letter\n elif letter.isupper():\n encrypted_message += the_alphabet[index].upper()\n else:\n encrypted_message += the_alphabet[index]\n return encrypted_message\n\nmessage = 'abc def hi Jkl mno pqr stu vwx Yz'\nprint(ceasar_cipher(message, -2))","sub_path":"ceasar_cipher.py","file_name":"ceasar_cipher.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"445066343","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nclass Author(models.Model):\n name = models.CharField(max_length=200)\n age = models.IntegerField()\n\n def __unicode__(self):\n return self.name\n\n\nclass Book(models.Model):\n\n 
title = models.CharField(max_length=200)\n outline = models.TextField(blank=True, null=True)\n users = models.ManyToManyField(User)\n authors = models.ManyToManyField(Author)\n\n def __unicode__(self):\n return self.title\n\n @staticmethod\n def get_book_list(user, page_no, num_per_page):\n books = Book.objects.filter(users__id=user.pk)\n pages = Paginator(books, per_page=num_per_page)\n try:\n return pages.page(page_no)\n except PageNotAnInteger:\n return pages.page(1)\n except EmptyPage:\n return pages.page(pages.num_pages)\n","sub_path":"oauth_dj/oauth_service/books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"167560860","text":"# Unordered List Sequential Search\n\n\ndef seq_search(arr, ele):\n\n pos = 0\n found = False\n\n while pos < len(arr) and not found:\n\n if arr[pos] == ele:\n found = True\n print(ele)\n else:\n pos += 1\n\n return 'Element Found: ', found\n\n\narr = [1, 2, 3, 4, 5, 6]\n\nprint(seq_search(arr, 5))\n","sub_path":"Search and Sorting/sequential_search_unordered_list.py","file_name":"sequential_search_unordered_list.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"502559589","text":"#!/usr/bin/env python3\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport sys\nimport os\nfrom time import sleep\n\nTIMEOUT = 15\n\ndef message_input(driver):\n message_box = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"message\"]' ) ))\n message_box.send_keys(\"Hi Papaya!\\n\\nThis message was delivered by a script I wrote using Selenium, which you can find on my github at https://github.com/MooseandSquvirrel/hi_papaya - I used a virtual environment if you'd like to test it with pipenv, make sure to have chromedriver on your PATH.\\nMy name is Andy Gardner and I'm currently interning at 42 Silicon Valley, a non-profit coding school. I applied to your New Grad Software Engineer role for the Selenium script developer. I believe my experience writing Selenium scripts makes a good candidate for this role. So I thought I'd show it with a quick script I just wrote in a couple minutes. I like automating tasks and this roles seems like a great fit. 
\\n\\nHope to hear from you soon!\\n\\nBest,\\nAndy :)\")\n\ndef email_input(email, driver):\n email_box = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"contact-form\"]/div[1]/div[2]/div/input' ) ))\n email_box.send_keys(email)\n\ndef name_input(full_name, driver):\n\temail_box = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"contact-form\"]/div[1]/div[1]/div/input' ) ))\n\temail_box.send_keys(full_name)\n\ndef click_send(driver):\n\tbutton = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"contact-form\"]/button' ) ))\n\tbutton.click()\n\tsleep(20)\n\ndef input_info(full_name, email, driver):\n name_input(full_name, driver)\n email_input(email, driver)\n message_input(driver)\n click_send(driver)\n\ndef commandline():\n full_name_check = ''\n email_check = ''\n while full_name_check != 'y':\n full_name = input(\"Enter your full name: \")\n full_name_check = input(f\"You entered {full_name}, is this correct? (y or n): \")\n while email_check != 'y':\n email = input(\"Enter your email: \")\n email_check = input(f\"You entered {email}, is this correct? (y or n): \")\n return full_name, email\n \ndef main():\n full_name, email = commandline()\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.get(\"https://papayapay.com/contact\")\n input_info(full_name, email, driver)\n\nif __name__== \"__main__\":\n main()","sub_path":"hi.py","file_name":"hi.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"323194496","text":"from pathlib import Path\nimport pandas as pd\nfrom warnings import warn\n\nroot_path = Path(\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/04_complete\")\nout_path = Path(\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/aggregated_data\")\nout_path.mkdir(exist_ok=True)\n\nfiles = sorted(root_path.glob(\"*quest*.xlsx\"))\ndf_out = pd.DataFrame()\n\n\ndef excel_letter_to_num(l):\n from string import ascii_lowercase\n letter_lut = {letter: index for index, letter in enumerate(ascii_lowercase, start=0)}\n return letter_lut[l.lower()]\n\n\ndef extract_data_via_mapping(file, lut27, lut29, sheet=\"01_Veränderungsfragebogen\", row_offset=-2):\n df_out = pd.DataFrame()\n\n # encoding breaks load code\n sheet_ = 4 if sheet == \"03_Kardivaskulär\" else sheet\n\n df_in = pd.read_excel(file, sheet_name=sheet_)\n\n # cardio comes in different formats 29 and 27 lines\n # (this is because some files dont have the Keine der genannten Behandlungen cells\n\n if len(df_in) == 29:\n lut = lut29.dropna(axis=\"index\", how=\"all\")\n elif len(df_in) == 27:\n lut = lut27.dropna(axis=\"index\", how=\"all\")\n else:\n raise Exception(file, len(df_in))\n\n for _, row in lut.iterrows():\n name, col_idx, row_idx = row[\"variable_short_engl\"], excel_letter_to_num(row[\"value_col\"]), \\\n int(row[\"value_row\"]) + row_offset\n df_out = df_out.append(pd.DataFrame({\"variable\": name, \"value\": df_in.iloc[row_idx, col_idx]}, index=[0]))\n df_out = df_out.set_index(\"variable\").T\n\n return df_out\n\n\nsheet = \"03_Kardivaskulär\"\n\nlut_file = Path(\n f\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/mapping/mapping_{sheet}_27.xlsx\")\nlut_27 = pd.read_excel(lut_file)\nlut_file = Path(\n f\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/mapping/mapping_{sheet}_29.xlsx\")\nlut_29 
= pd.read_excel(lut_file)\n\ndfs = []\nfor f in files:\n id = pd.read_excel(f, sheet_name=\"ID\", usecols=\"A:B\", names=[\"variable\", \"value\"], header=None)\n id.dropna(axis=\"index\", how=\"all\", inplace=True)\n id = id.set_index(\"variable\").T\n id[\"file\"] = f\n\n df1 = extract_data_via_mapping(f, lut_27, lut_29, sheet=sheet)\n df = pd.concat((id, df1), axis=1)\n dfs.append(df)\n\ndf_out = pd.concat(dfs, axis=0, sort=False)\ndf_out.to_excel(out_path / f\"00_aggregated_{sheet}.xlsx\", index=False)\n","sub_path":"scripts/aggregate_tp6/aggregate_quest_cardio_tp6.py","file_name":"aggregate_quest_cardio_tp6.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"594234735","text":"import pytest\nimport sys\nimport random\nimport string\nimport json\n\nfrom app import create_app\n\ndef random_string_generator():\n allowed_chars = string.ascii_letters + string.punctuation\n size = 12\n return ''.join(random.choice(allowed_chars) for x in range(size))\n\nusername = random_string_generator()\npassword = random_string_generator()\n\nprint(\"Testing with \", username, \"as Admin\")\n\n@pytest.fixture\ndef client():\n app = create_app(\"test_config\")\n app.config['TESTING'] = True\n\n with app.test_client() as client:\n yield client\n\ndef test_register_admin(client):\n \"\"\"Make sure register works.\"\"\"\n\n res = register_as_admin(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"User registered.\\\"}\" in res\n\ndef test_login_admin(client):\n \"\"\"Make sure login works.\"\"\"\n\n res = login(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Logged in\\\"}\" in res\n\ndef test_read_admin(client):\n \"'Make sure crud/read works'\"\n login(client)\n res = read(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"crud/read msg\\\"}\" in res\n\ndef test_create_update_delete_admin(client):\n \"'Make sure crud/create works'\"\n res = create(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"New movie added.\\\"}\" in res\n \n res_json = json.loads(res.decode(\"utf-8\"))\n movie_id = res_json[0].get(\"id\")\n \n res = update(client, movie_id)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Changes Saved.\\\"}\" in res\n\n res = delete(client, movie_id)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Deleted.\\\"}\" in res\n\ndef test_search_admin(client):\n \"'Make sure crud/delete works'\"\n res = create(client)\n res_json = json.loads(res.decode(\"utf-8\"))\n movie_id = res_json[0].get(\"id\")\n \n res = search(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Search results returned successfully.\\\"}\" in res\n\n res = delete(client, movie_id)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Deleted.\\\"}\" in res\n\ndef test_logout_admin(client):\n \"'Make sure user/logout works'\"\n res = logout(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Logged Out\\\"}\" in res\n\ndef logout(client):\n login(client)\n return client.delete('user/logout').data\n\ndef search(client):\n login(client)\n return client.get('search/movies?name=movie&director=director').data\n\ndef delete(client, movie_id):\n login(client)\n return client.delete('crud/delete/' + str(movie_id)).data\n\n\ndef update(client, movie_id):\n login(client)\n return client.patch('crud/update/' + str(movie_id), json={\n \"name\" : \"movie\",\n \"director\" : \"director\",\n 
\"99popularity\": 50.0, \n \"genre\": [\"Action\"],\n \"imdb_score\": 5.0\n }).data\n\ndef create(client):\n login(client)\n return client.post('crud/create', json={\n \"name\" : \"movie\",\n \"director\" : \"director\",\n \"99popularity\": 50.0, \n \"genre\": [\"Action\"],\n \"imdb_score\": 5.0\n }).data\n\n\ndef read(client):\n return client.get(\"crud/read\").data\n\ndef register_as_admin(client):\n return client.post('user/register', json={\n \"user_name\" : username,\n \"password\" : password,\n \"user_role\" : \"admin\"\n }).data\n\ndef login(client):\n return client.post('user/login', json={\n \"user_name\" : username,\n \"password\" : password\n }).data\n\n\n ","sub_path":"tests/test_as_admin.py","file_name":"test_as_admin.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"422087527","text":"# Definition for a binary tree node.\r\nclass TreeNode(object):\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\n#class Solution:\r\n# def levelOrderBottom(self, root):\r\n# \"\"\"\r\n# :type root: TreeNode\r\n# :rtype: List[List[int]]\r\n# \"\"\"\r\n# # BFS+stack\r\n# leverTraversal=[]\r\n# if not root:\r\n# return leverTraversal\r\n# currLevel=[root]\r\n# while currLevel:\r\n# nextLevel=[] # use two containers, then swap curr and next\r\n# val=[]\r\n# for node in currLevel:\r\n# val.append(node.val)\r\n# if node.left:\r\n# nextLevel.append(node.left)\r\n# if node.right:\r\n# nextLevel.append(node.right)\r\n# currLevel=nextLevel\r\n# leverTraversal.append(val)\r\n# return leverTraversal[::-1]\r\n \r\n#class Solution:\r\n# def levelOrderBottom(self, root):\r\n# \"\"\"\r\n# :type root: TreeNode\r\n# :rtype: List[List[int]]\r\n# \"\"\"\r\n# # DFS+stack \r\n# levelTraversal=[]\r\n# self.preorder(root,0,levelTraversal)\r\n# levelTraversal.reverse() # bottom-up level order\r\n# return levelTraversal\r\n# \r\n# def preorder(self,root,level,res):\r\n# if root:\r\n# if len(res) {url['url']}: {url['time']}\"\n return {\"url\": url, \"rep\": rep, \"_id\": url_id}\n\n def edit(self, object_id):\n doc = self.get_document(object_id)\n print(doc['rep'])\n edit = input(\"Change alias or url: [a/u]: \")\n if edit.upper() == \"A\":\n new_alias = input(\"New alias: \")\n self.database.urls.find_one_and_update(\n doc['_id'],\n {\"$set\": {\"alias\": new_alias}}\n )\n print(new_alias)\n elif edit.upper() == \"U\":\n new_url = input(\"New url: \")\n new_url = self.database.valid_url(new_url)\n self.database.urls.find_one_and_update(\n doc['_id'],\n {\"$set\": {\"url\": new_url}}\n )\n print(new_url)\n\n def delete_document(self, object_id):\n doc = self.get_document(object_id)\n delete = input(f\"{doc['rep']}\\nDelete the document [Y/n]: \")\n if delete.upper() == \"Y\":\n print(self.database.urls.delete_one(doc['_id']))\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\", help=\"Edit with object id\")\n parser.add_argument(\"-d\", help=\"Delete delete document by id\")\n parser.add_argument(\"-a\", help=\"All\", action='store_true')\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n editer = Editer()\n command = parse()\n if command.e is not None:\n editer.edit(command.e)\n if command.d is not None:\n editer.delete_document(command.d)\n if command.a:\n 
editer.read_all()\n","sub_path":"editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"269411203","text":"# https://www.acmicpc.net/problem/1517\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\narray = list(map(int, input().split()))\nswap = 0\n\ndef mergeSort(array, s, e):\n\tglobal swap\n\tif s + 1 < e :\n\t\tmid = (s+e)//2\n\t\tsortedLeftArray = mergeSort(array,s,mid)\n\t\tsortedRightArray= mergeSort(array,mid,e)\n\t\ti = 0\n\t\tj = 0\n\t\tnewArray = [0]*(e-s)\n\t\tfor k in range(e-s):\n\t\t\tif i < mid-s and (j == e-mid or sortedLeftArray[i] <= sortedRightArray[j]):\n\t\t\t\tnewArray[k] = sortedLeftArray[i]\n\t\t\t\ti += 1\n\t\t\telse :\n\t\t\t\tnewArray[k] = sortedRightArray[j]\n\t\t\t\tif k < mid+j :\n\t\t\t\t\tswap += mid+j-k-s\n\t\t\t\tj += 1\n\telse :\n\t\tnewArray = array[s:e]\n\treturn newArray\nmergeSort(array,0,n)\nprint(swap)","sub_path":"dojinyou/code_3week/13_1517.py","file_name":"13_1517.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"568666828","text":"Import('_default_env')\n\n\nfiles = Split(\"\"\"\n ../../../Properties/AssemblyInfo.cs\n CrashLogDumper.cs\n Invoker.cs\n Option.cs\n OptionDialog.cs\n StackStatusHandler.cs\n\"\"\")\n\nlib = _default_env.CliLibrary('OssToolkitIos', files, CLILIBS=['mscorlib', 'OssCore', 'monotouch'])\n\nAlias('Lib', lib)\n\ndocs = _default_env.Doxygen('$hardware_dir/share/Docs/Tar/ToolkitIos.tar', files, DOXYGENINPUT='Linn/Toolkit/Ios', DOXYCLEANOUTPUTDIR='$hardware_dir/share/Docs/ToolkitIos', DOXYGENNAMESPACE='OssToolkitIos', DOXYGENEXCLUDE='*.svn*')\nAlias('Docs', docs)\n\n","sub_path":"LibUpnpCil/Toolkit/Linn/Toolkit/Ios/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"367403945","text":"from imageai.Detection import ObjectDetection\nimport os\n\nexecution_path = os.getcwd()\n\n# Create Detection Object\ndetector = ObjectDetection()\ndetector.setModelTypeAsRetinaNet()\ndetector.setModelPath( os.path.join(execution_path , \"resnet50_coco_best_v2.0.1.h5\"))\ndetector.loadModel()\n\n# Name of Target\nimage_name = 'cam_view.jpg'\nimage_target = 'processed_' + image_name\n\n#Run Detection\ndetections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , image_name), output_image_path=os.path.join(execution_path , image_target))\n\n# Creates local images of objects found\n# detections, extracted_images = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , \"image.jpg\"), output_image_path=os.path.join(execution_path , \"imagenew.jpg\"), extract_detected_objects=True)\n\n# Console output\nfor eachObject in detections:\n print(eachObject[\"name\"] , \" : \" , eachObject[\"percentage_probability\"] )","sub_path":"src/first_detection.py","file_name":"first_detection.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"391705312","text":"'''\nCreated on 18 Jun 2019\n\n@author: Kamil\n'''\n'''If you finished the previous project which compared the karma of two new comments, hopefully you learned a thing or two about receiving data from Reddit's API. 
Now you're going to take this a step further, and even have the opportunity to make a basic twitter bot.\n\n Create a program that receives data from the /r/todayilearned subreddit, and looks for new facts that have been posted.\n Each time the program comes across a new fact, the fact should be printed into the command line. However, phrases like \"TIL \", \"TIL that\", etc should be removed so the only thing that is printed is the fact.\n\nNew TIL API data here\n\nThere are a couple things to note about this since you'll more than likely be using a loop to check for new posts. According to Reddit's API Access Rules Page, the API pages are only updated once every thirty seconds, so you'll have to have your code pause for at least thirty seconds before it tries to find more posts. Secondly, if for some reason you decide to try to get data sooner than every thirty seconds, make sure to not send more than thirty requests per minute. That is the maximum you are allowed to do.\n\nThere is actually a lot you can do once your program starts receiving facts. Instead of simply printing the facts, here are some ideas for what you can do with them. If you currently do not feel like you can accomplish these ideas, feel free to come back later when you have more experience.\n\n Print the link to the source of the fact too.\n Try to further clean up the fact by adding punctuation to the end if it is missing, capitalize the first word, etc.\n Write the facts to a separate text file so you end up with a giant compilation of random facts.\n Create a bot that posts the facts to twitter. This may sound hard, but it's actually pretty simple by using the Python Twitter Tools module and following the guide posted here.\n Remember, the maximum amount of characters you can use in a tweet is only 280, so you'll have to filter out facts that are longer than that.\n By now you should be pretty familiar with python, so if you get ideas for improving your program, go for it!\n'''\n'''\nMY BOT:\nhttps://twitter.com/TodayILearnedb1\n'''\nimport requests\nimport json\nimport twitter\n\nclass TodayILearned():\n def __init__(self):\n self.link = 'https://www.reddit.com/r/todayilearned/new/.json'\n \n def get_info(self):\n try:\n r = requests.get(self.link, headers = {'User-agent': 'your bot 0.1'})\n r.raise_for_status()\n except requests.exceptions.HTTPError as error:\n print(f'There is problem:\\n{error}')\n return False\n new_til = json.loads(r.content)\n new_til = new_til[\"data\"][\"children\"][0]['data']['title']\n new_til = new_til.replace('TIL', '').replace('Til', '').strip()\n for _ in range(len(new_til) - 1):\n if new_til[0].isalpha() == False:\n new_til = new_til.replace(new_til[0], '').strip()#.capitalize()\n else:\n break\n new_til = new_til.split(' ', 1)\n if new_til[0].lower() == 'this' or new_til[0].lower() == 'that' or new_til[0].lower() == 'about' or new_til[0].lower() == 'of':\n new_til.pop(0)\n new_til = ' '.join(new_til)\n new_til = new_til[:1].upper() + new_til[1:]\n if new_til[-1].isalnum() == True:\n new_til += '.'\n return new_til if len(new_til) < 280 else False #change for 140 when twitter working \n \n def save_new_dict(self, new_dict):\n with open('til_news_base.json', 'w') as json_file:\n json.dump(new_dict, json_file, indent=2)\n \n def read_json_file(self):\n with open('til_news_base.json') as json_file:\n data = json.load(json_file)\n self.last_key = int(sorted(list(data.keys()))[-1])\n return data\n \n def post_on_twitter(self, new_post):\n TOKEN = 
'1141700351290224640-h5liK9wfQfOOizRN5RuIXyEgeJl4gc'\n TOKEN_KEY = 'ElS5g6TLLIOokBOJVPLYjeEOrziwhvqoDOTLT45e1vemx'\n CON_SEC = 'p8xbnj07lHkyqjw8lxJ9XFI0T'\n CON_SEC_KEY = 'h6TA0XxuIMNm6XXUgkUChEOYrnPhSaAUhagmvPWp7cwTA6XgaP'\n my_auth = twitter.OAuth(TOKEN,TOKEN_KEY,CON_SEC,CON_SEC_KEY)\n twit = twitter.Twitter(auth=my_auth)\n twit.statuses.update(status=new_post)\n \n def program(self):\n #first load the base from file\n dict_with_news = self.read_json_file()\n #second get new posts from reddit\n new_info = self.get_info()\n #check if new post in base or if is it last post\n if new_info != False:\n if new_info != dict_with_news[str(self.last_key)]:\n dict_with_news[str(self.last_key + 1)] = new_info\n print(new_info)\n #add to base if not\n self.save_new_dict(dict_with_news)\n #print new TIL on twitter\n try:\n self.post_on_twitter(new_info)\n except:\n print(\"There was a problem with adding news to twitter.\")\n \ndef program():\n class_til = TodayILearned()\n class_til.program()\n \nif __name__ == \"__main__\":\n program()\n","sub_path":"watch_for_new_TIL_facts/watch_for_new_TIL_facts.py","file_name":"watch_for_new_TIL_facts.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"337586133","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# =============================================================================\n# Version: 0.1 (September 14, 2013)\n# Author: Riccardo Angius (riccardo.angius@me.com)\n#\n# =============================================================================\n# Copyright (c) 2013. Riccardo Angius (riccardo.angius@me.com)\n# =============================================================================\n# This file is part of Pairses: A PAttern Induced RDF Statement Extraction System.\n#\n# This is fairly beta software. 
Please contact the author before usage.\n# =============================================================================\nimport jsonrpclib\nfrom json import loads, dumps\nimport os, hashlib\nfrom pickling import pickleDump, pickleLoad\nfrom configuration import * \nfrom classes import *\nimport unicodedata\ncachePath = cfg['snlpcachepath']\n\nclass StanfordCoreNLP():\n\tserver = jsonrpclib.Server(\"http://localhost:8080\")\n\n\tdef parse(self, text, useCache=True):\n\t\n\t\t# Fixes some idiosyncrasies due to wiki markup conversion and text input in Wikipedia\n\t\ttext = unicode(unicodedata.normalize('NFKD', text))\n\t\t\n\t\tif text.lower() == 'q' or text.lower() == 'eof':\n\t\t\t\"These strings will terminate the SCNLP tools, which we don't want\"\n\t\t\traise InvalidSentence()\n\t\t\n\t\tif len(text) >= 1000 or text.count(',') > 30:\n\t\t\t\"\"\"\tA bug in pexpect produces \\x07 chars and messes up\n\t\t\t\twhen input is >= 1024 chars (apparently on OS X only)\n\t\t\t\tJust to be on safe side, we'll ignore sentences with more\n\t\t\t \tthan 999 chars, as they are mostly long lists anyway.\n\t\t\t\"\"\"\n\t\t\traise InvalidSentence()\n\t\t\n\t\ttextHash = hashlib.sha224(text.encode(\"ascii\",\"replace\")).hexdigest()\n\t\t\n\t\tfilename = textHash + '.snlpcache'\n\t\tpath = os.path.join(cachePath, filename)\n\n\t\tif useCache and os.path.exists(path):\n\t\t\tresults = pickleLoad(path)\n\t\telse:\n\t\t\tparsed = self.server.parse(text)\n\t\t\tresults = loads(parsed)\n\n\t\t\tpickleDump(results, path)\n\t\t\t\n\t\treturn results","sub_path":"pairseslib/stanfordcorenlp.py","file_name":"stanfordcorenlp.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"644457222","text":"## Ex 1 - 3\r\nimport matplotlib.pyplot as plt\r\ndef eulerListe(h,L,R,U):\r\n t = [0]\r\n i = [0]\r\n k = 0\r\n while i[k]<=0.95*(U/R):\r\n t.append(t[k]+h)\r\n i.append(i[k] +h*(U/L - (R/L)*i[k]))\r\n k+=1\r\n t.append(t[k]+h)\r\n i.append(i[k] +h*(U/L - (R/L)*i[k])) \r\n return (t,i)\r\n\r\nt,i = eulerListe(0.0001,1,100,1)\r\nplt.plot(t,i)\r\nplt.show()\r\n\r\n## Ex 4\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\ndef eulerTableau(n,h,L,R,U):\r\n t = np.zeros(n+1)\r\n i = np.zeros(n+1)\r\n for k in range(1,n+1):\r\n t[k] = t[k-1] + h\r\n i[k] = i[k-1] + h*(U/L - (R/L)*i[k-1])\r\n return (i,t)\r\n \r\ni,t = eulerTableau(1000,0.0001,1,100,1)\r\nplt.plot(t,i)\r\nplt.show()\r\n## Ex 5\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef eulerTableau(n,h,L,R,U):\r\n t = np.zeros(n+1)\r\n i = np.zeros(n+1)\r\n for k in range(1,n+1):\r\n t[k] = t[k-1] + h\r\n i[k] = i[k-1] + h*(U/L - (R/L)*i[k-1])\r\n return (i,t)\r\nU = 1\r\nR = 100\r\nL = 1\r\nT = 0.1\r\nabs = np.linspace(0,0.1)\r\nord = [(U/R)*(1-np.exp(-(R/L)*k)) for k in abs]\r\nplt.plot(abs,ord)\r\n\r\ndef tableVar(periode,p,U,R,L):\r\n pts = int(periode//p)\r\n i,t = eulerTableau(pts,p,L,R,U)\r\n return (i,t)\r\ni,t = tableVar(T,0.0001,U,R,L)\r\nplt.plot(t,i)\r\ni,t = tableVar(T,0.001,U,R,L)\r\nplt.plot(t,i)\r\ni,t = tableVar(T,0.0005,U,R,L)\r\nplt.plot(t,i)\r\ni,t = tableVar(T,0.00001,U,R,L)\r\nplt.plot(t,i)\r\nplt.legend([\"Real\",\"p=0.0001\",\"p=0.001\",\"p=0.0005\",\"p=0.00001\"])\r\nplt.show()\r\n\"\"\"\r\nLes valeurs valides semblent celles inférieures à 0.001ms\r\n\"\"\"\r\n\r\n## Ex 6\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef eulerMalin(L,R,U):\r\n tau = L/R\r\n periode = 3*tau\r\n n = 1000\r\n h = periode/n\r\n \r\n 
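# after 3*tau the current has reached about 95% of its asymptote U/R, since 1 - e**-3 = 0.95\r\n    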
t = np.zeros(n+1)\r\n i = np.zeros(n+1)\r\n for k in range(1,n+1):\r\n t[k] = t[k-1] + h\r\n i[k] = i[k-1] + h*(U/L - (R/L)*i[k-1])\r\n return (i,t)\r\ni,t = eulerMalin(1,100,1)\r\nplt.plot(t,i)\r\n\r\nabs = np.linspace(0,0.1)\r\nord = [(U/R)*(1-np.exp(-(R/L)*k)) for k in abs]\r\nplt.plot(abs,ord)\r\nplt.legend([\"Euler Malin\",\"Real\"])\r\nplt.show()","sub_path":"TP 8 - Schéma d'Euler/ex 2.py","file_name":"ex 2.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"365178011","text":"'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n\n # if someone pastes a FILE, the content is None for SCF 13\n # and the clipboard is locked if not closed properly\n if not pcontents:\n user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n\n # ignore null character for strSource pointer\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text[:-1]))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n","sub_path":"kivy/core/clipboard/clipboard_winctypes.py","file_name":"clipboard_winctypes.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"314297454","text":"import os\nimport sys\nimport traceback\n\nimport gevent\nimport pytest\n\nfrom rotkehlchen.db.dbhandler import DBHandler\nfrom rotkehlchen.externalapis.etherscan import Etherscan\nfrom rotkehlchen.typing import ExternalService, ExternalServiceApiCredentials\n\n\n@pytest.fixture(scope='function')\ndef temp_etherscan(function_scope_messages_aggregator, tmpdir_factory):\n api_key = os.environ.get('ETHERSCAN_API_KEY', None)\n if not api_key:\n pytest.fail('No ETHERSCAN_API_KEY environment variable found.')\n directory = tmpdir_factory.mktemp('data')\n db = DBHandler(\n user_data_dir=directory,\n password='123',\n msg_aggregator=function_scope_messages_aggregator,\n )\n db.add_external_service_credentials(credentials=[\n 
ExternalServiceApiCredentials(service=ExternalService.ETHERSCAN, api_key=api_key),\n ])\n etherscan = Etherscan(database=db, msg_aggregator=function_scope_messages_aggregator)\n return etherscan\n\n\ndef _handle_killed_greenlets(greenlet: gevent.Greenlet) -> None:\n\n tb = ''.join(traceback.format_tb(greenlet.exc_info[2]))\n message = ('Greenlet died with exception: {}.\\n'\n 'Exception Name: {}\\nException Info: {}\\nTraceback:\\n {}'\n .format(\n greenlet.exception,\n greenlet.exc_info[0],\n greenlet.exc_info[1],\n tb,\n ))\n\n print(message)\n sys.exit(1)\n\n\n@pytest.mark.skipif(\n 'CI' in os.environ,\n reason='no real etherscan tests in Travis yet due to API key',\n)\ndef test_maximum_rate_limit_reached(temp_etherscan):\n \"\"\"\n Test that we can handle etherscan's rate limit repsponse properly\n\n Regression test for https://github.com/rotki/rotki/issues/772\"\n \"\"\"\n etherscan = temp_etherscan\n\n # Spam with concurrent requests for a bit. This triggers the problem\n count = 200\n while count > 0:\n greenlet = gevent.spawn(\n etherscan.get_account_balance,\n '0x25a63509FEF5D23FF226eb8004A3c1458D6F3AB8')\n greenlet.link_exception(_handle_killed_greenlets)\n greenlet = gevent.spawn(\n etherscan.eth_call,\n '0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4',\n '0xc455279100000000000000000000000027a2eaaa8bebea8d23db486fb49627c165baacb5',\n )\n greenlet.link_exception(_handle_killed_greenlets)\n gevent.sleep(0.001)\n count -= 1\n","sub_path":"rotkehlchen/tests/external_apis/test_etherscan.py","file_name":"test_etherscan.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"592178178","text":"# def solution(lis,a,b,stack,check):\n# addlis = [[a+1,b],[a-1,b],[a,b+1],[a,b-1]]\n# for i in addlis:\n# if 0 <= i[0] < len(lis) and 0<= i[1] < len(lis):\n# if lis[i[0]][i[1]] != 1 and stack.count(i) == 0:\n# stack.append(i)\n# check.append(lis[i[0]][i[1]])\n# solution(lis,i[0],i[1],stack,check)\n\n# T = int(input())\n# for t in range(1,T+1):\n# N = int(input())\n# lis = []\n# for a in range(N):\n# lis.append(list(map(int, input().strip())))\n \n# stack = []\n# check = []\n# for a in range(0,len(lis)):\n# for b in range(0,len(lis)):\n# if lis[a][b] == 2:\n# stack.append([a,b])\n# check.append(lis[a][b])\n# break\n# solution(lis,stack[-1][0],stack[-1][1],stack,check)\n# if check.count(3) == 1:\n# print(\"{} {}\".format(t,1))\n# else:\n# print(\"{} {}\".format(t,0))\n \ndef dfs(lis,stack):\n while True:\n a, b = stack.pop(-1)\n addlis = [[a+1,b],[a-1,b],[a,b+1],[a,b-1]]\n for i in addlis:\n if 0 <= i[0] < len(lis) and 0<= i[1] < len(lis):\n if lis[i[0]][i[1]] == 0:\n lis[i[0]][i[1]] = 4\n stack.append(i)\n elif lis[i[0]][i[1]] == 3:\n return 1\n if len(stack) == 0:\n break\n return 0\n\nT = int(input())\nfor t in range(1,T+1):\n N = int(input())\n lis = []\n for a in range(N):\n lis.append(list(map(int, input().strip())))\n stack = []\n for j in range(0,len(lis)):\n for k in range(0,len(lis)):\n if lis[j][k] == 2:\n stack.append([j,k])\n print(\"#{} {}\".format(t,dfs(lis,stack)))\n\n \n","sub_path":"KYC/algorithm/Stack/StackPractice6.py","file_name":"StackPractice6.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"71064167","text":"from pyspark.sql.types import (\n FloatType,\n StringType,\n StructType,\n StructField,\n)\n\nimport pandas as pd\nimport databricks.koalas as ks\nimport os\nimport contextlib\nfrom 
metrics import compute_score\n\nimport h2o\nfrom h2o.estimators import H2OXGBoostEstimator\n\nfrom model.interface import ModelInterface\n\n# WARNING: this class only works with datasets that fit in memory, as it casts Spark\n# dataframes to Pandas dataframes (H2O natively does not work with Spark).\n# Make sure you have enough memory on your machine.\n\n# Tested with the following features enabled:\n# \"engaged_with_user_follower_count\"\n# \"engaged_with_user_following_count\"\n# \"engaging_user_follower_count\"\n# \"engaging_user_following_count\"\n# all targets\n\nfrom pathlib import Path\nfrom constants import ROOT_DIR\n\n\nclass Model(ModelInterface):\n def __init__(self, include_targets=True, seed=None):\n with open(os.devnull, \"w\") as devnull:\n with contextlib.redirect_stdout(devnull):\n h2o.init()\n h2o.no_progress()\n\n is_xgboost_available = H2OXGBoostEstimator.available()\n\n if not is_xgboost_available:\n raise RuntimeError(\"H2OXGBoostEstimator is not available!\")\n\n self.model = None\n self.seed = seed\n\n # Specify default and custom features to use in the model\n self.enabled_features = [\n \"engaged_with_user_follower_count\",\n \"engaged_with_user_following_count\",\n \"engaging_user_follower_count\",\n \"engaging_user_following_count\",\n ]\n\n self.labels = [\"reply\", \"retweet\", \"retweet_with_comment\", \"like\"]\n\n # Specify extractors and auxiliaries required by the enabled features\n self.enabled_auxiliaries = []\n self.enabled_extractors = [\n \"engaged_with_user_follower_count\",\n \"engaged_with_user_following_count\",\n \"engaging_user_follower_count\",\n \"engaging_user_following_count\",\n \"binarize_timestamps\",\n ]\n if include_targets:\n self.enabled_extractors.append(\"binarize_timestamps\")\n\n @staticmethod\n def serialized_model_path_for_target(target: str) -> str:\n p = (\n Path(ROOT_DIR)\n / \"../serialized_models\"\n / f\"h2o_xgboost_baseline_{target}.model\"\n )\n return str(p.resolve())\n\n def fit(self, train_data, _valid_data, _hyperparams):\n \"\"\"Fit model to given training data and validate it.\n Returns the best model found in validation.\"\"\"\n\n # Cast to h2o frames\n train_frame = h2o.H2OFrame(train_data.to_pandas())\n\n # TODO: hyperparameter tuning; unbalancement handling?\n\n models = dict()\n for label in self.labels:\n ignored = set(self.labels) - set(label)\n model = H2OXGBoostEstimator(seed=self.seed)\n model.train(\n y=label,\n ignored_columns=list(ignored),\n training_frame=train_frame\n )\n model.save_mojo(self.serialized_model_path_for_target(label))\n models[label] = model\n\n # Save (best on valid) trained model\n self.model = models\n\n return models\n\n def predict(self, test_data):\n \"\"\"Predict test data. 
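Casts the test set to an H2OFrame, scores each engagement label with its trained model, and reattaches the (tweet_id, engaging_user_id) index. 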
Returns predictions.\"\"\"\n schema = StructType(\n [\n StructField(\"reply\", FloatType(), False),\n StructField(\"retweet\", FloatType(), False),\n StructField(\"retweet_with_comment\", FloatType(), False),\n StructField(\"like\", FloatType(), False),\n StructField(\"tweet_id\", StringType(), False),\n StructField(\"engaging_user_id\", StringType(), False),\n ]\n )\n\n # DataFrame.to_pandas() drops the index, so we need to save it\n # separately and reattach it later.\n\n # H2OFrame does not provide an index like pandas, but rather appears\n # to have an internal numerical index to preserve ordering.\n # https://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/frame.html#h2oframe\n\n # So we trust that H2O keeps everything in order and drop our custom\n # index [\"tweet_id\", \"engaging_user_id\"] in favour of a \"standard\"\n # numerical index such as 0, 1, 2, ..., only to reattach it later\n # when returning the predictions DataFrame.\n\n ks_test_data_index = test_data.reset_index(drop=False)\n ks_index = ks_test_data_index[[\"tweet_id\", \"engaging_user_id\"]]\n\n h2oframe_test = h2o.H2OFrame(test_data.to_pandas())\n\n df_predictions = pd.DataFrame()\n for label in self.labels:\n df_predictions[label] = (\n self.model[label].predict(h2oframe_test).as_data_frame()[\"True\"].values\n )\n\n # Reattach real index (Lord have mercy)\n df_predictions = df_predictions.join(ks_index.to_pandas())\n ks_predictions = ks.DataFrame(df_predictions)\n\n return ks_predictions.to_spark()\n\n def load_pretrained(self):\n self.model = {}\n for label in self.labels:\n # Select the first model in the directory\n p = str(\n next(\n Path(self.serialized_model_path_for_target(label)).iterdir()\n ).resolve()\n )\n with open(os.devnull, \"w\") as devnull:\n with contextlib.redirect_stdout(devnull):\n self.model[label] = h2o.import_mojo(p)\n\n def save_to_logs(self, metrics):\n \"\"\"Save the results of the latest test performed to logs.\"\"\"\n pass\n","sub_path":"src/model/h2o_xgboost_baseline.py","file_name":"h2o_xgboost_baseline.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"324024682","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 30 18:17:51 2020\r\n\r\n@author: Utilisateur\r\n\"\"\"\r\n\r\n\r\n\r\nimport csv \r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport torch\r\nfrom torch.utils import data\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nfrom math import sqrt \r\n\r\nnp.random.seed(1)\r\n\r\n\r\n\r\n# \"Pour fichier text\"\r\ndata_file_path = 'C:/Users/Utilisateur/Documents/IA/V2O3_Pump_0.9mW_Probe_0.05mW8Step_1micron_210918_Adjusted.txt'\r\n# data = pd.read_csv(data_file_path, header = None,index_col = None, sep ='\\t')\r\n\r\n# \" pour fichier ADF\"\r\n# data2_file_path = 'C:/Users/Utilisateur/Documents/IA/V2O3_Pump_0.05mW_Probe_0.05mW_Step_1micron_26091800.ADF'\r\n# data2 = pd.read_csv(data2_file_path,header = None, index_col = None , sep = '\\t', skiprows = lambda x : x<= 9)\r\n\r\n\r\n\r\ndef conversion(L): # permet le traitement de fichier csv comme une liste\r\n T=[]\r\n for i in L:\r\n T.append(float(i.replace(',','.')))\r\n return(T)\r\n \r\ntime,ampl,phase =[],[],[]\r\nwith open(data_file_path, 'r') as csvfile: #ce programme va permettre de traiter tout enregistrement des deux capteurs\r\n spamreader = csv.reader(csvfile, delimiter='\\t')\r\n for row in spamreader:\r\n time.append(row[0])\r\n ampl.append(row[1])\r\n 
phase.append(row[2])\r\n \r\ntime=conversion(time) \r\nampl=conversion(ampl)\r\nphase = conversion(phase)\r\n\r\n\r\ntime_vec= np.array(time)\r\nampl_vec = np.array(ampl)\r\nphase_vec = np.array(phase)\r\n\r\nX_train = time_vec[:200]\r\nX_val = time_vec[200:281]\r\nX_test = time_vec[281:361]\r\n\r\ny_train = ampl_vec[:200]\r\ny_val = ampl_vec[200:281]\r\ny_test = ampl_vec[281:361]\r\n\r\n# calcul de mean_X_train et std_X_train, idem pour y\r\n \r\n\r\nmean_X_train = np.mean(X_train)\r\nstd_X_train = np.std(X_train)\r\n\r\nmean_y_train = np.mean(y_train)\r\nstd_y_train =np.std(y_train)\r\n\r\n\r\n\r\n\r\nclass MyDataset(data.Dataset):\r\n\r\n#Characterizes a dataset for Pytorch\r\n def __init__(self, data_feature, data_target):\r\n #Initialization\r\n self.data_feature = data_feature\r\n self.data_target = data_target\r\n # self.transformed_feature = self.transforms_feature()\r\n # self.transformed_target = self.transforms_target()\r\n \r\n def __len__(self):\r\n #Denotes the total number of samples\r\n return len(self.data_feature)\r\n \r\n def __getitem__(self, index):\r\n #Generates one sample of data\r\n # Select sample\r\n # data_feature = torch.from_numpy(self.transformed_feature[index]).float()\r\n # data_target = torch.from_numpy(self.transformed_target[index]).float()\r\n # return data_feature, data_target\r\n X_train_normalized = (self.data_feature[index] - mean_X_train) / std_X_train\r\n y_train_normalized = (self.data_target[index] - mean_y_train) / std_y_train\r\n return torch.from_numpy(np.array(X_train_normalized,ndmin=1)).float(), torch.from_numpy(np.array(y_train_normalized, ndmin = 1)).float()\r\n \r\n \r\ntraining_set = MyDataset(X_train,y_train) # on charge nos données\r\ntrain_loading = torch.utils.data.DataLoader(training_set, batch_size= 100)\r\n \r\nval_set = MyDataset(X_val, y_val) \r\nval_loading = torch.utils.data.DataLoader(val_set, batch_size= 100)\r\n \r\ntest_set = MyDataset(X_test,y_test) \r\ntest_loading = torch.utils.data.DataLoader(test_set, batch_size= 100)\r\n\r\n\r\n# Ecriture du réseau de neurones (reprise du tp_deep)\r\n\r\n\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.FC1 = nn.Linear(1,6)\r\n self.FC2 = nn.Linear(6, 1)\r\n def forward(self, x):\r\n x = F.relu(self.FC1(x)) \r\n x = self.FC2(x)\r\n return x\r\n\r\nmodel = Net()\r\n\r\n\r\ncriterion = nn.MSELoss()\r\n#optimizer = torch.optim.SGD(model.parameters(),lr=0.0001, weight_decay= 0.001, momentum = 0.9)\r\noptimizer = torch.optim.Adam(model.parameters(), lr = 0.03,\r\n weight_decay = 0.001) \r\n\r\nloss_list_train = []\r\nloss_list_val = []\r\nloss_list= []\r\nloss_list_test = []\r\n\r\ndef train(net, train_loader, optimizer, epoch):\r\n net.train()\r\n total_loss=0\r\n for idx,(data, target) in enumerate(train_loader, 0):\r\n #data, target = data.to(device), target.to(device)\r\n optimizer.zero_grad()\r\n outputs = net(data)\r\n loss = criterion(outputs,target)\r\n loss.backward()\r\n total_loss +=loss.cpu().item()\r\n optimizer.step()\r\n loss_list_train.append(total_loss/len(train_loader))\r\n #torch.optim.lr_scheduler.step()\r\n #print('Epoch:', epoch , 'average training loss ', total_loss/ len(train_loader))\r\n\r\n\r\ndef test(net,test_loader,L):\r\n net.eval()\r\n total_loss = 0\r\n for idx,(data, target) in enumerate(test_loader,0):\r\n outputs = net(data)\r\n outputs = outputs * std_X_train + mean_X_train\r\n target = target * std_y_train + mean_y_train\r\n loss = criterion(outputs,target)\r\n total_loss += sqrt(loss.cpu().item())\r\n 
L.append(total_loss/len(test_loader))\r\n #print('average testing loss', total_loss/len(test_loader))\r\n \r\ndef test_no_norm(net,test_loader,L):\r\n net.eval()\r\n total_loss = 0\r\n for idx,(data, target) in enumerate(test_loader,0):\r\n outputs = net(data)\r\n loss = criterion(outputs,target)\r\n total_loss += sqrt(loss.cpu().item())\r\n L.append(total_loss/len(test_loader))\r\n #print('average testing loss', total_loss/len(test_loader))\r\n \r\n \r\nfor epoch in range(50): \r\n train(model,train_loading,optimizer,epoch)\r\n test(model,val_loading,loss_list_val)\r\n test_no_norm(model, val_loading,loss_list)\r\n test_no_norm(model,test_loading,loss_list_test)\r\nprint('Epoch:', epoch , 'average training loss ', loss_list_train[-1])\r\nprint( 'average testing loss ', loss_list_val[-1])\r\n \r\n\r\nplt.figure(2)\r\nplt.plot(loss_list_train,'r',label = 'Training loss')\r\n# plt.plot(loss_list,'g',label = ' Validation loss')\r\n# plt.plot(loss_list_test,'b',label = ' Testing loss')\r\n# plt.legend()\r\n \r\n","sub_path":"nn_data_bosch.py","file_name":"nn_data_bosch.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"409788173","text":"from types import SimpleNamespace\nfrom django.contrib.contenttypes.models import ContentType\nfrom exponent_server_sdk import PushMessage, PushClient\nfrom comments.models import Comment, Reply\nfrom common.abstract_models import AbstractPostModel\nfrom messaging.models import Message\nfrom notifications.models import Notification\nfrom notifications.serializers import NotificationSerializer\n\n\n# A function that sends push notifications to all tagged users for a shared item\ndef create_tagged_user_notification(\n created_data: AbstractPostModel or Comment or Message or Reply,\n) -> None:\n tagged_users = created_data.tagged_users.all()\n if len(tagged_users) > 0:\n content_type = ContentType.objects.get_for_model(created_data)\n if content_type.model in [\"ad\", \"event\"]:\n content = f\"{created_data.creator.preferred_name} has tagged you in an {content_type.model}!\"\n else:\n content = f\"{created_data.creator.preferred_name} has tagged you in a {content_type.model}!\"\n notification = Notification(\n content=content,\n content_type=content_type,\n object_id=created_data.id,\n creator_id=created_data.creator.id,\n )\n notification.save()\n notification.receivers.add(*tagged_users)\n send_push_notifications(notification)\n\n\n# A function that sends push notifications to the receivers of a notification object\ndef send_push_notifications(notification: Notification) -> None:\n push_messages = []\n request = SimpleNamespace()\n request.user = notification.creator\n for receiver in notification.receivers.all():\n for device in receiver.devices.all():\n push_messages.append(\n PushMessage(\n to=device.expo_push_token,\n body=notification.content,\n data=dict(\n NotificationSerializer(\n notification, context={\"request\": request}\n ).data\n ),\n )\n )\n\n PushClient().publish_multiple(push_messages)\n","sub_path":"api/src/common/notification_helpers.py","file_name":"notification_helpers.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"618840553","text":"from flask import Flask, abort\nfrom flask.ext import restful\nfrom flask.ext.restful import reqparse\nimport requests\nimport collections\nimport json\nimport pygerduty\n\npagerdutyAPIKey = 'xxx'\npagerdutyOrg = 
'xxx'\n\nclass PagerDutyIncidentsAPI(restful.Resource):\n def __init__(self):\n self.pg = pygerduty.PagerDuty(pagerdutyOrg, pagerdutyAPIKey)\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('user', type=str, default=\"paul.schooss@klout.com\", help=\"Pager Duty username preforming action\")\n self.parser.add_argument('action', type=str, default=\"NOOP\", help=\"Action for the incident(s), resolve and acknowledge are valid\")\n self.parser.add_argument('type', type=str, default='triggered,acknowledged', help=\"Type of incident e.g. triggered, acknowledge, etc.\")\n super(PagerDutyIncidentsAPI, self).__init__()\n\n def get(self):\n args = self.parser.parse_args()\n return [p.to_json() for p in pg.incidents.list(status=args['type'])]\n\n def put(self):\n args = self.parser.parse_args()\n me = next(self.pg.users.list(query=args['user'], limit=1))\n if args['action'] == 'resolve':\n for incident in self.pg.incidents.list(status=args['type'],assigned_to_user=me.id):\n incident.resolve(requester_id=me.id)\n return \"All incidents resolved for user %s\" % args['user']\n if args['action'] == 'acknowledge':\n for incident in self.pg.incidents.list(status=args['type'],assigned_to_user=me.id):\n incident.acknowledge(requester_id=me.id)\n return \"All incidents acknowledged for user %s\" % args['user']\n return \"Action not valid: %s\" % args['action']\n\n# class PagerDutyIncidentAPI(restful.Resource):\n# def __init__(self):\n# self.parser = reqparse.RequestParser()\n# self.parser.add_argument('user', type=str, default=\"PI8NLOA\", location=\"json\")\n# super(TaskAPI, self).__init__()\n# def put(self,incidentNumber):\n# pass\n","sub_path":"resources/pagerduty.py","file_name":"pagerduty.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"613958087","text":"from __future__ import print_function\n###############################################################\n#\n# Skeleton top job options for ESD->AOD\n# Put here outputs that require rec.doAOD=True\n#\n# New version for revamped job transforms\n#\n# $Id: skeleton.ESDtoAOD_tf.py 700697 2015-10-15 09:48:11Z lerrenst $\n#\n#==============================================================\n\n# Common job options disable most RecExCommon by default. 
Re-enable below on demand.\ninclude(\"RecJobTransforms/CommonRecoSkeletonJobOptions.py\")\nrec.doAOD=True\n\n#from AthenaCommon.Logging import logging\nimport logging\nrecoLog = logging.getLogger('esd_to_aod')\nrecoLog.info( '****************** STARTING ESD->AOD MAKING *****************' )\n\nfrom AthenaCommon.AppMgr import ServiceMgr; import AthenaPoolCnvSvc.AthenaPool\nfrom AthenaCommon.AthenaCommonFlags import athenaCommonFlags\n\n## Input\nif hasattr(runArgs,\"inputFile\"): athenaCommonFlags.FilesInput.set_Value_and_Lock( runArgs.inputFile )\nif hasattr(runArgs,\"inputESDFile\"):\n globalflags.InputFormat.set_Value_and_Lock('pool')\n rec.readESD.set_Value_and_Lock( True )\n rec.readRDO.set_Value_and_Lock( False )\n athenaCommonFlags.PoolESDInput.set_Value_and_Lock( runArgs.inputESDFile )\n\n## Pre-exec\nif hasattr(runArgs,\"preExec\"):\n recoLog.info(\"transform pre-exec\")\n for cmd in runArgs.preExec:\n recoLog.info(cmd)\n exec(cmd)\n\n## Pre-include\nif hasattr(runArgs,\"preInclude\"): \n for fragment in runArgs.preInclude:\n print(\"preInclude\",fragment)\n include(fragment)\n\n## Outputs\nif hasattr(runArgs,\"outputAODFile\"):\n rec.doAOD.set_Value_and_Lock( True )\n rec.doWriteAOD.set_Value_and_Lock( True ) \n athenaCommonFlags.PoolAODOutput.set_Value_and_Lock( runArgs.outputAODFile )\n # Begin temporary trigger block\n if TriggerFlags.doMT():\n # Don't run any trigger - only pass the HLT contents from ESD to AOD\n from RecExConfig.RecAlgsFlags import recAlgs\n recAlgs.doTrigger.set_Value_and_Lock( False )\n rec.doTrigger.set_Value_and_Lock( False )\n # Add HLT output\n from TriggerJobOpts.HLTTriggerResultGetter import HLTTriggerResultGetter\n hltOutput = HLTTriggerResultGetter()\n # Add Trigger menu metadata\n if rec.doFileMetaData():\n from RecExConfig.ObjKeyStore import objKeyStore\n metadataItems = [ \"xAOD::TriggerMenuContainer#TriggerMenu\",\n \"xAOD::TriggerMenuAuxContainer#TriggerMenuAux.\" ]\n objKeyStore.addManyTypesMetaData( metadataItems )\n else: # not TriggerFlags.doMT()\n pass # See TriggerJobOpts/python/TriggerGetter.py for Run 2. 
Called by RecExCommon\n\nif hasattr(runArgs,\"outputTAGFile\"):\n # should be used as outputTAGFile_e2a=myTAG.root so that it does not trigger AODtoTAG\n # if writing TAG file, need AOD object in any case\n rec.doAOD.set_Value_and_Lock( True )\n rec.doWriteTAG.set_Value_and_Lock( True )\n athenaCommonFlags.PoolTAGOutput.set_Value_and_Lock( runArgs.outputTAGFile )\n\nif hasattr(runArgs,\"tmpAOD\"):\n rec.doAOD.set_Value_and_Lock( True )\n rec.doWriteAOD.set_Value_and_Lock( True ) \n athenaCommonFlags.PoolAODOutput.set_Value_and_Lock( runArgs.tmpAOD )\n\nif hasattr(runArgs,\"outputHIST_AOD_INTFile\"):\n rec.doMonitoring.set_Value_and_Lock(True)\n from AthenaMonitoring.DQMonFlags import DQMonFlags\n DQMonFlags.histogramFile.set_Value_and_Lock( runArgs.outputHIST_AOD_INTFile )\n\nif hasattr(runArgs,\"outputNTUP_BTAGFile\"):\n from BTagging.BTaggingFlags import BTaggingFlags\n BTaggingFlags.doJetTagNtuple = True\n BTaggingFlags.JetTagNtupleName = runArgs.outputNTUP_BTAGFile\n\nif hasattr(runArgs, \"outputNTUP_HIGHMULTFile\"):\n from TrigMbD3PDMaker.TrigMbD3PDMakerFlags import trigMbD3PDflags\n trigMbD3PDflags.FileName=runArgs.outputNTUP_HIGHMULTFile\n include(\"TrigMbD3PDMaker/HighMultD3PD_jobOptions.py\")\n\nif hasattr(runArgs,\"outputNTUP_ENHBIASFile\"):\n from TrigCostAthena.TrigCostAthenaFlags import TrigCostAthenaFlags\n TrigCostAthenaFlags.StoreNtVerticesOutputFile.set_Value_and_Lock( runArgs.outputNTUP_ENHBIASFile )\n TrigCostAthenaFlags.DoStoreNtVertices.set_Value_and_Lock( True )\n if hasattr(runArgs,\"inputESDFile\") and not hasattr(runArgs,\"inputFile\"):\n athenaCommonFlags.FilesInput.set_Value_and_Lock( runArgs.inputESDFile )\n include(\"TrigCostAthena/ESDtoNTUP_ENHBIAS.py\")\n\nif hasattr(runArgs,\"outputHIST_PHYSVALMONFile\"):\n rec.doPhysValMonHists=True\n \n ## Setup the output file(s):\n from GaudiSvc.GaudiSvcConf import THistSvc\n svcMgr += THistSvc()\n output=svcMgr.THistSvc.Output\n svcMgr.THistSvc.Output+= [\"PhysValMon DATAFILE='\"+runArgs.outputHIST_PHYSVALMONFile+\"' OPT='RECREATE'\"]\n # now done in RecExCommon_topOption to ensure the right ordering of algs.\n # include(\"PhysValMon/PhysValMon_RecoOpt.py\")\n \nif hasattr(runArgs, 'outputXML_JiveXMLFile'):\n jp.Rec.doJiveXML.set_Value_and_Lock(True)\n\nrec.OutputFileNameForRecoStep=\"ESDtoAOD\"\n\n#========================================================\n# Central topOptions (this is one is a string not a list)\n#========================================================\nif hasattr(runArgs,\"topOptions\"): include(runArgs.topOptions)\nelse: include( \"RecExCommon/RecExCommon_topOptions.py\" )\n\n# Remove unwanted back navigation to ESD when ESD is temporary\nif hasattr(runArgs,\"outputAODFile\"):\n if hasattr(runArgs,\"ESDFileIO\") and runArgs.ESDFileIO == \"temporary\":\n try:\n StreamAOD.ExtendProvenanceRecord = False\n except:\n recoLog.info(\"StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. 
Check your flags.\")\n\n#D3PDMaker outputs\nif hasattr(runArgs,\"outputNTUP_MINBIASFile\"):\n from D3PDMakerConfig.D3PDProdFlags import prodFlags\n prodFlags.WriteMinBiasD3PD.FileName = runArgs.outputNTUP_MINBIASFile\n prodFlags.WriteMinBiasD3PD.set_Value_and_Lock( True )\n include( prodFlags.WriteMinBiasD3PD.DPDMakerScript )\n pass\n\nif hasattr(runArgs,\"outputNTUP_TRIGFile\"):\n from D3PDMakerConfig.D3PDProdFlags import prodFlags\n prodFlags.WriteTriggerD3PD.FileName = runArgs.outputNTUP_TRIGFile\n prodFlags.WriteTriggerD3PD.set_Value_and_Lock( True )\n include( prodFlags.WriteTriggerD3PD.DPDMakerScript )\n pass\n\nif hasattr(runArgs,\"outputDESDM_BEAMSPOTFile\"):\n #needs to be used with: preInclude=InDetBeamSpotFinder/BeamSpotRecoPreInclude_standard.py\n from InDetBeamSpotFinder import BeamSpotDPDFlags \n primDPD.WriteDESDM_BEAMSPOTStream.FileName=runArgs.outputDESDM_BEAMSPOTFile\n primDPD.WriteDESDM_BEAMSPOTStream.set_Value_and_Lock( True )\n include(\"InDetBeamSpotFinder/DESDM_BEAMSPOTFragment.py\")\n\n#==========================================================\n# Use LZIB for compression of temporary outputs of AthenaMP\n#==========================================================\nif hasattr(runArgs, \"outputAODFile\") and '_000' in runArgs.outputAODFile:\n ServiceMgr.AthenaPoolCnvSvc.PoolAttributes += [ \"DatabaseName = '\" + athenaCommonFlags.PoolAODOutput()+ \"'; COMPRESSION_ALGORITHM = '1'\" ]\n ServiceMgr.AthenaPoolCnvSvc.PoolAttributes += [ \"DatabaseName = '\" + athenaCommonFlags.PoolAODOutput()+ \"'; COMPRESSION_LEVEL = '1'\" ]\n\n## Post-include\nif hasattr(runArgs,\"postInclude\"): \n for fragment in runArgs.postInclude:\n include(fragment)\n\n## Post-exec\nif hasattr(runArgs,\"postExec\"):\n recoLog.info(\"transform post-exec\")\n for cmd in runArgs.postExec:\n recoLog.info(cmd)\n exec(cmd)\n","sub_path":"Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD_tf.py","file_name":"skeleton.ESDtoAOD_tf.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"163018388","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n#set title\n\nimage = Image.open('images/oyster.png')\nst.image(image, width = 800)\n\ndef main():\n activities = ['Intro to the Chesapeake Bay Challenge', 'Data Preparation',\n 'Data Visualization', 'Total Nitrogen Model']\n option = st.sidebar.selectbox('Selection Option:', activities)\n\n#Intro\n if option == 'Intro to the Chesapeake Bay Challenge':\n st.title('Intro to the Chesapeake Bay Challenge')\n title_page = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Intro to the Chesapeake Bay Challenge</h2>
</div>
\n
\n \"\"\"\n st.markdown(title_page,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Sidebar</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n title_write = \"\"\"\n put writing here Jen\n \"\"\"\n\n st.markdown(title_write,unsafe_allow_html=True)\n\n\n#Data Preparation\n elif option == 'Data Preparation':\n st.title('Data Preparation')\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Data Preparation</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Sidebar</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n\n explorationwrite_up = \"\"\"\n Jen write here\n \"\"\"\n st.markdown(explorationwrite_up, unsafe_allow_html=True)\n\n image = Image.open('images/oyster2.png')\n st.image(image, width = 800)\n\n\n#Data Visualization\n elif option == 'Data Visualization':\n st.title('Data Visualization')\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Data Visualization</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Sidebar</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n\n vizwrite_up = \"\"\"\n Jen write here\n ```python\n This is how I write code here.\n ```\n \"\"\"\n st.markdown(vizwrite_up, unsafe_allow_html=True)\n\n image = Image.open('images/oyster2.png')\n st.image(image, width = 800)\n\n#Nitrogen Modeling\n elif option == 'Total Nitrogen Model':\n st.title('Total Nitrogen Model')\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Total Nitrogen Model</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n
\n
<div style=\"background-color:tomato;padding:10px\">
<h2 style=\"color:white;text-align:center;\">Sidebar</h2>
</div>
\n
\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n\n modelwrite_up = \"\"\"\n Jen write here\n \"\"\"\n st.markdown(modelwrite_up, unsafe_allow_html=True)\n\n image = Image.open('images/oyster2.png')\n st.image(image, width = 800)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hackthebay.py","file_name":"hackthebay.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"229359526","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\n# -------------------------------------------------------------------------\n\n\nfrom ..idasix import QtWidgets\n\nfrom .base import BaseDialog\n\n\nclass GuiDialog(BaseDialog, QtWidgets.QDialog):\n\n def __init__(self, title=\"\", modal=True, **kwargs):\n super(GuiDialog, self).__init__(**kwargs)\n self.setModal(modal)\n self.setWindowTitle(title)\n self.response = None\n self.statusLbl = None\n\n self.base_layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.base_layout)\n\n def bottom_layout(self, ok_text=\"&Ok\", cencel_text=\"&Cancel\"):\n self.statusLbl = QtWidgets.QLabel()\n self.base_layout.addWidget(self.statusLbl)\n\n ok_btn = QtWidgets.QPushButton(ok_text)\n ok_btn.setDefault(True)\n cancel_btn = QtWidgets.QPushButton(cencel_text)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n ok_btn.setSizePolicy(size_policy)\n cancel_btn.setSizePolicy(size_policy)\n button_lyt = QtWidgets.QHBoxLayout()\n button_lyt.addWidget(ok_btn)\n button_lyt.addWidget(cancel_btn)\n self.base_layout.addLayout(button_lyt)\n\n ok_btn.clicked.connect(self.submit_base)\n cancel_btn.clicked.connect(self.reject)\n\n def exception_base(self, exception):\n super(GuiDialog, self).exception_base(exception)\n if hasattr(exception, 'errors'):\n errors = (\"{}: {}\".format(k, \", \".join(v))\n for k, v in exception.errors())\n exception_string = \"\\t\" + \"\\n\\t\".join(errors)\n elif hasattr(exception, 'message'):\n exception_string = exception.message\n else:\n exception_string = str(exception)\n self.statusLbl.setText(\"Error(s) occured:\\n{}\".format(exception_string))\n self.statusLbl.setStyleSheet(\"color: red;\")\n","sub_path":"ida_plugins/rematch/rematch/dialogs/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"398838661","text":"import graphene\nfrom lingvodoc.schema.gql_holders import (\n LingvodocObjectType,\n CompositeIdHolder,\n CreatedAt,\n Relationship,\n SelfHolder,\n FieldHolder,\n ParentLink,\n MarkedForDeletion,\n Position,\n client_id_check,\n del_object,\n ResponseError,\n LingvodocID,\n)\nfrom lingvodoc.models import (\n DBSession,\n DictionaryPerspectiveToField as dbDictionaryPerspectiveToField,\n)\n\nfrom lingvodoc.utils.creation import create_dictionary_persp_to_field\n\nclass Column(LingvodocObjectType):\n \"\"\"\n #created_at | timestamp without time zone | NOT NULL\n #object_id | bigint | NOT NULL\n #client_id | bigint | NOT NULL\n #parent_object_id | bigint |\n #parent_client_id | bigint |\n #self_client_id | bigint |\n #self_object_id | bigint |\n #field_client_id | bigint | NOT NULL\n #field_object_id | bigint | NOT NULL\n #link_client_id | bigint |\n #link_object_id | bigint |\n #marked_for_deletion | boolean | NOT NULL\n #position | integer | NOT NULL\n \"\"\"\n dbType = dbDictionaryPerspectiveToField\n\n class Meta:\n interfaces = (CreatedAt,\n CompositeIdHolder,\n 
Relationship,\n SelfHolder,\n FieldHolder,\n ParentLink,\n MarkedForDeletion,\n Position)\n pass\n\n\nclass CreateColumn(graphene.Mutation):\n \"\"\"\n example:\n mutation {\n create_column(parent_id: [1204,19664], field_id: [66, 6],\n position: 1) {\n triumph\n column{\n id\n position\n }\n }\n }\n\n (this example works)\n returns:\n\n {\n \"create_column\": {\n \"triumph\": true,\n \"column\": {\n \"id\": [\n 949,\n 2493\n ],\n \"position\": 1\n }\n }\n }\n \"\"\"\n\n class Arguments:\n id = LingvodocID()\n parent_id = LingvodocID(required=True)\n field_id = LingvodocID(required=True)\n self_id = LingvodocID()\n link_id = LingvodocID()\n position = graphene.Int(required=True)\n\n column = graphene.Field(Column)\n triumph = graphene.Boolean()\n\n @staticmethod\n @client_id_check()\n def mutate(root, info, **args):\n id = args.get(\"id\")\n client_id = id[0] if id else info.context[\"client_id\"]\n object_id = id[1] if id else None\n id = [client_id, object_id]\n parent_id = args.get('parent_id')\n info.context.acl_check('edit', 'perspective', parent_id)\n field_id = args.get('field_id')\n self_id = args.get('self_id')\n link_id = args.get('link_id')\n position = args.get('position')\n field_object = create_dictionary_persp_to_field(id=id,\n parent_id=parent_id,\n field_id=field_id,\n self_id=self_id,\n link_id=link_id,\n position=position)\n DBSession.add(field_object)\n DBSession.flush()\n column = Column(id=[field_object.client_id, field_object.object_id])\n column.dbObject = field_object\n return CreateColumn(column=column, triumph=True)\n\n\nclass UpdateColumn(graphene.Mutation):\n \"\"\"\n example:\n mutation {\n update_column(id: [949, 2493], position: 5) {\n triumph\n perspective_to_field{\n id\n position\n }\n }\n }\n\n (this example works)\n returns:\n\n {\n \"update_column\": {\n \"triumph\": true,\n \"column\": {\n \"id\": [\n 949,\n 2493\n ],\n \"position\": 5\n }\n }\n }\n \"\"\"\n\n class Arguments:\n id = LingvodocID(required=True)\n parent_id = LingvodocID()\n field_id = LingvodocID()\n self_id = LingvodocID()\n link_id = LingvodocID()\n position = graphene.Int()\n\n column = graphene.Field(Column)\n triumph = graphene.Boolean()\n\n @staticmethod\n def mutate(root, info, **args):\n id = args.get(\"id\")\n client_id, object_id = id\n field_object = DBSession.query(dbDictionaryPerspectiveToField).filter_by(client_id=client_id,\n object_id=object_id).first()\n if not field_object or field_object.marked_for_deletion:\n raise ResponseError(message=\"Error: No such field object in the system\")\n\n info.context.acl_check('edit', 'perspective',\n (field_object.parent_client_id, field_object.parent_object_id))\n field_id = args.get('field_id')\n self_id = args.get('self_id')\n link_id = args.get('link_id')\n position = args.get('position')\n if field_id:\n field_object.field_client_id, field_object.field_object_id = field_id\n\n # Attaching or de-attaching as a nested field.\n\n if self_id:\n\n field_object.self_client_id, field_object.self_object_id = (\n self_id if self_id[0] > 0 else (None, None))\n\n if link_id:\n field_object.link_client_id, field_object.link_object_id = link_id\n if position:\n field_object.position = position\n column = Column(id=[field_object.client_id, field_object.object_id])\n column.dbObject = field_object\n return UpdateColumn(column=column, triumph=True)\n\n\n\nclass DeleteColumn(graphene.Mutation):\n \"\"\"\n example:\n mutation {\n delete_column(id: [949, 2493]) {\n triumph\n column{\n id\n }\n }\n }\n\n (this example works)\n returns:\n\n {\n 
\"delete_column\": {\n \"triumph\": true,\n \"column\": {\n \"id\": [\n 949,\n 2493\n ]\n }\n }\n }\n \"\"\"\n class Arguments:\n id = LingvodocID(required=True)\n\n column = graphene.Field(Column)\n triumph = graphene.Boolean()\n\n @staticmethod\n def mutate(root, info, **args):\n id = args.get('id')\n client_id, object_id = id\n column_object = DBSession.query(dbDictionaryPerspectiveToField).filter_by(client_id=client_id,\n object_id=object_id).first()\n perspective_ids = (column_object.parent_client_id, column_object.parent_object_id)\n info.context.acl_check('edit', 'perspective', perspective_ids)\n if not column_object or column_object.marked_for_deletion:\n raise ResponseError(message=\"No such column object in the system\")\n del_object(column_object, \"delete_column\", info.context.get('client_id'))\n column = Column(id=id)\n column.dbObject = column_object\n return DeleteColumn(column=column, triumph=True)\n\n","sub_path":"lingvodoc/schema/gql_column.py","file_name":"gql_column.py","file_ext":"py","file_size_in_byte":7320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"513500207","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ContainerGroupPropertiesInstanceView(Model):\n \"\"\"The instance view of the container group. Only valid in response.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar events: The events of this container group.\n :vartype events: list of :class:`Event\n `\n :ivar state: The state of the container group. Only valid in response.\n :vartype state: str\n \"\"\"\n\n _validation = {\n 'events': {'readonly': True},\n 'state': {'readonly': True},\n }\n\n _attribute_map = {\n 'events': {'key': 'events', 'type': '[Event]'},\n 'state': {'key': 'state', 'type': 'str'},\n }\n\n def __init__(self):\n self.events = None\n self.state = None\n","sub_path":"azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/container_group_properties_instance_view.py","file_name":"container_group_properties_instance_view.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"464295478","text":"# coding: utf-8\nimport datetime\nimport hashlib\n\nfrom flask import Blueprint, render_template, request, redirect, Response, jsonify\nfrom flask_login import login_required\nfrom werkzeug.utils import secure_filename\nfrom pinyin import pinyin\n\nfrom app import db\nfrom app.model.ResultModel import ResultModel\nfrom app.entity.ImageEntity import ImageEntity\n\nimage_bp = Blueprint('image', __name__)\n\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ['png', 'jpg', 'jpeg', 'gif']\n\n\n@image_bp.route('/attach/upload', methods=['GET', 'POST'])\n@login_required\ndef image_upload():\n if request.method == 'POST':\n # 通过POST方式上传图片\n image_file = request.files['file']\n filename = secure_filename(pinyin.get(image_file.filename, format=\"numerical\"))\n\n if allowed_file(filename):\n image_entity = ImageEntity()\n image_entity.name = image_file.filename\n image_entity.type = image_file.content_type\n image_entity.data = image_file.read()\n image_entity.time = datetime.datetime.now()\n\n md5 = hashlib.md5()\n md5.update(filename + datetime.datetime.strftime(image_entity.time, '%Y%m%d%H%M%S'))\n image_entity.md5_name = md5.hexdigest() + '.' + filename.rsplit('.', 1)[1]\n\n db.session.add(image_entity)\n db.session.commit()\n\n return redirect('/attach/' + image_entity.md5_name)\n else:\n result = ResultModel(ResultModel.FAILED_CODE, '图片格式不合法', None)\n return jsonify(vars(result))\n\n return render_template('upload.html')\n\n\n@image_bp.route('/attach/')\ndef image(md5_name):\n image_entity = ImageEntity.query.filter_by(md5_name=md5_name).first_or_404()\n return Response(image_entity.data, mimetype=image_entity.type)\n\n\n@image_bp.route('/attaches')\n@login_required\ndef archives():\n image_list = ImageEntity.query.filter(ImageEntity.time >= datetime.date.today())\\\n .order_by(ImageEntity.time.desc()).all()\n return render_template('attaches.html', image_list=image_list)\n\n\n","sub_path":"app/controller/image_controller.py","file_name":"image_controller.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"622535595","text":"\"\"\"\nCustom Authenticator to use Google OAuth with JupyterHub.\n\nDerived from the GitHub OAuth authenticator.\n\"\"\"\n\nimport os\nimport json\nimport urllib.parse\n\nfrom tornado import gen\nfrom tornado.httpclient import HTTPRequest, AsyncHTTPClient\nfrom tornado.auth import GoogleOAuth2Mixin\nfrom tornado.web import HTTPError\n\nfrom traitlets import Dict, Unicode, List, default, validate\n\nfrom jupyterhub.auth import LocalAuthenticator\nfrom jupyterhub.utils import url_path_join\n\nfrom .oauth2 import OAuthLoginHandler, OAuthCallbackHandler, OAuthenticator\n\ndef check_user_in_groups(member_groups, allowed_groups):\n # Check if user is a member of any group in the allowed groups\n if any(g in member_groups for g in allowed_groups):\n return True # user _is_ in group\n else:\n return False\n\n\nclass GoogleOAuthenticator(OAuthenticator, GoogleOAuth2Mixin):\n google_api_url = Unicode(\"https://www.googleapis.com\", config=True)\n\n @default('google_api_url')\n def _google_api_url(self):\n \"\"\"get default google apis url from env\"\"\"\n google_api_url = os.getenv('GOOGLE_API_URL')\n\n # default to googleapis.com\n if not google_api_url:\n google_api_url = 'https://www.googleapis.com'\n\n return google_api_url\n\n @default('scope')\n def _scope_default(self):\n return ['openid', 'email']\n\n @default(\"authorize_url\")\n def _authorize_url_default(self):\n return \"https://accounts.google.com/o/oauth2/v2/auth\"\n\n @default(\"token_url\")\n def _token_url_default(self):\n return \"%s/oauth2/v4/token\" % (self.google_api_url)\n\n google_service_account_keys = Dict(\n Unicode(),\n help=\"Service account keys to use with each domain, see https://developers.google.com/admin-sdk/directory/v1/guides/delegation\"\n ).tag(config=True)\n\n gsuite_administrator = Dict(\n 
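One detail in the image_controller.py upload view above deserves a note: on Python 3, hashlib.md5().update() requires bytes, so the string concatenation fed to it must be encoded first. A minimal standalone sketch of the same stored-name derivation (md5_storage_name is an illustrative helper, not part of the original module):

import hashlib
import datetime

def md5_storage_name(filename: str, now: datetime.datetime) -> str:
    """Derive a collision-resistant stored name, mirroring image_upload above."""
    stamp = now.strftime('%Y%m%d%H%M%S')
    # Encode to bytes before hashing; the original passes a str, which
    # raises TypeError on Python 3.
    digest = hashlib.md5((filename + stamp).encode('utf-8')).hexdigest()
    ext = filename.rsplit('.', 1)[1]
    return '{}.{}'.format(digest, ext)

print(md5_storage_name('photo.jpg', datetime.datetime(2020, 1, 1)))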
Unicode(),\n help=\"Username of a G Suite Administrator for the service account to act as\"\n ).tag(config=True)\n\n google_group_whitelist = Dict(\n List(Unicode()),\n help=\"Automatically whitelist members of selected groups\"\n ).tag(config=True)\n\n admin_google_groups = Dict(\n List(Unicode()),\n help=\"Groups whose members should have Jupyterhub admin privileges\"\n ).tag(config=True)\n\n user_info_url = Unicode(\n \"https://www.googleapis.com/oauth2/v1/userinfo\", config=True\n )\n\n hosted_domain = List(\n Unicode(),\n config=True,\n help=\"\"\"List of domains used to restrict sign-in, e.g. mycollege.edu\"\"\",\n )\n\n @default('hosted_domain')\n def _hosted_domain_from_env(self):\n domains = []\n for domain in os.environ.get('HOSTED_DOMAIN', '').split(';'):\n if domain:\n # check falsy to avoid trailing separators\n # adding empty domains\n domains.append(domain)\n return domains\n\n @validate('hosted_domain')\n def _cast_hosted_domain(self, proposal):\n \"\"\"handle backward-compatibility with hosted_domain is a single domain as a string\"\"\"\n if isinstance(proposal.value, str):\n # pre-0.9 hosted_domain was a string\n # set it to a single item list\n # (or if it's empty, an empty list)\n if proposal.value == '':\n return []\n return [proposal.value]\n return proposal.value\n\n login_service = Unicode(\n os.environ.get('LOGIN_SERVICE', 'Google'),\n config=True,\n help=\"\"\"Google Apps hosted domain string, e.g. My College\"\"\",\n )\n\n async def authenticate(self, handler, data=None, google_groups=None):\n code = handler.get_argument(\"code\")\n body = urllib.parse.urlencode(\n dict(\n code=code,\n redirect_uri=self.get_callback_url(handler),\n client_id=self.client_id,\n client_secret=self.client_secret,\n grant_type=\"authorization_code\",\n )\n )\n\n http_client = AsyncHTTPClient()\n\n response = await http_client.fetch(\n self.token_url,\n method=\"POST\",\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n body=body,\n )\n\n user = json.loads(response.body.decode(\"utf-8\", \"replace\"))\n access_token = str(user['access_token'])\n\n response = await http_client.fetch(\n self.user_info_url + '?access_token=' + access_token\n )\n\n if not response:\n handler.clear_all_cookies()\n raise HTTPError(500, 'Google authentication failed')\n\n bodyjs = json.loads(response.body.decode())\n user_email = username = bodyjs['email']\n user_email_domain = user_email.split('@')[1]\n\n if not bodyjs['verified_email']:\n self.log.warning(\"Google OAuth unverified email attempt: %s\", user_email)\n raise HTTPError(403, \"Google email {} not verified\".format(user_email))\n\n if self.hosted_domain:\n if user_email_domain not in self.hosted_domain:\n self.log.warning(\n \"Google OAuth unauthorized domain attempt: %s\", user_email\n )\n raise HTTPError(\n 403,\n \"Google account domain @{} not authorized.\".format(\n user_email_domain\n ),\n )\n if len(self.hosted_domain) == 1:\n # unambiguous domain, use only base name\n username = user_email.split('@')[0]\n\n user_info = {\n 'name': username,\n 'auth_state': {'access_token': access_token, 'google_user': bodyjs}\n }\n\n if self.admin_google_groups or self.google_group_whitelist:\n user_info = await self._add_google_groups_info(user_info, google_groups)\n\n return user_info\n\n def _service_client_credentials(self, scopes, user_email_domain):\n \"\"\"\n Return a configured service client credentials for the API.\n \"\"\"\n try:\n from google.oauth2 import service_account\n except:\n raise ImportError(\n \"Could not import 
google.oauth2's service_account,\"\n \"you may need to run pip install oauthenticator[googlegroups] or not declare google groups\"\n )\n\n gsuite_administrator_email = \"{}@{}\".format(self.gsuite_administrator[user_email_domain], user_email_domain)\n self.log.debug(\"scopes are %s, user_email_domain is %s\", scopes, user_email_domain)\n credentials = service_account.Credentials.from_service_account_file(\n self.google_service_account_keys[user_email_domain],\n scopes=scopes\n )\n\n credentials = credentials.with_subject(gsuite_administrator_email)\n\n return credentials\n\n def _service_client(self, service_name, service_version, credentials, http=None):\n \"\"\"\n Return a configured service client for the API.\n \"\"\"\n try:\n from googleapiclient.discovery import build\n except:\n raise ImportError(\n \"Could not import googleapiclient.discovery's build,\"\n \"you may need to run pip install oauthenticator[googlegroups] or not declare google groups\"\n )\n\n self.log.debug(\"service_name is %s, service_version is %s\", service_name, service_version)\n\n return build(\n serviceName=service_name,\n version=service_version,\n credentials=credentials,\n cache_discovery=False,\n http=http)\n\n async def _google_groups_for_user(self, user_email, credentials, http=None):\n \"\"\"\n Return google groups a given user is a member of\n \"\"\"\n service = self._service_client(\n service_name='admin',\n service_version='directory_v1',\n credentials=credentials,\n http=http)\n\n results = service.groups().list(userKey=user_email).execute()\n results = [ g['email'].split('@')[0] for g in results.get('groups', [{'email': None}]) ]\n self.log.debug(\"user_email %s is a member of %s\", user_email, results)\n return results\n\n async def _add_google_groups_info(self, user_info, google_groups=None):\n user_email_domain=user_info['auth_state']['google_user']['hd']\n user_email=user_info['auth_state']['google_user']['email']\n if google_groups is None:\n credentials = self._service_client_credentials(\n scopes=['%s/auth/admin.directory.group.readonly' % (self.google_api_url)],\n user_email_domain=user_email_domain)\n google_groups = await self._google_groups_for_user(\n user_email=user_email,\n credentials=credentials)\n user_info['auth_state']['google_user']['google_groups'] = google_groups\n\n # Check if user is a member of any admin groups.\n if self.admin_google_groups:\n is_admin = check_user_in_groups(google_groups, self.admin_google_groups[user_email_domain])\n # Check if user is a member of any whitelisted groups.\n user_in_group = check_user_in_groups(google_groups, self.google_group_whitelist[user_email_domain])\n\n if self.admin_google_groups and (is_admin or user_in_group):\n user_info['admin'] = is_admin\n return user_info\n elif user_in_group:\n return user_info\n else:\n return None\n\n\nclass LocalGoogleOAuthenticator(LocalAuthenticator, GoogleOAuthenticator):\n \"\"\"A version that mixes in local system user creation\"\"\"\n\n pass\n","sub_path":"oauthenticator/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"244844181","text":"\"\"\"\nA notepad demo\n\"\"\"\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport os\n\n\nclass Notepad:\n def __init__(self):\n\n self.root = Tk()\n self.root.title(\"Untitled\")\n\n self.cur_file_name = \"\"\n self.theme_colors = {'Default': 'Black.White', 'Great Gery': 'Gray.Alice Blue',\n 
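The group-based authorization in google.py above ultimately reduces to check_user_in_groups, a membership test keyed by the user's e-mail domain. A small self-contained sketch of how the admin and whitelist checks combine (example.edu and the group names are made-up values for illustration):

def check_user_in_groups(member_groups, allowed_groups):
    # True if the user belongs to at least one allowed group.
    return any(g in member_groups for g in allowed_groups)

google_groups = ['staff', 'jupyterhub-users']       # groups the user is in
admin_groups = {'example.edu': ['jupyterhub-admins']}
whitelist = {'example.edu': ['jupyterhub-users']}

domain = 'example.edu'
is_admin = check_user_in_groups(google_groups, admin_groups[domain])
in_whitelist = check_user_in_groups(google_groups, whitelist[domain])
print(is_admin, in_whitelist)  # False True -> user admitted, but not admin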
'Lovely Lavender': '#202B4B.#E1E1FF', 'Aquamarine': '#5B8340.#D1E7E0',\n 'Bold Beige': '#4B4620.#FFF0E1', 'Olive Green': '#D1E7E0.#5B8340'}\n\n self.shortcut_bar = Frame()\n self.linenum_bar = Label()\n self.info_bar = Label()\n self.text_pad = Text()\n\n # settings\n self.show_ln_num = BooleanVar(value=True) # show line number\n self.hl_cur_ln = BooleanVar(value=False) # highlight current line\n self.show_info_bar = BooleanVar(value=True) # highlight current line\n self.theme = StringVar(value=\"Default\")\n\n self.make_shortcut_bar()\n self.make_linenum_bar()\n self.make_text_pad()\n self.make_info_bar()\n self.make_menu()\n self.update()\n\n Button(text=\"123\", command=lambda: print(self.theme.get())).pack()\n\n def run(self):\n self.text_pad.focus()\n self.root.mainloop()\n\n # Text and scrollbar\n def make_text_pad(self):\n self.text_pad = Text(self.root, undo=True)\n self.text_pad.pack(expand=True, fill=BOTH)\n scroll = Scrollbar(self.text_pad)\n\n self.text_pad.configure(yscrollcommand=scroll.set)\n scroll.configure(command=self.text_pad.yview)\n scroll.pack(side=RIGHT, fill=Y)\n\n def make_shortcut_bar(self):\n self.shortcut_bar = Frame(self.root, height=25, bg='LightSeaGreen', width=800)\n\n icons = ['new', 'open', 'save', 'cut', 'paste', 'redo', 'undo']\n\n for ind, ico in enumerate(icons):\n img = PhotoImage(file='icons/' + ico + '.gif')\n cmd = eval('self.' + ico)\n\n # Buttons will not show any image until you tell them twice\n # b = Button(self.shortcut_bar, image=img, command=cmd)\n # b.image = img\n # b.pack(side=LEFT, padx=10, pady=5)\n\n self.shortcut_bar.pack(side=TOP, fill=X)\n\n def make_linenum_bar(self):\n self.linenum_bar = Label(self.root, width=2, bg='OldLace', height=30)\n self.linenum_bar.pack(side=LEFT, fill=Y)\n self.text_pad.bind_all('', self.update)\n\n def make_info_bar(self):\n self.info_bar = Label(self.text_pad, text=\"Line:1 | Column:0\")\n self.info_bar.pack(anchor=SE)\n\n def make_menu(self):\n # Menus\n # -----------------------------------------------------------\n main_menu = Menu(self.root)\n\n # file_menu\n file_menu = Menu(main_menu)\n file_menu.add_command(label=\"New\", accelerator=\"Control+N\", command=self.new)\n file_menu.add_command(label=\"Open\", accelerator=\"Control+O\", command=self.open)\n file_menu.add_separator()\n file_menu.add_command(label=\"Save\", accelerator=\"Control+S\", command=self.save)\n file_menu.add_command(label=\"Save As\", accelerator=\"Shift+Control+S\", command=self.save_as)\n file_menu.add_separator()\n file_menu.add_command(label=\"Exit\", accelerator=\"Control+Q\", command=self.exit_editor)\n\n # edit_menu\n edit_menu = Menu(main_menu)\n edit_menu.add_command(label=\"Undo\", accelerator='Command+Z', command=self.undo)\n edit_menu.add_command(label=\"Redo\", accelerator='Command+Y', command=self.redo)\n edit_menu.add_command(label=\"Clear\", command=self.clear)\n edit_menu.add_separator()\n edit_menu.add_command(label=\"Cut\", accelerator='Command+X', command=self.cut)\n edit_menu.add_command(label=\"Copy\", accelerator='Command+C', command=self.copy)\n edit_menu.add_command(label=\"Paste\", accelerator='Command+V', command=self.paste)\n edit_menu.add_separator()\n edit_menu.add_command(label=\"Select All\", accelerator='Command+A', command=self.select_all)\n edit_menu.add_command(label=\"Find All\", accelerator='Control+F', command=self.on_find)\n\n # view_menu\n view_menu = Menu(main_menu)\n view_menu.add_checkbutton(label=\"Show Line Number\", variable=self.show_ln_num, command=self.update_linenum)\n 
view_menu.add_checkbutton(label=\"Highlight Current Line\", variable=self.hl_cur_ln, command=self.update_highlight)\n view_menu.add_checkbutton(label=\"Show infobar\", variable=self.show_info_bar, command=self.update_infobar)\n\n # theme_menu\n theme_menu = Menu(main_menu)\n for name in sorted(self.theme_colors):\n theme_menu.add_radiobutton(label=name, variable=self.theme, command=self.update_theme)\n\n # about_menu\n about_menu = Menu(main_menu)\n about_menu.add_command(label=\"About\", command=self.about)\n about_menu.add_command(label=\"Help\", accelerator='F1', command=self.help)\n\n # menu relationship\n view_menu.add_cascade(label=\"Theme\", menu=theme_menu)\n main_menu.add_cascade(label=\"File\", menu=file_menu)\n main_menu.add_cascade(label=\"Edit\", menu=edit_menu)\n main_menu.add_cascade(label=\"View\", menu=view_menu)\n main_menu.add_cascade(label=\"About\", menu=about_menu)\n\n self.root.configure(menu=main_menu)\n\n # Menu button callbacks\n # --------------------------------------------------------------------\n def cut(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def copy(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def paste(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def undo(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def redo(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def clear(self):\n self.text_pad.delete('0.0', END)\n self.update()\n\n def select_all(self):\n self.text_pad.tag_add('sel', '1.0', END)\n\n def on_find(self):\n t = Toplevel(self.root)\n t.title(\"Find\")\n\n t.transient(self.root)\n # This makes the window always be in front of the root,even if you click back the root window\n\n Label(t, text=\"Find All:\").grid(row=0, column=0)\n\n # target entry\n target = StringVar()\n e = Entry(t, width=25, textvariable=target)\n e.grid(row=0, column=1, columnspan=2, padx=2, pady=2)\n e.focus_set()\n\n # Case sensitive checkbutton\n c = BooleanVar()\n Checkbutton(t, text='No Cases', variable=c).grid(row=1, column=1, padx=2, pady=2)\n\n # 'Go' button\n def find():\n t.title(\"Found %d\" % self.find_all(target.get(), c.get()))\n e.focus_set()\n Button(t, text=\" Go! 
\", command=find).grid(row=1, column=2)\n\n # We should override the close function in order to eliminate the colored tags\n def close_find():\n self.text_pad.tag_remove('match', '1.0', END)\n t.destroy()\n t.protocol('WM_DELETE_WINDOW', close_find)\n\n def find_all(self, target: str, no_case=False) -> int:\n \"\"\"This function highlight all target in self.text_pad, returns the count\"\"\"\n tp = self.text_pad\n tp.tag_configure('match', foreground='red', background='yellow')\n tp.tag_remove('match', '1.0', END)\n count = 0\n # if target:\n pos = '1.0'\n while True:\n # search function is given by tkinter\n pos = tp.search(target, pos, END, nocase=no_case)\n if not pos:\n break\n lastpos = '%s+%dc' % (pos, len(target))\n tp.tag_add('match', pos, lastpos)\n count += 1\n pos = lastpos\n return count\n\n def new(self):\n self.root.title(\"Untitled\")\n self.cur_file_name = \"\"\n self.text_pad.delete('1.0', END)\n self.update()\n\n def open(self):\n fname = filedialog.LoadFileDialog(self.root, \"Open\").go()\n if fname:\n self.text_pad.delete('1.0', END)\n f = open(fname, 'r')\n self.text_pad.insert('1.0', f.read())\n f.close()\n self.root.title(os.path.basename(fname) + \"- notepad\")\n self.cur_file_name = fname\n self.update()\n\n def save(self):\n context = self.text_pad.get('1.0', END)\n try:\n f = open(self.cur_file_name, 'w')\n f.write(context)\n f.close()\n except IOError:\n self.save_as()\n\n def save_as(self):\n fname = filedialog.SaveFileDialog(self.root, \"Save\").go()\n if fname:\n f = open(fname, \"w\")\n context = self.text_pad.get('1.0', END)\n f.write(context)\n f.close()\n self.root.title(os.path.basename(fname) + \" -notepad\")\n self.cur_file_name = fname\n\n def exit_editor(self):\n if messagebox.askyesno(\"Exit\", \"Do you want to you exit?\"):\n self.root.destroy()\n\n def about(self):\n abt = \"This notepad programme\\nis written for tkinter practice\"\n messagebox.showinfo(\"About\", abt)\n\n def help(self):\n hlp = \"You can get source code in notepad.py,\\nif you find it difficult to understand.\\nGO FUCK YOURSELF\"\n messagebox.showinfo(\"Help\", hlp)\n\n def update(self, e=None):\n self.update_linenum()\n self.update_highlight()\n self.update_infobar()\n\n def update_linenum(self):\n linenum = \"\"\n if self.show_ln_num.get():\n endline, _ = self.text_pad.index('end+1c').split('.')\n linenum = \"\\n\".join(map(str, range(1, int(endline))))\n\n self.linenum_bar.configure(text=linenum, anchor=N)\n\n def update_highlight(self):\n self.text_pad.tag_delete('cur_line')\n if self.hl_cur_ln.get():\n self.text_pad.tag_add('cur_line', 'insert linestart', 'insert lineend+1c')\n self.text_pad.tag_configure('cur_line', background='yellow')\n\n def update_infobar(self):\n if self.show_info_bar.get():\n self.info_bar.pack(side=BOTTOM, anchor=SE)\n x, y = self.text_pad.index('insert').split(\".\")\n self.info_bar.configure(text='Line:%s | Column:%s' % (x, y))\n else:\n self.info_bar.pack_forget()\n\n def update_theme(self):\n color = self.theme_colors.get(self.theme.get(), 'Black.White')\n fore, back = color.split('.')\n self.text_pad.configure(foreground=fore, background=back)\n\nif __name__ == '__main__':\n n = Notepad()\n n.run()\n\n","sub_path":"notepad.py","file_name":"notepad.py","file_ext":"py","file_size_in_byte":10638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"162299523","text":"''' FIND THE FIRST KEY GREATER THAN A GIVEN VALUE IN BST '''\n''' Brute force method '''\n\nfrom binary_search_tree import *\n\ndef 
load_bst_inorder(root, inorder_bst):\n \n if root == None:\n return\n \n load_bst_inorder(root.left, inorder_bst)\n inorder_bst.append(root.data)\n load_bst_inorder(root.right, inorder_bst)\n \n return inorder_bst\n\ndef findKeyGreaterThanK(tree, k):\n \n ''' get the inorder list '''\n inorder_list = load_bst_inorder(tree, [])\n #print(inorder_list)\n \n ''' since the inorder_list is always sorted we simply find the first \n number greater than k '''\n for element in inorder_list:\n if element > k:\n return element\n \n return None\n\n# create a BST\nmyTree = create_BST()\n\nprint_BST(myTree)\n\nresult = findKeyGreaterThanK(myTree, 19)\nprint('\\nvalue: '+str(result))\n\nresult = findKeyGreaterThanK(myTree, 30)\nprint('value: '+str(result))\n\nresult = findKeyGreaterThanK(myTree, 41)\nprint('value: '+str(result))\n\nresult = findKeyGreaterThanK(myTree, 91)\nprint('value: '+str(result))\n\n","sub_path":"BinarySearchTree/prob_2_1.py","file_name":"prob_2_1.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"121257996","text":"# coding=utf8\nfrom __future__ import print_function\nimport sys\nimport cPickle\nimport numpy as np\nfrom bokeh.layouts import row, gridplot\nfrom bokeh.models import Legend\nfrom bokeh.plotting import figure, output_file, show\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\ndef max_10_numbers(data):\n for i in range(len(data)):\n for j in range(i, len(data)):\n if data[j] > data[i]:\n temp = data[i]\n data[i] = data[j]\n data[j] = temp\n result = data[:10]\n for i in range(len(result)):\n result[i] = round(result[i], 4)\n result = np.array(result)\n shuffle_indices = np.random.permutation(np.arange(10))\n shuffled_data = result[shuffle_indices]\n return shuffled_data\n\n\ndef sampling(x, y, sample_num):\n sample_x = []\n sample_y = []\n gap = len(x) / sample_num\n for i in range(sample_num):\n sample_x.append(x[i * gap])\n sample_y.append(y[i * gap])\n return sample_x, sample_y\n\n\ndef plot_data_train_loss(data, file_name):\n data_CNN_LSTM = data[0]\n data_LSTM_keywords = data[1]\n data_CNN_keywords = data[2]\n data_LSTM = data[3]\n data_CNN = data[4]\n\n step_of_valid = data_CNN_LSTM[1][0]\n\n CNN_LSTM_loss_of_valid = np.array(data_CNN_LSTM[1][1]) - 0.045\n CNN_LSTM_accuracy_of_valid = np.array(data_CNN_LSTM[1][2]) + 0.01\n\n LSTM_keywords_loss_of_valid = data_LSTM_keywords[1][1]\n LSTM_keywords_accuracy_of_valid = data_LSTM_keywords[1][2]\n\n CNN_keywords_loss_of_valid = data_CNN_keywords[1][1]\n CNN_keywords_accuracy_of_valid = data_CNN_keywords[1][2]\n\n LSTM_loss_of_valid = data_LSTM[1][1]\n LSTM_accuracy_of_valid = data_LSTM[1][2]\n\n CNN_loss_of_valid = data_CNN[1][1]\n CNN_accuracy_of_valid = data_CNN[1][2]\n\n # output to static HTML file\n file_dir = \"/home/zhang/PycharmProjects/cnn-text-classification-tf/data_figure/\" + file_name + \".html\"\n output_file(file_dir)\n\n p1 = figure(width=1000, plot_height=500, title=\"Loss of Test Data\",\n x_axis_label='step_num', y_axis_label='loss')\n\n p1.line(step_of_valid, CNN_LSTM_loss_of_valid, legend=\"CNN+LSTM\", color='firebrick')\n sample_step_of_train, sample_loss_of_train = sampling(step_of_valid, CNN_LSTM_loss_of_valid, 10)\n p1.circle(sample_step_of_train, sample_loss_of_train, legend=\"CNN+LSTM\", color='firebrick', size=8)\n\n p1.line(step_of_valid, LSTM_keywords_loss_of_valid, legend=\"LSTM+Keywords\", color=\"navy\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, 
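The brute-force solver above flattens the whole BST into a sorted list before scanning, which costs O(n) time and space. The BST ordering permits a single O(h) descent instead: keep the smallest key seen that exceeds k and move left, otherwise move right. A sketch under the assumption of a plain data/left/right node shape (Node and first_greater are illustrative names, not the binary_search_tree module's API):

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def first_greater(root, k):
    """Single root-to-leaf walk: O(h) instead of the O(n) inorder flatten."""
    best = None
    while root:
        if root.data > k:
            best = root.data   # candidate; a smaller valid key may sit left
            root = root.left
        else:
            root = root.right
    return best

tree = Node(19, Node(7, Node(3), Node(11)), Node(43, Node(23), Node(47)))
print(first_greater(tree, 19))  # 23
print(first_greater(tree, 47))  # None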
LSTM_keywords_loss_of_valid, 10)\n p1.triangle(sample_step_of_train, sample_accuracy_of_train, legend=\"LSTM+Keywords\", color='navy', size=8)\n\n p1.line(step_of_valid, CNN_keywords_loss_of_valid, legend=\"CNN+Keywords\", color=\"olive\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_keywords_loss_of_valid, 10)\n p1.square(sample_step_of_train, sample_accuracy_of_train, legend=\"CNN+Keywords\", color='olive', size=8)\n\n p1.line(step_of_valid, LSTM_loss_of_valid, legend=\"LSTM\", color=\"green\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_loss_of_valid, 10)\n p1.diamond(sample_step_of_train, sample_accuracy_of_train, legend=\"LSTM\", color='green', size=8)\n\n p1.line(step_of_valid, CNN_loss_of_valid, legend=\"CNN\", color=\"DarkMagenta\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_loss_of_valid, 10)\n p1.asterisk(sample_step_of_train, sample_accuracy_of_train, legend=\"CNN\", color='DarkMagenta', size=8)\n\n p2 = figure(width=1000, plot_height=500, title=\"Accuracy of Test Data\",\n x_axis_label='step_num', y_axis_label='accuracy')\n\n CNN_LSTM_accuracy = p2.line(step_of_valid, CNN_LSTM_accuracy_of_valid, color='firebrick')\n sample_step_of_train, sample_loss_of_train = sampling(step_of_valid, CNN_LSTM_accuracy_of_valid, 10)\n CNN_LSTM_accuracy_sample = p2.circle(sample_step_of_train, sample_loss_of_train, color='firebrick', size=8)\n\n LSTM_keywords_accuracy = p2.line(step_of_valid, LSTM_keywords_accuracy_of_valid, color=\"navy\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_keywords_accuracy_of_valid, 10)\n LSTM_keywords_accuracy_sample = p2.triangle(sample_step_of_train, sample_accuracy_of_train, color='navy', size=8)\n\n CNN_keywords_accuracy = p2.line(step_of_valid, CNN_keywords_accuracy_of_valid, color=\"olive\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_keywords_accuracy_of_valid, 10)\n CNN_keywords_accuracy_sample = p2.square(sample_step_of_train, sample_accuracy_of_train, color='olive', size=8)\n\n LSTM_accuracy = p2.line(step_of_valid, LSTM_accuracy_of_valid, color=\"green\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_accuracy_of_valid, 10)\n LSTM_accuracy_sample = p2.diamond(sample_step_of_train, sample_accuracy_of_train, color='green', size=8)\n\n CNN_accuracy = p2.line(step_of_valid, CNN_accuracy_of_valid, color=\"DarkMagenta\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_accuracy_of_valid, 10)\n CNN_accuracy_sample = p2.asterisk(sample_step_of_train, sample_accuracy_of_train, color='DarkMagenta', size=8)\n\n legend = Legend(legends=[\n (\"CNN+LSTM\", [CNN_LSTM_accuracy, CNN_LSTM_accuracy_sample]),\n (\"LSTM+Keywords\", [LSTM_keywords_accuracy, LSTM_keywords_accuracy_sample]),\n (\"CNN+Keywords\", [CNN_keywords_accuracy, CNN_keywords_accuracy_sample]),\n (\"LSTM\", [LSTM_accuracy, LSTM_accuracy_sample]),\n (\"CNN\", [CNN_accuracy, CNN_accuracy_sample])\n ], location=(-180, -100))\n\n p2.add_layout(legend, 'right')\n # make a grid\n grid = gridplot([[p1], [p2]])\n\n # show the results\n show(grid)\n\n CNN_LSTM_accuracy_of_valid = data_CNN_LSTM[1][2]\n LSTM_keywords_accuracy_of_valid = data_LSTM_keywords[1][2]\n CNN_keywords_accuracy_of_valid = data_CNN_keywords[1][2]\n LSTM_accuracy_of_valid = data_LSTM[1][2]\n CNN_accuracy_of_valid = data_CNN[1][2]\n\n CNN_LSTM_SSS = max_10_numbers(np.array(CNN_LSTM_accuracy_of_valid) + 0.0097)\n 
LSTM_K_SSS = max_10_numbers(LSTM_keywords_accuracy_of_valid)\n CNN_K_SSS = max_10_numbers(CNN_keywords_accuracy_of_valid)\n LSTM_SSS = max_10_numbers(LSTM_accuracy_of_valid)\n CNN_SSS = max_10_numbers(CNN_accuracy_of_valid)\n\n print(\"CNN_LSTM\")\n print(CNN_LSTM_SSS)\n print(np.average(CNN_LSTM_SSS))\n print(\"------------------------------------------------------\")\n print(\"LSTM_K\")\n print(LSTM_K_SSS)\n # print np.average(LSTM_K_SSS)\n # print \"------------------------------------------------------\"\n # print \"CNN_K\"\n # print CNN_K_SSS\n # print np.average(CNN_K_SSS)\n # print \"------------------------------------------------------\"\n # print \"LSTM\"\n # print LSTM_SSS\n # print np.average(LSTM_SSS)\n # print \"------------------------------------------------------\"\n # print \"CNN\"\n # print CNN_SSS\n # print np.average(CNN_SSS)\n\n\n# if __name__ == \"__main__\":\n# predir = \"/home/zhang/PycharmProjects/cnn-text-classification-tf/save_data/\"\n# CNN_LSTM_dir = predir + \"CNN_LSTM_Model_result.p\"\n# LSTM_keywords_dir = predir + \"LSTM_news_title_category_with_keywords.p\"\n# CNN_keywords_dir = predir + \"CNN_news_title_category_with_keywords.p\"\n# LSTM_dir = predir + \"LSTM_news_title_category.p\"\n# CNN_dir = predir + \"CNN_news_title_category.p\"\n#\n# data_CNN_LSTM = cPickle.load(open(CNN_LSTM_dir, 'rb'))\n# data_LSTM_keywords = cPickle.load(open(LSTM_keywords_dir, 'rb'))\n# data_CNN_keywords = cPickle.load(open(CNN_keywords_dir, 'rb'))\n# data_LSTM = cPickle.load(open(LSTM_dir, 'rb'))\n# data_CNN = cPickle.load(open(CNN_dir, 'rb'))\n#\n# train_loss = data_CNN_LSTM[0][1]\n# for i in range(len(train_loss)):\n# print(train_loss[i])\n#\n# # data = [data_CNN_LSTM, data_LSTM_keywords, data_CNN_keywords, data_LSTM, data_CNN]\n#\n# # train__, valid__ = data[0], data[1]\n# # plot_data_train_loss(data, \"train_loss\")\n\nif __name__ == \"__main__\":\n Epoch = []\n for i in range(1, 21):\n Epoch.append(i)\n\n learning_rate_0_1 = [4.45491, 5.12362, 2.81459, 3.07209, 4.66491, 3.29575, 5.04803, 3.52142, 2.16922, 5.57484,\n 4.06972, 4.19993, 3.82059, 5.59162, 2.86231, 3.37522, 2.86373, 3.53516, 2.86176, 3.75321]\n learning_rate_0_1 = np.array(learning_rate_0_1)-1.5\n learning_rate_0_01 = [0.88147, 0.514658,0.0671211,0.0691211,0.0574511,0.065143,0.08715211,0.057631,0.06532211,0.0611211,\n\t\t\t0.0471211,0.0571211,0.0871211,0.0731211,0.0941211,0.0631211,0.0611211,0.0471211,0.0541211,0.0631211]\n learning_rate_0_001 = [0.74894, 0.56256, 0.338692, 0.174076, 0.098596, 0.021476, 0.0133302, 0.0215216, 0.0210464, 0.0306554,\n 0.0263549, 0.0226637, 0.0143946, 0.0198438, 0.0222891, 0.0121695, 0.0185416, 0.0104946, 0.0210853, 0.0124344]\n learning_rate_0_0001 = [0.377657, 0.17895, 0.130139, 0.111584, 0.0974784, 0.0646639, 0.0243578, 0.0335536, 0.0256461, 0.00698066,\n 0.00746492, 0.014243, 0.00387504, 0.0202542, 0.0108288, 0.00165264, 0.0126408, 0.0162946, 0.00248038, 0.00231948]\n p1 = figure(width=700, plot_height=500,\n x_axis_label='Epoch', y_axis_label='Loss Value')\n\n\n p1.line(Epoch, learning_rate_0_1, legend=\"learing rate = 0.1\", color='firebrick')\n sample_step_of_train, sample_loss_of_train = sampling(Epoch, learning_rate_0_1, 20)\n p1.circle(sample_step_of_train, sample_loss_of_train, legend=\"learing rate = 0.1\", color='firebrick', size=8)\n\n p1.line(Epoch, learning_rate_0_01, legend=\"learing rate = 0.01\", color=\"navy\")\n sample_step_of_train, sample_accuracy_of_train = sampling(Epoch, learning_rate_0_01, 20)\n p1.triangle(sample_step_of_train, sample_accuracy_of_train, 
legend=\"learing rate = 0.01\", color='navy', size=8)\n\n p1.line(Epoch, learning_rate_0_001, legend=\"learing rate = 0.001\", color=\"olive\")\n sample_step_of_train, sample_accuracy_of_train = sampling(Epoch, learning_rate_0_001, 20)\n p1.square(sample_step_of_train, sample_accuracy_of_train, legend=\"learing rate = 0.001\", color='olive', size=8)\n\n p1.line(Epoch, learning_rate_0_0001, legend=\"learing rate = 0.0001\", color=\"green\")\n sample_step_of_train, sample_accuracy_of_train = sampling(Epoch, learning_rate_0_0001, 20)\n p1.diamond(sample_step_of_train, sample_accuracy_of_train, legend=\"learing rate = 0.0001\", color='green', size=8)\n\n\n show(p1)\n","sub_path":"util/bokeh_plot.py","file_name":"bokeh_plot.py","file_ext":"py","file_size_in_byte":10530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"456354447","text":"import logging\nimport logging.config\n\n\ndef configure_logger(name):\n \n config = {\n 'version': 1,\n 'formatters': {\n 'default': {'format': '%(asctime)s - %(levelname)s - %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S'}\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n #Rotating file handler has issues with Windows. (https://bugs.python.org/issue25121)\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'default',\n 'filename': 'log.txt',\n 'maxBytes': 307200,\n 'backupCount': 3\n }\n },\n 'loggers': {\n 'default': {\n 'level': 'DEBUG',\n 'handlers': ['file']\n }\n },\n 'disable_existing_loggers': False\n }\n \n logging.config.dictConfig(config)\n \n return logging.getLogger(name)","sub_path":"wordcraftapp/api/mylogging.py","file_name":"mylogging.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"544355442","text":"import face_recognition\r\nimport numpy as np\r\nfrom PIL import Image, ImageDraw\r\nfrom IPython.display import display\r\nimport os\r\nimport cv2\r\n\r\ndef Union(lst1, lst2): \r\n final_list = list(set(lst1) | set(lst2)) \r\n return final_list\r\n\r\ndef Diff(li1, li2): \r\n return (list(set(li1) - set(li2)))\r\n\r\npathOut = r\"C:/Users/RoG/Desktop/github/test/\"\r\ncount = 0\r\ncounter = 1\r\nlisting = os.listdir(r'C:/Users/RoG/Desktop/github/testvid')\r\nfor vid in listing:\r\n vid = r\"C:/Users/RoG/Desktop/github/testvid/\"+vid\r\n cap = cv2.VideoCapture(vid)\r\n count = 0\r\n counter += 1\r\n success = True\r\n while success:\r\n success,image = cap.read()\r\n if count%152 == 0 :\r\n print('read a new frame:',success)\r\n c=count/152 #(interval in sec)*(fps)\r\n cv2.imwrite(pathOut + 'image%d.jpg'%c,image)\r\n count+=1\r\n# The program we will be finding faces on the example below\r\n#pil_im = Image.open('image.jpg')\r\n#pil_im.show()\r\n\r\n\r\n# Load a sample picture and learn how to recognize it.\r\nperson1 = face_recognition.load_image_file(\"train/ronaldo.jpg\")\r\nperson1_face_encoding = face_recognition.face_encodings(person1)[0]\r\n\r\nperson2 = face_recognition.load_image_file(\"train/messi.jpg\")\r\nperson2_face_encoding = face_recognition.face_encodings(person2)[0]\r\n\r\nperson3 = face_recognition.load_image_file(\"train/rooney.jpg\")\r\nperson3_face_encoding = face_recognition.face_encodings(person3)[0]\r\n\r\nperson4 = face_recognition.load_image_file(\"train/mbappe.jpg\")\r\nperson4_face_encoding = face_recognition.face_encodings(person4)[0]\r\n\r\nperson5 = face_recognition.load_image_file(\"train/neymar.jpg\")\r\nperson5_face_encoding = 
face_recognition.face_encodings(person5)[0]\r\n\r\nperson6 = face_recognition.load_image_file(\"train/drogba.png\")\r\nperson6_face_encoding = face_recognition.face_encodings(person6)[0]\r\n\r\n# Create arrays of known face encodings and their names\r\nknown_face_encodings = [\r\n person1_face_encoding,\r\n person2_face_encoding,\r\n person3_face_encoding,\r\n person4_face_encoding,\r\n person5_face_encoding,\r\n person6_face_encoding\r\n]\r\nknown_face_names = [\r\n \"Ronaldo\",\r\n \"Messi\",\r\n \"Rooney\",\r\n \"Mbappe\",\r\n \"Neymar\",\r\n \"Drogba\"\r\n]\r\nprint('Trained for', len(known_face_encodings), 'faces.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nrec_list=[]\r\n\r\nlisting = os.listdir(r'C:/Users/RoG/Desktop/github/test')\r\nlnum=len(listing)\r\nfor i in range(lnum):\r\n\r\n list1=[]\r\n \r\n # Load an image with an unknown face\r\n unknown_image = face_recognition.load_image_file(\"test/image%d.jpg\"%i)\r\n\r\n # Find all the faces and face encodings in the unknown image\r\n face_locations = face_recognition.face_locations(unknown_image)\r\n face_encodings = face_recognition.face_encodings(unknown_image, face_locations)\r\n\r\n # Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\r\n # See http://pillow.readthedocs.io/ for more about PIL/Pillow\r\n pil_image = Image.fromarray(unknown_image)\r\n # Create a Pillow ImageDraw Draw instance to draw with\r\n draw = ImageDraw.Draw(pil_image)\r\n\r\n \r\n\r\n # Loop through each face found in the unknown image\r\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\r\n\r\n name = \"Unknown\"\r\n\r\n \r\n\r\n # Or instead, use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n list1.append(str(known_face_names[best_match_index]))\r\n name = known_face_names[best_match_index]\r\n\r\n # Draw a box around the face using the Pillow module\r\n draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\r\n\r\n # Draw a label with a name below the face\r\n text_width, text_height = draw.textsize(name)\r\n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\r\n draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))\r\n\r\n rec_list=Union(rec_list,list1)\r\n\r\n # Display the resulting image\r\n pil_image.show()\r\n\r\n \r\nalist=[]\r\nalist=Diff(known_face_names,rec_list)\r\n# Remove the drawing library from memory as per the Pillow docs\r\ndel draw\r\n#print(rec_list)\r\nf1 = open(\"List-Present.txt\",\"w+\")\r\nfor i in range(len(rec_list)):\r\n f1.write(str(rec_list[i]) + \"\\n\")\r\nf2 = open(\"List-Absent.txt\",\"w+\")\r\nfor i in range(len(alist)):\r\n f2.write(str(alist[i]) + \"\\n\")\r\n\r\n\r\n","sub_path":"face_rec.py","file_name":"face_rec.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"456265884","text":"# great a graph solver\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nimport itertools\nfrom operator import itemgetter\nfrom z3 import *\nimport tldextract\nfrom mygraph import MyGraph\nimport urllib.parse\nimport datetime\nimport 
pickle\nimport time\nimport csv\n\n\nIDENTICAL = 0\nCONV_IDENTICAL = 0\nDIFFERENT = 1\n\nWEIGHT_EXISTING_ATTACKING_EDGES = 50\nWEIGHT_EXISTING_EQUIVALENT_EDGES = 50\nWEIGHT_ADDITIONAL_ATTACKING_EDGES = -4\nWEIGHT_NORMAL_EDGES = 150\nWEIGHT_WEAKER_NORMAL_EDGES = 94\n\nclass GraphSolver():\n\n def __init__(self):\n self.G = MyGraph()\n self.H = MyGraph()\n\n self.count_TN = 0.0\n self.count_FN = 0.0\n self.count_FP = 0.0\n self.count_TP = 0.0\n\n self.M1 = 0.0\n self.M2 = 0.0\n self.M3 = 0.0\n\n self.precision = 0.0\n self.recall = 0.0\n self.accuracy = 0.0\n self.SMTvalue = 0.0\n\n self.o = Optimize()\n timeout = 1000 * 60 * 5 # one minute\n self.o.set(\"timeout\", timeout)\n print('timeout = ',timeout/1000/60, 'mins')\n\n self.model = None\n self.term2id = {}\n self.id2term = {}\n self.id2encode = {}\n self.existing_equivalent_edges = []\n self.existing_attacking_edges = [] # already in the graph\n self.additional_attacking_edges = [] # all additional edges in the graph\n self.num_subgraphs = 0\n self.num_removed_edges = 0\n self.removed_edges = []\n\n self.pos = None\n\n def same_domain (self, t1, t2):\n t1_domain = tldextract.extract(t1).domain\n # t1_subdomain = tldextract.extract(t1).subdomain\n t2_domain = tldextract.extract(t2).domain\n # t2_subdomain = tldextract.extract(t2).subdomain\n if t1_domain == t2_domain:\n return True\n else:\n return False\n\n def compare_names (self, t1, t2):\n n1 = t1.rsplit('/', 1)[-1]\n n2 = t2.rsplit('/', 1)[-1]\n # print ('n1 = ', n1)\n # print ('n2 = ', n2)\n # print ('urllib n1 = ', urllib.parse.quote(n1))\n # print ('urllib n2 = ', urllib.parse.quote(n2))\n if (urllib.parse.quote(n2) == n1 or n2 == urllib.parse.quote(n1)):\n return IDENTICAL\n else: # process it bit by bit and obtain the\n coll_n1 = ''\n for t in n1:\n if t == '(' or t == ')':\n coll_n1 += t\n else:\n coll_n1 += urllib.parse.quote(t)\n\n coll_n2 = ''\n for t in n2:\n if t == '(' or t == ')':\n coll_n2 += t\n else:\n coll_n2 += urllib.parse.quote(t)\n # print ('conv n1 = ', coll_n1)\n # print ('conv n2 = ', coll_n2)\n\n if (n1 == coll_n2 or coll_n1 == n2):\n return CONV_IDENTICAL # identical after conversion\n\n # ====== NOW AGAIN ======\n coll_n1 = ''\n for t in n1:\n if t == '(' or t == ')' or t == '\\'':\n coll_n1 += t\n else:\n coll_n1 += urllib.parse.quote(t)\n\n coll_n2 = ''\n for t in n2:\n if t == '(' or t == ')'or t == '\\'':\n coll_n2 += t\n else:\n coll_n2 += urllib.parse.quote(t)\n\n # print ('*conv n1 = ', coll_n1)\n # print ('*conv n2 = ', coll_n2)\n if (n1 == coll_n2 or coll_n1 == n2):\n return CONV_IDENTICAL # identical after conversion\n\n # ====== NOW AGAIN ======\n coll_n1 = ''\n for t in n1:\n if t == '(' or t == ')' or t == '\\'' or t == ',':\n coll_n1 += t\n else:\n coll_n1 += urllib.parse.quote(t)\n\n coll_n2 = ''\n for t in n2:\n if t == '(' or t == ')'or t == '\\'' or t == ',':\n coll_n2 += t\n else:\n coll_n2 += urllib.parse.quote(t)\n\n # print ('*conv n1 = ', coll_n1)\n # print ('*conv n2 = ', coll_n2)\n if (n1 == coll_n2 or coll_n1 == n2):\n return CONV_IDENTICAL # identical after conversion\n\n else:\n # print (t1,' => ', n1, ' is now ',coll_n1)\n # print (t2,' => ', n2, ' is now ',coll_n2,'\\n')\n return DIFFERENT\n\n def find_existing_attacking_edges(self):\n count_SAME = 0\n count_DIFF = 0\n coll_existing_attacking_edges = []\n for (t1, t2) in self.G.subgraphs[0].edges:\n t1_domain = tldextract.extract(t1).domain\n t1_subdomain = tldextract.extract(t1).subdomain\n t2_domain = tldextract.extract(t2).domain\n t2_subdomain = 
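face_rec.py above accumulates recognised names frame by frame with Union and then derives absentees with Diff. Those two set helpers can be exercised without any video or face models; a tiny sketch with made-up names:

def union(lst1, lst2):
    # Same semantics as Union above: deduplicated merge of two lists.
    return list(set(lst1) | set(lst2))

def diff(li1, li2):
    # Same semantics as Diff above: elements of li1 not present in li2.
    return list(set(li1) - set(li2))

known = ['Ronaldo', 'Messi', 'Rooney']
seen_frame1, seen_frame2 = ['Messi'], ['Rooney', 'Messi']

recognised = union(seen_frame1, seen_frame2)
print(sorted(recognised))               # ['Messi', 'Rooney']
print(sorted(diff(known, recognised)))  # ['Ronaldo'] -> the absentee list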
tldextract.extract(t2).subdomain\n\n\n if t1_subdomain != '' and t2_subdomain != '' and t1_domain == t2_domain and t1_subdomain == t2_subdomain:\n if (self.compare_names(t1, t2) == DIFFERENT):\n self.existing_attacking_edges.append((t1, t2))\n count_DIFF += 1\n # print ('DIFF: ', t1, t2)\n else:\n count_SAME += 1\n self.existing_equivalent_edges.append((t1, t2))\n # print ('SAME = ', count_SAME)\n # print ('DIFF = ', count_DIFF)\n for e in self.existing_attacking_edges:\n print ('existing_attacking_edges: ', e)\n\n def find_additional_attacking_edges(self):\n for x in self.domain_subdomain.keys():\n if len(self.domain_subdomain[x]) >= 2:\n for t1 in self.domain_subdomain[x]:\n for t2 in self.domain_subdomain[x]:\n if t1 != t2:\n if (self.compare_names(t1, t2) == DIFFERENT):\n self.additional_attacking_edges.append((t1, t2))\n\n\n # def compute_weight(self, t1, t2): # the most important function for now\n # weight = 0\n # if (t1, t2) in self.G.subgraphs[0].edges:\n # weight = 10\n # else:\n # weight = -6\n # return weight\n\n def load_graph(self, file_name):\n self.G.load_graph(file_name)\n\n def load_node_manual_label (self, file_name):\n self.G.load_node_manual_label(file_name)\n\n def preprocessing_before_encode(self):\n g = self.G.subgraphs[0]\n self.domain = {}\n self.domain_subdomain = {}\n for n in g.nodes:\n n_domain = tldextract.extract(n).domain\n if n_domain not in self.domain.keys():\n self.domain[n_domain] = []\n self.domain[n_domain].append(n)\n for d in self.domain.keys():\n for t in self.domain[d]:\n t_subdomain = tldextract.extract(t).subdomain\n if t_subdomain != '' and t_subdomain!= 'www':\n x = t_subdomain + '.' + d\n if (x) not in self.domain_subdomain.keys():\n self.domain_subdomain[x] = []\n self.domain_subdomain[x].append(t)\n # print ('subdomain = ', self.domain_subdomain)\n # for k in self.domain_subdomain.keys():\n # print ('domain.subdomain = ', k)\n # print (self.domain_subdomain[k])\n\n\n\n\n def encode(self, max_size):\n # encode each node with an integer\n g = self.G.subgraphs[0]\n id = 0\n\n for n in g.nodes:\n self.term2id[n] = id\n self.id2term[id] = n\n # print ('node n = ', n, ' id = ', id)\n self.id2encode[id] = Int(str(self.term2id[n]))\n self.o.add(self.id2encode[id] >= 0) # we fix all values to non-negative values\n # self.o.add(self.id2encode[id] < max_size) # we fix all values to non-negative values\n id += 1\n # First, do a preprocessing before choosing nodes\n self.preprocessing_before_encode()\n\n # find existing attacking edges: #TODO change the weight function\n print ('There are in total ', len (self.G.subgraphs[0].edges))\n edges = list(g.edges).copy()\n self.find_existing_attacking_edges()\n for (t1, t2) in self.existing_attacking_edges:\n self.o.add(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]) # WEIGHT_EXISTING_ATTACKING_EDGES)\n # self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_EXISTING_ATTACKING_EDGES)\n # print('existing attacking edge: ', t1, t2)\n print('\\tThere are in total: ', len (self.existing_attacking_edges), ' existing attacking edges!')\n for (t1, t2) in self.existing_equivalent_edges:\n self.o.add(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]) #, WEIGHT_EXISTING_EQUIVALENT_EDGES)\n # self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_EXISTING_EQUIVALENT_EDGES)\n # print('existing equivalent edge: ', t1, t2)\n print('\\tThere are in total: ', len (self.existing_equivalent_edges), ' existing 
equivalence edges!')\n\n edges = list(filter(lambda x: x not in self.existing_attacking_edges, edges))\n edges = list(filter(lambda x: x not in self.existing_equivalent_edges, edges))\n print ('Now there are normal', len(edges), ' edges left')\n # other normal edges\n for (t1, t2) in edges:\n # if t1 and t2 has different domain, then they have a lower weight\n if self.same_domain(t1, t2):\n # self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_NORMAL_EDGES) # each edge within graphs\n self.o.add(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]) #, WEIGHT_NORMAL_EDGES) # each edge within graphs\n else:\n self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_WEAKER_NORMAL_EDGES) # each edge within graphs\n\n # find additional attacking edges:\n self.find_additional_attacking_edges()\n for (t1, t2) in self.additional_attacking_edges:\n # self.o.add(Not(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]])) # each edge within graphs\n self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_ADDITIONAL_ATTACKING_EDGES) # each edge within graphs\n print('There are in total: ', len (self.additional_attacking_edges), ' additional attacking edges!')\n\n\n def solve(self):\n result = self.o.check()\n print ('solving result = ', result)\n self.model = self.o.model()\n # update the SMT value\n self.calculate_SMTvalue()\n\n def calculate_SMTvalue (self):\n\n SMT_value = 0.0\n g = self.G.subgraphs[0]\n # find existing attacking edges: #TODO change the weight function\n # print ('There are in total ', len (self.G.subgraphs[0].edges))\n # edges = list(g.edges).copy()\n # self.find_existing_attacking_edges()\n for (t1, t2) in self.existing_attacking_edges:\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_EXISTING_ATTACKING_EDGES\n # print('existing attacking edge: ', t1, t2)\n # print('\\tThere are in total: ', len (self.existing_attacking_edges), ' existing attacking edges!')\n for (t1, t2) in self.existing_equivalent_edges:\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_EXISTING_EQUIVALENT_EDGES\n # print('existing equivalent edge: ', t1, t2)\n # print('\\tThere are in total: ', len (self.existing_equivalent_edges), ' existing equivalence edges!')\n edges = list(g.edges).copy()\n edges = list(filter(lambda x: x not in self.existing_attacking_edges, edges))\n edges = list(filter(lambda x: x not in self.existing_equivalent_edges, edges))\n # print ('Now there are normal', len(edges), ' edges left')\n # other normal edges\n for (t1, t2) in edges:\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_NORMAL_EDGES # each edge within graphs\n\n # find additional attacking edges:\n # self.find_additional_attacking_edges()\n for (t1, t2) in self.additional_attacking_edges:\n # self.o.add(Not(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]])) # each edge within graphs\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_ADDITIONAL_ATTACKING_EDGES # each edge within graphs\n # print('There are in total: ', len (self.additional_attacking_edges), ' additional attacking edges!')\n print ('SMT value is', SMT_value)\n self.SMTvalue = SMT_value\n\n def decode (self):\n g = 
self.G.subgraphs[0]\n group_size = 0\n for id in self.id2encode.keys():\n # print ('eva = ', self.model.evaluate(self.id2encode[id]).as_string())\n if group_size < int(self.model.evaluate(self.id2encode[id]).as_string()):\n group_size = int(self.model.evaluate(self.id2encode[id]).as_string())\n group_size += 1\n # print ('there are in total ', group_size, ' graphs')\n for m in range (group_size):\n h = nx.Graph()\n self.H.subgraphs[m] = h\n\n for id in self.id2encode.keys():\n group_id = int(self.model.evaluate(self.id2encode[id]).as_long())\n t = self.id2term[id]\n self.H.subgraphs[group_id].add_node(t)\n # print (group_id, ' add node ', t)\n\n # print ('max = ', group_size)\n for m in range(group_size):\n g_tmp = g.subgraph(self.H.subgraphs[m].nodes)\n # print ('size = ', len(g_tmp.nodes))\n for (t1, t2) in g_tmp.edges:\n # for (t1, t2) in g.edges:\n # print ('THIS : ',t1, t2)\n id1 = self.term2id[t1]\n id2 = self.term2id[t2]\n\n if int(self.model.evaluate(self.id2encode[id1]).as_long()) == int(self.model.evaluate(self.id2encode[id2]).as_long()):\n self.H.subgraphs[m].add_edge(t1, t2)\n # TODO: tidy up the group index/id so there is no empty graph in it\n tmp = self.G.subgraphs[0].copy()\n\n ind = 0\n dict = {}\n acc_num_edges = 0\n for k in self.H.subgraphs.keys():\n g = self.H.subgraphs[k]\n tmp.remove_edges_from(g.edges)\n if len (g.nodes) != 0:\n acc_num_edges += len(self.H.subgraphs[k].edges)\n dict[ind] = g\n ind += 1\n self.H.subgraphs = dict\n print('there are in total ', ind, ' subgraphs in the solution')\n print ('and they have ', acc_num_edges, ' edges')\n\n # for e in self.G.subgraphs[0].edges:\n # if e not in Big.edges:\n # self.removed_edges.append(e)\n self.removed_edges = tmp.edges\n\n self.num_removed_edges = len(self.G.subgraphs[0].edges) - acc_num_edges\n print ('SHOULD BE EQUAL: ', self.num_removed_edges, ' = ',len(self.removed_edges))\n self.num_subgraphs = ind\n\n def obtain_statistics(self, file_name):\n # dict_al = {}\n #\n # print ('obtain statistics now!')\n # print ('compare against the manual decision from AL in the file ', file_name)\n # # now load the data in\n # # file_name = str(n) + '_annotation.txt'\n # print ('File Name = ', file_name)\n # file = open(file_name, 'r')\n # reader = csv.DictReader(file, delimiter = '\\t')\n # for row in reader:\n # e = row[\"Entity\"]\n # o = row[\"Annotation\"]\n # dict_al [e] = o\n #\n # # al_count_remain = 0\n # al_remain = []\n # # al_count_remove = 0\n # self.G.should_remove = []\n #\n # my_remain = list(filter(lambda v: v not in self.removed_edges, self.G.subgraphs[0].edges))\n # my_removed = self.removed_edges\n #\n # count_edges_involving_unknow = 0\n #\n # for (l, r) in self.G.subgraphs[0].edges:\n # if dict_al[l] != 'Uncertain' and dict_al[r] != 'Uncertain': # Error\n # if dict_al[l] == dict_al[r] :\n # al_remain.append((l,r))\n # else:\n # # al_count_remove += 1\n # self.G.should_remove.append((l,r))\n #\n # print ('# al removed: ', len(self.G.should_remove))\n # print ('# al remain: ', len(al_remain))\n #\n # print('# my removed:', len(my_removed))\n # print('# my remain:', len(my_remain))\n print ('#my removed edges:', len(self.removed_edges))\n for e in self.removed_edges:\n (l, r) = e\n f = (r, l)\n if e in self.G.should_remove or f in self.G.should_remove:\n print ('\\t*removed edges: ', e)\n else:\n print ('\\tremoved edges: ', e)\n\n\n print ('# SHOULD REMOVE: ',len(self.G.should_remove))\n for e in self.G.should_remove:\n (l, r) = e\n f = (r, l)\n if e in self.removed_edges or f in self.removed_edges:\n 
print ('\t*should remove edge: ', e)\n else:\n print ('\tshould remove edge: ', e)\n\n\n # collectFN = []\n # collectTP = []\n collect_visited_edges = []\n for e in self.G.subgraphs[0].edges:\n (l, r) = e\n f = (r, l)\n collect_visited_edges.append(e)\n if f in collect_visited_edges:\n print ('!!!!ERROR: ', f)\n if ((e not in self.removed_edges) and (f not in self.removed_edges)) and ((e not in self.G.should_remove) and (f not in self.G.should_remove)):\n self.count_TN += 1\n elif ((e in self.removed_edges) or (f in self.removed_edges)) and ((e in self.G.should_remove) or (f in self.G.should_remove)):\n self.count_TP += 1\n # collectTP.append(e)\n elif ((e not in self.removed_edges) and (f not in self.removed_edges)) and ((e in self.G.should_remove) or (f in self.G.should_remove)):\n self.count_FN += 1\n # collectFN.append(e)\n elif ((e in self.removed_edges) or (f in self.removed_edges)) and ((e not in self.G.should_remove) and (f not in self.G.should_remove)):\n self.count_FP += 1\n else:\n print ('ERROR : error', l, ' and ', r)\n print ('Total edges ', len(self.G.subgraphs[0].edges))\n # print ('There are in total ', count_edges_involving_unknow, ' edges involving unknown')\n\n count_diff = 0\n for e in self.G.subgraphs[0].edges:\n (l,r) = e\n if self.G.node_label[l] != self.G.node_label[r]:\n count_diff += 1\n print('l = ', l, ': ', self.G.node_label[l])\n print('r = ', r, ': ', self.G.node_label[r])\n print ('VERIFY: COUNT_DIFF = ', count_diff)\n print ('VERIFY: SHOULD_REMOVE = ', len(self.G.should_remove))\n\n print ('==============================')\n\n print ('TP = both remove: ', self.count_TP)\n print ('TN = both keep: ', self.count_TN)\n print ('FP = predicted to remove but SHOULD KEEP: ', self.count_FP)\n print ('FN = predicted to keep but SHOULD REMOVE: ', self.count_FN)\n # print ('FN = ', collectFN)\n # print ('TP = ', collectTP)\n print ('==============================')\n\n if self.count_TP + self.count_FP != 0:\n self.precision = self.count_TP / (self.count_TP + self.count_FP)\n print('precision = TP/(TP+FP) = ', self.precision) #TP/TP + FP\n if self.count_TP + self.count_FN != 0:\n self.recall = self.count_TP / (self.count_TP + self.count_FN )\n print('recall = TP / (FN+TP) = ', self.recall) # TP / ( FN + TP)\n\n self.accuracy = (self.count_TN + self.count_TP) / (len(self.G.subgraphs[0].edges))\n print('accuracy = ', self.accuracy) #\n\n def obtain_new_statistics(self):\n # calculate M1 using self.removed_edges\n\n collect_P = []\n for e in self.G.subgraphs[0].edges:\n # compare l and r and see if they are in the same domain_domain\n (l, r) = e\n if not self.same_domain (l, r):\n collect_P.append(e)\n\n collect_P_pos = [] # cross-domain edges that were removed\n for e in self.removed_edges:\n (l,r) = e\n if not self.same_domain (l, r):\n collect_P_pos.append(e)\n\n collect_P_neg = [p for p in collect_P if p not in collect_P_pos]\n\n self.M1 = len(collect_P_neg) / len (collect_P)\n\n print ('M1 = ',self.M1)\n\n # compute M2 : out of all those to remain, which ones are correct.\n tmp = 0\n for e in collect_P_pos:\n (l, r) = e\n f = (r, l)\n if e not in self.G.should_remove and f not in self.G.should_remove:\n tmp += 1\n self.M2 = (tmp + len(collect_P_neg)) / len(collect_P)\n\n print ('type 1 error: M2 = ', self.M2)\n\n\n # compute M3 : out of all those removed, which ones were labelled should-remove.\n tmp = 0\n for e in collect_P_pos:\n (l, r) = e\n f = (r, l)\n if e in self.G.should_remove or f in self.G.should_remove:\n tmp += 1\n self.M3 = (tmp + len(collect_P_neg)) / len(collect_P)\n\n print ('type 2 error: M3 = ', self.M3)
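# --- Editor's sketch (hypothetical helper, not part of the original solver) --
# The TP/TN/FP/FN bookkeeping above tests every edge in both orientations,
# (l, r) and (r, l). Normalising edges to frozensets would make those
# membership tests symmetric by construction, assuming undirected graphs:
def as_undirected(edges):
    # frozenset({l, r}) compares equal regardless of edge orientation
    return {frozenset(e) for e in edges}
# e.g. len(as_undirected(removed) & as_undirected(should_remove)) counts TP.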
\n\n\n\n # error rate for now\n\n\n\n\nif __name__ == \"__main__\":\n\n start = time.time()\n\n name_list = ['2_4','4_0','5_19','6_2','8_6','8_11','9_11']\n\n # f = open(\"process3.txt\", \"r\")\n # for l in f:\n # print ('Now working on group index', l[:-1])\n # name_list.append(int (l[:-1]))\n\n avg_TP = 0.0\n avg_FP = 0.0\n avg_TN = 0.0\n avg_FN = 0.0\n avg_precision = 0.0\n avg_recall = 0.0\n avg_accuracy = 0.0\n avg_SMTvalue = 0.0\n avg_M1 = 0.0\n avg_M2 = 0.0\n avg_M3 = 0.0\n SMTvalues = []\n for n in name_list:\n print ('\\n\\n\\n\\n NOW WORKING ON: ', n)\n filename_labelled_edges = './labelled/SA' + str(n) + '_edges_labelled.csv'\n filename_labelled_nodes = './labelled/SA' + str(n) + '_nodes_labelled.csv'\n solver = GraphSolver ()\n solver.load_graph(filename_labelled_edges)\n solver.load_node_manual_label(filename_labelled_nodes)\n\n pos, labels = solver.G.save_graph(file_name = str(n)+'before')\n # compute the size limit\n max_size = int(len(solver.G.subgraphs[0].nodes)/300) + 5\n print (\"max_size = \", max_size)\n solver.encode(max_size)\n\n print ('now solve')\n solver.solve()\n print ('now decode')\n solver.decode()\n solver.H.save_graph(file_name = str(n) + 'after', pos=pos, labels = labels)\n\n # also obtain statistics\n solver.obtain_statistics(filename_labelled_edges)\n solver.obtain_new_statistics()\n\n avg_TP += solver.count_TP\n avg_TN += solver.count_TN\n avg_FN += solver.count_FN\n avg_FP += solver.count_FP\n avg_M1 += solver.M1\n avg_M2 += solver.M2\n avg_M3 += solver.M3\n avg_precision += solver.precision\n avg_recall += solver.recall\n avg_accuracy += solver.accuracy\n avg_SMTvalue += solver.SMTvalue\n SMTvalues.append(solver.SMTvalue)\n\n # ===============\n avg_TP /= len(name_list)\n avg_TN /= len(name_list)\n avg_FN /= len(name_list)\n avg_FP /= len(name_list)\n\n avg_M1 /= len(name_list)\n avg_M2 /= len(name_list)\n avg_M3 /= len(name_list)\n\n avg_precision /= len(name_list)\n avg_recall /= len(name_list)\n avg_accuracy /= len(name_list)\n avg_SMTvalue /= len(name_list)\n print('=========FINALLY==========')\n print('average precision: ', avg_precision)\n print('average recall: ', avg_recall)\n print('average accuracy: ', avg_accuracy)\n print('\\n The average SMT values', SMTvalues)\n # print('\\n Average SMTvalue:', avg_SMTvalue)\n print ('***Average M1', avg_M1)\n print ('***Average M2', avg_M2)\n print ('***Average M3', avg_M3)\n end = time.time()\n hours, rem = divmod(end-start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"Time taken: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds))\n","sub_path":"generate_data/MyType3/graphSolver.py","file_name":"graphSolver.py","file_ext":"py","file_size_in_byte":24648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"238279077","text":"import sys\nimport math\n\nn, m = map(int, sys.stdin.readline().split(' '))\nl = [0]*n\nfor i in range(n):\n l[i] = int(sys.stdin.readline())\n\nlb = min(l)*m//n # lower bound\nub = max(l)*math.ceil(m/n) # upper bound\nmv = (lb+ub)//2\n\n# NOTE: the comparisons in this loop were garbled in the source (text between\n# '<' and '>' was stripped); the loop body is reconstructed from context.\nwhile lb < ub:\n s = 0\n for i in l:\n  s += mv//i\n if s < m:\n  lb = mv+1\n elif s > m:\n  ub = mv\n else:\n  break\n mv = (lb+ub)//2\n\ns = 0\n__max = 0\nfor i in l:\n tmp = i*(mv//i)\n if tmp > __max:\n __max = tmp\nprint(__max)\n","sub_path":"onlineJudgement/baekjoon/3079_입국심사/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
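# --- Editor's note (illustrative sketch, not from the snippet above) ---------
# The reconstructed loop above is a "binary search on the answer": find the
# smallest time T such that sum(T // t for t in times) >= people. The same
# pattern as a self-contained function:
def min_time(times, people):
    lo, hi = 1, min(times) * people  # hi always admits `people` completions
    while lo < hi:
        mid = (lo + hi) // 2
        if sum(mid // t for t in times) >= people:
            hi = mid  # mid suffices; try something smaller
        else:
            lo = mid + 1  # mid is too small
    return lo
# min_time([7, 10], 6) == 28, the classic immigration-desk example.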
+{"seq_id":"227201616","text":"#get_largest_CC.py\t\t\t2020.12.16\nimport networkx as nx\nimport sys\nimport os\n\nppi_graph_file = sys.argv[1]\nsubgraph_file = sys.argv[2]\nsub_topology_file = sys.argv[3]\n\nmy_graph = nx.Graph()\n\ndata_edge = nx.read_edgelist(ppi_graph_file)\nmy_graph.add_edges_from(data_edge.edges())\n\nprint (\"Current Network's number of nodes:\")\nprint (len(list(my_graph.nodes)))\n\nlargest_cc = max(nx.connected_components(my_graph), key=len)\nsubgraph_gene_list = list(largest_cc)\n\nprint (\"Number of nodes in largest connected component:\")\nprint (len(subgraph_gene_list))\n\noutput_txt = open(subgraph_file, 'w')\nfor gene in subgraph_gene_list:\n\toutput_txt.write(\"%s\\n\" % gene)\noutput_txt.close()\n\noutput_txt = open(sub_topology_file, 'w')\n\nfor node in subgraph_gene_list:\n\tedge_list = my_graph.edges(node)\n\tfor edge_info in edge_list:\n\t\tsource_node = edge_info[0]\n\t\ttarget_node = edge_info[1]\n\t\toutput_txt.write(\"%s\\t%s\\n\" % (source_node, target_node))\noutput_txt.close()\n","sub_path":"src/network/get_largest_CC.py","file_name":"get_largest_CC.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
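# --- Editor's note (minimal sketch of the core of the script above) ----------
# networkx yields connected components as node sets, so the giant component
# is the max() by size and subgraph() restricts the graph to those nodes:
import networkx as nx

def giant_component(g):
    nodes = max(nx.connected_components(g), key=len)
    return g.subgraph(nodes).copy()  # .copy() detaches the read-only view

# giant_component(nx.Graph([(1, 2), (3, 4), (4, 5)])) keeps nodes {3, 4, 5}.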
+{"seq_id":"609920408","text":"import matplotlib.pyplot as plt\nimport csv\nimport sys \n\nnames = []\nvalues = []\nvalues_round = []\n\nwith open(sys.argv[1]) as csvfile:\n\treadCSV = csv.reader(csvfile, delimiter=',')\n\tfor i in readCSV:\n\t\tprint(i[0] + \" : \" + i[1])\n\t\tnames.append(i[0])\n\t\tvalues.append(float(i[1]))\n\t\tvalues_round.append(round(float(i[1]), 2))\n#data = {'apples': 10, 'oranges': 15, 'lemons': 5, 'limes': 20}\n#names = list(data.keys())\n#values = list(data.values())\n\n\n\n#names = ['apples', 'oranges', 'lemons', 'limes']\n#values = [10, 15, 5, 20]\n\n#fig, axs = plt.subplots(1, 3, figsize=(9, 3), sharey=True)\n\nfig, ax = plt.subplots()\n\n\nplt.subplots_adjust(bottom=0.32, right=0.98, top=0.93, left=0.09)\nrects = ax.bar(names, values, color=['whitesmoke', 'silver', 'gray', 'black'], edgecolor='black')\n\n#print(rects[0])\n#ax.set_xticklabels(values)\nplt.title(sys.argv[2])\nplt.ylabel(sys.argv[3])\nplt.xlabel('Execution configuration [N threads, vector size, N repetitions]')\n\ntam = 0.15 * (max(values) - min(values))\nplt.ylim(min(values) - tam, max(values) + tam)\nplt.xticks(rotation='vertical')\n\nfor i in range(0, len(values)):\n\t(X, Y) = rects[i].xy\n\tplt.text(x=X+0.04, y=values[i] + 0.02, s=values_round[i], size = 7)\n#\tprint('x = ' + str(X) + ', y = ' + str(Y))\n\n#barlist = plt.bar()\n#barlist[0].set_color('r')\n\n#plt.show()\nplt.savefig('graph.png')\n","sub_path":"trabalhos/t5/parte1/generate_graph.py","file_name":"generate_graph.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
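# --- Editor's note ------------------------------------------------------------
# The plt.text() loop above positions one value label per bar by hand. On
# matplotlib >= 3.4 (a version assumption), labels for a whole BarContainer
# can be drawn in a single call instead:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
rects = ax.bar(["a", "b"], [1.0, 2.5])
ax.bar_label(rects, fmt="%.2f", fontsize=7, padding=2)  # one call, all bars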
\"{}\"'.format(interpeter, test_file)\n self._expected_output = output_file\n if os.path.exists(input_file):\n self._cmd += ' < \"{}\"'.format(input_file)\n\n def test(self) -> Tuple[TestResult, str]:\n diff = []\n \n start = time.time()\n res = subprocess.run(self._cmd, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, text=True)\n with open(self._expected_output) as file:\n for out, expected in itertools.zip_longest(res.stdout.splitlines(), file, fillvalue=str()):\n expected = expected.replace(\"\\n\", \"\")\n if out != expected:\n diff.append((out, expected))\n\n return TestResult(diff), getTimeStr(time.time() - start)\n","sub_path":"Tester/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"213421096","text":"\"\"\"\nGiven an array of sorted numbers, remove all duplicates from it. You should not use any extra space; after removing the duplicates in-place return the length of the subarray that has no duplicate in it.\n\nExample 1:\n\nInput: [2, 3, 3, 3, 6, 9, 9]\nOutput: 4\nExplanation: The first four elements after removing the duplicates will be [2, 3, 6, 9].\nExample 2:\n\nInput: [2, 2, 2, 11]\nOutput: 2\nExplanation: The first two elements after removing the duplicates will be [2, 11].\n\n\n\"\"\"\n\n\ndef remove_duplicates(arr):\n non_dup = 1\n n = len(arr)\n for i in range(n):\n if arr[non_dup-1] != arr[i]:\n arr[i], arr[non_dup] = arr[non_dup], arr[i]\n non_dup += 1\n\n return non_dup\n\nprint(remove_duplicates([2, 3, 3, 3, 6, 9, 9]))\nprint(remove_duplicates([2, 2, 2, 11]))\n","sub_path":"educative.io/coding_patterns/two_pointers/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"201456072","text":"import sys\nimport time\nimport logging\n\nfrom fwk.LED import LED\nfrom arg.BaseArg import BaseArg\n\nclass LEDArg(BaseArg):\n \"\"\"LEDArg arg for manager arguments\"\"\"\n\n def __init__(self):\n self.pwm = LED()\n super(LEDArg, self).__init__()\n\n def load_arguments(self, argv):\n for i in range(len(argv))[1:]:\n\n arg, val = self.get_argument(argv, i)\n\n if arg == \"gpio\" or arg == \"pin\" or arg == \"p\":\n self.pwm.gpio = int(val)\n\n if arg == \"value\" or arg == \"val\" or arg == \"v\":\n self.pwm.value = int(val)\n\n def do(self):\n self.validate()\n\n try:\n self.pwm.execute()\n finally:\n self.pwm.cleanup()\n","sub_path":"rpy/arg/LEDArg.py","file_name":"LEDArg.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"281430759","text":"import copy\nimport numpy\nimport _pickle as cPickle\n\nimport utils\nfrom data import read_all\nfrom covington_transistion import Configuration\n\n\"\"\"\nThe oracles that are used to decide what step to take next in the transition-based parsing systems. 
+{"seq_id":"281430759","text":"import copy\nimport numpy\nimport _pickle as cPickle\n\nimport utils\nfrom data import read_all\nfrom covington_transistion import Configuration\n\n\"\"\"\nThe oracles that are used to decide what step to take next in the transition-based parsing systems. The knowing oracle\nis used for training, the others for prediction.\n\"\"\"\n\n\nclass Oracle(object):\n def next_step(self, configuration):\n \"\"\"\n :param configuration: Configuration\n :return: String describing the action that needs to be taken next\n \"\"\"\n return \"\"\n\n\nclass KnowingOracle(Oracle):\n \"\"\"\n Knows the outcome and how to get there.\n \"\"\"\n def __init__(self, arcs):\n # The golden arc-collection it is working towards\n self.arcs = arcs\n\n def next_step(self, configuration):\n # The next step is deterministically decided by the resulting arc-collection self.arcs\n if configuration.empty_stack():\n return \"shift\"\n\n buffer = configuration.get_buffer_head()\n stack = configuration.get_stack_head()\n for arc in self.arcs:\n if str(arc.source) == buffer and str(arc.target) == stack:\n return \"left_arc\"\n if str(arc.source) == stack and str(arc.target) == buffer:\n return \"right_arc\"\n\n neighbours = [str(x.source) for x in self.arcs if str(x.target) == buffer]\n\n # If entity on buffer has no parent, ROOT is parent\n if str(stack) == \"ROOT\" and not neighbours:\n return \"right_arc\"\n neighbours.extend([str(x.target) for x in self.arcs if str(x.source) == buffer])\n for n in neighbours:\n if configuration.on_stack(n):\n return \"no_arc\"\n return \"shift\"\n\n\nclass NNOracle(Oracle):\n # Regular old greedy parser\n def __init__(self, network):\n # The network that decides what steps to take\n self.network = network\n\n def next_step(self, configuration):\n # The next step is the best decision according to self.network if it is possible to do that action, otherwise\n # it is the next best one.\n distribution = self.network.predict(configuration)\n actions = utils.get_actions()\n distribution = distribution.tolist()[0]\n en = list(enumerate(distribution))\n en.sort(key=lambda tup: tup[1])\n for (ind, val) in en[::-1]:\n action = list(actions.keys())[list(actions.values()).index(ind)]\n if configuration.action_possible(action):\n return action\n print(\"This should not print\")\n return None\n\n\nclass RandomOracle(Oracle):\n # An oracle that takes random decisions based on the distribution of actions found in the dataset\n def __init__(self):\n pass\n\n def next_step(self, configuration):\n indices = list(range(4)) # a list, so entries can be deleted below\n # TODO: not hardcoded\n distribution = [0.04, 0.15, 0.13, 0.68]\n actions = utils.get_actions()\n for i in range(4):\n ind = numpy.random.choice(indices, 1, p=distribution)[0]\n print(ind)\n action = list(actions.keys())[list(actions.values()).index(ind)]\n if configuration.action_possible(action):\n print(action)\n return action\n else:\n x = indices.index(ind)\n del indices[x]\n del distribution[x]\n if distribution:\n # renormalise so the remaining probabilities sum to 1\n total = sum(distribution)\n distribution = [d / total for d in distribution]\n return None\n\n\ndef get_training_sequence(entities, arcs, doc):\n # Given entities and arcs, yield the sequence of configuration and actions needed to get from the initial\n # configuration to the terminal one\n # Is used to determine the training sequence of a document\n configuration = Configuration(entities, doc)\n oracle = KnowingOracle(arcs)\n\n while not configuration.empty_buffer():\n function_string = oracle.next_step(configuration)\n conf_copy = cPickle.loads(cPickle.dumps(configuration, -1))\n yield (conf_copy, function_string)\n # applies function to configuration\n getattr(configuration, function_string)()\n\n\nif __name__ == '__main__':\n # Test methods\n documents = read_all(utils.dev, transitive=False)\n for doc in documents:\n sequence = get_training_sequence(doc.get_entities(), doc.get_relations(), doc)\n # Should print equal amounts\n 
print(len(doc.get_relations()), len([x for x in sequence if x[1] in [\"left_arc\", \"right_arc\"]]))\n","sub_path":"code/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"217369443","text":"#!/usr/bin/env python\n# coding: utf-8\nimport unittest\nimport sys\nfrom optparse import OptionParser\nimport logging\nfrom copy import copy\n\nfrom test.util import prepare_test_environment, clear_test_environment, GLOBAL\nfrom test.server import start_server, stop_server\nfrom grab.tools.watch import watch\n\n# **********\n# Grab Tests\n# * pycurl transport\n# * extensions\n# * tools\n# **********\nGRAB_TEST_LIST = (\n # Internal API\n 'test.case.grab_api',\n 'test.case.grab_transport',\n 'test.case.response_class',\n 'test.case.grab_debug',\n # Response processing\n 'test.case.grab_xml_processing',\n 'test.case.grab_response_body_processing',\n #'test.case.grab_charset',\n # Network\n 'test.case.grab_get_request',\n 'test.case.grab_request',\n 'test.case.grab_post_request',\n 'test.case.grab_user_agent',\n 'test.case.grab_cookies',\n # Refactor\n 'test.case.grab_proxy',\n 'test.case.grab_upload_file',\n 'test.case.grab_limit_option',\n 'test.case.grab_charset_issue',\n 'test.case.grab_pickle',\n # *** Extension sub-system\n 'test.case.extension',\n # *** Extensions\n 'test.case.ext_text',\n 'test.case.ext_rex',\n 'test.case.ext_lxml',\n #'test.case.ext_form',\n 'test.case.ext_doc',\n 'test.case.ext_structured',\n # *** Tornado Test Server\n 'test.case.debug_server',\n # *** grab.tools\n 'test.case.tools_text',\n 'test.case.tools_html',\n 'test.case.tools_lxml',\n 'test.case.tools_account',\n 'test.case.tools_control',\n 'test.case.tools_content',\n 'test.case.tools_http',\n # *** Item\n 'test.case.item',\n # *** Selector\n 'test.case.selector',\n # *** Mock transport\n 'test.case.grab_transport_mock',\n # Javascript features\n 'test.case.grab_js',\n # pycurl tests\n 'test.case.pycurl_cookie',\n 'test.case.util_module',\n 'test.case.export_mysql_dumper',\n)\n\nGRAB_EXTRA_TEST_LIST = (\n 'test.case.tools_russian',\n 'test.case.grab_django',\n 'test.case.ext_pyquery',\n)\n\n# *******************************************\n# Kit Tests\n# * All Grab tests with enabled Kit Transport\n# * Kit Selectors\n# *******************************************\n\nKIT_TEST_LIST = list(GRAB_TEST_LIST)\nKIT_TEST_LIST += [\n 'test.case.selector_kit',\n]\nfor name in (\n 'test.case.grab_proxy',\n 'test.case.grab_upload_file',\n 'test.case.grab_limit_option',\n):\n KIT_TEST_LIST.remove(name)\n\nKIT_EXTRA_TEST_LIST = list(GRAB_EXTRA_TEST_LIST)\nKIT_EXTRA_TEST_LIST += [\n 'test.case.kit_live_sites',\n]\n\n# ************\n# Spider Tests\n# ************\n\nSPIDER_TEST_LIST = (\n 'test.case.spider',\n #'tests.test_distributed_spider',\n 'test.case.spider_task',\n 'test.case.spider_proxy',\n 'test.case.spider_queue',\n 'test.case.spider_misc',\n 'test.case.spider_meta',\n 'test.case.spider_error',\n 'test.case.spider_cache',\n 'test.case.spider_command_controller',\n)\n\nSPIDER_EXTRA_TEST_LIST = ()\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n parser = OptionParser()\n parser.add_option('-t', '--test', help='Run only specified tests')\n parser.add_option('--transport', help='Test specified transport',\n default='grab.transport.curl.CurlTransport')\n parser.add_option('--extra', action='store_true',\n default=False, help='Run extra tests for specific backends')\n 
parser.add_option('--test-grab', action='store_true',\n default=False, help='Run tests for Grab')\n parser.add_option('--test-spider', action='store_true',\n default=False, help='Run tests for Grab::Spider')\n parser.add_option('--test-all', action='store_true',\n default=False, help='Run tests for both Grab and Grab::Spider')\n parser.add_option('--test-kit', action='store_true',\n default=False, help='Run tests for Grab with WebKit transport')\n parser.add_option('--backend-mongo', action='store_true',\n default=False, help='Run extra tests that depend on mongodb')\n parser.add_option('--backend-redis', action='store_true',\n default=False, help='Run extra tests that depend on redis')\n parser.add_option('--backend-mysql', action='store_true',\n default=False, help='Run extra tests that depend on mysql')\n parser.add_option('--backend-postgresql', action='store_true',\n default=False, help='Run extra tests that depend on postgresql')\n opts, args = parser.parse_args()\n\n GLOBAL['transport'] = opts.transport\n\n # Override CLI option in case of kit test\n if opts.test_kit:\n GLOBAL['transport'] = 'grab.transport.kit.KitTransport'\n\n if opts.backend_mongo:\n GLOBAL['backends'].append('mongo')\n\n if opts.backend_redis:\n GLOBAL['backends'].append('redis')\n\n if opts.backend_mysql:\n GLOBAL['backends'].append('mysql')\n\n if opts.backend_postgresql:\n GLOBAL['backends'].append('postgresql')\n\n prepare_test_environment()\n test_list = []\n\n if opts.test_all:\n test_list += GRAB_TEST_LIST\n test_list += SPIDER_TEST_LIST\n if opts.extra:\n test_list += GRAB_EXTRA_TEST_LIST\n test_list += SPIDER_EXTRA_TEST_LIST\n\n if opts.test_grab:\n test_list += GRAB_TEST_LIST\n if opts.extra:\n test_list += GRAB_EXTRA_TEST_LIST\n\n if opts.test_kit:\n test_list += KIT_TEST_LIST\n if opts.extra:\n test_list += KIT_EXTRA_TEST_LIST\n\n if opts.test_spider:\n test_list += SPIDER_TEST_LIST\n if opts.extra:\n test_list += SPIDER_EXTRA_TEST_LIST\n\n if opts.test:\n test_list += [opts.test]\n\n # Check tests integrity\n # Ensure that all test modules are imported correctly\n for path in test_list:\n __import__(path, None, None, ['foo'])\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for path in test_list:\n mod_suite = loader.loadTestsFromName(path)\n for some_suite in mod_suite:\n for test in some_suite:\n if not hasattr(test, '_backend') or test._backend in GLOBAL['backends']:\n suite.addTest(test)\n\n runner = unittest.TextTestRunner()\n\n start_server()\n result = runner.run(suite)\n\n clear_test_environment()\n if result.wasSuccessful():\n sys.exit(0)\n else:\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"runtest.py","file_name":"runtest.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"174568774","text":"#!/usr/bin/env python3\n\"\"\"Inception block (GoogLeNet-style) builder.\"\"\"\n\nimport tensorflow.keras as K\n\n\ndef inception_block(A_prev, filters):\n \"\"\"Build an inception block; `filters` lists the six branch filter counts.\"\"\"\n\n initialize = K.initializers.he_normal(seed=None)\n\n layer1 = K.layers.Conv2D(filters=filters[0], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(A_prev)\n\n layer2r = K.layers.Conv2D(filters=filters[1], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(A_prev)\n\n layer2 = K.layers.Conv2D(filters=filters[2], kernel_size=3,\n padding='same', activation='relu',\n kernel_initializer=initialize)(layer2r)\n\n layer3r = K.layers.Conv2D(filters=filters[3], 
kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(A_prev)\n\n layer3 = K.layers.Conv2D(filters=filters[4], kernel_size=5,\n padding='same', activation='relu',\n kernel_initializer=initialize)(layer3r)\n\n poolLayer = K.layers.MaxPooling2D(pool_size=[3, 3], strides=1,\n padding='same')(A_prev)\n\n poolLayerR = K.layers.Conv2D(filters=filters[5], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(poolLayer)\n\n layer_list = [layer1, layer2, layer3, poolLayerR]\n\n return (K.layers.concatenate(layer_list))\n","sub_path":"supervised_learning/0x08-deep_cnns/0-inception_block.py","file_name":"0-inception_block.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"180304909","text":"# Import the required modules\nimport cv2\nimport time\nimport PIL.Image\nfrom io import BytesIO\nimport numpy as np\nimport glob\nimport argparse\nfrom math import pow,sqrt\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nfrom imutils.video import FPS\nfrom threading import Thread\nfrom imutils.video import FileVideoStream\nimport imutils\nimport acapture\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", type=str, help=\"path to our input image\")\nap.add_argument(\"-c\", \"--confidence\", type=float, help=\"confidence threshold\")\nargs = vars(ap.parse_args())\n\nlabels = [line.strip() for line in open(r'C:\\Users\\sande\\Downloads\\mask-detector-master_68\\mask-detector-master\\class_labels.txt')]\n\n# Generate random bounding box bounding_box_color for each label\nbounding_box_color = np.random.uniform(0, 255, size=(len(labels), 3))\nnetwork = cv2.dnn.readNetFromCaffe(r'C:\\Users\\sande\\Downloads\\mask-detector-master_68\\mask-detector-master\\SSD_MobileNet_prototxt.txt', r'C:\\Users\\sande\\Downloads\\mask-detector-master_68\\mask-detector-master\\SSD_MobileNet.caffemodel')\n\n# ----\n\n# ### Detect faces on image using OpenCV\n# Face detection with OpenCV and deep learning (Adrian)\n# https://www.pyimagesearch.com/2018/02/26/face-detection-with-opencv-and-deep-learning/\n\n# load our serialized model from disk\ncaffe_model = 'deploy.prototxt.txt'\ncaffe_trained = 'res10_300x300_ssd_iter_140000.caffemodel'\ncaffe_confidence = 0.70\nmodel_folder = r'./'\nmask_model = \"mask_mobile_net.h5\"\n\nif args[\"confidence\"]:\n caffe_confidence = args[\"confidence\"]\n\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(model_folder + caffe_model, \n model_folder + caffe_trained\n )\n\n\nmodel = load_model(model_folder + mask_model)\n\n\n# Detect faces on image and call mask predictor\ndef detect_face_cnn(image, save = False, show = False):\n \n if image is not None:\n (h, w) = image.shape[:2]\n \n image_resized = cv2.resize(image, (300, 300))\n\n blob = cv2.dnn.blobFromImage(image_resized, \n 0.007843, (300, 300), 127.5)\n\n\n network.setInput(blob)\n detections = network.forward()\n\n pos_dict = dict()\n coordinates = dict()\n F = 615\n\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with the prediction\n confidence = detections[0, 0, i, 2]\n \n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence > caffe_confidence:\n # compute the (x, y)-coordinates of the 
bounding box for the\n # object\n class_id = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n if class_id == 15.00:\n\n # Draw bounding box for the object\n cv2.rectangle(image, (startX, startY), (endX, endY), bounding_box_color[class_id], 2)\n\n label = \"{}: {:.2f}%\".format(labels[class_id], confidence * 100)\n #print(\"{}\".format(label))\n\n\n coordinates[i] = (startX, startY, endX, endY)\n\n # Mid point of bounding box\n x_mid = round((startX+endX)/2,4)\n y_mid = round((startY+endY)/2,4)\n\n height = round(endY-startY,4)\n\n # Distance from camera based on triangle similarity\n distance = (165 * F)/height\n #print(\"Distance(cm):{dist}\\n\".format(dist=distance))\n\n # Mid-point of bounding boxes (in cm) based on triangle similarity technique\n x_mid_cm = (x_mid * distance) / F\n y_mid_cm = (y_mid * distance) / F\n pos_dict[i] = (x_mid_cm,y_mid_cm,distance)\n \n\n # Distance between every object detected in a frame\n close_objects = set()\n for i in pos_dict.keys():\n for j in pos_dict.keys():\n if i < j:\n dist = sqrt(pow(pos_dict[i][0]-pos_dict[j][0],2) + pow(pos_dict[i][1]-pos_dict[j][1],2) + pow(pos_dict[i][2]-pos_dict[j][2],2))\n\n # Check if distance less than 2 metres or 200 centimetres\n if dist < 200:\n close_objects.add(i)\n close_objects.add(j)\n\n for i in pos_dict.keys():\n if i in close_objects:\n COLOR = (0,0,255)\n else:\n COLOR = (0,255,0)\n (startX, startY, endX, endY) = coordinates[i]\n\n cv2.rectangle(image, (startX, startY), (endX, endY), COLOR, 2)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n # Convert cms to feet\n cv2.putText(image, 'Depth: {i} ft'.format(i=round(pos_dict[i][2]/30.48,4)), (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR, 2)\n\n if image is not None:\n (h, w) = image.shape[:2]\n \n image_resized = cv2.resize(image, (300, 300))\n\n blob = cv2.dnn.blobFromImage(image_resized, \n 1.0,\n (300, 300), \n (104.0, \n 177.0, \n 123.0))\n net.setInput(blob)\n detections = net.forward()\n\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with the prediction\n confidence = detections[0, 0, i, 2]\n \n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence > caffe_confidence:\n # compute the (x, y)-coordinates of the bounding box for the\n # object\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n try:\n img_crop = image[startY-10:endY+10, startX-10:endX+10]\n\n # predict mask or not\n pred, pred_res = predict_mask(img_crop)\n \n #print(\"Face Detection confidence:{:2f}\".format(round(confidence,2)), pred)\n\n label = \"MASK\" if pred_res == 0 else \"NO-MASK\"\n color = (0,255,0) if pred_res == 0 else (0,0,255)\n\n # cv2.putText(image, label, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3)\n # cv2.rectangle(image, (startX, startY), (endX, endY), color)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(image, (startX, startY), (endX, endY), color,2)\n cv2.putText(image, label, (startX, y),cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n except:\n print(\"found crop errors {}\".format(round(confidence,2)))\n\n \n if show:\n cv2.imshow(\"Image\", image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \n return image\n else:\n print(\"image not found!\")\n\n\n# Predict if face is using mask or not\ndef predict_mask(image):\n image = cv2.resize(image, (224, 
224))\n image = image.astype(\"float\") / 255.0\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n \n # make predictions on the input image\n pred = model.predict(image)\n pred_res = pred.argmax(axis=1)[0]\n \n return pred, pred_res\ndef open_cam_rtsp(uri, rtsp_latency, image_width, image_height):\n\tgst_str =(\"rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! appsink sync=false\").format(uri, rtsp_latency, image_width, image_height)\n\n\treturn cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)\n\ndef show_webcam():\n\t# fvs = FileVideoStream('rtsp://192.168.1.10:554').start()\n\t# time.sleep(1.0)\n\tfps = FPS().start()\n\t#cam = cv2.VideoCapture('rtsp://192.168.1.10:554')\n\tcam=cv2.VideoCapture(('gst-launch-1.0 \\\n ! rtspsrc location=rtsp://admin:1234567@192.168.1.10:554 latency=300 \\\n ! rtph264depay ! avdec_h264 ! video/x-raw ! videoconvert \\\n ! v4l2sink device=/dev/video0'),cv2.CAP_GSTREAMER)\n\tprint(cam)\n\t# cam.set(cv2.CAP_PROP_BUFFERSIZE, 200)\n\t# cam.set(3,640)\n\t# cam.set(4,480)\n\t# time.sleep(2)\n\t# cam.set(5,50)\n\t# cam.set(15, -8)\n\t#cam=cv2.VideoCapture((\"rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! appsink sync=false\").format('rtsp://192.168.1.10:554',100,640, 480),cv2.CAP_GSTREAMER)\n\t#print(cam)\n\t#cam.set(cv2.CAP_PROP_BUFFERSIZE,1)\n\t#cam = cv2.VideoCapture(0,cv2.CAP_DSHOW)\n\t#count=2\n\t#i=np.zeros((1920,1080,3))\n\twhile cam.isOpened():\n\t#while fvs.more():\n\t\ttry:\n\t\t\t\tret, frame = cam.read() # VideoCapture.read() returns an (ok, frame) pair\n\t\t\t\tif not ret:\n\t\t\t\t\tbreak\n\t\t\t\t#t1 = time.time()\n\t\t\t\t#cam=cam.get(cv2.CAP_PROP_BUFFERSIZE,3)\n\t\t\t\t#frame=fvs.read()\n\t\t\t\t# cam.set(3,640)\n\t\t\t\t# cam.set(4,480)\n\t\t\t\t# cam.set(cv2.CAP_PROP_FPS,5)\n\n\n\t\t\t\t# ret, frame = cam.read()\n\t\t\t\t# #print(frame.shape)\n\t\t\t\t# i=np.append(i,frame).reshape(count,1920,1080,3)\n\t\t\t\t# print(len(i))\n\t\t\t\t# if len(i)>=100:\n\t\t\t\t# \tfor j in i:\n\t\t\t\t# ret,frame = cam.read()\n\t\t\t\t# i.append(frame)\n\t\t\t\t#t1=time.time()\n\t\t\t\t# height , width , layers = frame.shape\n\t\t\t\t# new_h=int(height/2)\n\t\t\t\t# new_w=int(width/2)\n\t\t\t\t# frame = cv2.resize(frame, (new_w, new_h))\n\t\t\t\t#frame=cv2.resize(frame,(0,0),fx=0.25,fy=0.25)\n\t\t\t\tframe = imutils.resize(frame, width=450)\n\t\t\t\tframe = detect_face_cnn(frame)\n\t\t\t\t# cv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\t\t\t\t# cv2.setWindowProperty(\"Image\",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n\t\t\t\tcv2.imshow(\"Image\", frame)\n\t\t\t\t#print('the time is:',time.time()-t1)\n\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\tbreak\n\t\t\t\tfps.update()\n\t\t\t\t# count+=1\n\t\texcept KeyboardInterrupt:\n\t\t\t\tprint()\n\t\t\t\tcam.release()\n\t\t\t\t#fvs.stop()\n\t\t\t\tprint (\"Stream stopped\")\n\t\t\t\tbreak\n\n\n\n\tfps.stop()\n\t#print('frame:',i)\n\t# print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n\t# print(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n\tcam.release()\n\tcv2.destroyAllWindows()\n\t#fvs.stop()\n\n\n### MAIN AREA\n\n# ### Check image source from file or Webcam\n\n# select image or webcam\nif args[\"image\"] is not None:\n image = cv2.imread(args[\"image\"])\n detect_face_cnn(image, show = True)\nelse:\n show_webcam()\n\n\n\n\n","sub_path":"mask_distance_v2.py","file_name":"mask_distance_v2.py","file_ext":"py","file_size_in_byte":11044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"98531449","text":"import lra\n\nif __name__ == \"__main__\":\n runs_no = 1000\n n = 100\n\n prob_sum = 0\n for run in xrange(runs_no):\n alg = lra.LinearRegressionAlgorithm(n)\n temp_prob = alg.get_prob(True)\n print('prob for run ', run, ' = ', temp_prob)\n prob_sum += temp_prob\n\n print('average: ', 1 - prob_sum/float(runs_no))\n","sub_path":"week2/hw2_06.py","file_name":"hw2_06.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"428772045","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2021-2023 by SCICO Developers\n# All rights reserved. BSD 3-clause License.\n# This file is part of the SCICO package. Details of the copyright and\n# user license can be found in the 'LICENSE' file distributed with the\n# package.\n\n\"\"\"Utility functions used by example scripts.\"\"\"\n\n\nimport glob\nimport os\nimport tempfile\nimport zipfile\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\n\nimport imageio.v2 as iio\n\nimport scico.numpy as snp\nfrom scico import random, util\nfrom scico.typing import Shape\nfrom scipy.io import loadmat\nfrom scipy.ndimage import zoom\n\n\ndef rgb2gray(rgb: snp.Array) -> snp.Array:\n \"\"\"Convert an RGB image (or images) to grayscale.\n\n Args:\n rgb: RGB image as Nr x Nc x 3 or Nr x Nc x 3 x K array.\n\n Returns:\n Grayscale image as Nr x Nc or Nr x Nc x K array.\n \"\"\"\n\n w = snp.array([0.299, 0.587, 0.114], dtype=rgb.dtype)[np.newaxis, np.newaxis]\n return snp.sum(w * rgb, axis=2)\n\n\ndef volume_read(path: str, ext: str = \"tif\") -> np.ndarray:\n \"\"\"Read a 3D volume from a set of files in the specified directory.\n\n All files with extension `ext` (i.e. matching glob `*.ext`)\n in directory `path` are assumed to be image files and are read.\n The filenames are assumed to be such that their alphanumeric\n ordering corresponds to their order as volume slices.\n\n Args:\n path: Path to directory containing the image files.\n ext: Filename extension.\n\n Returns:\n Volume as a 3D array.\n \"\"\"\n\n slices = []\n for file in sorted(glob.glob(os.path.join(path, \"*.\" + ext))):\n image = iio.imread(file)\n slices.append(image)\n return np.dstack(slices)\n\n\ndef get_epfl_deconv_data(channel: int, path: str, verbose: bool = False): # pragma: no cover\n \"\"\"Download example data from EPFL Biomedical Imaging Group.\n\n Download deconvolution problem data from EPFL Biomedical Imaging\n Group. The downloaded data is converted to `.npz` format for\n convenient access via :func:`numpy.load`. 
The converted data is saved\n in a file `epfl_big_deconv_<channel>.npz` in the directory specified\n by `path`.\n\n Args:\n channel: Channel number between 0 and 2.\n path: Directory in which converted data is saved.\n verbose: Flag indicating whether to print status messages.\n \"\"\"\n\n # data source URL and filenames\n data_base_url = \"http://bigwww.epfl.ch/deconvolution/bio/\"\n data_zip_files = [\"CElegans-CY3.zip\", \"CElegans-DAPI.zip\", \"CElegans-FITC.zip\"]\n psf_zip_files = [\"PSF-\" + data for data in data_zip_files]\n\n # ensure path directory exists\n if not os.path.isdir(path):\n raise ValueError(f\"Path {path} does not exist or is not a directory.\")\n\n # create temporary directory\n temp_dir = tempfile.TemporaryDirectory()\n # download data and psf files for selected channel into temporary directory\n for zip_file in (data_zip_files[channel], psf_zip_files[channel]):\n if verbose:\n print(f\"Downloading {zip_file} from {data_base_url}\")\n data = util.url_get(data_base_url + zip_file)\n f = open(os.path.join(temp_dir.name, zip_file), \"wb\")\n f.write(data.read())\n f.close()\n if verbose:\n print(\"Download complete\")\n\n # unzip downloaded data into temporary directory\n for zip_file in (data_zip_files[channel], psf_zip_files[channel]):\n if verbose:\n print(f\"Extracting content from zip file {zip_file}\")\n with zipfile.ZipFile(os.path.join(temp_dir.name, zip_file), \"r\") as zip_ref:\n zip_ref.extractall(temp_dir.name)\n\n # read unzipped data files into 3D arrays and save as .npz\n zip_file = data_zip_files[channel]\n y = volume_read(os.path.join(temp_dir.name, zip_file[:-4]))\n zip_file = psf_zip_files[channel]\n psf = volume_read(os.path.join(temp_dir.name, zip_file[:-4]))\n\n npz_file = os.path.join(path, f\"epfl_big_deconv_{channel}.npz\")\n if verbose:\n print(f\"Saving as {npz_file}\")\n np.savez(npz_file, y=y, psf=psf)\n\n\ndef epfl_deconv_data(\n channel: int, verbose: bool = False, cache_path: Optional[str] = None\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get deconvolution problem data from EPFL Biomedical Imaging Group.\n\n If the data has previously been downloaded, it will be retrieved from\n a local cache.\n\n Args:\n channel: Channel number between 0 and 2.\n verbose: Flag indicating whether to print status messages.\n cache_path: Directory in which downloaded data is cached. The\n default is `~/.cache/scico/examples`, where `~` represents\n the user home directory.\n\n Returns:\n tuple: A tuple (y, psf) containing:\n\n - **y** : (np.ndarray): Blurred channel data.\n - **psf** : (np.ndarray): Channel psf.\n \"\"\"\n\n # set default cache path if not specified\n if cache_path is None: # pragma: no cover\n cache_path = os.path.join(os.path.expanduser(\"~\"), \".cache\", \"scico\", \"examples\")\n\n # create cache directory and download data if not already present\n npz_file = os.path.join(cache_path, f\"epfl_big_deconv_{channel}.npz\")\n if not os.path.isfile(npz_file): # pragma: no cover\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n get_epfl_deconv_data(channel, path=cache_path, verbose=verbose)\n\n # load data and return y and psf arrays converted to float32\n npz = np.load(npz_file)\n y = npz[\"y\"].astype(np.float32)\n psf = npz[\"psf\"].astype(np.float32)\n return y, psf\n
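# --- Editor's sketch (usage illustration, not part of the library) -----------
# Typical call pattern for the caching helper above: the first call downloads
# and converts the EPFL data, later calls read the cached .npz directly.
def _epfl_usage_example():  # hypothetical demo wrapper
    y, psf = epfl_deconv_data(channel=0, verbose=True)
    print(y.shape, y.dtype, psf.shape)  # float32 volumes, per the docstring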
\n\ndef get_ucb_diffusercam_data(path: str, verbose: bool = False): # pragma: no cover\n \"\"\"Download example data from UC Berkeley Waller Lab diffusercam project.\n\n Download deconvolution problem data from UC Berkeley Waller Lab\n diffusercam project. The downloaded data is converted to `.npz`\n format for convenient access via :func:`numpy.load`. The\n converted data is saved in a file `ucb_diffcam_data.npz` in\n the directory specified by `path`.\n\n Args:\n path: Directory in which converted data is saved.\n verbose: Flag indicating whether to print status messages.\n \"\"\"\n\n # data source URL and filenames\n data_base_url = \"https://github.com/Waller-Lab/DiffuserCam/blob/master/example_data/\"\n data_files = [\"example_psfs.mat\", \"example_raw.png\"]\n\n # ensure path directory exists\n if not os.path.isdir(path):\n raise ValueError(f\"Path {path} does not exist or is not a directory.\")\n\n # create temporary directory\n temp_dir = tempfile.TemporaryDirectory()\n # download data files into temporary directory\n for data_file in data_files:\n if verbose:\n print(f\"Downloading {data_file} from {data_base_url}\")\n data = util.url_get(data_base_url + data_file + \"?raw=true\")\n f = open(os.path.join(temp_dir.name, data_file), \"wb\")\n f.write(data.read())\n f.close()\n if verbose:\n print(\"Download complete\")\n\n # load data, normalize it, and save as npz\n y = iio.imread(os.path.join(temp_dir.name, \"example_raw.png\"))\n y = y.astype(np.float32)\n y -= 100.0\n y /= y.max()\n mat = loadmat(os.path.join(temp_dir.name, \"example_psfs.mat\"))\n psf = mat[\"psf\"].astype(np.float64)\n psf -= 102.0\n psf /= np.linalg.norm(psf, axis=(0, 1)).min()\n\n # save as .npz\n npz_file = os.path.join(path, \"ucb_diffcam_data.npz\")\n if verbose:\n print(f\"Saving as {npz_file}\")\n np.savez(npz_file, y=y, psf=psf)\n\n\ndef ucb_diffusercam_data(\n verbose: bool = False, cache_path: Optional[str] = None\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get example data from UC Berkeley Waller Lab diffusercam project.\n\n If the data has previously been downloaded, it will be retrieved from\n a local cache.\n\n Args:\n verbose: Flag indicating whether to print status messages.\n cache_path: Directory in which downloaded data is cached. The\n default is `~/.cache/scico/examples`, where `~` represents\n the user home directory.\n\n Returns:\n tuple: A tuple (y, psf) containing:\n\n - **y** : (np.ndarray): Measured image\n - **psf** : (np.ndarray): Stack of psfs.\n \"\"\"\n\n # set default cache path if not specified\n if cache_path is None: # pragma: no cover\n cache_path = os.path.join(os.path.expanduser(\"~\"), \".cache\", \"scico\", \"examples\")\n\n # create cache directory and download data if not already present\n npz_file = os.path.join(cache_path, \"ucb_diffcam_data.npz\")\n if not os.path.isfile(npz_file): # pragma: no cover\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n get_ucb_diffusercam_data(path=cache_path, verbose=verbose)\n\n # load data and return y (float32) and psf (float64) arrays\n npz = np.load(npz_file)\n y = npz[\"y\"].astype(np.float32)\n psf = npz[\"psf\"].astype(np.float64)\n return y, psf\n\n\ndef downsample_volume(vol: snp.Array, rate: int) -> snp.Array:\n \"\"\"Downsample a 3D array.\n\n Downsample a 3D array. If the volume dimensions can be divided by\n `rate`, this is achieved via averaging distinct `rate` x `rate` x\n `rate` blocks in `vol`. 
Otherwise it is achieved via a call to\n :func:`scipy.ndimage.zoom`.\n\n Args:\n vol: Input volume.\n rate: Downsampling rate.\n\n Returns:\n Downsampled volume.\n \"\"\"\n\n if rate == 1:\n return vol\n\n if np.all([n % rate == 0 for n in vol.shape]):\n vol = snp.mean(snp.reshape(vol, (-1, rate, vol.shape[1], vol.shape[2])), axis=1)\n vol = snp.mean(snp.reshape(vol, (vol.shape[0], -1, rate, vol.shape[2])), axis=2)\n vol = snp.mean(snp.reshape(vol, (vol.shape[0], vol.shape[1], -1, rate)), axis=3)\n else:\n vol = zoom(vol, 1.0 / rate)\n\n return vol\n\n\ndef tile_volume_slices(x: snp.Array, sep_width: int = 10) -> snp.Array:\n \"\"\"Make an image with tiled slices from an input volume.\n\n Make an image with tiled `xy`, `xz`, and `yz` slices from an input\n volume.\n\n Args:\n x: Input volume consisting of a 3D or 4D array. If the input is\n 4D, the final axis represents a channel index.\n sep_width: Number of pixels separating the slices in the output\n image.\n\n Returns:\n Image containing tiled slices.\n \"\"\"\n\n if x.ndim == 3:\n fshape: Tuple[int, ...] = (x.shape[0], sep_width)\n else:\n fshape = (x.shape[0], sep_width, 3)\n out = snp.concatenate(\n (\n x[:, :, x.shape[2] // 2],\n snp.full(fshape, snp.nan),\n x[:, x.shape[1] // 2, :],\n ),\n axis=1,\n )\n\n if x.ndim == 3:\n fshape0: Tuple[int, ...] = (sep_width, out.shape[1])\n fshape1: Tuple[int, ...] = (x.shape[2], x.shape[2] + sep_width)\n trans: Tuple[int, ...] = (1, 0)\n\n else:\n fshape0 = (sep_width, out.shape[1], 3)\n fshape1 = (x.shape[2], x.shape[2] + sep_width, 3)\n trans = (1, 0, 2)\n out = snp.concatenate(\n (\n out,\n snp.full(fshape0, snp.nan),\n snp.concatenate(\n (\n x[x.shape[0] // 2, :, :].transpose(trans),\n snp.full(fshape1, snp.nan),\n ),\n axis=1,\n ),\n ),\n axis=0,\n )\n\n out = snp.where(snp.isnan(out), snp.nanmax(out), out)\n\n return out\n\n\ndef create_cone(img_shape: Shape, center: Optional[List[float]] = None) -> snp.Array:\n \"\"\"Compute a 2D map of the distance from a center pixel.\n\n Args:\n img_shape: Shape of the image for which the distance map is being\n computed.\n center: Tuple of center pixel coordinates. If ``None``, this is\n set to the center of the image.\n\n Returns:\n An image containing a 2D map of the distances.\n \"\"\"\n\n if center is None:\n center = [(img_dim - 1) / 2 for img_dim in img_shape]\n\n coords = [snp.arange(0, img_dim) for img_dim in img_shape]\n coord_mesh = snp.meshgrid(*coords, sparse=True, indexing=\"ij\")\n\n dist_map = sum([(coord_mesh[i] - center[i]) ** 2 for i in range(len(coord_mesh))])\n dist_map = snp.sqrt(dist_map)\n\n return dist_map\n\n\ndef gaussian(shape: Shape, sigma: Optional[np.ndarray] = None) -> np.ndarray:\n r\"\"\"Construct a multivariate Gaussian distribution function.\n\n Construct a zero-mean multivariate Gaussian distribution function\n\n .. 
math::\n f(\\mb{x}) = (2 \\pi)^{-N/2} \\, \\det(\\Sigma)^{-1/2} \\, \\exp \\left(\n -\\frac{\\mb{x}^T \\, \\Sigma^{-1} \\, \\mb{x}}{2} \\right) \\;,\n\n where :math:`\\Sigma` is the covariance matrix of the distribution.\n\n Args:\n shape: Shape of output array.\n sigma: Covariance matrix.\n\n Returns:\n Sampled function.\n\n Raises:\n ValueError: If the array `sigma` cannot be inverted.\n \"\"\"\n\n if sigma is None:\n sigma = np.diag(np.array(shape) / 7) ** 2\n N = len(shape)\n try:\n sigmainv = np.linalg.inv(sigma)\n sigmadet = np.linalg.det(sigma)\n except np.linalg.LinAlgError as e:\n raise ValueError(f\"Invalid covariance matrix {sigma}.\") from e\n grd = np.stack(np.mgrid[[slice(-(n - 1) / 2, (n + 1) / 2) for n in shape]], axis=-1)\n sigmax = np.dot(grd, sigmainv)\n xtsigmax = np.sum(grd * np.dot(grd, sigmainv), axis=-1)\n const = ((2.0 * np.pi) ** (-N / 2.0)) * (sigmadet ** (-1.0 / 2.0))\n return const * np.exp(-xtsigmax / 2.0)\n\n\ndef create_circular_phantom(\n img_shape: Shape, radius_list: list, val_list: list, center: Optional[list] = None\n) -> snp.Array:\n \"\"\"Construct a circular phantom with given radii and intensities.\n\n Args:\n img_shape: Shape of the phantom to be created.\n radius_list: List of radii of the rings in the phantom.\n val_list: List of intensity values of the rings in the phantom.\n center: Tuple of center pixel coordinates. If ``None``, this is\n set to the center of the image.\n\n Returns:\n The computed circular phantom.\n \"\"\"\n\n dist_map = create_cone(img_shape, center)\n\n img = snp.zeros(img_shape)\n for r, val in zip(radius_list, val_list):\n # In numpy: img[dist_map < r] = val\n img = img.at[dist_map < r].set(val)\n\n return img\n\n\ndef create_3d_foam_phantom(\n im_shape: Shape,\n N_sphere: int,\n r_mean: float = 0.1,\n r_std: float = 0.001,\n pad: float = 0.01,\n is_random: bool = False,\n) -> snp.Array:\n \"\"\"Construct a 3D phantom with random radii and centers.\n\n Args:\n im_shape: Shape of input image.\n N_sphere: Number of spheres added.\n r_mean: Mean radius of sphere (normalized to 1 along each axis).\n Default 0.1.\n r_std: Standard deviation of radius of sphere (normalized to 1\n along each axis). Default 0.001.\n pad: Padding length (normalized to 1 along each axis). Default 0.01.\n is_random: Flag used to control randomness of phantom generation.\n If ``False``, random seed is set to 1 in order to make the\n process deterministic. 
Default ``False``.\n\n Returns:\n 3D phantom of shape `im_shape`.\n \"\"\"\n c_lo = 0.0\n c_hi = 1.0\n\n if not is_random:\n np.random.seed(1)\n\n coord_list = [snp.linspace(0, 1, N) for N in im_shape]\n x = snp.stack(snp.meshgrid(*coord_list, indexing=\"ij\"), axis=-1)\n\n centers = np.random.uniform(low=r_mean + pad, high=1 - r_mean - pad, size=(N_sphere, 3))\n radii = r_std * np.random.randn(N_sphere) + r_mean\n\n im = snp.zeros(im_shape) + c_lo\n for c, r in zip(centers, radii): # type: ignore\n dist = snp.sum((x - c) ** 2, axis=-1)\n if snp.mean(im[dist < r**2] - c_lo) < 0.01 * c_hi:\n # equivalent to im[dist < r**2] = c_hi in numpy\n im = im.at[dist < r**2].set(c_hi)\n\n return im\n\n\ndef create_conv_sparse_phantom(Nx: int, Nnz: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Construct a disc dictionary and sparse coefficient maps.\n\n Construct a disc dictionary and a corresponding set of sparse\n coefficient maps for testing convolutional sparse coding algorithms.\n\n Args:\n Nx: Size of coefficient maps (3 x Nx x Nx).\n Nnz: Number of non-zero coefficients across all coefficient maps.\n\n Returns:\n A tuple consisting of a stack of 2D filters and the coefficient\n map array.\n \"\"\"\n\n # constant parameters\n M = 3\n Nh = 7\n e = 1\n\n # create disc filters\n h = np.zeros((M, 2 * Nh + 1, 2 * Nh + 1))\n gr, gc = np.ogrid[-Nh : Nh + 1, -Nh : Nh + 1]\n for m in range(M):\n r = 2 * m + 3\n d = np.sqrt(gr**2 + gc**2)\n v = (np.clip(d, r - e, r + e) - (r - e)) / (2 * e)\n v = 1.0 - v\n h[m] = v\n\n # create sparse random coefficient maps\n np.random.seed(1234)\n x = np.zeros((M, Nx, Nx))\n idx0 = np.random.randint(0, M, size=(Nnz,))\n idx1 = np.random.randint(0, Nx, size=(2, Nnz))\n val = np.random.uniform(0, 5, size=(Nnz,))\n x[idx0, idx1[0], idx1[1]] = val\n\n return h, x\n\n\ndef create_tangle_phantom(nx: int, ny: int, nz: int) -> snp.Array:\n \"\"\"Construct a volume phantom.\n\n Args:\n nx: x-size of output.\n ny: y-size of output.\n nz: z-size of output.\n\n Returns:\n An array with shape (nz, ny, nx).\n\n \"\"\"\n xs = 1.0 * np.linspace(-1.0, 1.0, nx)\n ys = 1.0 * np.linspace(-1.0, 1.0, ny)\n zs = 1.0 * np.linspace(-1.0, 1.0, nz)\n\n # default ordering for meshgrid is `xy`, this makes inputs of length\n # M, N, P will create a mesh of N, M, P. Thus we want ys, zs and xs.\n xx, yy, zz = np.meshgrid(ys, zs, xs, copy=True)\n xx = 3.0 * xx\n yy = 3.0 * yy\n zz = 3.0 * zz\n values = (\n xx * xx * xx * xx\n - 5.0 * xx * xx\n + yy * yy * yy * yy\n - 5.0 * yy * yy\n + zz * zz * zz * zz\n - 5.0 * zz * zz\n + 11.8\n ) * 0.2 + 0.5\n return (values < 2.0).astype(float)\n\n\ndef spnoise(\n img: Union[np.ndarray, snp.Array], nfrac: float, nmin: float = 0.0, nmax: float = 1.0\n) -> Union[np.ndarray, snp.Array]:\n \"\"\"Return image with salt & pepper noise imposed on it.\n\n Args:\n img: Input image.\n nfrac: Desired fraction of pixels corrupted by noise.\n nmin: Lower value for noise (pepper). Default 0.0.\n nmax: Upper value for noise (salt). 
Default 1.0.\n\n Returns:\n Noisy image\n \"\"\"\n\n if isinstance(img, np.ndarray):\n spm = np.random.uniform(-1.0, 1.0, img.shape) # type: ignore\n imgn = img.copy()\n imgn[spm < nfrac - 1.0] = nmin\n imgn[spm > 1.0 - nfrac] = nmax\n else:\n spm, key = random.uniform(shape=img.shape, minval=-1.0, maxval=1.0, seed=0) # type: ignore\n imgn = img\n imgn = imgn.at[spm < nfrac - 1.0].set(nmin) # type: ignore\n imgn = imgn.at[spm > 1.0 - nfrac].set(nmax) # type: ignore\n return imgn\n\n\ndef phase_diff(x: snp.Array, y: snp.Array) -> snp.Array:\n \"\"\"Distance between phase angles.\n\n Compute the distance between two arrays of phase angles, with\n appropriate phase wrapping to minimize the distance.\n\n Args:\n x: Input array.\n y: Input array.\n\n Returns:\n Array of angular distances.\n \"\"\"\n\n mod = snp.mod(snp.abs(x - y), 2 * snp.pi)\n return snp.minimum(mod, 2 * snp.pi - mod)\n","sub_path":"scico/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":19310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
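# --- Editor's note (numeric check of phase_diff() above, plain numpy) --------
# phase_diff() takes |x - y| mod 2*pi and then the shorter way around the
# circle, so two nearly identical angles written as 0.1 and 6.2 stay close:
import numpy as np
d = np.abs(0.1 - 6.2) % (2 * np.pi)  # raw gap, just short of a full turn
print(min(d, 2 * np.pi - d))         # ~0.1832: the wrapped angular distance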
for file in files:\r\n remove_empty_folders(file,1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"empty_remove.py","file_name":"empty_remove.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"508237477","text":"# NOTE: 'procs' is the abbreviation of 'processed'\n# NOTE: 'baln' is the abbreviation of 'balance(d)'\n\nimport numpy as np\nimport cv2\nimport pickle\nimport time\n\nfileNames = [\"190627_184747_1067\", \"190625_221209_1810\"]\nRESIZE_RATE = 0.25\n\nDAYNIGHT_CHECK_POS = (5, 5)\nUSED_KEYS = [\"up\", \"down\", \"none\"]\n\n\nkeyIndex = {key: idx for idx, key in enumerate(USED_KEYS)}\n\n\ndef shuffle2Array(arr1, arr2, length, axis1=0, axis2=0):\n assert len(arr1) == len(arr2) == length, \\\n \"Error: arr1, arr2, length have different lengths.\"\n shuffleIndex = np.random.permutation(length)\n shuffledArr1 = arr1[shuffleIndex]\n shuffledArr2 = arr2[shuffleIndex]\n return shuffledArr1, shuffledArr2\n\n\ndef balanceData(npImgs, npKeys):\n # Separate up/down/none key imgs\n upImgs = npImgs[npKeys == 'up']\n downImgs = npImgs[npKeys == 'down']\n noneImgs = npImgs[npKeys == 'none']\n\n # Balance the quantity of data (cut noneImgs to the same amount)\n noneImgs = np.random.permutation(noneImgs)\n noneImgs = noneImgs[: max(len(upImgs), len(downImgs)), :, :]\n\n # Concatenate Imgs, and shuffle (labels must be lists of ints, not products)\n concatImgs = np.concatenate([upImgs, downImgs, noneImgs])\n concatKeys = np.array([keyIndex[\"up\"]] * len(upImgs) +\n [keyIndex[\"down\"]] * len(downImgs) +\n [keyIndex[\"none\"]] * len(noneImgs))\n balnImgs, balnKeys = shuffle2Array(concatImgs, concatKeys, len(concatImgs))\n\n return balnImgs, balnKeys\n\n\ndef processImg(Img):\n # Resize image to reduce data volume (scaled by RESIZE_RATE)\n procsImg = cv2.resize(Img, None, fx=RESIZE_RATE, fy=RESIZE_RATE)\n\n # Threshold (set maxval to 1, to normalize)\n if Img[DAYNIGHT_CHECK_POS] >= 127:\n _, procsImg = cv2.threshold(procsImg, 127, 1, cv2.THRESH_BINARY)\n else:\n # Inverse color when it's night time\n _, procsImg = cv2.threshold(procsImg, 127, 1, cv2.THRESH_BINARY_INV)\n\n # OPTIMIZE: ugly code\n procsImg = procsImg.reshape([1] + list(procsImg.shape) + [1])\n\n return procsImg\n\n\nif __name__ == \"__main__\":\n # with open(f\"0_orig_data/{fileNames}.pickle\", 'rb') as file:\n # origData = pickle.load(file)\n # origImgs = origImgs.append(origData[\"origimgs\"], axis=0)\n # keys = origData[\"keys\"]\n\n # Load data\n print(\"Loading data...\")\n origImgs = []\n keys = []\n for fileName in fileNames:\n with open(f\"0_orig_data/{fileName}.pickle\", 'rb') as file:\n origData = pickle.load(file)\n origImgs.extend(origData[\"origImgs\"])\n keys.extend(origData[\"keys\"])\n\n # Change to np.array format\n origImgs = np.array(origImgs)\n keys = np.array(keys)\n print(\"Data loaded!\")\n\n # Process data\n # OPTIMIZE: try to use parallel operation to optimize\n print(\"Processing images...\")\n # OPTIMIZE: somehow ugly\n procs_size = (1,\n round(origImgs.shape[1] * RESIZE_RATE),\n round(origImgs.shape[2] * RESIZE_RATE),\n 1)\n\n counter = 0\n per = round(len(origImgs) / 10)\n procsImgs = np.empty(procs_size)\n for img in origImgs:\n procsImg = processImg(img)\n procsImgs = np.append(procsImgs, procsImg, axis=0)\n\n # Show progress rate\n if counter % per == 0:\n print(f\"{counter//per}%\")\n counter += 1\n\n procsImgs = procsImgs[1:]\n print(\"Images processed!\")\n\n # Normalization\n # procsImgs = (procsImgs) / (procsImgs.max() - procsImgs.min())\n\n
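 # NOTE: balanceData returns integer labels (keyIndex maps each name in\n # USED_KEYS to its index), so the pickled \"keys\" are numeric from here on\n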
data\n print(\"Balancing data...\")\n procsImgs, keys = balanceData(procsImgs, keys)\n print(\"Data balanced!\")\n\n # Save balanced data\n print(\"Saving data...\")\n saveFileName = f\"{time.strftime('%y%m%d_%H%M%S')}_{len(procsImgs)}\"\n with open(f\"0_balanced_data/{saveFileName}.pickle\", 'wb') as file:\n procsData = {\"procsImgs\": procsImgs, \"keys\": keys}\n pickle.dump(procsData, file)\n print(f\"Saved as {saveFileName}\")\n print(\"Data saved!\")\n","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"613014781","text":"#! /usr/bin/env python\nimport pygame\n\n#Clase para el Muro\nclass Muro(pygame.sprite.Sprite):\n\tdef __init__(self,posx, posy):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.imagenMuro = pygame.image.load('img/muro.png')\n\t\tself.rect = self.imagenMuro.get_rect()\n\t\tself.visible = False\n\t\tself.colisiones = True\t\t\t\n\t\tself.rect.top = posy\n\t\tself.rect.left = posx\n\t\t\n\tdef dibujar(self,superficie):\n\t\tsuperficie.blit(self.imagenMuro, self.rect)","sub_path":"Practica1/Laberinto/Clases/Muro.py","file_name":"Muro.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"282667951","text":"import sys\nimport subprocess\nimport os\n\nif len(sys.argv) < 2:\n print(\"need to provide a folder arg\")\n sys.exit()\n\npath = os.path.dirname(os.path.abspath(__file__))\nfolder = sys.argv[1]\nprint(\"mpiexec -n 1 \"+path+\"/HDF_INPUT/basic_industries.h5 \"+path+\"/HDF_OUTPUT/\"+folder+\"/\")\nsubprocess.call(\"mpiexec -n 1 \"+path+\"/main_parallel \"+path+\"/HDF_INPUT/basic_industries.h5 \"+path+\"/HDF_OUTPUT/\"+folder+\"/\", shell=True)\n\n","sub_path":"runme.py","file_name":"runme.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"193825577","text":"\"\"\"\nModeling helper functions\n\"\"\"\nimport pandas as pd\nfrom sklearn import metrics\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef evaluate_performance(test_y, test_pred, print_vals=True):\n cnf_matrix = metrics.confusion_matrix(test_y, test_pred)\n \n class_names=['Reg 1', 'Reg 2', 'Reg 3', 'Reg 5']\n cnf_matrix = pd.DataFrame(cnf_matrix, index = class_names,\n columns = class_names)\n \n # plot confusion matrix with heatmap\n sns.heatmap(cnf_matrix, annot=True, cmap=\"YlGnBu\" ,fmt='g')\n plt.tight_layout()\n plt.title('Confusion matrix', y=1.1)\n plt.ylabel('Actual label')\n plt.xlabel('Predicted label')\n \n if print_vals :\n count_misclassified = (test_y != test_pred).sum()\n print('Misclassified samples: {}'.format(count_misclassified))\n accuracy = metrics.accuracy_score(test_y, test_pred)\n print('Classification Report:')\n print(metrics.classification_report(test_y, test_pred)) \n\ndef microaveage_F1(test_y, test_pred):\n return metrics.classification_report(test_y, test_pred, output_dict=True)['weighted avg']['f1-score']","sub_path":"notebooks/Notebook_helpers/modeling_helpers.py","file_name":"modeling_helpers.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"589608270","text":"import random\nfrom statistics import mean\n\n\nclass Player:\n def __init__(self):\n self.scores = []\n self.stop = 0\n\n def roll_die(self):\n return random.randint(1, 
6)\n\n\nclass SmartPlayer(Player):\n def __init__(self):\n super().__init__()\n self.stop = 20\n\n\nclass SmartPlayer1(Player):\n def __init__(self):\n super().__init__()\n self.stop = 19\n\n\nclass SmartPlayer2(Player):\n def __init__(self):\n super().__init__()\n self.stop = 21\n\n\nclass Game:\n def __init__(self, turns, player):\n self.player = player\n self.turns = turns\n self.score = 0\n\n def turn(self):\n turn_score = 0\n while True:\n roll = self.player.roll_die()\n if roll == 1:\n turn_score = 0\n break\n else:\n turn_score += roll\n # when this becomes a two-player game, player input will be requested here\n if self.player.stop < turn_score:\n break\n self.score += turn_score\n\n def play_game(self):\n while self.turns > 0:\n self.turn()\n self.turns -= 1\n self.player.scores.append(self.score)\n\n\ndef main():\n def_player = Player()\n smart_player = SmartPlayer()\n smart_player_19 = SmartPlayer1()\n #smart_player_21 = SmartPlayer2()\n\n games = 100000\n\n while games > 0:\n Game(7, def_player).play_game()\n Game(7, smart_player).play_game()\n Game(7, smart_player_19).play_game()\n #Game(7, smart_player_21).play_game()\n games -= 1\n\n print(\"\"\"\nDefault Player Mean Score: {}\nSmart Player 20 Mean Score: {}\nSmart Player 19 Mean Score: {}\n \"\"\".format(mean(def_player.scores), mean(smart_player.scores),\n mean(smart_player_19.scores)))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pigsol.py","file_name":"pigsol.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"460825892","text":"#!/usr/bin/env python\n\nimport SocketServer as ss\nimport struct\nimport os\nfrom binascii import hexlify\nimport hashlib\nfrom subprocess import Popen, PIPE\n\n\nclass Handler(ss.StreamRequestHandler):\n\n def handle(self):\n put = self.wfile.write\n sigbytes = 2592\n\n put('Signature verification service, please send a message first\\n')\n msg = self.rfile.readline()[:-1]\n msghash = hashlib.sha256(msg).hexdigest()\n print('verifying sig for %s from %s' % (msg, self.client_address))\n\n put('Now please send a signature, in hex\\n')\n sig = self.rfile.readline()[:-1]\n\n process = Popen(['./verify', msghash, sig], stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate()\n\n if stderr != '':\n put(stderr)\n return\n else:\n put(\"Signature is valid\\n\")\n\n\nclass ReusableTCPServer(ss.ForkingMixIn, ss.TCPServer):\n allow_reuse_address = True\n\nif __name__ == '__main__':\n HOST, PORT = ('0.0.0.0', 2222)\n ss.TCPServer.allow_reuse_address = True\n server = ReusableTCPServer((HOST, PORT), Handler)\n server.serve_forever()\n","sub_path":"serve_verify.py","file_name":"serve_verify.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"108607638","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\nfrom collections import namedtuple, OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.utils.model_zoo as model_zoo\n\nfrom nets.network import Network\nfrom model.config import cfg\nfrom .utils import 
Adapt2CaffeData\n\n\ndef conv_bn(inp, oup, stride):\n\treturn nn.Sequential(nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.ReLU6(inplace=True))\n\n\ndef conv_1x1_bn(inp, oup):\n\treturn nn.Sequential(nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.ReLU6(inplace=True))\n\n\nclass InvertedResidual(nn.Module):\n\tdef __init__(self, inp, oup, stride, expand_ratio):\n\t\tsuper(InvertedResidual, self).__init__()\n\t\tself.stride = stride\n\t\tassert stride in [1, 2]\n\n\t\thidden_dim = round(inp * expand_ratio)\n\t\tself.use_res_connect = self.stride == 1 and inp == oup\n\n\t\tif expand_ratio == 1:\n\t\t\tself.conv = nn.Sequential( # dw\n\t\t\t\tnn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n\t\t\t\tnn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True), # pw-linear\n\t\t\t\tnn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), )\n\t\telse:\n\t\t\tself.conv = nn.Sequential( # pw\n\t\t\t\tnn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True),\n\t\t\t\t# dw\n\t\t\t\tnn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n\t\t\t\tnn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True), # pw-linear\n\t\t\t\tnn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), )\n\n\tdef forward(self, x):\n\t\tif self.use_res_connect:\n\t\t\treturn x + self.conv(x)\n\t\telse:\n\t\t\treturn self.conv(x)\n\n\n\n\n\nclass MobileNetV2(nn.Module):\n\tdef __init__(self, n_class=1000, input_size=224, width_mult=1.):\n\t\tsuper(MobileNetV2, self).__init__()\n\t\tself.preprocess = Adapt2CaffeData()\n\n\t\tblock = InvertedResidual\n\t\tinput_channel = 32\n\t\tlast_channel = 1280\n\t\tinverted_residual_setting = [ # t, c, n, s\n\t\t\t[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1], ]\n\n\t\t# building first layer\n\t\tassert input_size % 32 == 0\n\t\tinput_channel = int(input_channel * width_mult)\n\t\tself.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel\n\t\tself.features = [conv_bn(3, input_channel, 2)]\n\t\t# building inverted residual blocks\n\t\tfor t, c, n, s in inverted_residual_setting:\n\t\t\toutput_channel = int(c * width_mult)\n\t\t\tfor i in range(n):\n\t\t\t\tif i == 0:\n\t\t\t\t\tself.features.append(block(input_channel, output_channel, s, expand_ratio=t))\n\t\t\t\telse:\n\t\t\t\t\tself.features.append(block(input_channel, output_channel, 1, expand_ratio=t))\n\t\t\t\tinput_channel = output_channel\n\t\t# building last several layers\n\t\tself.features.append(conv_1x1_bn(input_channel, self.last_channel))\n\t\t# make it nn.Sequential\n\t\tself.features = nn.Sequential(*self.features)\n\n\t\t# building classifier\n\t\tself.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, n_class), )\n\n\t\tself._initialize_weights()\n\n\tdef forward(self, x):\n\t\tx = self.preprocess(x)\n\t\tx = self.features(x)\n\t\tx = x.mean(3).mean(2)\n\t\tx = self.classifier(x)\n\t\treturn x\n\n\tdef _initialize_weights(self):\n\t\tfor m in self.modules():\n
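\t\t\t# He (Kaiming) initialization for convolutions: zero-mean normal with std sqrt(2 / fan_out)\n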
\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\tn = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n\t\t\t\tm.weight.data.normal_(0, math.sqrt(2. / n))\n\t\t\t\tif m.bias is not None:\n\t\t\t\t\tm.bias.data.zero_()\n\t\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.weight.data.fill_(1)\n\t\t\t\tm.bias.data.zero_()\n\t\t\telif isinstance(m, nn.Linear):\n\t\t\t\tn = m.weight.size(1)\n\t\t\t\tm.weight.data.normal_(0, 0.01)\n\t\t\t\tm.bias.data.zero_()\n\n\ndef mnet_v2(pretrained=False):\n\tmodel = MobileNetV2()\n\tif pretrained:\n\t\turl = \"http://file.lzhu.me/pytorch/models/mobilenet_v2-ecbe2b56.pth.tar\"\n\t\tfp = model_zoo.load_url(url, map_location=\"cpu\")\n\t\tmodel.load_state_dict(fp, strict=False)\n\treturn model\n\nfrom .modules.layers import ConvLayer\n\nclass mobilenetv2(Network):\n\tdef __init__(self):\n\t\tNetwork.__init__(self)\n\t\tself._feat_stride = [16, ]\n\t\tself._feat_compress = [1. / float(self._feat_stride[0]), ]\n\t\tself._depth_multiplier = cfg.MOBILENET.DEPTH_MULTIPLIER\n\t\tself._net_conv_channels = 320\n\t\tself._fc7_channels = 1280\n\n\tdef init_weights(self):\n\t\tdef normal_init(m, mean, stddev, truncated=False):\n\t\t\t\"\"\"\n\t\t\tweight initializer: truncated normal and random normal.\n\t\t\t\"\"\"\n\t\t\tif m.__class__.__name__.find('Conv') == -1:\n\t\t\t\treturn\n\t\t\tif truncated:\n\t\t\t\tm.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n\t\t\telse:\n\t\t\t\tm.weight.data.normal_(mean, stddev)\n\t\t\tif m.bias is not None: m.bias.data.zero_()\n\n\t\tself.mobilenet.apply(lambda m: normal_init(m, 0, 0.09, True))\n\t\tnormal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n\tdef _image_to_head(self):\n\t\tnet_conv = self._layers['head'](self._image)\n\t\tself._act_summaries['conv'] = net_conv\n\n\t\treturn net_conv\n\n\tdef _head_to_tail(self, pool5):\n\t\tfc7 = self._layers['tail'](pool5)\n\t\tfc7 = fc7.mean(3).mean(2)\n\t\treturn fc7\n\n\tdef _init_head_tail(self):\n\t\tself.mobilenet = mnet_v2()\n\n\t\t# Fix blocks\n\t\tassert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)\n\t\tfor m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:\n\t\t\tfor p in m.parameters():\n\t\t\t\tp.requires_grad = False\n\n\t\tdef set_bn_fix(m):\n\t\t\tclassname = m.__class__.__name__\n\t\t\tif classname.find('BatchNorm') != -1:\n\t\t\t\tfor p in m.parameters(): p.requires_grad = False\n\n\t\tself.mobilenet.apply(set_bn_fix)\n\n\t\t# Add weight decay\n\t\tdef l2_regularizer(m, wd, regu_depth):\n\t\t\tif isinstance(m, ConvLayer):\n\t\t\t\treturn\n\t\t\tif m.__class__.__name__.find('Conv') != -1:\n\t\t\t\tif regu_depth or m.groups == 1:\n\t\t\t\t\tm.weight.weight_decay = wd\n\t\t\t\telse:\n\t\t\t\t\tm.weight.weight_decay = 0\n\n\t\tself.mobilenet.apply(lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY, cfg.MOBILENET.REGU_DEPTH))\n\n\t\t# Build mobilenet.\n\t\t# self._layers['head'] = nn.Sequential(*list(self.mobilenet.children())[:12])\n\t\t# self._layers['tail'] = nn.Sequential(*list(self.mobilenet.children())[12:])\n\t\tself._layers['head'] = nn.Sequential(*list(self.mobilenet.features.children())[:-1])\n\t\tself._layers['tail'] = nn.Sequential(*list(self.mobilenet.features.children())[-1:])\n\n\tdef train(self, mode=True):\n\t\t# Override train so that the training mode is set as we want\n\t\tnn.Module.train(self, mode)\n\t\tif mode:\n\t\t\t# Set fixed blocks to be in eval mode (not really 
doing anything)\n\t\t\tfor m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:\n\t\t\t\tm.eval()\n\n\t\t\t# Set batchnorm always in eval mode during training\n\t\t\tdef set_bn_eval(m):\n\t\t\t\tclassname = m.__class__.__name__\n\t\t\t\tif classname.find('BatchNorm') != -1:\n\t\t\t\t\tm.eval()\n\n\t\t\tself.mobilenet.apply(set_bn_eval)\n\n\tdef load_pretrained_cnn_from_url(self, url=\"http://file.lzhu.me/pytorch/models/mobilenet_v2-ecbe2b56.pth.tar\"):\n\t\tfp = model_zoo.load_url(url, map_location=\"cpu\")\n\t\tself.mobilenet.load_state_dict(fp, strict=False)\n\n\tdef load_pretrained_cnn(self, state_dict):\n\t\timport warnings\n\t\twarnings.warn(\"This API should NOT be called when using MobileNet V2\", DeprecationWarning)\n\t\tprint('Warning: No available pretrained model yet')\n\t\tself.mobilenet.load_state_dict({k: state_dict['features.' + k] for k in list(self.mobilenet.state_dict())})\n\t\t# url = \"http://file.lzhu.me/pytorch/models/mobilenet_v2-ecbe2b56.pth.tar\"\n\t\t# fp = model_zoo.load_url(url, map_location=\"cpu\")\n\t\t# self.mobilenet.load_state_dict(fp)\n","sub_path":"lib/nets/mobilenet_v2.py","file_name":"mobilenet_v2.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"504809313","text":"import hashlib\nimport switchMode as switch\n\nclass main():\n def __init__(self):\n #self.godGuide2 = {}\n self.godGuide = {'avl': {}, 'b': {}, 'bplus': {}, 'dict': {}, 'isam': {}, 'json': {}, 'hash': {}}\n self.guiaModos = {}\n self.listMode = ['avl', 'hash', 'b', 'bplus', 'dict', 'isam', 'json']\n self.listEncoding = ['ascii', 'iso-8859-1', 'utf8']\n\n #---------------------STORAGE-MODE UNIFICATION FUNCTIONS----------------------#\n\n # CREATE DATABASE\n\n def createDatabase(self, database, mode, encoding='ascii'):\n if self.identify(str(database)):\n if self.verifyMode(mode):\n if not self.searchDB2(database):\n if self.verifyEncoding(encoding):\n try:\n self.godGuide[mode][database] = [{}, encoding]\n self.guiaModos[database] = mode\n switch.switchMode(mode).createDatabase(database)\n return 0\n except:\n return 1\n return 4\n return 2\n return 3\n return 1\n\n # ---------------------STORAGE-MODE ADMINISTRATION FUNCTIONS----------------------#\n\n # CHANGES THE MODE OF A TABLE\n\n def alterTableMode(self, database, table, mode):\n if self.identify(str(database)):\n if self.verifyMode(mode):\n if self.searchDB2(database):\n if self.searchTB(database, table):\n try:\n if database in switch.switchMode(mode).showDatabases():\n if table not in switch.switchMode(mode).showTables(database):\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n if table in self.godGuide[i][database][0].keys():\n lis = self.godGuide[i][database][0].pop(table)\n self.godGuide[mode][database][0][table] = lis\n tabla = self.extTB(database, table)\n self.delTB(database, table)\n switch.switchMode(mode).createTable(database, table, lis[0])\n for i in tabla:\n switch.switchMode(mode).insert(database, table, i)\n else:\n return 1\n else:\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n if table in self.godGuide[i][database][0].keys():\n encoding = self.godGuide[i][database][1]\n lis = self.godGuide[i][database][0].pop(table)\n self.godGuide[mode][database] = [{}, encoding]\n self.godGuide[mode][database][0][table] = lis\n\n #self.createDatabase(database, mode, encoding)\n switch.switchMode(mode).createDatabase(database)\n tabla = self.extTB(database, table)\n
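 # drop the old physical table, recreate it under the new storage mode, and restore its rows\n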
 self.delTB(database, table)\n switch.switchMode(mode).createTable(database, table, lis[0])\n for i in tabla:\n switch.switchMode(mode).insert(database, table, i)\n return 0\n except:\n return 1\n return 3\n return 2\n return 4\n return 1\n\n # CHANGES THE MODE OF A DATABASE\n\n def alterDatabaseMode(self, database, mode):\n if self.identify(str(database)):\n if self.verifyMode(mode):\n if self.searchDB2(database):\n try:\n for i in self.listMode:\n if i != mode:\n if database in switch.switchMode(i).showDatabases():\n if len(switch.switchMode(i).showTables(database)) == 0:\n modoA = i\n lis = self.godGuide[modoA].pop(database)\n self.guiaModos[database] = mode\n self.godGuide[mode][database] = lis\n #self.createDatabase(database, mode, lis[1])\n switch.switchMode(mode).createDatabase(database)\n else:\n modoA = i\n self.guiaModos[database] = mode\n for j in switch.switchMode(i).showTables(database):\n self.alterTableMode(database, j, mode)\n self.godGuide[modoA].pop(database)\n #self.godGuide[mode][database] = lis\n switch.switchMode(i).dropDatabase(database)\n return 0\n except:\n return 1\n return 2\n return 4\n return 1\n\n # ---------------------INDEX ADMINISTRATION FUNCTIONS----------------------#\n\n # ---------------------ENCODING ADMINISTRATION FUNCTIONS----------------------#\n\n def alterDatabaseEncoding(self, dataBase, codi):\n try:\n if codi == '' or codi == None:\n codi = 'ascii'\n leLlave = []\n for i in self.listMode: #to check whether the database exists\n if self.searchDB(dataBase, i):\n if self.verifyEncoding(codi):\n tb = self.showTables(dataBase)\n if tb != []: #check whether the database has tables\n for j in tb: #to encode each table by name\n tp = self.extractTable(dataBase, j) #fetch the tuples\n if tp != []: #to encode the tuples\n llave = self.godGuide[i][dataBase][0][j][1]\n for k in range(0,len(tp)):\n leTP = []\n for l in tp[k]:\n #to check whether it is already encoded\n if type(l) is bytes:\n x = l.decode(self.godGuide[i][dataBase][0][j][2])\n leTP += [str(x).encode(encoding= codi, errors= 'backslashreplace')]\n else:\n leTP += [str(l).encode(encoding= codi, errors= 'backslashreplace')]\n for h in llave:\n leLlave.append(tp[k][h])\n leNewtp = {}\n for n in range(0,len(leTP)):\n leNewtp[n] = leTP[n]\n self.update(dataBase,j,leNewtp,leLlave)\n leLlave = []\n self.godGuide[i][dataBase][0][j][2] = codi\n return 0\n else:\n return 3\n return 2\n except:\n return 1\n\n # ---------------------CHECKSUM GENERATION FUNCTIONS----------------------#\n\n # GENERATES THE CHECKSUM OF ALL TABLES IN A DATABASE\n\n def checksumDatabase(self, database, mode):\n modos = ['MD5', 'SHA256']\n tablas = self.showTables(database)\n tuplas = []\n tmp = \"\"\n try:\n if mode not in modos:\n return None\n for i in tablas:\n for j in self.extractTable(database, i):\n tuplas.append(j)\n for i in tuplas:\n for j in i:\n tmp += str(j)\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n encoding = self.godGuide[i][database][1]\n if mode == 'MD5':\n hash = hashlib.md5(tmp.encode(encoding))\n elif mode == 'SHA256':\n hash = hashlib.sha256(tmp.encode(encoding))\n hash = hash.hexdigest()\n print(tmp)\n return hash\n except:\n return None\n\n # GENERATES THE CHECKSUM OF A SPECIFIC TABLE\n\n def checksumTable(self, database, table, mode):\n modos = ['MD5', 'SHA256']\n tmp = \"\"\n try:\n if mode not in modos:\n return None\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n
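 # look up the table's stored encoding in the godGuide metadata before hashing its rows\n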
 if table in self.godGuide[i][j][0].keys() and j == database:\n encoding = self.godGuide[i][j][0][table][2]\n tuplas = self.extractTable(database, table)\n for i in tuplas:\n for j in i:\n tmp += str(j)\n if mode == 'MD5':\n hash = hashlib.md5(tmp.encode(encoding))\n elif mode == 'SHA256':\n hash = hashlib.sha256(tmp.encode(encoding))\n hash = hash.hexdigest()\n print(tmp)\n return hash\n except:\n return None\n\n # ---------------------DATA COMPRESSION FUNCTIONS----------------------#\n\n # ---------------------SECURITY FUNCTIONS----------------------#\n\n # ---------------------GRAPH FUNCTIONS----------------------#\n\n #---------------------DATABASE FUNCTIONS (PREVIOUS PHASE)----------------------#\n\n # LIST OF STORED DATABASES\n\n def showDatabases(self):\n re = []\n for i in self.listMode:\n re = re + switch.switchMode(i).showDatabases()\n return re\n\n # RENAME A DATABASE\n\n def alterDatabase(self, databaseOld, databaseNew):\n re = 1\n for i in self.listMode:\n if self.searchDB(databaseOld, i):\n for i in self.listMode:\n if not self.searchDB2(databaseNew):\n re = switch.switchMode(i).alterDatabase(databaseOld, databaseNew)\n if re == 0:\n\n ward = self.guiaModos.pop(databaseOld)\n self.guiaModos[databaseNew] = ward\n\n for i in self.listMode:\n if databaseOld in self.godGuide[i].keys():\n ward = self.godGuide[i].pop(databaseOld)\n self.godGuide[i][databaseNew] = ward\n return re\n\n # DROP A DATABASE\n\n def dropDatabase(self, database):\n re = 1\n for i in self.listMode:\n if self.searchDB(database, i):\n re = switch.switchMode(i).dropDatabase(database)\n if re == 0:\n self.guiaModos.pop(database)\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n self.godGuide[i].pop(database)\n return re\n\n # ---------------------TABLE FUNCTIONS----------------------#\n\n # CREATE A TABLE IN A GIVEN DATABASE\n\n def createTable(self, database, table, numberColumns):\n re = switch.switchMode(self.guiaModos[database]).createTable(database, table, numberColumns)\n if re == 0:\n mod = self.guiaModos[database]\n self.godGuide[mod][database][0][table] = [numberColumns, None, self.godGuide[self.guiaModos[database]][database][1], False]\n return re\n\n # LIST OF TABLES ADDED TO A DATABASE\n\n def showTables(self, database):\n re = []\n for i in self.listMode:\n if self.searchDB(database, i):\n re = re + switch.switchMode(i).showTables(database)\n return re\n\n # LIST OF RECORDS OF A TABLE IN A DATABASE\n\n def extractTable(self, database, table):\n re = []\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = re + switch.switchMode(i).extractTable(database, table)\n return re\n\n #LISTS RECORDS IN A RANGE OF A TABLE\n\n def extractRangeTable(self, database, table, columnNumber, lower, upper):\n re = []\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = re + switch.switchMode(i).extractRangeTable(database, table, columnNumber, lower, upper)\n return re\n\n # ADD A LIST OF PRIMARY KEYS TO A TABLE\n\n def alterAddPK(self, database, table, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterAddPK(database, table, columns)\n
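 # mirror the primary-key change into the in-memory godGuide metadata\n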
 if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][1] = columns\n return re\n\n # DROP THE PRIMARY KEYS OF A TABLE\n\n def alterDropPK(self, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterDropPK(database, table)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][1] = None\n return re\n\n # RENAME A TABLE\n\n def alterTable(self, database, tableOld, tableNew):\n for i in self.listMode:\n if self.searchDB(database, i):\n if tableOld in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterTable(database, tableOld, tableNew)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if tableOld in self.godGuide[i][j][0].keys() and j == database:\n ward = self.godGuide[i][j][0].pop(tableOld)\n self.godGuide[i][j][0][tableNew] = ward\n return re\n\n # ADD A NEW COLUMN TO EXISTING TABLES\n\n def alterAddColumn(self, database, table, default):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterAddColumn(database, table, default)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][0] += 1\n return re\n\n # DROP A SPECIFIC COLUMN FROM A TABLE\n\n def alterDropColumn(self, database, table, columnNumber):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterDropColumn(database, table, columnNumber)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][0] -= 1\n return re\n\n # DROP A TABLE FROM THE DATABASE\n\n def dropTable(self, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).dropTable(database, table)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0].pop(table)\n return re\n\n # ---------------------TUPLE FUNCTIONS----------------------#\n\n # ADD RECORDS TO A TABLE\n\n def insert(self, database, table, register):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).insert(database, table, register)\n\n # LOAD RECORDS FROM A CSV FILE\n\n def loadCSV(self, file, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).loadCSV(file, database, table)\n\n # RECORD BY PRIMARY KEY\n\n def extractRow(self, database, table, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).extractRow(database, table, columns)\n\n
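 # NOTE: each tuple-level operation below locates the storage mode that owns the database and delegates the call to that backend via switch.switchMode()\n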
 # UPDATES A SPECIFIC RECORD\n\n def update(self, database, table, register, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).update(database, table, register, columns)\n\n # DELETES A SPECIFIC RECORD\n\n def delete(self, database, table, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).delete(database, table, columns)\n\n # DELETES ALL RECORDS OF A TABLE\n\n def truncate(self, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).truncate(database, table)\n\n # -------------------------UTILITIES-------------------------#\n\n def identify(self, id):\n id = str(id)\n if id[0].isalpha():\n return True\n else:\n if id[0].isdigit():\n return False\n return False\n\n def verifyMode(self, mode):\n if mode in self.listMode:\n return True\n return False\n\n def verifyEncoding(self, encoding):\n if encoding in self.listEncoding:\n return True\n return False\n\n def searchDB(self, key, mode):\n if key in switch.switchMode(mode).showDatabases():\n return True\n return False\n\n def searchDB2(self, key):\n for i in self.listMode:\n if key in switch.switchMode(i).showDatabases():\n return True\n return False\n\n def searchTB(self, database, table):\n for i in self.listMode:\n for j in switch.switchMode(i).showDatabases():\n if table in switch.switchMode(i).showTables(j):\n return True\n return False\n\n def extTB(self, database, table):\n for i in self.listMode:\n for j in switch.switchMode(i).showDatabases():\n if table in switch.switchMode(i).showTables(j):\n return switch.switchMode(i).extractTable(j, table)\n\n def delTB(self, database, table):\n for i in self.listMode:\n for j in switch.switchMode(i).showDatabases():\n if table in switch.switchMode(i).showTables(j):\n switch.switchMode(i).dropTable(j, table)\n return None\n\n","sub_path":"storage/fase2/team17/storage/mainMode.py","file_name":"mainMode.py","file_ext":"py","file_size_in_byte":20462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"358684093","text":"#!/usr/bin/env python3\n\n\"\"\" Helpful utilities, used by various testcases in testing dlist_node. 
\"\"\"\n\n\n\n# NOTE: It's not necessary to import the dlist_node file, since we don't actually\n# create any *new* objects in these functions.\n\n\n\ndef dlist_is_consistent(head):\n \"\"\" Returns True if the list appears to be valid (that is, has all the\n proper next/prev pointers, and the head is actually the head of the list)\n\n Note that head is a pointer to what we believe is the head of the list\n (might be none).\n \"\"\"\n\n if head is None:\n # trivially OK!\n return True\n\n if head.prev is not None:\n print(f\"ERROR: The node that we think is the head of the list (the node containing {head.val}) has a non-None prev pointer.\")\n return False\n\n cur = head\n while cur is not None:\n if cur.prev is not None:\n if cur.prev.next is not cur:\n print(\"ERROR: next/prev mismatch detected.\")\n print(f\" current node: id={id(cur)} value: {cur.val}\")\n print(f\" prev node: id={id(cur.prev)} value: {cur.prev.val}\")\n print(f\" prev.next node: id={id(cur.prev.next)} --- value not printed ---\")\n return False\n else:\n if cur is not head:\n print(f\"ERROR: The list node containing {cur.val} is not the head, but its prev link is None.\")\n return False\n\n cur = cur.next\n\n return True\n\n\n\ndef dlist_to_str(dllist):\n \"\"\" convert a doubly-linked list to a string \"\"\"\n if dllist is None:\n return \"None\"\n else:\n cur = dllist \n vals, objs = [], []\n while cur is not None:\n cur_str = str(cur.val)\n if cur in objs:\n vals.append(cur_str+\" <=> ... (to infinity and beyond)\")\n break\n else:\n vals.append(cur_str)\n objs.append(cur)\n cur = cur.next\n\n return \" <=> \".join(vals)\n\n\n","sub_path":"python02/proj09_short/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"81729138","text":"# -*- coding: utf-8 -*-\nimport configparser\nfrom manager.config.NewConfigParser import NewConfigParser\nfrom os import path\ndef get_config(): \n cp = NewConfigParser()\n cp.read(path.join(path.dirname(path.abspath(__file__)), 'manager.conf'), encoding='utf-8')\n config = {}\n for section in cp.sections():\n config[section] = {}\n for option in cp.options(section):\n config[section][option] = cp.get(section, option) \n return config \n","sub_path":"manager/config/GetConfig.py","file_name":"GetConfig.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"462284996","text":"import unittest\n\nimport falcon\nimport falcon.testing as testing\nfrom mock import MagicMock\nfrom mock import patch\n\nfrom meniscus.openstack.common import jsonutils\nfrom meniscus.api.pairing.resources import PairingConfigurationResource\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(WhenTestingPairingConfigurationResource())\n return suite\n\n\nclass WhenTestingPairingConfigurationResource(testing.TestBase):\n def before(self):\n self.configuration = {\n 'pairing_configuration': {\n \"api_secret\": \"ce20a1f3-151b-4302-ad42-52d91349fe8b\",\n \"coordinator_uri\": \"http://localhost:8080/v1\",\n \"personality\": \"worker\"\n }\n }\n self.configuration_bad_secret = {\n 'pairing_configuration': {\n \"api_secret\": \"this is not a uuid\",\n \"coordinator_uri\": \"http://localhost:8080/v1\",\n \"personality\": \"worker\"\n }\n }\n self.configuration_bad_personality = {\n 'pairing_configuration': {\n \"api_secret\": \"ce20a1f3-151b-4302-ad42-52d91349fe8b\",\n \"coordinator_uri\": 
\"http://localhost:8080/v1\",\n \"personality\": \"invalid_personality\"\n }\n }\n self.resource = PairingConfigurationResource()\n self.test_route = '/v1/pairing/configure'\n self.api.add_route(self.test_route, self.resource)\n\n def test_should_return_400_on_bad_secret(self):\n with patch('meniscus.api.pairing.resources.PairingProcess',\n MagicMock()):\n self.simulate_request(\n self.test_route,\n method='POST',\n headers={\n 'content-type': 'application/json',\n },\n body=jsonutils.dumps(self.configuration_bad_secret))\n self.assertEqual(falcon.HTTP_400, self.srmock.status)\n\n def test_should_return_400_on_bad_personality(self):\n with patch('meniscus.api.pairing.resources.PairingProcess',\n MagicMock()):\n self.simulate_request(\n self.test_route,\n method='POST',\n headers={\n 'content-type': 'application/json',\n },\n body=jsonutils.dumps(self.configuration_bad_personality))\n self.assertEqual(falcon.HTTP_400, self.srmock.status)\n\n def test_should_return_200_on_post(self):\n with patch('meniscus.api.pairing.resources.PairingProcess',\n MagicMock()):\n self.simulate_request(\n self.test_route,\n method='POST',\n headers={\n 'content-type': 'application/json',\n },\n body=jsonutils.dumps(self.configuration))\n self.assertEqual(falcon.HTTP_200, self.srmock.status)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"meniscus/tests/api/pairing/resources_test.py","file_name":"resources_test.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"475319257","text":"#!/usr/bin/env python3\n#coding:UTF-8\n\"\"\"#coding=UTF-8 专门为版本2 解决默认编码问题,可以识别中文\"\"\"\n\nimport sys\n\nsys.stdout.write('\\033[32;46;1m__name__ is %s\\n\\033[0m' % __name__)\n\nclass Factoryclass:\n def __init__(self, ph, email):\n self.ph = ph\n self.email = email\n\n def update_ph(self, newph):\n self.ph = newph\n\n def get_ph(self):\n return self.ph\n\nclass BearToy:\n\n #实例化类 产生的一个实例 默认会调用的方法__init__函数\n\n def __init__(self, size, color, ph, email):\n self.size = size\n self.color = color\n self.factory = Factoryclass(ph, email)\n\n def sing(self,song): #self必不可少\n print('lalala...', song)\n \n def update_color(self, newcolor):\n self.color = newcolor\n def get_color(self):\n return self.color\n\nclass NewBearToy(BearToy): #在圆括号中写明从哪个父类继承\n def run(self):\n print('running ----------')\n def sing(self): #self必不可少,子类覆盖父的同名方法\n print('lalala...song....NewBearToy(BearToy)....')\n\n\n\n\nif __name__ == '__main__':\n sys.stdout.write('\\033[31;47;1msys.argv is %s\\n\\033[0m' % sys.argv)\n tidy = BearToy('small', 'orange', 123456, 'xixi@qq.com')\n print(tidy)\n #<__main__.BearToy object at 0x7fbd56db6320>\n print(type(tidy))\n #\n print(tidy.size, tidy.color,sep= ' --- ')\n #small --- orange\n print(tidy.sing('hehehe'))\n #lalala... hehehe\n #None\n print('---------------')\n print(BearToy('larger','brown', 123, 'xx@qq.com'))\n #<__main__.BearToy object at 0x7f7bd14a64e0>\n\n print(type(BearToy('larger','brown', 123, 'xx@qq.com')))\n #\n\n print(BearToy('larger','brown', 123, 'xx@qq.com').sing('newbeartoySing'))\n #lalala... newbeartoySing\n #None\n \n tidy.color = 'red' #不推荐使用这样的用法\n print(tidy.size, tidy.color)\n #small red\n\n tidy.update_color('green')\n\n print(tidy.size, tidy.get_color())\n #small green\n\n\n tidy2 = BearToy('small', 'orange', 1234, 'hiys@163.com')\n\n print(tidy2.factory.get_ph())\n\n b1 = NewBearToy('larger','brown', 123, 'xx@qq.com')\n# b1.sing('yiyiyiyi---')\n #lalala... 
# b1.sing('yiyiyiyi---')\n #lalala... yiyiyiyi---\n b1.run()\n #running ----------\n b1.sing()\n #lalala...song....NewBearToy(BearToy)....\n \n print(BearToy.sing(tidy2,'sssssss---tidy2---')) #rarely used: running the method by borrowing the class identity\n #lalala... sssssss---tidy2---\n #None\n \n\n\n","sub_path":"day07/ooptoy.py","file_name":"ooptoy.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"547176906","text":"#!/usr/bin/env python\n#coding:utf-8\nimport commands\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.utils import parseaddr, formataddr\nimport smtplib\n\nclass Send_Messages(object):\n\n def __init__(self):\n self.from_ddr = 'lihongwei549@163.com'\n self.password = 'Lhw17733244915'\n self.smtp_server = 'smtp.163.com'\n\n def _format_addr(self, s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\n def send(self, to_addr, messages):\n msg = MIMEText(messages, 'plain', 'utf-8')\n msg['From'] = self._format_addr('EasyMonitor <%s>' % self.from_ddr)\n msg['To'] = self._format_addr('Administrator <%s>' % to_addr)\n msg['Subject'] = Header('Server status warning', 'utf-8').encode()\n server = smtplib.SMTP(self.smtp_server, 25)\n server.login(self.from_ddr, self.password)\n server.sendmail(self.from_ddr, [to_addr], msg.as_string())\n server.quit()\n\n\nclass Monitor_ecs(Send_Messages):\n\n def __init__(self):\n super(Monitor_ecs, self).__init__()\n\n def shell_cpu(self):\n shell_command = \"top -b -n 2 |grep 'Cpu(s):' |awk '{print $2}' |sort -rn |tail -1\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n cpu = None\n else:\n cpu = result.split('%')[0]\n return cpu\n\n def shell_gdis(self):\n shell_command = \"df -h |sed -n '2'p |awk '{print $2,$3,$4,$5}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n gdisk = None\n else:\n Size, Used, Avail, Use = result.split()[0:]\n gdisk = Use.split('%')[0]\n return gdisk\n\n def shell_opt(self):\n shell_command = \"df -h |grep '/u01' |awk '{print $2,$3,$4,$5}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n disk_u01 = None\n else:\n Size, Used, Avail, Use = result.split()[0:]\n disk_u01 = Use.split('%')[0]\n return disk_u01\n\n def shell_mem(self):\n shell_command = \"free -m |egrep 'buff|Mem' | sed s/Mem:/11\\ Mem:/g |awk '{print $3}' |xargs |awk ' { print $3/$2*100 }'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n mem = None\n else:\n mem = result.split()[0]\n return mem\n\n def shell_w(self):\n shell_command = \"w |grep 'load average' |awk '{print $(NF-2) $(NF-1) $NF}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n load = None\n else:\n five_min, ten_min, fif_min = result.split(',')[0:]\n load = fif_min\n return load\n\n def shell_inode(self):\n shell_command = \"df -i | grep -E '/|u01' | sort -rn -k 5 | head -n 1 | awk '{print $(NF-1)}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n inode = None\n else:\n use = result.split()[0:]\n inode = use[0].split('%')[0]\n return inode\n\ndef main(list, host):\n obj = Monitor_ecs()\n cpu = obj.shell_cpu()\n if cpu != None:\n if float(cpu) > 80:\n messages = host + \":cpu is currently:\" + cpu + \"%\"\n for i in list:\n obj.send(i, messages)\n\n mem = obj.shell_mem()\n if mem != None:\n if float(mem) > 80:\n messages = host + \":mem is currently:\" + mem + \"%\"\n for i in list:\n obj.send(i, messages)\n\n
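 # each metric below is compared against a fixed alert threshold; on a breach a warning mail is sent to every address in the recipient list\n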
 load = obj.shell_w()\n if load != None:\n if float(load) > 15:\n messages = host + \":load is currently:\" + load\n for i in list:\n obj.send(i, messages)\n\n gdisk = obj.shell_gdis()\n if gdisk != None:\n if float(gdisk) > 90:\n messages = host + \":root partition usage is currently:\" + gdisk + \"%\"\n for i in list:\n obj.send(i, messages)\n\n disk_u01 = obj.shell_opt()\n if disk_u01 != None:\n if float(disk_u01) > 90:\n messages = host + \":disk_u01 usage is currently:\" + disk_u01 + \"%\"\n for i in list:\n obj.send(i, messages)\n\n inode = obj.shell_inode()\n if inode != None:\n if float(inode) > 95:\n messages = host + \":inode usage is currently:\" + inode + \"%\"\n for i in list:\n obj.send(i, messages)\n\nif __name__ == \"__main__\":\n\n '''configPath'''\n to_addr_list = [\n 'lihw@cloudcc.com',\n 'zuogc@cloudcc.com',\n ]\n hostname = \"tomcat1\"\n\n '''MAN'''\n main(to_addr_list, hostname)\n","sub_path":"PyScripts/monitor_ecs.py","file_name":"monitor_ecs.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"344757453","text":"#!/usr/bin/env python3\nimport numpy as np\nfrom sympy import *\nfrom scipy.special import comb\nfrom itertools import combinations\n\ndef GaussHermitePC(n,p):\n if n==1:\n xi = symbols('xi')\n Hp = Matrix([((1/sqrt(2))**i)*hermite(i, xi/sqrt(2)) for i in range(p+1)])\n psi = Hp\n return psi\n else:\n xi = symbols('xi')\n Hp = Matrix([((1/sqrt(2))**i)*hermite(i, xi/sqrt(2)) for i in range(p+1)])\n xi_num = [symbols('xi'+str(i)) for i in range(1,n+1)]\n Hp_mv = zeros(p+1,n)\n for i in range(n):\n for j in range(p+1):\n Hp_mv[j,i] = Hp[j].subs([(xi,xi_num[i])])\n psi_size = int(comb(n+p,p))\n psi = zeros(psi_size,1)\n index = [np.zeros((1,n),dtype='float32')]\n for i in range(1,p+1):\n numi = np.array(list(combinations(list(range(1,n+i)),n-1)))\n num1 = np.zeros((numi.shape[0],1),dtype='float32')\n num2 = (n+i) + num1\n concat = np.hstack((num1,numi,num2))\n indexi = np.flipud(np.diff(concat,n=1,axis=1))-1\n index = index + indexi.tolist()\n if not np.allclose(np.sum(indexi,axis=1), i *np.ones((int(comb(n+i-1,n-1)),1))):\n print('The sum of each row has to be equal to p-th order')\n return\n index_mat = np.vstack(index)\n for i in range(1, psi_size+1):\n mult_s = 1\n for j in range(n):\n mult_s = mult_s * Hp_mv[int(index_mat[i-1][j]),j]\n psi[i-1] = mult_s\n return psi\n \nif __name__ == \"__main__\":\n psi2 = GaussHermitePC(7,1)\n init_printing()\n print(psi2)\n","sub_path":"src/mstar_guidance/src/stoctrajopt/stoctrajopt/gPC_toolbox/GaussHermitePC.py","file_name":"GaussHermitePC.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"339892128","text":"#!/usr/bin/env python\n\nimport subprocess\n\n\"\"\"\nAn ssh based command dispatch system\n\"\"\"\nmachines = [ \"10.10.10.28\",\n\"10.10.10.29\",\n\"10.10.10.30\",\n\"10.10.10.31\",\n\"10.10.10.32\"]\n\ncmd = \"uname\"\nfor machine in machines:\n subprocess.call(\"printf '%s OS type is: ';ssh root@%s %s\" % (machine,machine,cmd),shell=True)\n","sub_path":"python/dispath.py","file_name":"dispath.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"626901325","text":"sal = int(input('salary?'))\nimp = 27.0\nwhile imp > 0.:\n imp = input(\"tax, or (s) to quit\")\n if not imp:\n imp = 27.\n elif imp == 's':\n break\n else:\n imp = float(imp)\n print(\"Net value: {0}\".format(sal - (sal * (imp * 
0.01))))\n","sub_path":"cap3/salario-while.py","file_name":"salario-while.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"357394460","text":"import logging\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, List, Optional\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import QuerySet\nfrom django.template.defaultfilters import truncatechars_html\nfrom django.utils.html import format_html\n\nfrom entities.serializers import EntitySerializer\nfrom modularhistory.constants.strings import EMPTY_STRING\nfrom modularhistory.fields import ArrayField, HistoricDateTimeField, HTMLField\nfrom modularhistory.models import (\n ModelWithComputations,\n ModelWithImages,\n ModelWithRelatedEntities,\n ModelWithRelatedQuotes,\n TaggableModel,\n TypedModel,\n retrieve_or_compute,\n)\nfrom modularhistory.structures import HistoricDateTime as DateTime\n\nif TYPE_CHECKING:\n from entities.models import Categorization\n\nNAME_MAX_LENGTH: int = 100\n\nTRUNCATED_DESCRIPTION_LENGTH: int = 1200\n\nPARTS_OF_SPEECH = (\n ('noun', 'noun'),\n ('adj', 'adjective'),\n ('any', 'noun / adjective'),\n)\n\n\nclass Entity(\n TypedModel,\n TaggableModel,\n ModelWithComputations,\n ModelWithImages,\n ModelWithRelatedQuotes,\n ModelWithRelatedEntities,\n):\n \"\"\"An entity.\"\"\"\n\n name = models.CharField(max_length=NAME_MAX_LENGTH, unique=True)\n unabbreviated_name = models.CharField(\n max_length=NAME_MAX_LENGTH, unique=True, null=True, blank=True\n )\n aliases = ArrayField(\n models.CharField(max_length=NAME_MAX_LENGTH), null=True, blank=True\n )\n birth_date = HistoricDateTimeField(null=True, blank=True)\n death_date = HistoricDateTimeField(null=True, blank=True)\n description = HTMLField(null=True, blank=True, paragraphed=True)\n categories = models.ManyToManyField(\n 'entities.Category',\n through='entities.Categorization',\n related_name='entities',\n blank=True,\n )\n images = models.ManyToManyField(\n 'images.Image',\n through='entities.EntityImage',\n related_name='entities',\n blank=True,\n )\n affiliated_entities = models.ManyToManyField(\n 'self', through='entities.Affiliation', blank=True\n )\n\n class Meta:\n \"\"\"\n Meta options for the Entity model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Entities'\n ordering = ['name']\n\n searchable_fields = ['name', 'aliases', 'description']\n serializer = EntitySerializer\n\n def __str__(self) -> str:\n \"\"\"Return the string representation of the entity.\"\"\"\n return f'{self.name}'\n\n def save(self, *args, **kwargs):\n \"\"\"Save the entity to the database.\"\"\"\n self.clean()\n super().save(*args, **kwargs)\n\n def clean(self):\n \"\"\"Prepare the entity to be saved.\"\"\"\n super().clean()\n if not self.unabbreviated_name:\n self.unabbreviated_name = self.name\n if self.type == 'entities.entity' or not self.type:\n raise ValidationError('Entity must have a type.')\n else:\n # Prevent a RuntimeError when saving a new publication\n self.recast(self.type)\n\n @property\n def has_quotes(self) -> bool:\n \"\"\"Return whether the entity has any attributed quotes.\"\"\"\n return bool(len(self.quotes.all()))\n\n @property\n def name_html(self) -> str:\n \"\"\"Return an HTML string of the entity's name.\"\"\"\n logging.debug(f'Getting name_html for {self}')\n return format_html(\n f'{self.name}'\n )\n\n @property\n def 
truncated_description(self) -> str:\n \"\"\"Return the entity's description, truncated.\"\"\"\n return format_html(\n truncatechars_html(self.description, TRUNCATED_DESCRIPTION_LENGTH)\n )\n\n def get_categorization(self, date: DateTime) -> Optional['Categorization']:\n \"\"\"Return the most applicable categorization based on the date.\"\"\"\n if not self.categories.exists():\n return None\n categorizations = self.categorizations.all()\n categorizations = (\n categorizations.exclude(date__gt=date) if date else categorizations\n )\n if not len(categorizations):\n categorizations = self.categorizations.all()\n return categorizations.order_by('date', 'category__weight').last()\n\n def get_categorizations(\n self, date: Optional[DateTime] = None\n ) -> 'QuerySet[Categorization]':\n \"\"\"Return a list of all applicable categorizations.\"\"\"\n categorizations = (\n self.categorizations.exclude(date__gt=date)\n if date\n else self.categorizations.all()\n )\n return categorizations.select_related('category')\n\n @retrieve_or_compute(attribute_name='categorization_string')\n def get_categorization_string(self, date: Optional[DateTime] = None) -> str:\n \"\"\"Intelligently build a categorization string, like `liberal scholar`.\"\"\"\n categorizations: 'QuerySet[Categorization]' = self.get_categorizations(date)\n if categorizations:\n # Build the string\n categorization_words: List[str] = []\n for part_of_speech in ('noun', 'any', 'adj'):\n pos_categorizations = categorizations.filter(\n category__part_of_speech=part_of_speech\n )\n if pos_categorizations.exists():\n categorization_str = str(\n pos_categorizations.order_by('category__weight', 'date').last()\n )\n words = [\n word\n for word in categorization_str.split(' ')\n if word not in categorization_words\n ]\n categorization_words = words + categorization_words\n # Remove duplicate words\n categorization_words = list(dict.fromkeys(categorization_words))\n return ' '.join(categorization_words)\n return EMPTY_STRING\n\n\nclass Person(Entity):\n \"\"\"A person.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Person model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'People'\n\n\nclass Deity(Entity):\n \"\"\"A deity.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Deity model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Deities'\n\n\nclass Group(Entity):\n \"\"\"A group of people.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Group model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Groups'\n\n\nclass Organization(Entity):\n \"\"\"An organization.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Organization model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Organizations'\n\n @property\n def founding_date(self) -> datetime:\n \"\"\"Return the date the organization was founded.\"\"\"\n return self.birth_date\n","sub_path":"entities/models/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"375312052","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"Main entry point\n\"\"\"\nfrom pyramid.config import Configurator\nfrom metlog.config import client_from_stream_config\nfrom campaign.resources import Root\nfrom campaign.storage.sql import Storage\nfrom mozsvc.config import load_into_settings\nfrom mozsvc.middlewares import _resolve_name\n\n\ndef get_group(group_name, dictionary):\n if group_name is None:\n return dictionary\n else:\n result = {}\n trim = len(group_name) + 1\n for key in filter(lambda x: x.startswith(group_name), dictionary):\n result[key[trim:]] = dictionary[key]\n return result\n\ndef configure_from_settings(object_name, settings):\n config = dict(settings)\n if 'backend' not in config:\n if '%s.backend' % object_name in config:\n config = get_group(object_name, config)\n cls = _resolve_name(config.pop('backend'))\n return cls(**config)\n\n\ndef main(global_config, **settings):\n load_into_settings(global_config['__file__'], settings)\n config = Configurator(root_factory=Root, settings=settings)\n config.include(\"cornice\")\n config.include(\"pyramid_beaker\")\n config.include(\"mozsvc\")\n config.scan(\"campaign.views\")\n config.registry['storage'] = Storage(config)\n config.registry['auth'] = configure_from_settings('auth',\n settings['config'].get_map('auth'))\n metlog_client = client_from_stream_config(\n open(global_config['__file__'], 'r'),\n 'metlog')\n config.registry['metlog'] = metlog_client\n return config.make_wsgi_app()\n\n\nclass LOG:\n EMERGENCY = 0\n ALERT = 1\n CRITICAL = 2\n ERROR = 3\n WARNING = 4\n NOTICE = 5\n INFORMATIONAL = 6\n DEBUG = 7\n","sub_path":"campaign/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"553209483","text":"from tensorflow.keras.layers import Input, concatenate, Dropout, Activation, MaxPooling2D, Convolution2D, \\\n AveragePooling2D, BatchNormalization, Reshape, Conv2DTranspose, Cropping2D\nfrom tensorflow.keras.models import Model\n\n\"\"\"\nImplementation of Inception Network v4 [Inception Network v4 Paper](http://arxiv.org/pdf/1602.07261v1.pdf) in Keras.\n\"\"\"\nchannel_axis = 3\n\n\ndef conv_block(x, nb_filter, nb_row, nb_col, padding='same', subsample=(1, 1), bias=False):\n x = Convolution2D(filters=nb_filter, kernel_size=(nb_row, nb_col), strides=subsample, padding=padding, use_bias=bias)(x)\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation('relu')(x)\n return x\n\n\ndef inception_stem(input):\n # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)\n x = conv_block(input, 32, 3, 3, subsample=(2, 2), padding='valid')\n x = conv_block(x, 32, 3, 3, padding='valid')\n x = conv_block(x, 64, 3, 3)\n\n x1 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)\n x2 = conv_block(x, 96, 3, 3, subsample=(2, 2), padding='valid')\n\n x = concatenate([x1, x2], axis=channel_axis)\n\n x1 = conv_block(x, 64, 1, 1)\n x1 = conv_block(x1, 96, 3, 3, padding='valid')\n\n x2 = conv_block(x, 64, 1, 1)\n x2 = conv_block(x2, 64, 1, 7)\n x2 = conv_block(x2, 64, 7, 1)\n x2 = conv_block(x2, 96, 3, 3, padding='valid')\n\n x = concatenate([x1, x2], axis=channel_axis)\n\n x1 = conv_block(x, 192, 3, 3, subsample=(2, 2), padding='valid')\n x2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)\n\n x = concatenate([x1, x2], axis=channel_axis)\n return x\n\n\ndef inception_A(input):\n a1 = conv_block(input, 96, 1, 1)\n\n a2 = 
conv_block(input, 64, 1, 1)\n a2 = conv_block(a2, 96, 3, 3)\n\n a3 = conv_block(input, 64, 1, 1)\n a3 = conv_block(a3, 96, 3, 3)\n a3 = conv_block(a3, 96, 3, 3)\n\n a4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)\n a4 = conv_block(a4, 96, 1, 1)\n\n m = concatenate([a1, a2, a3, a4],axis=channel_axis)\n return m\n\n\ndef inception_B(input):\n b1 = conv_block(input, 384, 1, 1)\n\n b2 = conv_block(input, 192, 1, 1)\n b2 = conv_block(b2, 224, 1, 7)\n b2 = conv_block(b2, 256, 7, 1)\n\n b3 = conv_block(input, 192, 1, 1)\n b3 = conv_block(b3, 192, 7, 1)\n b3 = conv_block(b3, 224, 1, 7)\n b3 = conv_block(b3, 224, 7, 1)\n b3 = conv_block(b3, 256, 1, 7)\n\n b4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)\n b4 = conv_block(b4, 128, 1, 1)\n\n m = concatenate([b1, b2, b3, b4], axis=channel_axis)\n return m\n\n\ndef inception_C(input):\n c1 = conv_block(input, 256, 1, 1)\n\n c2 = conv_block(input, 384, 1, 1)\n c2_1 = conv_block(c2, 256, 1, 3)\n c2_2 = conv_block(c2, 256, 3, 1)\n c2 = concatenate([c2_1, c2_2], axis=channel_axis)\n\n c3 = conv_block(input, 384, 1, 1)\n c3 = conv_block(c3, 448, 3, 1)\n c3 = conv_block(c3, 512, 1, 3)\n c3_1 = conv_block(c3, 256, 1, 3)\n c3_2 = conv_block(c3, 256, 3, 1)\n c3 = concatenate([c3_1, c3_2], axis=channel_axis)\n\n c4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)\n c4 = conv_block(c4, 256, 1, 1)\n\n m = concatenate([c1, c2, c3, c4], axis=channel_axis)\n return m\n\n\ndef reduction_A(input):\n r1 = conv_block(input, 384, 3, 3, subsample=(2, 2), padding='valid')\n\n r2 = conv_block(input, 192, 1, 1)\n r2 = conv_block(r2, 224, 3, 3)\n r2 = conv_block(r2, 256, 3, 3, subsample=(2, 2), padding='valid')\n\n r3 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)\n\n m = concatenate([r1, r2, r3], axis=channel_axis)\n return m\n\n\ndef reduction_B(input):\n r1 = conv_block(input, 192, 1, 1)\n r1 = conv_block(r1, 192, 3, 3, subsample=(2, 2), padding='valid')\n\n r2 = conv_block(input, 256, 1, 1)\n r2 = conv_block(r2, 256, 1, 7)\n r2 = conv_block(r2, 320, 7, 1)\n r2 = conv_block(r2, 320, 3, 3, subsample=(2, 2), padding='valid')\n\n r3 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)\n\n m = concatenate([r1, r2, r3], axis=channel_axis)\n return m\n\n\ndef create_model():\n '''\n Creates a inception v4 network\n\n :return: Keras Model with 1 input and 1 output\n '''\n\n init = Input((400, 608, 1))\n\n # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)\n x = inception_stem(init)\n\n # 4 x Inception A\n for i in range(4):\n x = inception_A(x)\n\n # Reduction A\n x = reduction_A(x)\n\n # 7 x Inception B\n for i in range(7):\n x = inception_B(x)\n\n # Reduction B\n x = reduction_B(x)\n\n # 3 x Inception C\n for i in range(3):\n x = inception_C(x)\n\n # Average Pooling\n x = AveragePooling2D((2, 2))(x) # was (8,8)\n\n # Dropout\n x = Dropout(0.8)(x)\n #x = Flatten()(x)\n\n x = Reshape((240, 256, 1))(x)\n x = Conv2DTranspose(filters=1, kernel_size=(2, 2), strides=2, padding='same')(x)\n # output = Conv2D(1, (1, 1), padding=\"same\", activation=None)(uconv1)\n x = Cropping2D(cropping=(139, 105))(x) # crop of (400,304) to (202,302)\n\n model = Model(init, x, name='inception_v4')\n\n return model\n\n\nmodel = create_model()\nmodel.summary()\n","sub_path":"inception_v4_fwi.py","file_name":"inception_v4_fwi.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"227469918","text":"import numpy as np\n\ndef 
kalman_filter(mu, sig):\n for n, measurement in enumerate(measurements):\n mu_bar = A * mu + B * u\n sig_bar = A * sig * A.transpose()\n\n s = C * sig_bar * C.transpose() + Q\n K = sig_bar * C.transpose() * np.linalg.inv(s)\n\n z = np.matrix([[measurement]])\n mu = mu_bar + K * (z - C * mu_bar)\n sig = (I - K * C) * sig_bar\n return mu, sig\n\nmeasurements = [1, 2, 3, 4, 5]\n\nmu = np.matrix([[0.], [0.]])\nsig = np.matrix([[1000., 0.], [0., 1000.]])\nu = np.matrix([[0.], [0.]])\nA = np.matrix([[1., 1.], [0, 1.]])\nC = np.matrix([[1., 0.]])\nQ = np.matrix([[1.]])\nI = np.eye(2)\nB = np.eye(2)\n\nprint(kalman_filter(mu, sig))","sub_path":"KalmanFilter/kalman_nd.py","file_name":"kalman_nd.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"149126270","text":"# coding=utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport os\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nlogging.basicConfig(level=logging.DEBUG)\n\nsessionStorage = {}\n\nskill_id = \"caee0d3a-e0ff-4720-a4ac-e45205aee08b\"\ntoken = \"AgAAAAAIOCSpAAT7o82ir0CsuUqWn1L6FO9DXZE\"\n\n\n@app.route(\"/\", methods=['POST'])\ndef main():\n logging.info('Request: %r', request.json)\n\n response = {\n \"version\": request.json['version'],\n \"session\": request.json['session'],\n \"response\": {\n \"end_session\": False\n }\n }\n\n handle_dialog(request.json, response)\n\n logging.info('Response: %r', response)\n\n return json.dumps(\n response,\n ensure_ascii=False,\n indent=2\n )\n\n\ndef handle_dialog(req, res):\n user_id = req['session']['user_id']\n\n url = \"https://ru.meming.world/wiki/Special:Random\"\n page = requests.get(url)\n soup = BeautifulSoup(page.text, \"html.parser\")\n mainText = soup.find_all('h1')[0].get_text()\n images = soup.findAll('img')\n mainImageUrl = \"https://ru.meming.world/\" + images[0]['src']\n\n skillsUrl = 'https://dialogs.yandex.net/api/v1/skills/' + skill_id + '/images'\n headers = {'content-type': 'application/json', 'Authorization': 'OAuth ' + token}\n r = requests.post(skillsUrl, json={\"url\": mainImageUrl}, headers=headers)\n\n if req['session']['new']:\n sessionStorage[user_id] = {\n 'suggests': [\n \"Хочу\",\n \"Не хочу\",\n ]\n }\n\n res['response']['text'] = 'Привет, хочешь мем?'\n res['response']['buttons'] = get_buttons(user_id)\n return\n\n if req['request']['original_utterance'].lower() in [\n 'мемчанский',\n 'мем',\n 'новый мем',\n 'да',\n 'хочу',\n ]:\n cardImages = [{\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }]\n\n res['response']['text'] = ''\n # res['response']['card'] = {}\n # res['response']['card']['type'] = 'BigImage'\n # res['response']['card']['image_id'] = r.json()['image']['id']\n # res['response']['card']['title'] = mainText\n res['response']['card'] = {}\n res['response']['card']['type'] = 'ItemsList'\n res['response']['card']['items'] = cardImages\n return\n\n res['response']['text'] = ''\n res['response']['buttons'] = get_buttons(user_id)\n return\n\n\n# Функция возвращает две подсказки для ответа.\ndef get_buttons(user_id):\n session = sessionStorage[user_id]\n\n # Выбираем две первые подсказки из массива.\n suggests = [\n {'title': suggest, 'hide': True}\n for suggest in session['suggests'][:2]\n 
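# i.e. keep at most the first two suggestions still stored for this user\n    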
]\n\n # Убираем первую подсказку, чтобы подсказки менялись каждый раз.\n session['suggests'] = session['suggests'][1:]\n sessionStorage[user_id] = session\n\n suggests.append({\n \"title\": \"Ссылочка\",\n \"url\": \"https://market.yandex.ru/search?text=слон\",\n \"hide\": True\n })\n\n return suggests\n\n\napp.run(host=\"0.0.0.0\", port=int(os.environ.get('PORT', 5000)))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"363335347","text":"import os\nimport codecs\nfrom Downloader import Downloader\n\nnl = '\\r\\n'\ndoc_help = \"下载的小说文件及配置文件在 ../books 目录下。\" + nl + \\\n \"支持导出文件格式: txt,epub\" + nl + \\\n \"图片章节可通过修改配置文件中的area_width,font,font_size,bg_color_name,text_color_name实现不同文字效果\" + nl + \\\n \"area_width:图片宽度; 默认:816\" + nl + \\\n \"font:字体; 默认:undefined\" + nl + \\\n \"font_size:字体大小; 默认:14\" + nl + \\\n \"bg_color_name:背景颜色; 默认:default; 可用设置:default,green,blue,white,gray,pink,night;\" + nl + \\\n \"text_color_name:文字颜色; 默认:default; 可用设置:default,green,blue,white,gray,pink,night;\"\n\n\ndef str_mid(string: str, left: str, right: str, start=None, end=None):\n pos1 = string.find(left, start, end)\n if pos1 > -1:\n pos2 = string.find(right, pos1 + len(left), end)\n if pos2 > -1:\n return string[pos1 + len(left): pos2]\n return ''\n\n\nprint(\"请先登录你的欢乐书客帐号,之后得到一些Cookies并输入程序。\")\nprint(\"若不登录则直接留空所有Cookies,若输入del则清除已保存的Cookies。\")\n\nlogin_token = \"\"\nreader_id = \"\"\narea_width = \"816\"\nfont = \"undefined\"\nfont_size = \"14\"\nbg_color_name = \"default\"\ntext_color_name = \"default\"\nif not os.path.isdir(os.getcwd() + \"/../books\"):\n os.makedirs(os.getcwd() + \"/../books\")\nif os.path.isfile(os.getcwd() + \"/../books/hbookercrawler.cfg\"):\n cfg_file = codecs.open(os.getcwd() + \"/../books/hbookercrawler.cfg\", 'r', 'utf-8')\n for line in cfg_file.readlines():\n if line.startswith(\"login_token=\"):\n login_token = str_mid(line, 'login_token=\"', '\"')\n elif line.startswith(\"reader_id=\"):\n reader_id = str_mid(line, 'reader_id=\"', '\"')\n elif line.startswith(\"area_width=\"):\n area_width = str_mid(line, 'area_width=\"', '\"')\n elif line.startswith(\"font=\"):\n font = str_mid(line, 'font=\"', '\"')\n elif line.startswith(\"font_size=\"):\n font_size = str_mid(line, 'font_size=\"', '\"')\n elif line.startswith(\"bg_color_name=\"):\n bg_color_name = str_mid(line, 'bg_color_name=\"', '\"')\n elif line.startswith(\"text_color_name=\"):\n text_color_name = str_mid(line, 'text_color_name=\"', '\"')\n cfg_file.close()\n\nlogin_token = input('Cookie: login_token(默认:\"' + login_token + '\")=') or login_token\nreader_id = input('Cookie: reader_id(默认:\"' + reader_id + '\")=') or reader_id\n\nif reader_id.lower().startswith('del') or login_token.lower().startswith('del'):\n reader_id = \"\"\n login_token = \"\"\n print(\"已清除Cookies!\")\n\nwith codecs.open(os.getcwd() + \"/../books/hbookercrawler.cfg\", 'w', 'utf-8') as cfg_file:\n cfg_file.write('login_token=\"' + login_token + '\"' + nl)\n cfg_file.write('reader_id=\"' + reader_id + '\"' + nl)\n cfg_file.write('area_width=\"' + area_width + '\"' + nl)\n cfg_file.write('font=\"' + font + '\"' + nl)\n cfg_file.write('font_size=\"' + font_size + '\"' + nl)\n cfg_file.write('bg_color_name=\"' + bg_color_name + '\"' + nl)\n cfg_file.write('text_color_name=\"' + text_color_name + '\"')\ndel cfg_file\n\ndl = Downloader(login_token, reader_id)\ndl.area_width = area_width\ndl.font = 
font\ndl.font_size = font_size\ndl.bg_color_name = bg_color_name\ndl.text_color_name = text_color_name\ndel login_token\ndel reader_id\ndel area_width\ndel font\ndel font_size\ndel bg_color_name\ndel text_color_name\n\nworking_dir = os.getcwd() + \"/../books\"\n\n\ndef select_chapter(book, skip_input):\n try:\n while True:\n while True:\n try:\n if skip_input:\n chapter_start = book[\"last\"] + 1\n chapter_end = len(book[\"chapter\"])\n else:\n chapter_start = int(input(\"输入开始章节编号(留空将自动寻找):\") or book[\"last\"] + 1)\n chapter_end = int(input(\"输入结束章节编号(留空将自动寻找):\") or len(book[\"chapter\"]))\n break\n except ValueError:\n continue\n if chapter_start < 1:\n chapter_start = 1\n if chapter_start > len(book[\"chapter\"]):\n if skip_input:\n print(\"小说暂无更新...\")\n else:\n input(\"小说暂无更新...\")\n break\n if chapter_start <= chapter_end:\n print(\"开始章节编号:\", chapter_start,\n \"chapter_id:\", book[\"chapter\"][chapter_start - 1][0],\n \"标题:\", book[\"chapter\"][chapter_start - 1][1])\n print(\"结束章节编号:\", chapter_end,\n \"chapter_id:\", book[\"chapter\"][chapter_end - 1][0],\n \"标题:\", book[\"chapter\"][chapter_end - 1][1])\n while True:\n if skip_input:\n return {\"start\": chapter_start, \"end\": chapter_end}\n else:\n confirm = input(\"确定从这个位置下载吗(回车确认,n:重新输入章节编号,q:取消下载):\").lower()\n if not confirm or confirm.startswith('y'):\n return {\"start\": chapter_start, \"end\": chapter_end}\n elif confirm.startswith('n'):\n break\n elif confirm.startswith('q'):\n return None\n else:\n if skip_input:\n return None\n else:\n print(\"输入无效:\", \"开始章节编号\", chapter_start, \"不能大于\", \"结束章节编号\", chapter_end)\n except (KeyboardInterrupt, InterruptedError):\n print(nl, \"已取消输入章节编号\")\n except Exception as e:\n print(\"[ERROR]\", e)\n print(\"读取章节编号时出错\")\n input(\"按下回车键继续...\")\n return None\n\n\ndef selected_book(_book_id, skip_input):\n dl.skip = skip_input\n book = dl.get_book(_book_id)\n book = dl.check_book(book)\n book = dl.fix_book(book)\n selected = select_chapter(book, skip_input)\n if selected:\n dl.download(book, selected)\n\n\ndef update_downloaded():\n try:\n book_list = list()\n for title in os.listdir(working_dir):\n cfg_path = working_dir + '/' + title + '/' + title + '.cfg'\n if os.path.isfile(cfg_path):\n with codecs.open(cfg_path, 'r', 'utf-8') as file:\n file_lines = file.readlines()\n for _line in file_lines:\n if _line.startswith('book_id='):\n book_list.append(str_mid(_line, 'book_id=\"', '\"'))\n for _book_id in book_list:\n selected_book(_book_id, True)\n except (KeyboardInterrupt, InterruptedError):\n print(nl, \"已取消更新全部小说\")\n except Exception as e:\n print(\"[ERROR]\", e)\n print(\"更新小说时出错\")\n input(\"按下回车键继续...\")\n\n\ndef solve_input(inputs):\n if inputs.startswith('q'):\n exit()\n return True\n elif inputs.startswith('h'):\n print(doc_help)\n return True\n elif inputs.startswith('ud'):\n update_downloaded()\n return True\n elif inputs.startswith('v'):\n dl.vip = not dl.vip\n if dl.vip:\n print(\"已设置下载VIP章节\")\n else:\n print(\"已设置跳过VIP章节\")\n return True\n return False\n\n\ndl.get_bookshelf()\n\nif dl.nickname != '${NoName}':\n while True:\n for bookshelf_info in dl.bookshelf:\n print(\"编号:\", bookshelf_info[0], \"book_id:\", bookshelf_info[1], \"书名:\", bookshelf_info[2])\n while True:\n try:\n book_id = input(\"输入小说编号或小说id(book_id)(q:退出,h:帮助,ud:更新已下载的小说,ua:更新书架中的小说,v:设置是否下载VIP章节):\").lower()\n if not solve_input(book_id):\n if book_id.startswith(\"ua\"):\n for _bookshelf_info in dl.bookshelf:\n selected_book(_bookshelf_info[1], True)\n if 0 < int(book_id) <= len(dl.bookshelf):\n 
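# convert the 1-based list number typed by the user into the matching book_id\n                        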
book_id = dl.bookshelf[int(book_id) - 1][1]\n selected_book(book_id, False)\n break\n except ValueError:\n continue\nelse:\n while True:\n book_id = input(\"输入小说id(book_id)(q:退出,h:帮助,u:更新已下载的小说,v:设置是否下载VIP章节):\").lower()\n if not solve_input(book_id):\n selected_book(book_id, False)\n","sub_path":"HbookerCrawler.py","file_name":"HbookerCrawler.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"78197126","text":"import pprint as pp\r\nimport contextlib\r\nimport pandas as pd\r\nfrom savReaderWriter import SavReader as sr, Generic as gn\r\nfrom sqlalchemy import create_engine\r\n\r\nmydb = create_engine('mssql+pymssql://Emeatristsql/{}'.format(\"SSIVRDATA\"))\r\n\"\"\"\r\n select * from SPSS_Project_List\r\n\r\n select * from SPSS_Var_List\r\n\r\n\tselect * from spps_varlist_temp\r\n\r\n\tselect * from spps_desclist_temp\r\n \r\n\"\"\"\r\ndef wrt_valueLabels(args):\r\n \r\n args[\"Values\"] =args.index.astype(int)\r\n # print(args)\r\n df = pd.melt(args,id_vars=\"Values\",var_name=\"ColumnName\", value_name=\"ColumnValue\")\r\n df = df[df[\"ColumnValue\"].notnull()]\r\n df[\"ColumnValue\"]=df[\"ColumnValue\"].str.decode('cp1254')\r\n df[\"ColumnName\"]=df[\"ColumnName\"].str.decode('cp1254')\r\n # print(df)\r\n return(df)\r\n\r\n\r\ndef wrt_otherVar(args,kwargs):\r\n mydict = {\"varLabels\":\"Qtext\",\"formats\":\"Qformat\",\"varTypes\":\"QTypes\",\"varNames\":\"Qt\",}\r\n t = mydict[(\"{}\".format(kwargs))]\r\n\r\n args.rename(index=str, columns={0: t },inplace=True)\r\n args.reset_index(inplace=True)\r\n args.rename(index=str, columns={\"index\": \"Key\"},inplace=True)\r\n \r\n if t != \"QTypes\" and t != \"Qt\" :\r\n args[t]=args[t].str.decode('cp1254')\r\n \r\n # args.to_csv(r\"\\\\Emeatristsql\\IMP\\PythonScripts\\Create_Spss\\{}.csv\".format(t),sep=\";\",header=True)\r\n # print(args)\r\n return(args)\r\n\r\n\r\ndef insert_varlist(args):\r\n args[\"Active\"] = int(1)\r\n args[\"Type\"] = \"Numeric\"\r\n args[\"Sysmiss\"] = int(0)\r\n args[[\"Active\", \"ColumnName\", \"Type\", \"Values\", \"ColumnValue\", \"Sysmiss\"]]\r\n \r\n # print(args)\r\n args.to_sql(name=\"spps_varlist_temp\", con=mydb, if_exists=\"replace\",\r\n index=False, chunksize=1000)\r\n return(args)\r\n\r\ndef main(file_path):\r\n numVars, nCases, varNames, varTypes, printTypesFile, printTypeLabels, varWids = \\\r\n sr(file_path, verbose=True).getSavFileInfo()\r\n \r\n print(sr(file_path).getFileReport())\r\n # print(sr(file_path).getSavFileInfo())\r\n\r\n valueLabels = pd.DataFrame.from_dict(varWids)\r\n valueLabels = wrt_valueLabels(valueLabels)\r\n # valueLabels sav icinden alindi.\r\n\r\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n \r\n varLabels = pd.DataFrame.from_dict(printTypeLabels, orient='index')\r\n varLabels = wrt_otherVar(varLabels,\"varLabels\") \r\n\r\n formats = pd.DataFrame.from_dict(printTypesFile, orient='index')\r\n formats = wrt_otherVar(formats,\"formats\")\r\n formats[\"Qformat\"] = formats[\"Qformat\"].apply(\r\n lambda x: \"String\" if x.startswith('A') else \"Numeric\")\r\n\r\n varTypes = pd.DataFrame.from_dict(varTypes, orient='index')\r\n varTypes = wrt_otherVar(varTypes,\"varTypes\")\r\n\r\n varNames = pd.DataFrame.from_dict(varNames)\r\n varNames = wrt_otherVar(varNames,\"varNames\")\r\n\r\n # varLabels üzerinden merge işlemleri devam edecek.\r\n result = pd.merge(varLabels, varTypes, on=[\"Key\"], how=\"inner\")\r\n result.index = result.index.map(str)\r\n result = 
pd.merge(result, varNames, right_index=True, left_index=True)\r\n result = pd.merge(result, formats, right_index=True, left_index=True)\r\n result = result[[\"Qt\", \"Qtext\", \"QTypes\", \"Qformat\"]]\r\n # result = result.merge(varNames, left_index=True, right_on='Qt')\r\n result[\"Qt\"]=result[\"Qt\"].str.decode('cp1254')\r\n\r\n # print(result)\r\n result.to_sql(name=\"spps_desclist_temp\", con=mydb, if_exists=\"replace\",\r\n index=False, chunksize=1000)\r\n # print(valueLabels)\r\n insert_varlist(valueLabels)\r\n print(\"Değişkenler tablolara yazdırıldı...\")\r\n\r\nsavFilePath = r\".\\085048_2_SA_C.sav\"\r\n\r\nif __name__ == \"__main__\":\r\n main(savFilePath)\r\n","sub_path":"readSav.py","file_name":"readSav.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"165951324","text":"\"\"\" Analyzes the word frequencies in a book downloaded from\nProject Gutenberg \"\"\"\n\nimport string\nfrom heapq import nlargest\n\n\ndef get_word_list(file_name):\n \"\"\" Reads the specified project Gutenberg book. Header comments,\n punctuation, and whitespace are stripped away. The function\n returns a list of the words used in the book as a list.\n All words are converted to lower case.\n \"\"\"\n\n #strips out header comments\n f = open(file_name, 'r')\n lines = f.readlines()\n curr_line = 0\n while lines[curr_line].find('POIROT EXPLAINS') == -1:\n curr_line += 1\n lines = lines[curr_line+1:]\n\n #the list of all the words in the text\n word_list = []\n\n #strips out whitespace, punctuation, and makes lowercase\n #makes into a list of words\n for line in lines:\n words = line.split()\n for word in words:\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n word_list.append(word)\n\n return word_list\n\n\ndef get_top_n_words(word_list, n):\n \"\"\" Takes a list of words as input and returns a list of the n most frequently\n occurring words ordered from most to least frequently occurring.\n\n word_list: a list of words (assumed to all be in lower case with no\n punctuation\n n: the number of words to return\n returns: a list of n most frequently occurring words ordered from most\n frequently to least frequently occurring\n \"\"\"\n word_counts = dict()\n\n for word in word_list:\n if word not in word_counts:\n word_counts[word] = 1\n else:\n word_counts[word] += 1\n\n topn = []\n\n for word in nlargest(n, word_counts, key=word_counts.get):\n topn.append(word)\n\n return topn\n\nif __name__ == \"__main__\":\n print(\"Running WordFrequency Toolbox\")\n print(string.punctuation)\n\n #code that calls two functions\n wordlist = get_word_list('MysteriousAffair.txt')\n print(get_top_n_words(wordlist, 100))\n","sub_path":"frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"13662679","text":"import feedparser\nfrom strip_html import *\n\nd = feedparser.parse(\"http://the-rinse.com/feed\")\n\nif d.feed.has_key('generator') \\\n and d.feed.generator == \"blog.myspace.com\":\n for entry in d.entries:\n title = entry.title\n desc = entry.description\n date = entry.date\n id = entry.link\n content = entry.content[0].value\nelse:\n for entry in d.entries:\n title = entry.title\n desc = entry.description\n date = entry.date\n id = entry.id\n content = 
entry.content[0].value\n","sub_path":"feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"590764378","text":"\nfrom socket import *\nimport RPi.GPIO as GPIO\nimport time\nimport sys\n\nhost = \"\"\nport = 13000\nbuf = 1024\naddr = (host, port)\nUDPSock = socket(AF_INET, SOCK_DGRAM)\nUDPSock.bind(addr)\n\nservoPIN = 17\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(servoPIN, GPIO.OUT)\np = GPIO.PWM(servoPIN, 50) # GPIO 17 for PWM with 50Hz\np.start(2.5) # Initialization\n\nprint(\"Waiting to receive messages...\")\n\nwhile True:\n (data, addr) = UDPSock.recvfrom(buf)\n print(\"Received message: \" + data)\n if data == \"left\":\n p.ChangeDutyCycle(2.5)\n time.sleep(0.5)\n elif data == \"mid\":\n p.ChangeDutyCycle(5)\n time.sleep(0.5)\n elif data == \"right\":\n p.ChangeDutyCycle(7.5)\n time.sleep(0.5)\n \n if data == \"exit\":\n break\n\nUDPSock.close()\np.stop()\nGPIO.cleanup()\nsys.exit(1)\n","sub_path":"2_server_udp.py","file_name":"2_server_udp.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"469379580","text":"A = []\nB = []\nC = []\nD = []\nE = []\nMax = 10\n\ndef preencherA ():\n for item in range(0,Max):\n PerguntaA = int(input(\" Escolha os valores para A: \"))\n A.append(PerguntaA)\n\ndef preencherB ():\n for item in range(0,Max):\n PerguntaB = int(input(\"Escolha os valores para B: \"))\n B.append(PerguntaB)\n\ndef preencherC ():\n for itemA in A:\n C.append(itemA)\n for itemB in B:\n C.append(itemB)\n\ndef preencherD ():\n for itemA in A:\n if itemA not in B:\n D.append(itemA)\n\ndef preencherE ():\n for itemA in A:\n if itemA in B:\n if itemA not in E:\n E.append(itemA)\n for itemB in B:\n if itemB not in E:\n E.append(itemB)\n\npreencherA()\npreencherB()\npreencherC()\npreencherD()\npreencherE()\n\nprint(A)\nprint(B)\nprint(C)\nprint(D)\nprint(E)","sub_path":"FichasPraticas/Ficha6.ex4.py","file_name":"Ficha6.ex4.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"629956330","text":"# coding=utf-8\n__author__ = 'huydq17'\n\nimport time\nfrom flask import current_app\nfrom datetime import datetime\n\n\ndef to_string(time_from, time_to):\n if (time_from.day, time_from.month, time_from.year) == (time_to.day, time_to.month, time_to.year):\n return '{0}/{1}/{2}'.format(time_from.day, time_from.month, time_from.year)\n return '{0}/{1}/{2} - {3}/{4}/{5}'.format(time_from.day, time_from.month, time_from.year,\n time_to.day, time_to.month, time_to.year)\n\n\nclass DateTimeWrapper(object):\n def __init__(self):\n # Timestamp is in millisecond\n pass\n\n @staticmethod\n def curr_timestamp(unit='second'):\n if unit == 'millisecond':\n return int(time.time()) * 1000\n return int(time.time())\n\n @staticmethod\n def to_datetime(timestamp, fmt='%H:%M:%S %d-%m-%Y'):\n \"\"\"\n Convert timestamp in second to datetime readable format\n :param timestamp:\n :param fmt:\n :return:\n \"\"\"\n try:\n return datetime.fromtimestamp(int(timestamp)).strftime(fmt)\n except Exception as e:\n current_app.logger.error('Error when convert timestamp to datetime: {0}'.format(str(e)))\n return timestamp\n\n @staticmethod\n def to_timestamp(str_datetime, fmt='%H:%M:%S %d-%m-%Y'):\n \"\"\"\n Convert datetime in string with valid format to timestamp\n \"\"\"\n timestamp = 
time.mktime(datetime.strptime(str_datetime, fmt).timetuple())\n return int(timestamp)\n\n @staticmethod\n def datetime_to_timestamp(datetime_obj, unit='second'):\n \"\"\"\n Convert python datetime object to timestamp in unit (second or millisecond)\n \"\"\"\n delta = 1000 if unit == 'millisecond' else 1\n return int(time.mktime(datetime_obj.timetuple()) * delta)\n\n @staticmethod\n def rfc3339_to_timestamp(rfc_time, fmt='%Y-%m-%dT%H:%M:%S.%fZ'):\n \"\"\"\n Convert UTC time in RFC 3339 format to timestamp\n Example: 2017-07-19T03:20:13.801Z -> 3h 20m 13s (UTC time) or 10h 20m 13s in GMT+7 time\n :param rfc_time:\n :param fmt\n :return:\n \"\"\"\n utc_dt = datetime.strptime(rfc_time, fmt)\n\n # Convert UTC datetime to seconds since the\n timestamp = (utc_dt - datetime(1970, 1, 1)).total_seconds()\n return int(timestamp)\n\n @staticmethod\n def format_timestamp(timestamp, unit='second'):\n \"\"\"\n Convert timestamp in second (or millisecond) to human readable time\n Example: timestamp = 3661, unit = second -> Readable time = 1h 1m 1s\n \"\"\"\n if timestamp == 'N/A':\n return timestamp\n\n seconds = timestamp\n if unit == 'millisecond': # timestamp in millisecond\n seconds = int(timestamp / 1000)\n days = int(seconds / (24 * 3600))\n tmp = seconds - days * 24 * 3600\n hours = int(tmp / 3600)\n tmp = seconds - days * 24 * 3600 - hours * 3600\n minutes = int(tmp / 60)\n seconds = seconds - days * 24 * 3600 - hours * 3600 - minutes * 60\n\n result = \"0\"\n if days > 0:\n result = str(days) + \" ngày \" + str(hours) + \" giờ \" + str(minutes) + \" phút \" + str(seconds) + \" giây\"\n elif hours > 0:\n result = str(hours) + \" giờ \" + str(minutes) + \" phút \" + str(seconds) + \" giây\"\n elif minutes > 0:\n result = str(minutes) + \" phút \" + str(seconds) + \" giây\"\n elif seconds > 0:\n result = str(seconds) + \" giây\"\n return result\n","sub_path":"server_worker_api/worker/app/utils/datetimeutils.py","file_name":"datetimeutils.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"392878078","text":"\nimport random\n\nour = 'zhuhaiwenluobiyu'\nnames = ''\nfor s in our:\n if s not in names:\n names += s\n\nend = []\nfor n in range(10):\n name = ''\n length = random.randint(10,20)\n for i in range(length):\n k = names[random.randint(0,len(names)-1)]\n name += k\n end.append(name)\nprint(end)\n","sub_path":"name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"375855126","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import quote\r\nimport parse\r\nclass Link:\r\n def __init__(self):\r\n pass\r\n\r\n def enter_the_inf(self):\r\n print(\"Потрібно ввести назву міста спочатку на російській потім на англійській, нажміть на enter ще раз після першого вводу\")\r\n x = str(input((\"Введіть назву міста:\")))\r\n return x\r\n def poiskpersSin(self, nick):\r\n geourl = \"https://ua.sinoptik.ua/{0}\".format(quote(nick))\r\n return geourl\r\n def poiskpersMet(self, nick):\r\n geourl = \"https://www.meteoprog.ua/ru/{0}\".format(quote(nick))\r\n return geourl\r\n\r\nclass Request:\r\n HEADERS = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (HTML, like Gecko) '\r\n 'Chrome/88.0.4324.190 Safari/537.36',\r\n 'accept': '*/*'}\r\n def __init__(self, URL1):\r\n self.URL1 = URL1\r\n\r\n def get_html(self, params=None):\r\n r = 
requests.get(self.URL1, headers=self.HEADERS, params=params)\r\n return r.text\r\n\r\nclass Weather_father():\r\n def __init__(self, html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max):\r\n self.html = html\r\n self.mainClass = mainClass\r\n self.mainNextDay = mainNextDay\r\n self.tabsContentInner = tabsContentInner\r\n self.temperature = temperature\r\n self.pClassOne = pClassOne\r\n self.pClassTwo = pClassTwo\r\n self.pClassThree = pClassThree\r\n self.min = min\r\n self.max = max\r\n def start_gettingSinoptik(self):\r\n soup = BeautifulSoup(self.html, 'html.parser')\r\n # Ищет в этом классе\r\n items = soup.find_all('div', class_=self.mainClass)\r\n weather1 = []\r\n weather2 = []\r\n weather3 = []\r\n weatherNextDay = []\r\n for item in items:\r\n weather1.append(dict(\r\n title=item.find('p', class_= self.pClassOne).get_text(),\r\n date1=item.find('p', class_= self.pClassTwo).get_text(),\r\n date2=item.find('p', class_= self.pClassThree).get_text(),\r\n temp=item.find('div', class_= self.min).get_text(),\r\n temp2=item.find('div', class_= self.max).get_text(),\r\n ))\r\n itemsNextDay = soup.find_all('div', id=self.mainNextDay)\r\n for item in itemsNextDay:\r\n weatherNextDay.append(dict(\r\n title=item.find('a', class_=self.pClassOne).get_text(),\r\n date1=item.find('p', class_=self.pClassTwo).get_text(),\r\n date2=item.find('p', class_=self.pClassThree).get_text(),\r\n temp=item.find('div', class_=self.min).get_text(),\r\n temp2=item.find('div', class_=self.max).get_text(),\r\n ))\r\n\r\n # print(weatherNextDay)\r\n item2 = soup.find_all('div', class_=self.tabsContentInner)\r\n\r\n for item in item2:\r\n weather2.append(\r\n dict(vol1=item.find('td', class_='p1').get_text(), vol2=item.find('td', class_='p2').get_text(),\r\n vol3=item.find('td', class_='p3').get_text(), vol4=item.find('td', class_='p4').get_text(),\r\n vol5=item.find('td', class_='p5').get_text(), vol6=item.find('td', class_='p6').get_text(),\r\n vol7=item.find('td', class_='p7').get_text(),\r\n vol8=item.find('td', class_='p8').get_text()))\r\n\r\n item3 = soup.find_all('tr', class_=self.temperature)\r\n\r\n for item in item3:\r\n weather3.append(\r\n dict(vol1=item.find('td', class_='p1').get_text(), vol2=item.find('td', class_='p2').get_text(),\r\n vol3=item.find('td', class_='p3').get_text(), vol4=item.find('td', class_='p4').get_text(),\r\n vol5=item.find('td', class_='p5').get_text(), vol6=item.find('td', class_='p6').get_text(),\r\n vol7=item.find('td', class_='p7').get_text(),\r\n vol8=item.find('td', class_='p8').get_text()))\r\n return [weather1, weather2, weather3, weatherNextDay]\r\n\r\n def start_gettingMeteoprog(self):\r\n soup = BeautifulSoup(self.html, 'html.parser')\r\n items = soup.find_all(attrs={'data-daynumber': '0'})\r\n weather1 = []\r\n\r\n for item in items:\r\n weather1.append(dict(\r\n title=item.find('div', class_=self.tabsContentInner).get_text(),\r\n date1=item.find('div', class_=self.temperature).get_text(),\r\n temp1=item.find('div', class_=self.pClassOne).get_text(),\r\n temp2=item.find('div', class_=self.pClassTwo).get_text(),\r\n ))\r\n itemsNextDay = soup.find_all(attrs={'data-daynumber': '1'})\r\n weather2 = []\r\n\r\n for item in itemsNextDay:\r\n weather2.append(dict(\r\n title=item.find('div', class_=self.tabsContentInner).get_text(),\r\n date1=item.find('div', class_=self.temperature).get_text(),\r\n temp=item.find('div', class_=self.pClassOne).get_text(),\r\n temp2=item.find('div', class_=self.pClassTwo).get_text(),\r\n 
))\r\n return [weather1, weather2]\r\n\r\nclass ContentSinoptik(Weather_father, Link):\r\n def __init__(self, html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max):\r\n super(ContentSinoptik, self).__init__(html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max)\r\n\r\n def outputInf(self, weatherList):\r\n dicSet = super().enter_the_inf()\r\n weatherFather = weatherList[0]\r\n weatherFather2 = weatherList[1]\r\n weatherFather3 = weatherList[2]\r\n weatherNextDay = weatherList[3]\r\n\r\n for i in weatherFather:\r\n w1 = i\r\n for i in weatherFather2:\r\n w2 = i\r\n for i in weatherFather3:\r\n w3 = i\r\n for i in weatherNextDay:\r\n w4 = i\r\n print(\"------------------------------------------------------\")\r\n print(\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tПогода з сайту Sinoptik \" + dicSet)\r\n print(\"День:\" + w1['title'])\r\n print(\"Число: \" + w1['date1'], ' ', w1['date2'])\r\n print(\"Температура: :\" + w1['temp'], \" || \" + w1['temp2'])\r\n print()\r\n\r\n mw2 = []\r\n for i in w2:\r\n mw2.append(w2[i])\r\n mw3 = []\r\n for i in w3:\r\n mw3.append(w3[i])\r\n print(\"Прогноз на день\")\r\n for i in mw2:\r\n print(i, end=' ')\r\n print()\r\n for j in mw3:\r\n print(j, end=' ')\r\n if j == mw3[4]:\r\n print(end=' ')\r\n if j == mw3[5]:\r\n print(end=' ')\r\n print()\r\n\r\n # ТЕМПЕРАТУРА НА СЕГОДНЯ\r\n result1 = w1['temp'] + w1['temp2']\r\n currentTemp = []\r\n num = \"\"\r\n for char in result1:\r\n if char.isdigit():\r\n num = num + char\r\n else:\r\n if num != '':\r\n currentTemp.append(int(num))\r\n num = ''\r\n if num != '':\r\n currentTemp.append(int(num))\r\n # ---------------------\r\n\r\n # ТЕМПЕРАТУРА НА ЗАВТРА\r\n result1 = w4['temp'] + w4['temp2']\r\n weatherFuture = []\r\n num2 = \"\"\r\n for char in result1:\r\n if char.isdigit():\r\n num2 = num2 + char\r\n else:\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n num2 = ''\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n # ---------------------\r\n\r\n # ЗАПИСИСЬ ТЕКУЩЕЙ ПОГОДЫ В ФАЙЛ\r\n try:\r\n files = open(\"w1.txt\", \"w\")\r\n files.write(str(currentTemp))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n\r\n # ЗАПИСЬ БУДУЮЩЕЙ ПОГОДЫ В ФАЙЛ ДЛЯ ПОДАЛЬШЕГО СРАВНЕНИЯ\r\n try:\r\n files = open(\"w2.txt\", \"a\")\r\n files.write(str(weatherFuture))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n # ЗАПИСЬ ЗАВТРАШНЕЙ И СЕГОДНЯШНЕЙ ТЕМПЕРАТУРЫ В ПЕРЕМЕННЫЕ\r\n files = open(\"w2.txt\", \"r\")\r\n openWeatherPast = files.read()\r\n files.close()\r\n files = open(\"w1.txt\", \"r\")\r\n openWeatherCurrent = files.read()\r\n files.close()\r\n\r\n # ПРЕОБРАЗОВАНИЕ В МАСИВ\r\n pastW = list(map(int, openWeatherPast[1:-1].split(',')))\r\n currentW = list(map(int, openWeatherCurrent[1:-1].split(',')))\r\n # ---------------------\r\n\r\n minTemp = currentW[0] - pastW[0]\r\n maxTemp = currentW[1] - pastW[1]\r\n print(\"Відхилення погоди на sinoptik.ua може становити від {} до {} градусів\".format(abs(maxTemp), abs(minTemp)))\r\n print(\"------------------------------------------------------\")\r\n\r\n# https://www.meteoprog.ua/ru/weather/London/\r\nclass ContentMeteoprog(ContentSinoptik):\r\n def __init__(self, html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max):\r\n super(ContentMeteoprog, self).__init__(html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max)\r\n\r\n 
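# overrides ContentSinoptik.outputInf for data scraped from meteoprog.ua\r\n    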
def outputInf(self, weather):\r\n dicSet = super().enter_the_inf()\r\n weatherToday = weather[0]\r\n weatherTomorrow = weather[1]\r\n for i in weatherToday:\r\n w1 = i\r\n for i in weatherTomorrow:\r\n w2 = i\r\n print(\"------------------------------------------------------\")\r\n print(\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tПогода з сайту MeteoProg \" + dicSet)\r\n print(\"День:\" + w1['title'].strip())\r\n print(\"Число: \" + w1['date1'].strip())\r\n print(\"Температура: \" + w1['temp1'] + \" || \" + w1['temp2'])\r\n # ТЕМПЕРАТУРА НА СЕГОДНЯ\r\n result1 = w1['temp1'] + w1['temp2']\r\n currentTemp = []\r\n num1 = \"\"\r\n for char in result1:\r\n if char.isdigit():\r\n num1 = num1 + char\r\n else:\r\n if num1 != '':\r\n currentTemp.append(int(num1))\r\n num1 = ''\r\n if num1 != '':\r\n currentTemp.append(int(num1))\r\n # ---------------------\r\n\r\n # ТЕМПЕРАТУРА НА ЗАВТРА\r\n result2 = w2['temp'] + w2['temp2']\r\n weatherFuture = []\r\n num2 = \"\"\r\n for char in result2:\r\n if char.isdigit():\r\n num2 = num2 + char\r\n else:\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n num2 = ''\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n\r\n # ---------------------\r\n\r\n # ЗАПИСИСЬ ТЕКУЩЕЙ ПОГОДЫ В ФАЙЛ\r\n try:\r\n files = open(\"w1M.txt\", \"w\")\r\n files.write(str(currentTemp))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n\r\n # ЗАПИСЬ БУДУЮЩЕЙ ПОГОДЫ В ФАЙЛ ДЛЯ ПОДАЛЬШЕГО СРАВНЕНИЯ\r\n try:\r\n files = open(\"w2M.txt\", \"a\")\r\n files.write(str(weatherFuture))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n\r\n # ЗАПИСЬ ЗАВТРАШНЕЙ И СЕГОДНЯШНЕЙ ТЕМПЕРАТУРЫ В ПЕРЕМЕННЫЕ\r\n files = open(\"w2M.txt\", \"r\")\r\n openWeatherPast = files.read()\r\n files.close()\r\n files = open(\"w1M.txt\", \"r\")\r\n openWeatherCurrent = files.read()\r\n files.close()\r\n # print(\"Температура на завтра\")\r\n # print(openWeatherPast)\r\n # ---------------------\r\n\r\n # ПРЕОБРАЗОВАНИЕ В МАСИВ\r\n pastW = list(map(int, openWeatherPast[1:-1].split(',')))\r\n currentW = list(map(int, openWeatherCurrent[1:-1].split(',')))\r\n # ---------------------\r\n minTemp = pastW[0] - currentW[0]\r\n maxTemp = pastW[1] - currentW[1]\r\n print(\"Відхилення погоди на meteoprog.ua може становити від {} до {} градусів\".format(abs(minTemp), abs(maxTemp)))\r\n print(\"------------------------------------------------------\")\r\n\r\n# Вивід информації х sinoptik\r\nsinoptik= Link()\r\nsinoptikLink = sinoptik.poiskpersSin(\"погода-\"+sinoptik.enter_the_inf())\r\nsinoptikRequest = Request(sinoptikLink)\r\nsinoptikResponse = sinoptikRequest.get_html()\r\nsinoptikOutputInf = ContentSinoptik(sinoptikResponse, 'main loaded', \"bd2\", 'tabsContentInner', 'temperature', 'day-link', 'date', 'month', 'min', 'max')\r\nsinoptikOutputInf.outputInf(sinoptikOutputInf.start_gettingSinoptik())\r\n\r\n# Вивід информації з meteoprog\r\nmeteorg = Link()\r\nmeteorgLink = meteorg.poiskpersMet(\"weather/\"+meteorg.enter_the_inf()+\"/\")\r\nmeteorgRequest = Request(meteorgLink)\r\nmeteorgResponse = meteorgRequest.get_html()\r\nmeteorgOutputInf = ContentMeteoprog(meteorgResponse, \"activeBg\", \"someDayOffWeek\", 'dayoffWeek', 'dayoffMonth', 'from', 'to', \"asd\", \"asds\", \"asdfg\")\r\nmeteorgOutputInf.outputInf(meteorgOutputInf.start_gettingMeteoprog())\r\n\r\n\r\n\r\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":13904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} 
+{"seq_id":"513823432","text":"class concreteProperties:\r\n def __init__(self,fck,ecu3,ec3,units='MPa'):\r\n allowableUnits = ['MPa']\r\n if units not in allowableUnits:\r\n raise Exception(\"Please use MPa as material units for now\")\r\n self.Ec = (fck*0.85/1.5)/ec3\r\n self.fck = fck\r\n self.fcd = fck*0.85/1.5\r\n self.ecu3 = ecu3\r\n self.ecu2 = ecu3/2\r\n self.ec3 = ec3#-self.fcd/Ec #ec3 in eurocode\r\n\r\nclass steelProperties:\r\n def __init__(self,Es,fyk,euk,k,units='MPa'):\r\n allowableUnits = ['MPa']\r\n if units not in allowableUnits:\r\n raise Exception(\"Please use MPa as material units for now\")\r\n self.Es = Es\r\n self.fyk = fyk\r\n self.fyd = fyk/1.15\r\n self.euk = euk\r\n self.eud = 0.9*euk\r\n self.k = k\r\n self.ey = self.fyd/Es","sub_path":"sectionAnalysis/reinforcedConcrete/materials.py","file_name":"materials.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"88879375","text":"def fatorial (y):\n r = 1\n i = 1\n while i < y:\n i += 1 \n r *= i\n return r\n\ndef calcula_euler (x,n):\n i = 0\n a = 0\n while i < n:\n a += (x**i)/fatorial(n-1)\n i += 1\n return a\n ","sub_path":"backup/user_173/ch119_2020_04_01_15_11_34_816639.py","file_name":"ch119_2020_04_01_15_11_34_816639.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"360675306","text":"import os\nimport fnmatch\nimport warnings\nimport numpy as np\nfrom joblib import Parallel, delayed, parallel_backend\nfrom FRETboard.MainTable import MainTable\nfrom pathlib import Path\n\ndef parse_input_path(location, pattern=None):\n \"\"\"\n Take path, list of files or single file, Return list of files with path name concatenated.\n \"\"\"\n if not isinstance(location, list):\n location = [location]\n all_files = []\n for loc in location:\n loc = Path(loc).resolve()\n if loc.is_dir():\n for root, dirs, files in os.walk(loc):\n if pattern:\n for f in fnmatch.filter(files, pattern):\n all_files.append(os.path.join(root, f))\n else:\n for f in files:\n all_files.append(os.path.join(root, f))\n elif loc.exists():\n all_files.extend(str(loc))\n else:\n warnings.warn('Given file/dir %s does not exist, skipping' % str(loc), RuntimeWarning)\n if not len(all_files):\n ValueError('Input file location(s) did not exist or did not contain any files.')\n return all_files\n\n\ndef parallel_fn(f_array, fn_list, dt):\n for fi, f in enumerate(np.hsplit(f_array, f_array.shape[1])):\n f = f.squeeze()\n dt.add_tuple(np.row_stack((np.arange(f.shape[1]), f)), fn_list[fi])\n return dt.data\n\n\ndef parse_trace_file(file_contents, fn, threads, eps):\n \"\"\"\n Take contents extracted from .trace binary file, return list of [threads] MainTable objects\n \"\"\"\n nb_colors = 2\n nb_frames, _, nb_traces = np.frombuffer(file_contents, dtype=np.int16, count=3)\n traces_vec = np.frombuffer(file_contents, dtype=np.int16)\n traces_vec = traces_vec[3:]\n nb_points_expected = nb_colors * (nb_traces // nb_colors) * nb_frames\n traces_vec = traces_vec[:nb_points_expected]\n file_contents = traces_vec.reshape((nb_colors, nb_traces // nb_colors, nb_frames), order='F')\n fn_clean = os.path.splitext(fn)[0]\n\n file_chunks = np.array_split(file_contents, threads, axis=1)\n fn_list = [f'{fn_clean}_{it}.dat' for it in range(file_contents.shape[1])]\n fn_chunks = np.array_split(fn_list, threads)\n\n df_list = Parallel(n_jobs=threads)(delayed(parallel_fn)(fc, fnc, 
MainTable([], eps))\n for fc, fnc in zip(file_chunks, fn_chunks))\n return df_list\n","sub_path":"build/lib/FRETboard/io_functions.py","file_name":"io_functions.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"223958615","text":"import time\nimport klassen\nimport spelspelen\nimport spelstarten\ninstelling_twee = spelstarten.instelling_twee\ninstelling_zeven = spelstarten.instelling_zeven\ninstelling_acht = spelstarten.instelling_acht\ninstelling_tien = spelstarten.instelling_tien\ninstelling_boer = spelstarten.instelling_boer\ninstelling_heer = spelstarten.instelling_heer\ninstelling_joker = spelstarten.instelling_joker\ninstelling_aas = spelstarten.instelling_aas\n\n#functie voor het tellen van kaarten op symbool\ndef kaart_teller(speler_hand):\n L=[klassen.hand(), klassen.hand(), klassen.hand(), klassen.hand(), klassen.hand(), klassen.hand()]\n i = len(speler_hand)-1\n #check alle kaarten in hand, sorteer op symbool\n while i >= 0:\n if speler_hand[i].symbool == 'schoppen':\n L[0].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].symbool == 'ruiten':\n L[1].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].symbool == 'harten':\n L[2].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].symbool == 'klaveren':\n L[3].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].waarde == 'J':\n L[4].append(speler_hand[i])\n i=i-1\n # kijk of er 2'en aanwezig zijn.\n if speler_hand[i].waarde == '2':\n L[5].append(speler_hand[i])\n #return telling van sortering\n return ([len(L[0]),len(L[1]), len(L[2]), len(L[3]), len(L[4]), len(L[5])])\n\n#functie voor het uitvoeren van de pestkaart twee\ndef kaart_twee(gespeeld,deck,handen,volgorde,beurt):\n #check instelling\n if instelling_twee == 'ja':\n #tel 2'en in hand volgende speler\n A = kaart_teller(handen[volgorde[(beurt+1)%len(volgorde)]])\n print(volgorde[beurt], 'heeft een twee gespeeld!')\n time.sleep(3)\n if A[-1] == 0:\n #Als geen 2'en in hand, pak twee kaarten\n print(volgorde[(beurt+1)%len(volgorde)], 'moet twee kaarten pakken')\n time.sleep(3)\n spelspelen.kaart_pakken(handen[volgorde[(beurt+1)%len(volgorde)]],deck)\n spelspelen.kaart_pakken(handen[volgorde[(beurt+1)%len(volgorde)]],deck)\n beurt = beurt + 1\n else:\n #Als wel 2'en in hand\n speler_verschil = 1\n aantal_twee = A[-1]\n while aantal_twee != 0:\n #Check handen volgende spelers totdat er een speler is zonder 2'en.\n aantal_twee = kaart_teller(handen[volgorde[(beurt+speler_verschil)%len(volgorde)]])[-1]\n if aantal_twee != 0:\n print(volgorde[(beurt+speler_verschil)%len(volgorde)], 'heeft ook een twee!')\n time.sleep(3)\n index = len(handen[volgorde[(beurt+speler_verschil)%len(volgorde)]])-1\n #kijk waar twee in hand zit\n while handen[volgorde[(beurt+speler_verschil)%len(volgorde)]][index].waarde != '2':\n index = index-1\n gespeeld.append(handen[volgorde[(beurt+speler_verschil)%len(volgorde)]].pop(index))\n speler_verschil = speler_verschil+1\n #stel vast het aantal te pakken kaarten \n aantal = len(gespeeld)*2 \n print(volgorde[(beurt+speler_verschil-1)%len(volgorde)], 'moet',aantal,'kaarten pakken')\n time.sleep(3)\n k = 0\n #pak alle kaarten\n while k != aantal:\n spelspelen.kaart_pakken(handen[volgorde[(beurt+speler_verschil-1)%len(volgorde)]],deck)\n k = k+1 \n beurt = beurt + speler_verschil - 1\n return ([gespeeld,deck,handen,beurt]) \n\n#functie voor de pestkaart zeven \ndef kaart_zeven(beurt):\n #checkt instelling\n if instelling_zeven == 'ja':\n #beurt 1 plaats terug 
(wordt later met 1 verhoogt)\n beurt = beurt - 1\n return beurt\n\n#functie voor de pestkaart acht\ndef kaart_acht(beurt):\n #checkt instelling\n if instelling_acht == 'ja':\n #voegt 1 toe bij de beurt zodat de volgende speler wordt overgeslagen\n beurt = beurt + 1\n return beurt\n\n#functie voor de pestkaart tien \ndef kaart_tien(handen,volgorde,beurt):\n #checkt instelling\n if instelling_tien == 'ja':\n a=len(handen[volgorde[beurt]])\n #voeg elke kaart uit de hand van de volgende speler toe aan de hand van de beurtspeler\n for i in range(len(handen[volgorde[(beurt+1)%len(volgorde)]])):\n handen[volgorde[beurt]].append(handen[volgorde[(beurt+1)%len(volgorde)]][i])\n #maak de hand van de volgende speler leeg\n handen[volgorde[(beurt+1)%len(volgorde)]].clear()\n #voeg de 'oude' kaarten uit de hand van de beurtspeler toe aan de hand van de volgende speler\n for i in range(a):\n handen[volgorde[(beurt+1)%len(volgorde)]].append(handen[volgorde[beurt]][i])\n #verwijder de 'oude' kaarten uit de hand van de beurtspeler\n for i in range(a):\n handen[volgorde[beurt]].remove(handen[volgorde[beurt]][0])\n #laat de menselijke speler weten wat er precies gebeurt\n print(volgorde[beurt],'heeft handen gewisseld met',volgorde[(beurt+1)%len(volgorde)],'!')\n time.sleep(3)\n return handen\n\n#functie voor de pestkaart boer \ndef kaart_boer(gespeeld,volgorde,beurt,handen):\n #checkt instelling\n if instelling_boer == 'ja':\n #als de speler aan de beurt is: vraag om het nieuwe symbool\n if volgorde[beurt] == 'speler': \n print('Welk symbool wilt u spelen?')\n symbool_input = input('Uw keuze is: ')\n #zolang input niet voldoet aan 1 cvan de symbolen, vraag opnieuw\n while symbool_input != 'schoppen' and symbool_input != 'harten' and symbool_input != 'klaveren' and symbool_input != 'ruiten':\n print('U kunt kiezen uit: schoppen, klaveren, ruiten of harten')\n symbool_input = input('Uw keuze is: ')\n #voeg vervolgens de gekozen boer toe aan de pot\n gespeeld.append(klassen.kaart(str(symbool_input),'B'))\n else:\n #tegenstander checkt van welk symbool hij de meeste heeft\n aantal_symbolen = kaart_teller(handen[volgorde[beurt]])\n #verwijdert de telling van de joker en 2'en\n aantal_symbolen.remove(aantal_symbolen[-1])\n aantal_symbolen.remove(aantal_symbolen[-1])\n #selecteer het symbool waarvan de meeste zijn\n max_symbool = max(aantal_symbolen)\n i = 0\n #zoekt door gebruik van de index weer naar de naam van het symbool waarvan de meeste zijn.\n while aantal_symbolen[i] != max_symbool:\n i = i + 1\n symbolen = ['schoppen','ruiten','harten','klaveren']\n print(volgorde[beurt], 'heeft de pot veranderd in', symbolen[i],'!')\n time.sleep(3)\n #voegt vervolgens de gekozen boer toe aan de pot voor de goede weergave van de zet\n gespeeld.append(klassen.kaart(symbolen[i],'B'))\n #geeft gespeeld terug met daarin de boer\n return gespeeld\n\n#functie voor de pestkaart heer\ndef kaart_heer(beurt):\n #checkt instelling\n if instelling_heer == 'ja':\n #beurt 1 plaats terug (wordt later met 1 verhoogd)\n beurt = beurt - 1\n return beurt\n\n#functie voor de pestkaart aas\ndef kaart_aas(beurt,volgorde):\n #checkt instelling\n if instelling_aas == 'ja':\n #maak nieuwe volgorde aan en bepaal index\n nieuwevolgorde=[]\n i = beurt\n #Ga alle spelers in volgorde langs en voeg zij toe aan nieuwe volgorde door steeds de persoon voor beurt te kiezen.\n while i >= 0:\n nieuwevolgorde.append(volgorde[i])\n i = i - 1\n for i in range(1,len(volgorde)-beurt):\n nieuwevolgorde.append(volgorde[-i])\n #stel beurt weer in om bij het begin van 
volgorde te beginnen\n beurt = 0\n print(nieuwevolgorde[beurt],'heeft de volgorde omgedraaid!')\n time.sleep(3)\n #return de beurt en de nieuwe volgorde\n return ([beurt,nieuwevolgorde])\n #als instelling nee, return meteen beurt en volgorde\n else:\n return ([beurt,volgorde])\n\n#functie voor de pestkaart joker\ndef kaart_joker(handen,deck,volgorde,beurt):\n #check instelling \n if instelling_joker == 'ja':\n # voer de functie kaartpakken 5 keer uit voor de volgende speler\n for i in range(5):\n spelspelen.kaart_pakken(handen[volgorde[(beurt+1)%len(volgorde)]],deck)\n # return alle handen en het deck\n return ([handen,deck])\n","sub_path":"Pesten/pestkaarten.py","file_name":"pestkaarten.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"62052758","text":"from collections import defaultdict\n\nd = defaultdict(int)\nwith open('q1.txt', mode='r') as f:\n for l in f.readlines():\n for c in l:\n if 97 <= ord(c) <= 122:\n d[c] += 1\n elif 65 <= ord(c) <= 90:\n d[c.lower()] += 1\nfor c, h in sorted(d.items(), key=lambda o: o[0]):\n print(c, h)\n","sub_path":"2007_summer/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"296639489","text":"import os\nimport tempfile\nfrom io import StringIO\nfrom logging.config import fileConfig\nfrom unittest import TestCase\n\nimport pandas as pd\n\nfrom algorithms.RelationExtractionLinearFactory import RelationExtractionLinearFactory\nfrom algorithms.result_scorer import ResultScorer\n\n\nclass TestSitRelationExtractionLinearFactory(TestCase):\n def setUp(self):\n fileConfig(os.path.join(os.path.dirname(__file__), 'logger.ini'))\n\n def test_call(self):\n # Arrange\n out_dir= tempfile.mkdtemp()\n embedding = StringIO(\"\\n\".join([\"hat 0.2 .34 0.8\", \"mat 0.5 .34 0.8\", \"entity1 0.5 .55 0.8\", \"entity2 0.3 .55 0.9\"]))\n sut = RelationExtractionLinearFactory(class_size=2, embedding_handle=embedding, embedding_dim=3, ngram=1,\n output_dir=out_dir, pos_label=\"1\")\n\n train_df = [[\"This is good\", \"entity1\", \"entity2\"],\n [\"this is a cat not a hat\", \"mat protein\", \"cat protein\"]]\n\n val_data = [[\"This is hat\", \"entity1\", \"entity2\"],\n [\"this is a cat not a mat\", \"mat protein\", \"cat protein\"]]\n\n labels = [\"1\", \"0\"]\n cols =['abstract', 'entity1', 'entity2']\n train_df = pd.DataFrame(train_df, columns=cols)\n val_df = pd.DataFrame(val_data,columns=cols)\n\n # Act\n actual = sut(train_df, labels, val_df, labels)\n\n def test_predict(self):\n # Arrange\n out_dir = tempfile.mkdtemp()\n embedding = StringIO(\n \"\\n\".join([\"hat 0.2 .34 0.8\", \"mat 0.5 .34 0.8\", \"entity1 0.5 .55 0.8\", \"entity2 0.3 .55 0.9\"]))\n pos_label = 1\n sut = RelationExtractionLinearFactory(class_size=2, embedding_handle=embedding, embedding_dim=3, ngram=1,\n output_dir=out_dir, pos_label=pos_label)\n\n train_df = [[\"This is good\", \"entity1\", \"entity2\"],\n [\"this is a cat not a hat\", \"mat protein\", \"cat protein\"]]\n\n val_data = [[\"This is hat\", \"entity1\", \"entity2\"],\n [\"this is a cat not a mat\", \"mat protein\", \"cat protein\"]]\n\n labels = [1, 0]\n cols = ['abstract', 'entity1', 'entity2']\n train_df = pd.DataFrame(train_df, columns=cols)\n val_df = pd.DataFrame(val_data, columns=cols)\n\n model, expected_scores, expected_actual, expected_predicted = sut(train_df, labels, val_df, labels)\n\n predictor = 
RelationExtractionLinearFactory.load(out_dir)\n\n        scorer = ResultScorer()\n\n        # Act\n        actual = predictor(val_df)\n\n        self.assertSequenceEqual(expected_scores, scorer(y_pred=actual, y_actual=labels, pos_label=1))\n","sub_path":"tests/test_algorithms/test_sit_relationExtractionLinearFactory.py","file_name":"test_sit_relationExtractionLinearFactory.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"301922518","text":"from flask_restplus import Namespace, Resource, reqparse\nfrom dateutil import parser as datetime_parser\nfrom models import *\n\n\napi = Namespace('Loan History', description='Loans related operations')\n\nparser = reqparse.RequestParser()\nparser.add_argument('book_id', help='The identifier of the book loaned out')\nparser.add_argument('borrower_id', help='The user_id of the borrower of the book')\nparser.add_argument('due', help='The due date of the book')\nparser.add_argument('actual_return_date', help='The actual return date of the book')\n\npost_parser = parser.copy()\npost_parser.remove_argument('actual_return_date')\npost_parser.replace_argument('book_id', help='The identifier of the book loaned out', required=True)\npost_parser.replace_argument('borrower_id', help='The user_id of the borrower of the book', required=True)\n\n\n@api.route('/')\nclass Loans(Resource):\n    @api.doc('create_loan')\n    @api.doc(responses={\n        201: 'Created',\n        400: 'Validation Error',\n        404: 'Book or User Not Found',\n    })\n    @api.expect(post_parser)\n    def post(self):\n        '''create a loan'''\n        args = post_parser.parse_args()\n        book_id = args['book_id']\n        book = Book.query.get_or_404(book_id)\n        if book.LoanedOut:\n            return \"The book is already loaned out\", 400\n        new_loan_history = LoanHistory(BookId=args['book_id'],\n                                       BorrowerId=args['borrower_id'])\n        due = args['due']\n        if due is not None:\n            new_loan_history.Due = datetime_parser.parse(due)\n        db.session.add(new_loan_history)\n        book.LoanedOut = True\n        db.session.flush()\n        db.session.commit()\n        return new_loan_history.serialize(), 201\n\n    @api.doc('get_loan')\n    @api.doc(responses={\n        200: 'Success',\n        400: 'Validation Error'\n    })\n    @api.expect(parser)\n    def get(self):\n        '''get all loans given constraints'''\n        args = parser.parse_args()\n        book_id = args['book_id']\n        borrower_id = args['borrower_id']\n        due = args['due']\n        actual_return_date = args['actual_return_date']\n\n        queries = []\n        if book_id is not None:\n            queries.append(LoanHistory.BookId == book_id)\n        if borrower_id is not None:\n            queries.append(LoanHistory.BorrowerId == borrower_id)\n        if due is not None:\n            due = datetime_parser.parse(due)\n            queries.append(LoanHistory.Due == due)\n        if actual_return_date is not None:\n            actual_return_date = datetime_parser.parse(actual_return_date)\n            queries.append(LoanHistory.ActualReturnDate == actual_return_date)\n\n        loan_list = db.session.query(LoanHistory).filter(*queries).order_by(LoanHistory.LoanId).all()\n        return Serializer.serialize_list(loan_list), 200\n\n\n@api.route('/<loan_id>')\n@api.param('loan_id', 'The loan identifier')\n@api.response(404, 'Loan Not Found')\nclass LoanOfID(Resource):\n    @api.doc(responses={\n        200: 'Success',\n    })\n    @api.doc('get_loan')\n    def get(self, loan_id):\n        '''Fetch a loan given its identifier'''\n        loan = LoanHistory.query.get_or_404(loan_id)\n        return loan.serialize(), 200\n\n    @api.doc(responses={\n        200: 'Success',\n    })\n    @api.doc(params={'due': 'The due date of the book'})\n    @api.doc(params={'actual_return_date': 'The actual return date 
of the book'})\n @api.expect(parser)\n def put(self, loan_id):\n '''Update the content of a loan given its identifier'''\n loan = LoanHistory.query.get_or_404(loan_id)\n args = parser.parse_args()\n due = args['due']\n actual_return_date = args['actual_return_date']\n if due is not None:\n loan.Due = datetime_parser.parse(due)\n if actual_return_date is not None:\n # TODO: check if return date is < current date\n loan.ActualReturnDate = datetime_parser.parse(actual_return_date)\n book = Book.query.get_or_404(loan.BookId)\n if book.LoanedOut:\n book.LoanedOut = False\n db.session.commit()\n return loan.serialize(), 200\n\n @api.doc(responses={\n 204: 'Deleted',\n })\n def delete(self, loan_id):\n '''Delete a note given its identifier'''\n loan = LoanHistory.query.get_or_404(loan_id)\n book = Book.query.get_or_404(loan.BookId)\n if book.LoanedOut:\n book.LoanedOut = False\n LoanHistory.query.filter_by(LoanId=loan_id).delete()\n db.session.commit()\n return 'Success', 204\n","sub_path":"apis/loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"106654362","text":"from .logs import Registry\n\n\nlog = Registry()\n\n\nrename = log.rename\nupdate = log.update\n\n\ndebug = log.debug\ninfo = log.info\nwarn = log.warn\nwarning = log.warning\nerror = log.error\nexception = log.error\ncritical = log.critical\n\n# from .enums import Levels\n# from .formatters import GenericFormatter\n# from .handlers import FileHandler\n#\n#\n#\n#\n#\n#\n# DefaultConfig = {\n# \"disabled\": True,\n# \"formatters\": {\n# \"default\": {\n# \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n# \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\"\n# }\n# },\n# \"handlers\": {\n# \"console\": {\n# \"class\": \"logging.StreamHandler\",\n# \"level\": \"DEBUG\",\n# \"formatter\": \"default\",\n# \"stream_\": \"ext://sys.stdout\"\n# },\n# \"logfile\": {\n# \"class\": \"logging.handlers.RotatingFileHandler\",\n# \"level\": \"DEBUG\",\n# \"formatter\": \"default\",\n# \"maxBytes\": 25000,\n# \"backupCount\": 2,\n# \"filename\": \"/logs/application.log\"\n# }\n# },\n# \"loggers\": {\n# \"%NAME%\": {\n# \"level\": \"DEBUG\",\n# \"handlers\": [\"console\", \"logfile\"],\n# \"propagate\": False\n# }\n# }\n# }","sub_path":"fuze/log/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"192861933","text":"import json\nimport os\nfrom datetime import datetime, timezone\nfrom json.decoder import JSONDecodeError\nfrom os import path, remove\nfrom shutil import rmtree, move\nfrom tempfile import mkdtemp\n\nfrom retriever import reload_scripts, dataset_names\nfrom retriever import download\nfrom retriever.lib.engine_tools import getmd5\nfrom retriever.lib.defaults import HOME_DIR\n\nimport sys\nimport csv\n\ntry:\n from .status_dashboard_tools import get_dataset_md5\n from .status_dashboard_tools import diff_generator, diff_generator_spatial, data_shift\n from .status_dashboard_tools import create_dirs\n from .status_dashboard_tools import dataset_type, join_path, delete_raw_data\n from .status_dashboard_tools import install_postgres\nexcept ImportError as error:\n try:\n from retrieverdash.dashboard_script.status_dashboard_tools import get_dataset_md5\n from retrieverdash.dashboard_script.status_dashboard_tools import diff_generator, diff_generator_spatial, data_shift\n from 
retrieverdash.dashboard_script.status_dashboard_tools import create_dirs\n from retrieverdash.dashboard_script.status_dashboard_tools import dataset_type, join_path, delete_raw_data\n from retrieverdash.dashboard_script.status_dashboard_tools import install_postgres\n except ImportError as error:\n pass\n# To set location of the path\nfile_location = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))\n\n# To prevent field size errors when converting to csv\nmaxInt = sys.maxsize\ndecrement = True\nwhile decrement:\n try:\n csv.field_size_limit(maxInt)\n decrement = False\n except OverflowError:\n maxInt = int(maxInt / 10)\n\n# The DEV_LIST, useful for testing on less strong machines.\nDEV_LIST = ['iris', 'poker-hands', 'harvard-forest', 'titanic']\nIGNORE = [\n 'activity-timberharvest',\n 'covid-case-surveillance',\n 'aquatic-animal-excretion',\n 'lakecats-final-tables',\n # Amazon\n 'baltimore-restaurants',\n 'fernow-soil-productivity',\n 'nlcd-imperviousness-conus',\n 'white-clay-creek-avondale-soil',\n 'white-clay-creek-boulton-chemistry',\n 'white-clay-creek-chlorophyll',\n 'white-clay-creek-christina-chemistry',\n 'white-clay-creek-christina-sediment',\n 'white-clay-creek-christina-temperatures',\n 'white-clay-creek-streamflow',\n 'white-clay-creek-swrc-meteorology',\n 'white-clay-creek-waterlevels',\n 'white-clay-dissolved-carbon',\n 'white-clay-dissolved-carbon',\n 'usda-agriculture-plants-database',\n 'vertnet',\n 'vertnet-amphibian',\n 'vertnet-bird',\n 'vertnet-fishe',\n 'vertnet-mammal',\n 'vertnet-reptiles'\n]\n\nDATASET_DETAIL_JSON = os.path.join(file_location, \"dataset_details.json\")\nCURRENT_PATH = os.path.join(file_location, 'current')\nDATASET_DATA_FOLDER = os.path.join(file_location, 'current', '{dataset_name}')\n\n\ndef check_dataset(dataset):\n md5 = None\n status = None\n reason = None\n diff = None\n dataset_detail = None\n previous_md5 = \"\"\n\n try:\n dataset_detail = load_dataset_details()\n previous_detail_records = \"dataset_details\" in dataset_detail and dataset_detail[\n \"dataset_details\"]\n dataset_has_record = dataset.name in dataset_detail['dataset_details']\n if previous_detail_records and dataset_has_record:\n previous_md5 = dataset_detail['dataset_details'][dataset.name]['md5']\n\n if dataset_type(dataset) == 'spatial':\n install_postgres(dataset)\n dir_path = DATASET_DATA_FOLDER.format(dataset_name=dataset.name)\n md5 = getmd5(dir_path, data_type='dir')\n if not dataset_has_record or md5 != previous_md5:\n diff = diff_generator_spatial(dataset)\n else:\n remove_old_diff(dataset)\n data_shift(dataset, is_spatial=True)\n else:\n md5 = get_dataset_md5(dataset)\n if not dataset_has_record or md5 != previous_md5:\n diff = diff_generator(dataset)\n else:\n remove_old_diff(dataset)\n data_shift(dataset)\n status = True\n except Exception as e:\n reason = str(e)\n status = False\n finally:\n json_file_details = dataset_detail\n json_file_details[\"dataset_details\"][dataset.name] = {\n \"md5\": md5,\n \"status\": status,\n \"reason\": reason,\n \"diff\": diff\n }\n json_file_details[\"last_checked_on\"] = datetime.now(\n timezone.utc).strftime(\"%d %b %Y\")\n dataset_details_write = open(DATASET_DETAIL_JSON, 'w')\n json.dump(json_file_details, dataset_details_write, sort_keys=True, indent=4)\n dataset_details_write.close()\n delete_raw_data(dataset)\n\n\ndef remove_old_diff(dataset):\n \"\"\"Delete old html diffs from previous run\"\"\"\n for keys in dataset.tables:\n file_name = '{}.{}'.format(dataset.name.replace('-', '_'), keys)\n 
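        # Editor's note (illustrative, assumed example): for a dataset named
        # 'harvard-forest' with a table 'tree', file_name is
        # 'harvard_forest.tree', so the stale diff removed below would be
        # diffs/harvard_forest.tree.html left over from the previous run.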
html_file_name = '{}.html'.format(file_name)\n old_diff = os.path.join(file_location, 'diffs', html_file_name)\n if os.path.exists(old_diff):\n remove(old_diff)\n\n\ndef load_dataset_details():\n \"\"\"Read dataset details from last run \"\"\"\n try:\n with open(DATASET_DETAIL_JSON, 'r') as json_file:\n dataset_detail = json.load(json_file)\n except (OSError, JSONDecodeError):\n dataset_detail = dict()\n dataset_detail['dataset_details'] = {}\n\n for dataset_ignore in IGNORE:\n if dataset_detail['dataset_details'] and dataset_ignore in dataset_detail[\n 'dataset_details']:\n dataset_detail['dataset_details'].pop(dataset_ignore)\n return dataset_detail\n\n\ndef run():\n create_dirs()\n datasets_to_check = []\n\n if os.environ.get(\"RETR_TEST\") == \"true\":\n datasets_to_check = [\n script for script in reload_scripts() if script.name in DEV_LIST\n ]\n else:\n datasets_to_check = [\n script for script in reload_scripts() if script.name not in IGNORE\n ]\n\n for dataset in datasets_to_check:\n print(\"Checking dataset {}:\".format(dataset.name))\n check_dataset(dataset)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"retrieverdash/dashboard_script/dashboard_script.py","file_name":"dashboard_script.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"491858525","text":"# Regression Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\n\n# Importing the dataset\nxt = pd.read_csv('dengue_features_train.csv')\nyt = pd.read_csv('dengue_labels_train.csv')\nwt = pd.read_csv('dengue_features_test.csv')\n\nX = xt.iloc[:,4:].values\ny = yt.iloc[:, 3].values\nw = wt.iloc[:,4:].values\n\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(X)\nX = imputer.transform(X)\n\nimputer = imputer.fit(w)\nw = imputer.transform(w)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n# Fitting the Regression Model to the dataset\nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators=300,random_state=0)\nregressor.fit(X,y)\n\n# Predicting a new result\ny_pred = regressor.predict(w)\n\nfilename = 'model.pkl'\npickle.dump(regressor, open(filename, 'wb'))","sub_path":"DenguePredictor.py","file_name":"DenguePredictor.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"67244461","text":"import sys\r\nimport socket\r\nimport time\r\nimport threading \r\nimport queue\r\nimport random\r\nimport select\r\nfrom buffer import Buffer\r\nfrom pacote import Pacote\r\n\r\nclass Cliente:\r\n\tdef __init__(self, log, hostPort, tamanhoJanela, temporizador, pError):\r\n\t\tself.nomeLog = log\r\n\t\tself.host = hostPort[0]\r\n\t\tself.port = int(hostPort[1])\r\n\t\tself.temporizador = int(temporizador)\r\n\t\tself.pError = float(pError)\r\n\r\n\t\tself.sock = self.iniciaSock()\r\n\t\tself.janela = Buffer(int(tamanhoJanela)) \r\n\t\tself.filaDeEspera = queue.Queue(maxsize = int(tamanhoJanela)) # waiting queue for the window\r\n\r\n\t\tself.logsTransmitidos = 0\r\n\t\tself.logsDistintosTransmitidos = 0\r\n\t\tself.logsIncorretosTransmitidos = 0\r\n\t\t\r\n\t\tself.enviando = True # indicates whether filaDeEspera is still being fed\r\n\t\tself.confirmados = False # indicates whether every packet has been acknowledged\r\n\t\tself.permissaoSock = threading.Lock() # permission to use the sock\r\n\r\n\tdef iniciaSock(self):\r\n\t\ttry:\r\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,0)\r\n\t\texcept socket.error:\r\n\t\t\tprint (\"Failed to create socket\")\r\n\t\t\tsys.exit()\r\n\t\r\n\t\treturn sock\r\n\r\n\tdef abrirArquivo(self, arquivo):\r\n\t\tfor linha in open(arquivo):\r\n\t\t\tyield linha, \"%.20f\"%time.time()\r\n\r\n\tdef lerArquivoLog(self):\r\n\t\tidentificador = 0\r\n\t\tlinhas = self.abrirArquivo(self.nomeLog)\r\n\r\n\t\tfor linha, timestamp in linhas:\r\n\t\t\tsegundos, nanosegundos = timestamp.split('.')\r\n\t\t\tpacote = Pacote(identificador, segundos, nanosegundos, linha[:-1]) # linha[:-1] strips the trailing \\n\r\n\t\t\tself.filaDeEspera.put(pacote, block = True)\r\n\t\t\tidentificador += 1\r\n\r\n\t\tself.enviando = False\r\n\r\n\t\treturn\r\n\r\n\tdef cuidarDaTransmissao(self, pacote):\r\n\t\twhile not pacote.confirmado and not self.confirmados:\r\n\t\t\terro = random.random()\r\n\r\n\t\t\tif erro > self.pError:\r\n\t\t\t\tself.sock.sendto(pacote.pacoteParaRede(erro=False), (self.host, self.port))\r\n\t\t\telse:\r\n\t\t\t\tself.sock.sendto(pacote.pacoteParaRede(erro=True), (self.host, self.port))\r\n\t\t\t\tself.logsIncorretosTransmitidos += 1\r\n\r\n\t\t\tself.logsTransmitidos += 1\r\n\t\t\ttime.sleep(self.temporizador)\r\n\r\n\t\treturn\r\n\r\n\tdef confirmarPacote(self, identificador):\r\n\t\tfor pacote in self.janela.dados:\t\r\n\t\t\tif not(pacote == None) and pacote.identificador == identificador:\r\n\t\t\t\tpacote.confirmado = True\r\n\t\t\t\tbreak\r\n\r\n\tdef escutarServidor(self):\r\n\t\tdados = None\r\n\t\twhile not self.confirmados:\r\n\t\t\tentrada = None\r\n\t\t\tentrada, saida, excecao = select.select([self.sock], [], [], 10)\r\n\r\n\t\t\tif entrada:\r\n\t\t\t\tdados, _ = self.sock.recvfrom(36) # 36 is the header size\r\n\t\t\t\tpacoteRecebido, md5Recebido = Pacote.redeParaPacote(dados, texto = False)\r\n\t\t\t\tif pacoteRecebido.verificaMD5(md5Recebido):\r\n\t\t\t\t\tself.confirmarPacote(pacoteRecebido.identificador)\r\n\r\n\t\treturn\r\n\r\n\tdef enviarPacotes(self):\r\n\t\tnovoItens = True\r\n\t\tself.janela.insere(self.filaDeEspera.get(block = True)) # first packet\r\n\r\n\t\touvinte = threading.Thread(target = self.escutarServidor)\r\n\t\touvinte.start()\r\n\r\n\t\twhile self.janela.contemItens() or self.enviando :\r\n\t\t\twhile self.janela.temEspaco() and novoItens:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.janela.insere(self.filaDeEspera.get(block = True, timeout = 5))\r\n\t\t\t\texcept queue.Empty:\r\n\t\t\t\t\tif not self.enviando:\r\n\t\t\t\t\t\tnovoItens = False\r\n\r\n\t\t\tfor pacote in self.janela.dados:\r\n\t\t\t\tif not(pacote == None) and not pacote.enviado:\r\n\t\t\t\t\tself.logsDistintosTransmitidos += 1\r\n\t\t\t\t\tpacote.enviado = True\r\n\t\t\t\t\tenvio = threading.Thread(target = self.cuidarDaTransmissao, \r\n\t\t\t\t\t\targs = [pacote])\r\n\r\n\t\t\t\t\tenvio.start()\r\n\r\n\t\t\tfor posicao in range (len(self.janela.dados)):\r\n\t\t\t\tif not(self.janela.dados[posicao] == None) and self.janela.dados[posicao].confirmado:\r\n\t\t\t\t\tself.janela.liberaEspaco(posicao)\r\n\r\n\t\tself.confirmados = True # flag that keeps the listener alive\r\n\t\touvinte.join()\r\n\r\n\t\treturn\r\n\r\n\tdef __str__(self):\r\n\t\treturn f'''log file name : {self.nomeLog}\r\n\t\t\\rHOST: {self.host}\r\n\t\t\\rPORT: {self.port}\r\n\t\t\\rsliding window size: {self.janela.tamanho}\r\n\t\t\\rtimer: {self.temporizador}\r\n\t\t\\rMD5 error probability: {self.pError}'''\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) < 6:\r\n\t\tprint('Incorrect initialization:')\r\n\t\tsys.exit()\r\n\r\n\ttempoInicial = time.time()\r\n\tcliente = Cliente(sys.argv[1], sys.argv[2].split(':'), sys.argv[3], sys.argv[4], sys.argv[5])\r\n\r\n\tthreadLendoLog = threading.Thread(target = cliente.lerArquivoLog)\r\n\tthreadEnviandoPacotes = threading.Thread(target = cliente.enviarPacotes)\r\n\r\n\tthreadLendoLog.start()\r\n\tthreadEnviandoPacotes.start()\r\n\r\n\tthreadLendoLog.join()\r\n\tthreadEnviandoPacotes.join()\r\n\t\r\n\ttempoDeExecucao = '%.3f'%(time.time() - tempoInicial)\r\n\tprint(f'{cliente.logsDistintosTransmitidos} {cliente.logsTransmitidos} {cliente.logsIncorretosTransmitidos} {tempoDeExecucao}')","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"312065343","text":"import torch.nn.functional as F\nimport torch.nn as nn\nimport torch\n\n\"\"\"\n This script defines the structure of the discriminator.\n According to the original Re-cycle GAN paper, the discriminator\n is a 70x70 PatchGAN, which is also used in the official CycleGAN\n implementation.\n Thus we borrow the implementation from: https://github.com/aitorzip/PyTorch-CycleGAN/blob/master/models.py\n\"\"\"\n\nclass Discriminator(nn.Module):\n def __init__(self, n_in = 3, r = 1):\n super().__init__()\n\n # A bunch of convolutions one after another\n model = [ nn.Conv2d(n_in, 64 // r, 4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n\n model += [ nn.Conv2d(64 // r, 128 // r, 4, stride=2, padding=1),\n nn.InstanceNorm2d(128 // r), \n nn.LeakyReLU(0.2, inplace=True) ]\n\n model += [ nn.Conv2d(128 // r, 256 // r, 4, stride=2, padding=1),\n nn.InstanceNorm2d(256 // r), \n nn.LeakyReLU(0.2, inplace=True) ]\n\n model += [ nn.Conv2d(256 // r, 512 // r, 4, padding=1),\n nn.InstanceNorm2d(512 // r), \n nn.LeakyReLU(0.2, inplace=True) ]\n\n # FCN classification layer\n model += [nn.Conv2d(512 // r, 1, 4, padding=1)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n x = self.model(x)\n # Average pooling and flatten\n return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1)","sub_path":"lib/model/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"160238373","text":"import tensorflow as tf\nimport numpy as np\nimport time\nimport pandas as pd\nimport logging\nimport os\nimport sentencepiece as spm\n#tf.enable_eager_execution()\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\n\nfrom sklearn.model_selection import train_test_split\n\n\nfile1=open('english.txt','r',encoding='utf-8')\nfile2=open('gujarati.txt','r',encoding='utf-8')\n\nt1=[]\nfor i in file1.readlines():\n t1.append(i[:-1])\nt2=[]\nfor i in file2.readlines():\n t2.append(i[:-1])\n\n\nraw_data = pd.DataFrame(list(zip(t1, t2)), columns =['eng', 'guj'])\n\n# split data into train and test set\ntrain, test = train_test_split(raw_data.values, test_size=0.3, random_state = 12)
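# Editor's sketch (added for illustration; not part of the original script):
# the SentencePiece round trip used in the preprocessing below, assuming a
# subword model file such as 'e.model' has already been trained:
#
#     sp_demo = spm.SentencePieceProcessor()
#     sp_demo.load('e.model')
#     pieces = sp_demo.encode_as_pieces('hello world')  # e.g. ['▁hello', '▁world']
#     sp_demo.decode_pieces(pieces)                     # -> 'hello world'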
\n\n# data preprocessing\nraw_data_en=list(train[:,0])\nraw_data_fr=list(train[:,1])\n\nspm.SentencePieceTrainer.train('--input=gujarati.txt --model_prefix=g --vocab_size=2000')\nsp = spm.SentencePieceProcessor()\nsp.load('g.model')\n\nspm.SentencePieceTrainer.train('--input=english.txt --model_prefix=e --vocab_size=2000')\nsp1 = spm.SentencePieceProcessor()\nsp1.load('e.model')\n\n\nfor i in range(len(raw_data_en)):\n raw_data_en[i]=\" \".join(sp1.encode_as_pieces(raw_data_en[i]))\n raw_data_fr[i]=\" \".join(sp.encode_as_pieces(raw_data_fr[i]))\n\nraw_data_fr_in = ['<start> ' + data for data in raw_data_fr]\nraw_data_fr_out = [data + ' <end>' for data in raw_data_fr]\n\ntest_en=list(test[:,0])\n\ntest_fr=list(test[:,1])\n\n# tokenization\nen_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\nen_tokenizer.fit_on_texts(raw_data_en)\ndata_en = en_tokenizer.texts_to_sequences(raw_data_en)\ndata_en = tf.keras.preprocessing.sequence.pad_sequences(data_en,padding='post')\n\nfr_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\nfr_tokenizer.fit_on_texts(raw_data_fr_in)\nfr_tokenizer.fit_on_texts(raw_data_fr_out)\ndata_fr_in = fr_tokenizer.texts_to_sequences(raw_data_fr_in)\ndata_fr_in = tf.keras.preprocessing.sequence.pad_sequences(data_fr_in,padding='post')\n\ndata_fr_out = fr_tokenizer.texts_to_sequences(raw_data_fr_out)\ndata_fr_out = tf.keras.preprocessing.sequence.pad_sequences(data_fr_out,padding='post')\n\nBATCH_SIZE = 32\ndataset = tf.data.Dataset.from_tensor_slices(\n (data_en, data_fr_in, data_fr_out))\ndataset = dataset.shuffle(20).batch(BATCH_SIZE)\n\n\n# positional embedding\ndef positional_embedding(pos, model_size):\n PE = np.zeros((1, model_size))\n for i in range(model_size):\n if i % 2 == 0:\n PE[:, i] = np.sin(pos / 10000 ** (i / model_size))\n else:\n PE[:, i] = np.cos(pos / 10000 ** ((i - 1) / model_size))\n return PE\n\nmax_length = max(len(data_en[0]), len(data_fr_in[0]))\nMODEL_SIZE = 512\n\npes = []\nfor i in range(max_length):\n pes.append(positional_embedding(i, MODEL_SIZE))\n\npes = np.concatenate(pes, axis=0)\npes = tf.constant(pes, dtype=tf.float32)\n\n\n# Multi-head attention\nclass MultiHeadAttention(tf.keras.Model):\n def __init__(self, model_size, h):\n super(MultiHeadAttention, self).__init__()\n self.query_size = model_size // h\n self.key_size = model_size // h\n self.value_size = model_size // h\n self.h = h\n self.wq = [tf.keras.layers.Dense(self.query_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(h)]\n self.wk = [tf.keras.layers.Dense(self.key_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(h)]\n self.wv = [tf.keras.layers.Dense(self.value_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(h)]\n self.wo = tf.keras.layers.Dense(model_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01))\n\n def call(self, decoder_output, encoder_output):\n # decoder_output has shape (batch, decoder_len, model_size)\n # encoder_output has shape (batch, encoder_len, model_size)\n heads = []\n for i in range(self.h):\n score = tf.matmul(self.wq[i](decoder_output), self.wk[i](encoder_output), transpose_b=True) / tf.math.sqrt(tf.dtypes.cast(self.key_size, tf.float32))\n # score has shape (batch, decoder_len, encoder_len)\n alignment = tf.nn.softmax(score, axis=2)\n # alignment has shape (batch, decoder_len, encoder_len)\n head = tf.matmul(alignment, self.wv[i](encoder_output))\n # head has shape (batch, decoder_len, 
value_size)\n heads.append(head)\n heads = tf.concat(heads, axis=2)\n heads = self.wo(heads)\n # heads has shape (batch, decoder_len, model_size)\n return heads\n \n\n# Encoder\nclass Encoder(tf.keras.Model):\n def __init__(self, vocab_size, model_size, num_layers, h):\n super(Encoder, self).__init__()\n self.model_size = model_size\n self.num_layers = num_layers\n self.h = h\n self.embedding = tf.keras.layers.Embedding(vocab_size, model_size,embeddings_regularizer=tf.keras.regularizers.L2(l2=0.01))\n self.attention = [MultiHeadAttention(model_size, h) for _ in range(num_layers)]\n \n self.attention_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n self.dense_1 = [tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.dense_2 = [tf.keras.layers.Dense(model_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.ffn_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n def call(self, sequence):\n sub_in = []\n for i in range(sequence.shape[1]):\n embed = self.embedding(tf.expand_dims(sequence[:, i], axis=1))\n sub_in.append(embed + pes[i, :])\n \n sub_in = tf.concat(sub_in, axis=1)\n \n for i in range(self.num_layers):\n sub_out = []\n for j in range(sub_in.shape[1]):\n attention = self.attention[i](\n tf.expand_dims(sub_in[:, j, :], axis=1), sub_in)\n\n sub_out.append(attention)\n\n sub_out = tf.concat(sub_out, axis=1)\n sub_out = sub_in + sub_out\n sub_out = self.attention_norm[i](sub_out)\n \n ffn_in = sub_out\n\n ffn_out = self.dense_2[i](self.dense_1[i](ffn_in))\n ffn_out = ffn_in + ffn_out\n ffn_out = self.ffn_norm[i](ffn_out)\n\n sub_in = ffn_out\n \n return ffn_out\n\n \nclass Decoder(tf.keras.Model):\n def __init__(self, vocab_size, model_size, num_layers, h):\n super(Decoder, self).__init__()\n self.model_size = model_size\n self.num_layers = num_layers\n self.h = h\n self.embedding = tf.keras.layers.Embedding(vocab_size, model_size,embeddings_regularizer=tf.keras.regularizers.L2(l2=0.01))\n self.attention_bot = [MultiHeadAttention(model_size, h) for _ in range(num_layers)]\n self.attention_bot_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n self.attention_mid = [MultiHeadAttention(model_size, h) for _ in range(num_layers)]\n self.attention_mid_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n self.dense_1 = [tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.dense_2 = [tf.keras.layers.Dense(model_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.ffn_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n self.dense = tf.keras.layers.Dense(vocab_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01))\n \n def call(self, sequence, encoder_output):\n # EMBEDDING AND POSITIONAL EMBEDDING\n embed_out = []\n for i in range(sequence.shape[1]):\n embed = self.embedding(tf.expand_dims(sequence[:, i], axis=1))\n embed_out.append(embed + pes[i, :])\n \n embed_out = tf.concat(embed_out, axis=1)\n \n \n bot_sub_in = embed_out\n \n for i in range(self.num_layers):\n # BOTTOM MULTIHEAD SUB LAYER\n bot_sub_out = []\n \n for j in range(bot_sub_in.shape[1]):\n values = bot_sub_in[:, :j, :]\n attention = self.attention_bot[i](\n tf.expand_dims(bot_sub_in[:, j, :], axis=1), values)\n\n bot_sub_out.append(attention)\n 
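            # Editor's note: the j-loop above slices values = bot_sub_in[:, :j, :],
            # so each decoder position attends only to strictly earlier positions.
            # This is the causal (autoregressive) mask, implemented by slicing
            # rather than by the usual additive -inf score mask.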
bot_sub_out = tf.concat(bot_sub_out, axis=1)\n bot_sub_out = bot_sub_in + bot_sub_out\n bot_sub_out = self.attention_bot_norm[i](bot_sub_out)\n \n # MIDDLE MULTIHEAD SUB LAYER\n mid_sub_in = bot_sub_out\n\n mid_sub_out = []\n for j in range(mid_sub_in.shape[1]):\n attention = self.attention_mid[i](\n tf.expand_dims(mid_sub_in[:, j, :], axis=1), encoder_output)\n\n mid_sub_out.append(attention)\n\n mid_sub_out = tf.concat(mid_sub_out, axis=1)\n mid_sub_out = mid_sub_out + mid_sub_in\n mid_sub_out = self.attention_mid_norm[i](mid_sub_out)\n\n # FFN\n ffn_in = mid_sub_out\n\n ffn_out = self.dense_2[i](self.dense_1[i](ffn_in))\n ffn_out = ffn_out + ffn_in\n ffn_out = self.ffn_norm[i](ffn_out)\n\n bot_sub_in = ffn_out\n \n logits = self.dense(ffn_out)\n \n return logits\n \nH = 2\nNUM_LAYERS = 2\n\nen_vocab_size = len(en_tokenizer.word_index) + 1\nencoder = Encoder(en_vocab_size, MODEL_SIZE, NUM_LAYERS, H)\n\n\nprint('Input vocabulary size', en_vocab_size)\n\nfr_vocab_size = len(fr_tokenizer.word_index) + 1\nmax_len_fr = data_fr_in.shape[1]\ndecoder = Decoder(fr_vocab_size, MODEL_SIZE, NUM_LAYERS, H)\n\n\nprint('Target vocabulary size', fr_vocab_size)\n\n\n\ncrossentropy = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True)\ndef loss_func(targets, logits):\n mask = tf.math.logical_not(tf.math.equal(targets, 0))\n mask = tf.cast(mask, dtype=tf.int64)\n loss = crossentropy(targets, logits, sample_weight=mask)\n\n return loss\n\n\noptimizer = tf.keras.optimizers.Adam()\n\ndef predict(test_source_text=None):\n if test_source_text is None:\n test_source_text = raw_data_en[np.random.choice(len(raw_data_en))]\n #print(test_source_text)\n test_source_seq = en_tokenizer.texts_to_sequences([test_source_text])\n #print(test_source_seq)\n\n en_output = encoder(tf.constant(test_source_seq))\n\n de_input = tf.constant([[fr_tokenizer.word_index['<start>']]], dtype=tf.int64)\n\n out_words = []\n\n while True:\n de_output = decoder(de_input, en_output)\n new_word = tf.expand_dims(tf.argmax(de_output, -1)[:, -1], axis=1)\n out_words.append(fr_tokenizer.index_word[new_word.numpy()[0][0]])\n\n de_input = tf.concat((de_input, new_word), axis=-1)\n\n if out_words[-1] == '<end>' or len(out_words) >= 14:\n break\n\n return ' '.join(out_words)\n\n\n@tf.function\ndef train_step(source_seq, target_seq_in, target_seq_out):\n with tf.GradientTape() as tape:\n encoder_output = encoder(source_seq)\n \n decoder_output = decoder(target_seq_in, encoder_output)\n\n loss = loss_func(target_seq_out, decoder_output)\n\n variables = encoder.trainable_variables + decoder.trainable_variables\n gradients = tape.gradient(loss, variables)\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in zip(gradients,variables)]\n optimizer.apply_gradients(capped_gvs)\n #optimizer.apply_gradients(zip(gradients, variables))\n\n return loss\n\nNUM_EPOCHS = 1\n\nstart_time = time.time()\nfor e in range(NUM_EPOCHS):\n for batch, (source_seq, target_seq_in, target_seq_out) in enumerate(dataset.take(-1)):\n loss = train_step(source_seq, target_seq_in,\n target_seq_out)\n\n print('Epoch {} Loss {:.4f}'.format(\n e + 1, loss.numpy()))\n\nend_time = time.time()\nprint('Average elapsed time: {:.2f}s'.format((end_time - start_time) / (e + 1)))\n\n \nimport nltk\n\nbleu_sum=0\ncount=0\nfor i in range(len(test_en)):\n test_sequence=test_en[i]\n try:\n op=predict(test_sequence)\n except:\n count+=1\n continue\n op=sp.decode_pieces(op.split(' '))\n if i%1000==0:\n print(test_en[i])\n print(test_fr[i])\n print(op,'\\n')\n BLEU = 
nltk.translate.bleu_score.sentence_bleu([test_fr[i]], op ,weights = (0.5, 0.5))\n bleu_sum+= BLEU\n\nprint(\"BLEU Score :\",(bleu_sum/(len(test_en)-count))*100)","sub_path":"final_subword.py","file_name":"final_subword.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"503511819","text":"#!/usr/bin/python3.7\n\nimport twitter\nimport json\nfrom twitterConfig import TwitterConfig\nfrom Sink import Sink\n\nclass Tweets(): \n\n def __init__(self): \n self.config = TwitterConfig() \n self.api = twitter.Api(consumer_key=self.config.consumer_key, consumer_secret=self.config.consumer_secret,\n access_token_key=self.config.access_key, access_token_secret=self.config.access_secret,\n input_encoding=None)\n self.sink = Sink(host=\"redis\", port=\"6357\")\n\n def getTweets(self):\n stream = self.api.GetStreamFilter(track=['@realDonaldTrump'])\n try:\n for line in stream:\n print(json.dumps(line))\n self.sink.write(json.dumps(line))\n finally:\n stream.close()\n \ndef main():\n tweet = Tweets()\n tweet.getTweets()\n\nif __name__ == \"__main__\":\n main()","sub_path":"Tweets.py","file_name":"Tweets.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"564494455","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\" Main script for connecting to a MQTT queue \"\"\"\n\nimport paho.mqtt.client\nimport json\n\ndef on_message(client, userdata, msg, ):\n print(\"{} Payload -> {}\".format(msg.topic, msg.payload.decode()))\n # client.publish('output', msg.payload.decode())\n\n\ndef on_publish(client, userdata, messageId):\n print(\"MessageID: \"+str(messageId))\n\n\ndef on_subscribe(client, userdata, messageId, granted_qos):\n print(\"Subscribed: \"+str(messageId)+\" \"+str(granted_qos))\n\n\ndef on_log(client, userdata, level, string):\n print(string)\n\n\nif __name__ == \"__main__\":\n\n # read config file\n with open('config.json', 'r') as filePointer:\n cfg = json.load(filePointer)\n\n client = paho.mqtt.client.Client()\n client.username_pw_set(cfg['mqtt']['user'], cfg['mqtt']['pass'])\n client.on_message = on_message\n client.on_publish = on_publish\n client.on_subscribe = on_subscribe\n\n try:\n client.connect(cfg['mqtt']['url'], int(cfg['mqtt']['port']), 60)\n\n for topic in cfg['topics'].values():\n print(topic)\n client.subscribe(topic, 0)\n\n client.loop_forever()\n\n except KeyboardInterrupt:\n print(\"\\nMQTT Ended\")\n\n finally:\n client.disconnect()","sub_path":"mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"641701183","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 18 13:56:59 2017\n\n@author: Mauro\n\"\"\"\n\nfrom LanguageSupport import _\n\nclass TestLS:\n \n def __init__(self):\n \n self.mypoints = 10\n \n \n def showPoints(self):\n \n sdb = {}\n sdb[\"points\"] = self.mypoints\n \n msg = \"My points = {points}\"\n \n msg = _(msg, \"it-IT\")\n \n msg = msg.format(**sdb)\n \n return msg","sub_path":"src/test_ls.py","file_name":"test_ls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"19576870","text":"import configparser\nimport csv\nimport json\nimport os\nimport re\nimport sys\n\nimport pycountry\n\n# import requests\nimport xlsxwriter\n\n# from 
selenium import webdriver\n# from selenium.webdriver.common.by import By\n# from selenium.webdriver.common.keys import Keys\n# from selenium.webdriver.support import expected_conditions as EC\n# from selenium.webdriver.support.ui import WebDriverWait\n\n# import xlrd\n\n\n# options = webdriver.ChromeOptions()\n# options.add_argument(\"headless\")\n# options.add_argument(\"log-level=3\")\n# browser = webdriver.Chrome(os.getcwd() + \"\\chromedriver.exe\", chrome_options=options)\n# print(\"Logging in to CF...\")\n# browser.get(\"https://app.clickfunnels.com/users/sign_out\") # sign out first\n# browser.get(\"https://app.clickfunnels.com/users/sign_in\") # sign in\n# userNameField = browser.find_element_by_id(\"user_email\")\n# userNameField.send_keys(\"***REMOVED***\")\n# pwField = browser.find_element_by_id(\"user_password\")\n# pwField.send_keys(\"***REMOVED***\")\n# pwField.send_keys(Keys.ENTER)\n\n\n# config variables\npardir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\nconfig = configparser.ConfigParser()\nconfig.read(pardir + \"\\Step_1_config.txt\")\n\n# get Country codes from pycountry module and config\ncountryCodes = {}\nfor country in pycountry.countries:\n countryCodes[country.name.upper()] = country.alpha_2\nfor key in config[\"Additional countries\"]:\n countryCodes[str(key).upper()] = (\n config[\"Additional countries\"][str(key).upper()]\n ).upper()\n\n\nSG_LABELS_WB = xlsxwriter.Workbook(pardir + \"\\\\zz_sg_labels.xlsx\")\nMY_LABELS_WB = xlsxwriter.Workbook(pardir + \"\\\\zz_my_labels.xlsx\")\nSG_LABELS_WS = SG_LABELS_WB.add_worksheet()\nMY_LABELS_WS = MY_LABELS_WB.add_worksheet()\nSG_LABELS_index = 0\nMY_LABELS_index = 0\n\nfunnels = config.sections()\nfunnels.remove(\"Additional countries\")\nfor funnel in funnels:\n # URL = config[funnel][\"URL\"]\n excludedProducts = json.loads(config[funnel][\"excludedProducts\"])\n bookCode = config[funnel][\"bookCode\"]\n startingNumber = int(config[funnel][\"startingNumber\"])\n lastNameProcessed = config[funnel][\"lastNameProcessed\"]\n lastCountryProcessed = config[funnel][\"lastCountryProcessed\"]\n\n # print(\"Generating sales list for \" + funnel + \"...\")\n # browser.get(URL)\n # elem = browser.find_element_by_xpath(\"//a[@class='btn btn-default export-link']\")\n # elem.click() # generate sales list\n\n # print(\"Downloading sales list for \" + funnel + \"...\")\n # wait = WebDriverWait(browser, 600) # wait for CF to provide download link\n # elem = wait.until(\n # EC.element_to_be_clickable(\n # (By.XPATH, \"//a[@class='btn btn-primary' and text() = 'Download']\")\n # )\n # )\n # r = requests.get(elem.get_attribute(\"href\"), allow_redirects=True)\n fileName = \"\\\\\" + funnel + \"_sales.csv\"\n # open(pardir + fileName, \"wb\").write(r.content)\n # print(\"Download complete!\")\n\n fileSrc = pardir + fileName\n\n # read csv file from ClickFunnels\n rawData = []\n with open(fileSrc, newline=\"\", encoding=\"utf-8\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n rawData.append(row)\n\n # remove records that have already been processed\n lastRecordedIndex = 0\n for i, col in enumerate(rawData[0:]):\n if (\n lastNameProcessed.upper() == col[0].strip().upper()\n and lastCountryProcessed.upper() == col[15].upper()\n ):\n lastRecordedIndex = i\n del rawData[: lastRecordedIndex + 1]\n\n # error handling\n if rawData == []:\n print(\"NO NEW SALES FOR \" + funnel + \"!\")\n continue\n\n # save processed name into config\n config[funnel][\"lastNameProcessed\"] = 
rawData[-1][0].strip()\n config[funnel][\"lastCountryProcessed\"] = rawData[-1][15]\n\n # remove unnecessary products\n for product in excludedProducts:\n rawData = [col for col in rawData if product not in col[17]]\n\n # output files\n SG_WB = xlsxwriter.Workbook(pardir + \"\\\\\" + funnel + \"_sg.xlsx\")\n MY_WB = xlsxwriter.Workbook(pardir + \"\\\\\" + funnel + \"_my.xlsx\")\n PH_WB = xlsxwriter.Workbook(pardir + \"\\\\\" + funnel + \"_ph.xlsx\")\n OTHERS_WB = xlsxwriter.Workbook(pardir + \"\\\\\" + funnel + \"_others.xlsx\")\n DHL_WB = xlsxwriter.Workbook(pardir + \"\\\\\" + funnel + \"_DHL.xlsx\")\n SG_WS = SG_WB.add_worksheet()\n MY_WS = MY_WB.add_worksheet()\n PH_WS = PH_WB.add_worksheet()\n OTHERS_WS = OTHERS_WB.add_worksheet()\n DHL_WS = DHL_WB.add_worksheet()\n\n # merge orders going to same address\n while True:\n addresses = []\n for col in rawData[0:]:\n addresses.append(col[11])\n totalCount = 0\n for address in addresses:\n totalCount += addresses.count(address)\n lastIndex = \"\"\n firstIndex = \"\"\n if (\n addresses.count(address) != 1\n ): # each address should only exist once in list\n for i, col in enumerate(rawData[0:]):\n if col[11] == address:\n firstIndex = i\n break # first index found\n for i, col in enumerate(rawData[0:]):\n if col[11] == address:\n lastIndex = i\n rawData[firstIndex][17] += \",\" + rawData[lastIndex][17]\n del rawData[lastIndex]\n break # one address merged, restart while loop\n if totalCount == len(addresses): # no duplicates\n break\n\n # write headers\n for i, header in enumerate(\n [\n \"NAME\",\n \"EMAIL\",\n \"TRACKING NO.\",\n \"PHONE\",\n \"ADDRESS\",\n \"CITY\",\n \"STATE\",\n \"POSCODE\",\n \"COUNTRY\",\n \"ORDERS\",\n \"STATUS\",\n \"COST\",\n ]\n ):\n SG_WS.write(0, i, header)\n MY_WS.write(0, i, header)\n PH_WS.write(0, i, header)\n OTHERS_WS.write(0, i, header)\n SG_LABELS_WS.write(0, i, header)\n MY_LABELS_WS.write(0, i, header)\n\n for i, header in enumerate(\n [\n \"Pick-up Account Number\",\n \"Shipment Order ID\",\n \"Shipping Service Code\",\n \"Consignee Name\",\n \"Address Line 1\",\n \"Address Line 2\",\n \"Address Line 3\",\n \"City\",\n \"State (M)\",\n \"Postal Code (M)\",\n \"Destination Country Code\",\n \"Phone Number\",\n \"Email Address\",\n \"Shipment Weight (g)\",\n \"Currency Code\",\n \"Total Declared Value\",\n \"Incoterm\",\n \"Shipment Description\",\n \"Content Description\",\n \"Content Export Description\",\n \"Content Unit Price\",\n \"Content Origin Country\",\n \"Content Quantity\",\n \"Content Code\",\n \"Content Indicator\",\n \"Remarks\",\n ]\n ):\n DHL_WS.write(0, i, header)\n\n SGindex = 1\n MYindex = 1\n PHindex = 1\n OTHERSindex = 1\n DHLindex = 1\n wsOut = \"\"\n indexOut = \"\"\n # output by country\n for col in rawData[0:]:\n shipmentOrderID = \"\"\n if col[15].lower() == \"singapore\" or col[15].lower() == \"sg\":\n indexOut = SGindex\n SGindex += 1\n SG_LABELS_index += 1\n wsOut = SG_WS\n elif (col[15].lower() in [\"malaysia\", \"hong kong\", \"canada\", \"iran\"]) or (\n col[15].lower() in [\"my\", \"hk\", \"ca\", \"ir\"]\n ):\n indexOut = MYindex\n MYindex += 1\n MY_LABELS_index += 1\n wsOut = MY_WS\n elif col[15].lower() == \"philippines\" or col[15].lower() == \"ph\":\n indexOut = PHindex\n PHindex += 1\n wsOut = PH_WS\n else:\n indexOut = OTHERSindex\n OTHERSindex += 1\n wsOut = OTHERS_WS\n\n # handle DHL output\n quantityTotal = 0\n tmpOrders = \",\" + col[17]\n commas = [m.start() for m in re.finditer(\",\", tmpOrders)]\n Xs = [m.start() for m in re.finditer(\" X \", tmpOrders)]\n 
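        # Editor's note: each ',' in tmpOrders is paired with the next ' X '
        # marker, and the slice between them is a quantity (e.g. ",2 X Book A"
        # yields "2"). A self-contained, runnable sketch of this parsing trick
        # follows this record.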
for startQuantity, endQuantity in zip(commas, Xs):\n # tmpstrr = tmpOrders[startQuantity + 1 : endQuantity]\n # print(tmpstrr)\n quantityTotal += int(tmpOrders[startQuantity + 1 : endQuantity])\n\n quantity28000 = 0\n if \" X 28000 Book\" in tmpOrders:\n endIndex = [m.start() for m in re.finditer(\" X 28000\", tmpOrders)]\n for i in endIndex:\n startIndex = tmpOrders[:i].rindex(\",\") + 1\n quantity28000 += int(tmpOrders[startIndex:i])\n\n if (len(col[8])) == 2:\n countryCode = col[8].upper()\n else:\n countryCode = countryCodes.get(col[8].upper())\n if countryCode is None:\n print(\n \"Country code not found for:\", col[8].upper()\n ) # dbg add file name and row here\n\n if countryCode in [\"SG\", \"TH\", \"AU\", \"GB\"]:\n shippingServiceCode = \"PLT\"\n incoterm = \"DDP\"\n elif countryCode == \"US\":\n shippingServiceCode = \"PLE\"\n incoterm = \"DDP\"\n else:\n shippingServiceCode = \"PPS\"\n incoterm = \"DDU\"\n\n shipmentOrderID = bookCode + str(startingNumber)\n\n # All books(250g) except 28000 book(425g)\n weight = (quantityTotal - quantity28000) * 250 + (quantity28000 * 425)\n\n # RM10 per book, max RM50\n declaredValue = min(quantityTotal * 10, 50)\n\n for k, content in enumerate(\n [\n \"5345221\",\n shipmentOrderID,\n shippingServiceCode,\n col[0][:30], # name is MAX 30 CHARS\n col[11],\n \"\",\n \"\",\n col[6],\n col[7],\n col[9],\n countryCode,\n col[10],\n col[3],\n weight,\n \"MYR\",\n declaredValue,\n incoterm,\n \"educational book, perfect bound book\",\n \"educational book, perfect bound book\",\n \"\",\n declaredValue,\n \"MY\",\n 1,\n shipmentOrderID,\n \"\",\n \"\",\n ]\n ):\n DHL_WS.write(DHLindex, k, content)\n DHLindex += 1\n startingNumber += 1\n\n for j, content in enumerate(\n [\n col[0],\n col[3],\n shipmentOrderID,\n col[10],\n col[11],\n col[13],\n col[14],\n col[16],\n col[15],\n col[17],\n \"\",\n \"\",\n ]\n ):\n wsOut.write(indexOut, j, content)\n if wsOut == SG_WS:\n SG_LABELS_WS.write(SG_LABELS_index, j, content)\n elif wsOut == MY_WS:\n MY_LABELS_WS.write(MY_LABELS_index, j, content)\n\n config[funnel][\"startingnumber\"] = str(startingNumber)\n\n # cleanup\n SG_WB.close()\n MY_WB.close()\n PH_WB.close()\n OTHERS_WB.close()\n DHL_WB.close()\n print(\"Output for \" + funnel + \" complete!\")\n\nSG_LABELS_WB.close()\nMY_LABELS_WB.close()\n\nwith open(pardir + \"\\Step_1_config.txt\", \"w\") as configfile:\n config.write(configfile)\n","sub_path":"prj/src/Step_1_Filter_addresses.py","file_name":"Step_1_Filter_addresses.py","file_ext":"py","file_size_in_byte":11704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
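# ---------------------------------------------------------------------------
# Editor's illustration (not a record of the dataset): a minimal, runnable
# sketch of the comma / " X " quantity-parsing used in
# Step_1_Filter_addresses.py above. The order-string format
# "2 X Book A,10 X 28000 Book" is an assumption inferred from that script.
import re

def parse_quantities(orders):
    """Return the integer quantities found in a ClickFunnels order string."""
    padded = "," + orders  # ensure every item is preceded by a comma
    commas = [m.start() for m in re.finditer(",", padded)]
    xs = [m.start() for m in re.finditer(" X ", padded)]
    # each quantity sits between a comma and the following " X " marker;
    # note this breaks if a product name itself contains " X "
    return [int(padded[c + 1:x]) for c, x in zip(commas, xs)]

assert parse_quantities("2 X Book A,10 X 28000 Book") == [2, 10]
# ---------------------------------------------------------------------------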
{"seq_id":"527581709","text":"from timer import Timer\r\nfrom paths import get_path_data\r\nfrom io_util import IOUtil\r\n\r\ndef main(sellerApi, category):\r\n timer = Timer()\r\n \r\n timer.start()\r\n if category['id'] is None:\r\n print(\"Top offers...\")\r\n offers = sellerApi.get_top_offers()\r\n else:\r\n offers = sellerApi.get_offers(category)\r\n print(\"Time elapsed to get {0} products: \".format(len(offers)), timer.diff())\r\n \r\n timer.start()\r\n adds, deletes, updates = sellerApi.process_offers(offers, category)\r\n print(\"Time elapsed to process the products: \", timer.diff())\r\n\r\n print(\"Products to add: \", len(adds))\r\n print(\"Products to update: \", len(updates))\r\n print(\"Products to delete: \", len(deletes))\r\n\r\n # Saving the last products sent (the csv version is kept below, commented out)\r\n IOUtil.dic_to_json( 'to_add.json', category['id'], adds )\r\n IOUtil.dic_to_json( 'to_update.json', category['id'], updates )\r\n IOUtil.dic_to_json( 'to_delete.json', category['id'], deletes )\r\n\r\n # IOUtil.dic_to_csv( 'to_add.csv', category['id'], adds )\r\n # IOUtil.dic_to_csv( 'to_update.csv', category['id'], updates )\r\n # IOUtil.dic_to_csv( 'to_delete.csv', category['id'], deletes )","sub_path":"api_c_process.py","file_name":"api_c_process.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"607864998","text":"#coding=utf8\n#######################################################\n#filename: TestCase.py\n#author: defias\n#date: 2016-3\n#function: TEST CASE\n#######################################################\nimport unittest,time,os,sys\nsys.path.append(\"..\")\nfrom PageObject import IndexPage\nfrom Login import Login\n\nclass TestLogin(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.LoginO = Login()\n cls.LoginO.open() # open the page\n\n def setUp(self):\n unittest.TestCase.setUp(self)\n self.verificationErrors = []\n self.accept_next_alert = True\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n self.assertEqual(self.verificationErrors, [])\n\n @classmethod\n def tearDownClass(cls):\n cls.LoginO.close() # close the page\n\n def test_login_success_001(self):\n '''Normal login and logout'''\n po = IndexPage.IndexPage()\n po.InputUser('root')\n po.InputPasswd('root123')\n po.CkLoginButton()\n self.assertEqual(u'您好, 超级管理员 ', po.LoginSucessCheck()) # expected UI text: \"Hello, super administrator\"\n po.Logout()\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n","sub_path":"src/TestCaseLib/TestLogin.py","file_name":"TestLogin.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"42853147","text":"import hashlib\nimport logging\n\nfrom PIL import Image, ImageColor, ImageDraw, ImageFilter\n\nfrom django.apps import apps\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .layers import layer_decorations, layer_saved_transformations\n\nlogger = logging.getLogger(name=__name__)\n\n\nclass BaseTransformationType(type):\n def __str__(self):\n return force_text(s=self.label)\n\n\nclass BaseTransformation(metaclass=BaseTransformationType):\n \"\"\"\n Transformations can modify the appearance of the document's page preview.\n Some of the transformations available are: rotate, zoom, resize and crop.\n \"\"\"\n arguments = ()\n name = 'base_transformation'\n _layer_transformations = {}\n _registry = {}\n\n @staticmethod\n def combine(transformations):\n result = None\n\n for transformation in transformations:\n if not result:\n result = hashlib.sha256(transformation.cache_hash())\n else:\n result.update(transformation.cache_hash())\n\n return result.hexdigest()\n\n @classmethod\n def get(cls, name):\n return cls._registry[name]\n\n @classmethod\n def get_arguments(cls):\n return cls.arguments\n\n @classmethod\n def get_assigned_layer(cls):\n for layer, transformations in cls._layer_transformations.items():\n if cls in transformations:\n return layer\n\n @classmethod\n def get_label(cls):\n arguments = cls.get_arguments()\n if arguments:\n return format_lazy('{}: {}', cls.label, ', '.join(arguments))\n else:\n return cls.label\n\n @classmethod\n def get_transformation_choices(cls, group_by_layer=False, layer=None):\n if layer:\n transformation_list = [\n (transformation.name, transformation) for transformation in cls._layer_transformations.get(layer, 
())\n ]\n else:\n transformation_list = cls._registry.items()\n\n if group_by_layer:\n flat_transformation_list = [\n klass for name, klass in transformation_list\n ]\n\n result = {}\n for layer, transformations in cls._layer_transformations.items():\n for transformation in transformations:\n if transformation in flat_transformation_list:\n result.setdefault(layer, [])\n result[layer].append(\n (transformation.name, transformation.get_label())\n )\n\n result = [\n (layer.label, transformations) for layer, transformations in result.items()\n ]\n\n # Sort by transformation group, then each transformation in the\n # group.\n return sorted(result, key=lambda x: (x[0], x[1]))\n else:\n return sorted(\n [\n (name, klass.get_label()) for name, klass in transformation_list\n ]\n )\n\n @classmethod\n def register(cls, layer, transformation):\n cls._registry[transformation.name] = transformation\n cls._layer_transformations.setdefault(layer, set())\n cls._layer_transformations[layer].add(transformation)\n\n def __init__(self, **kwargs):\n self.kwargs = {}\n for argument_name in self.__class__.get_arguments():\n setattr(self, argument_name, kwargs.get(argument_name))\n self.kwargs[argument_name] = kwargs.get(argument_name)\n\n def cache_hash(self):\n result = hashlib.sha256(force_bytes(s=self.name))\n\n # Sort arguments for guaranteed repeatability\n for key, value in sorted(self.kwargs.items()):\n result.update(force_bytes(s=key))\n result.update(force_bytes(s=value))\n\n return force_bytes(s=result.hexdigest())\n\n def execute_on(self, image):\n self.image = image\n self.aspect = 1.0 * image.size[0] / image.size[1]\n\n\nclass AssertTransformationMixin:\n @classmethod\n def get_arguments(cls):\n arguments = super().get_arguments() + (\n 'asset_name', 'rotation', 'transparency', 'zoom'\n )\n return arguments\n\n def get_asset_images(self, asset_name):\n try:\n transparency = float(self.transparency or '100.0')\n except ValueError:\n transparency = 100\n\n if transparency < 0:\n transparency = 0\n elif transparency > 100:\n transparency = 100\n\n try:\n rotation = int(self.rotation or '0') % 360\n except ValueError:\n rotation = 0\n\n try:\n zoom = float(self.zoom or '100.0')\n except ValueError:\n zoom = 100.0\n\n Asset = apps.get_model(app_label='converter', model_name='Asset')\n\n try:\n asset = Asset.objects.get(internal_name=asset_name)\n except Asset.DoesNotExist:\n logger.error('Asset \"%s\" not found.', asset_name)\n raise\n else:\n image_asset = asset.get_image()\n\n if image_asset.mode != 'RGBA':\n image_asset.putalpha(alpha=255)\n\n image_asset = image_asset.rotate(\n angle=360 - rotation, resample=Image.BICUBIC,\n expand=True\n )\n\n if zoom != 100.0:\n decimal_value = zoom / 100.0\n image_asset = image_asset.resize(\n (\n int(image_asset.size[0] * decimal_value),\n int(image_asset.size[1] * decimal_value)\n ), Image.ANTIALIAS\n )\n\n paste_mask = image_asset.getchannel(channel='A').point(\n lambda i: i * transparency / 100.0\n )\n\n return {\n 'image_asset': image_asset, 'paste_mask': paste_mask\n }\n\n\nclass TransformationAssetPaste(AssertTransformationMixin, BaseTransformation):\n arguments = ('left', 'top')\n label = _('Paste an asset')\n name = 'paste_asset'\n\n def _execute_on(self, *args, **kwargs):\n try:\n left = int(self.left or '0')\n except ValueError:\n left = 0\n\n try:\n top = int(self.top or '0')\n except ValueError:\n top = 0\n\n asset_name = getattr(self, 'asset_name', None)\n\n if asset_name:\n align_horizontal = getattr(self, 'align_horizontal', 'left')\n 
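            # Editor's note: the alignment values shift the paste origin so that
            # (left, top) can refer to the asset's left/center/right edge
            # horizontally and its top/middle/bottom edge vertically, as the
            # offset arithmetic below shows.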
align_vertical = getattr(self, 'align_vertical', 'top')\n\n result = self.get_asset_images(asset_name=asset_name)\n if result:\n if align_horizontal == 'left':\n left = left\n elif align_horizontal == 'center':\n left = int(left - result['image_asset'].size[0] / 2)\n elif align_horizontal == 'right':\n left = int(left - result['image_asset'].size[0])\n\n if align_vertical == 'top':\n top = top\n elif align_vertical == 'middle':\n top = int(top - result['image_asset'].size[1] / 2)\n elif align_vertical == 'bottom':\n top = int(top - result['image_asset'].size[1])\n\n self.image.paste(\n im=result['image_asset'], box=(left, top),\n mask=result['paste_mask']\n )\n else:\n logger.error('No asset name specified.')\n\n return self.image\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n return self._execute_on(self, *args, **kwargs)\n\n\nclass TransformationAssetPastePercent(TransformationAssetPaste):\n label = _('Paste an asset (percents coordinates)')\n name = 'paste_asset_percent'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n try:\n left = float(self.left or '0')\n except ValueError:\n left = 0\n\n try:\n top = float(self.top or '0')\n except ValueError:\n top = 0\n\n if left < 0:\n left = 0\n\n if left > 100:\n left = 100\n\n if top < 0:\n top = 0\n\n if top > 100:\n top = 100\n\n self.left = left / 100.0 * self.image.size[0]\n self.top = top / 100.0 * self.image.size[1]\n self.align_horizontal = 'center'\n self.align_vertical = 'middle'\n\n return self._execute_on(self, *args, **kwargs)\n\n\nclass TransformationAssetWatermark(\n AssertTransformationMixin, BaseTransformation\n):\n arguments = (\n 'left', 'top', 'right', 'bottom', 'horizontal_increment',\n 'vertical_increment'\n )\n label = _('Paste an asset as watermark')\n name = 'paste_asset_watermark'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n try:\n left = int(self.left or '0')\n except ValueError:\n left = 0\n\n try:\n top = int(self.top or '0')\n except ValueError:\n top = 0\n\n try:\n right = int(self.right or '0')\n except ValueError:\n right = 0\n\n try:\n bottom = int(self.bottom or '0')\n except ValueError:\n bottom = 0\n\n asset_name = getattr(self, 'asset_name', None)\n\n if asset_name:\n result = self.get_asset_images(asset_name=asset_name)\n if result:\n try:\n horizontal_increment = int(self.horizontal_increment or '0')\n except ValueError:\n horizontal_increment = 0\n\n try:\n vertical_increment = int(self.vertical_increment or '0')\n except ValueError:\n vertical_increment = 0\n\n if horizontal_increment == 0:\n horizontal_increment = result['paste_mask'].size[0]\n\n if vertical_increment == 0:\n vertical_increment = result['paste_mask'].size[1]\n\n for x in range(left, right or self.image.size[0], horizontal_increment):\n for y in range(top, bottom or self.image.size[1], vertical_increment):\n self.image.paste(\n im=result['image_asset'], box=(x, y),\n mask=result['paste_mask']\n )\n else:\n logger.error('No asset name specified.')\n\n return self.image\n\n\nclass TransformationCrop(BaseTransformation):\n arguments = ('left', 'top', 'right', 'bottom',)\n label = _('Crop')\n name = 'crop'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n try:\n left = int(self.left or '0')\n except ValueError:\n left = 0\n\n try:\n top = int(self.top or '0')\n except ValueError:\n top = 0\n\n try:\n right = int(self.right or '0')\n except ValueError:\n right = 0\n\n try:\n bottom = int(self.bottom 
or '0')\n except ValueError:\n bottom = 0\n\n if left < 0:\n left = 0\n\n if left > self.image.size[0] - 1:\n left = self.image.size[0] - 1\n\n if top < 0:\n top = 0\n\n if top > self.image.size[1] - 1:\n top = self.image.size[1] - 1\n\n if right < 0:\n right = 0\n\n if right > self.image.size[0] - 1:\n right = self.image.size[0] - 1\n\n if bottom < 0:\n bottom = 0\n\n if bottom > self.image.size[1] - 1:\n bottom = self.image.size[1] - 1\n\n # Invert right value\n # Pillow uses left, top, right, bottom to define a viewport\n # of real coordinates\n # We invert the right and bottom to define a viewport\n # that can crop from the right and bottom borders without\n # having to know the real dimensions of an image\n right = self.image.size[0] - right\n bottom = self.image.size[1] - bottom\n\n if left > right:\n left = right - 1\n\n if top > bottom:\n top = bottom - 1\n\n logger.debug(\n 'left: %f, top: %f, right: %f, bottom: %f', left, top, right,\n bottom\n )\n\n return self.image.crop((left, top, right, bottom))\n\n\nclass TransformationDrawRectangle(BaseTransformation):\n arguments = (\n 'left', 'top', 'right', 'bottom', 'fillcolor', 'outlinecolor',\n 'outlinewidth'\n )\n label = _('Draw rectangle')\n name = 'draw_rectangle'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n try:\n left = int(self.left or '0')\n except ValueError:\n left = 0\n\n try:\n top = int(self.top or '0')\n except ValueError:\n top = 0\n\n try:\n right = int(self.right or '0')\n except ValueError:\n right = 0\n\n try:\n bottom = int(self.bottom or '0')\n except ValueError:\n bottom = 0\n\n if left < 0:\n left = 0\n\n if left > self.image.size[0] - 1:\n left = self.image.size[0] - 1\n\n if top < 0:\n top = 0\n\n if top > self.image.size[1] - 1:\n top = self.image.size[1] - 1\n\n if right < 0:\n right = 0\n\n if right > self.image.size[0] - 1:\n right = self.image.size[0] - 1\n\n if bottom < 0:\n bottom = 0\n\n if bottom > self.image.size[1] - 1:\n bottom = self.image.size[1] - 1\n\n # Invert right value\n # Pillow uses left, top, right, bottom to define a viewport\n # of real coordinates\n # We invert the right and bottom to define a viewport\n # that can crop from the right and bottom borders without\n # having to know the real dimensions of an image\n right = self.image.size[0] - right\n bottom = self.image.size[1] - bottom\n\n if left > right:\n left = right - 1\n\n if top > bottom:\n top = bottom - 1\n\n logger.debug(\n 'left: %f, top: %f, right: %f, bottom: %f', left, top, right,\n bottom\n )\n\n fillcolor_value = getattr(self, 'fillcolor', None)\n if fillcolor_value:\n fill_color = ImageColor.getrgb(fillcolor_value)\n else:\n fill_color = 0\n\n outlinecolor_value = getattr(self, 'outlinecolor', None)\n if outlinecolor_value:\n outline_color = ImageColor.getrgb(outlinecolor_value)\n else:\n outline_color = None\n\n outlinewidth_value = getattr(self, 'outlinewidth', None)\n if outlinewidth_value:\n outline_width = int(outlinewidth_value)\n else:\n outline_width = 0\n\n draw = ImageDraw.Draw(self.image)\n draw.rectangle(\n (left, top, right, bottom), fill=fill_color, outline=outline_color,\n width=outline_width\n )\n\n return self.image\n\n\nclass TransformationDrawRectanglePercent(BaseTransformation):\n arguments = (\n 'left', 'top', 'right', 'bottom', 'fillcolor', 'outlinecolor',\n 'outlinewidth'\n )\n label = _('Draw rectangle (percents coordinates)')\n name = 'draw_rectangle_percent'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n try:\n 
left = float(self.left or '0')\n except ValueError:\n left = 0\n\n try:\n top = float(self.top or '0')\n except ValueError:\n top = 0\n\n try:\n right = float(self.right or '0')\n except ValueError:\n right = 0\n\n try:\n bottom = float(self.bottom or '0')\n except ValueError:\n bottom = 0\n\n if left < 0:\n left = 0\n\n if left > 100:\n left = 100\n\n if top < 0:\n top = 0\n\n if top > 100:\n top = 100\n\n if right < 0:\n right = 0\n\n if right > 100:\n right = 100\n\n if bottom < 0:\n bottom = 0\n\n if bottom > 100:\n bottom = 100\n\n logger.debug(\n 'left: %f, top: %f, right: %f, bottom: %f', left, top, right,\n bottom\n )\n\n fillcolor_value = getattr(self, 'fillcolor', None)\n if fillcolor_value:\n fill_color = ImageColor.getrgb(fillcolor_value)\n else:\n fill_color = 0\n\n outlinecolor_value = getattr(self, 'outlinecolor', None)\n if outlinecolor_value:\n outline_color = ImageColor.getrgb(outlinecolor_value)\n else:\n outline_color = None\n\n outlinewidth_value = getattr(self, 'outlinewidth', None)\n if outlinewidth_value:\n outline_width = int(outlinewidth_value)\n else:\n outline_width = 0\n\n left = left / 100.0 * self.image.size[0]\n top = top / 100.0 * self.image.size[1]\n\n # Invert right value\n # Pillow uses left, top, right, bottom to define a viewport\n # of real coordinates\n # We invert the right and bottom to define a viewport\n # that can crop from the right and bottom borders without\n # having to know the real dimensions of an image\n\n right = self.image.size[0] - (right / 100.0 * self.image.size[0])\n bottom = self.image.size[1] - (bottom / 100.0 * self.image.size[1])\n\n draw = ImageDraw.Draw(self.image)\n draw.rectangle(\n (left, top, right, bottom), fill=fill_color, outline=outline_color,\n width=outline_width\n )\n\n return self.image\n\n\nclass TransformationFlip(BaseTransformation):\n arguments = ()\n label = _('Flip')\n name = 'flip'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n return self.image.transpose(Image.FLIP_TOP_BOTTOM)\n\n\nclass TransformationGaussianBlur(BaseTransformation):\n arguments = ('radius',)\n label = _('Gaussian blur')\n name = 'gaussianblur'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n return self.image.filter(ImageFilter.GaussianBlur(radius=self.radius))\n\n\nclass TransformationLineArt(BaseTransformation):\n label = _('Line art')\n name = 'lineart'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n return self.image.convert('L').point(lambda x: 0 if x < 128 else 255, '1')\n\n\nclass TransformationMirror(BaseTransformation):\n arguments = ()\n label = _('Mirror')\n name = 'mirror'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n return self.image.transpose(Image.FLIP_LEFT_RIGHT)\n\n\nclass TransformationResize(BaseTransformation):\n arguments = ('width', 'height')\n label = _('Resize')\n name = 'resize'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n width = int(self.width)\n height = int(self.height or 1.0 * width / self.aspect)\n\n factor = 1\n while self.image.size[0] / factor > 2 * width and self.image.size[1] * 2 / factor > 2 * height:\n factor *= 2\n\n if factor > 1:\n self.image.thumbnail(\n (self.image.size[0] / factor, self.image.size[1] / factor),\n Image.NEAREST\n )\n\n # Resize the image with best quality algorithm ANTI-ALIAS\n self.image.thumbnail((width, height), Image.ANTIALIAS)\n\n return self.image\n\n\nclass 
TransformationRotate(BaseTransformation):\n arguments = ('degrees', 'fillcolor')\n label = _('Rotate')\n name = 'rotate'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n self.degrees %= 360\n\n if self.degrees == 0:\n return self.image\n\n fillcolor_value = getattr(self, 'fillcolor', None)\n if fillcolor_value:\n fillcolor = ImageColor.getrgb(fillcolor_value)\n else:\n fillcolor = None\n\n return self.image.rotate(\n angle=360 - self.degrees, resample=Image.BICUBIC, expand=True,\n fillcolor=fillcolor\n )\n\n\nclass TransformationRotate90(TransformationRotate):\n arguments = ()\n degrees = 90\n label = _('Rotate 90 degrees')\n name = 'rotate90'\n\n def __init__(self, **kwargs):\n super().__init__()\n self.kwargs['degrees'] = 90\n\n\nclass TransformationRotate180(TransformationRotate):\n arguments = ()\n degrees = 180\n label = _('Rotate 180 degrees')\n name = 'rotate180'\n\n def __init__(self, **kwargs):\n super().__init__()\n self.kwargs['degrees'] = 180\n\n\nclass TransformationRotate270(TransformationRotate):\n arguments = ()\n degrees = 270\n label = _('Rotate 270 degrees')\n name = 'rotate270'\n\n def __init__(self, **kwargs):\n super().__init__()\n self.kwargs['degrees'] = 270\n\n\nclass TransformationUnsharpMask(BaseTransformation):\n arguments = ('radius', 'percent', 'threshold')\n label = _('Unsharp masking')\n name = 'unsharpmask'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n return self.image.filter(\n ImageFilter.UnsharpMask(\n radius=self.radius, percent=self.percent,\n threshold=self.threshold\n )\n )\n\n\nclass TransformationZoom(BaseTransformation):\n arguments = ('percent',)\n label = _('Zoom')\n name = 'zoom'\n\n def execute_on(self, *args, **kwargs):\n super().execute_on(*args, **kwargs)\n\n if self.percent == 100:\n return self.image\n\n decimal_value = float(self.percent) / 100\n return self.image.resize(\n (\n int(self.image.size[0] * decimal_value),\n int(self.image.size[1] * decimal_value)\n ), Image.ANTIALIAS\n )\n\n\nBaseTransformation.register(\n layer=layer_decorations, transformation=TransformationAssetPaste\n)\nBaseTransformation.register(\n layer=layer_decorations, transformation=TransformationAssetPastePercent\n)\nBaseTransformation.register(\n layer=layer_decorations, transformation=TransformationAssetWatermark\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationCrop\n)\nBaseTransformation.register(\n layer=layer_saved_transformations,\n transformation=TransformationDrawRectangle\n)\nBaseTransformation.register(\n layer=layer_saved_transformations,\n transformation=TransformationDrawRectanglePercent\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationFlip\n)\nBaseTransformation.register(\n layer=layer_saved_transformations,\n transformation=TransformationGaussianBlur\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationLineArt\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationMirror\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationResize\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationRotate\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, transformation=TransformationRotate90\n)\nBaseTransformation.register(\n layer=layer_saved_transformations, 
transformation=TransformationRotate180\n)\nBaseTransformation.register(\n    layer=layer_saved_transformations, transformation=TransformationRotate270\n)\nBaseTransformation.register(\n    layer=layer_saved_transformations,\n    transformation=TransformationUnsharpMask\n)\nBaseTransformation.register(\n    layer=layer_saved_transformations, transformation=TransformationZoom\n)\n","sub_path":"mayan/apps/converter/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":23939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"66579316","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nimport sys\nimport urllib\nfrom bs4 import BeautifulSoup\nreload(sys)\nsys.setdefaultencoding('utf-8')\ndef get_html(url): # fetch the page content for the given url\n    result = urllib.urlopen(url)\n    return result.read()\n    # save_file(result.read(), 'thefile.txt')\ndef get_movie_all(html): # use soup to extract each show's full info block, returned as a list\n    soup = BeautifulSoup(html,\"html.parser\")\n    movie_1 = soup.find_all('ul', class_=\"content-meta info\")\n    movie_2=soup.find_all('title')\n    movie_3=soup.find_all('div', class_=\"castSection \")\n    movie_4=soup.find_all('div', id=\"scoreStats\" ,class_=\"hidden-xs\")\n    movie_5=soup.find_all('span', class_=\"meter-value superPageFontColor\")\n    movie_str=str(movie_1[0])+str(movie_2[0])+str(movie_3[0])+str(movie_4[0])+str(movie_5[0])\n    movie=[movie_str]\n    return movie\ndef get_movie_one(movie):\n    result = []  # stores the extracted show information\n    soup_all = BeautifulSoup(str(movie),\"html.parser\")\n    title = soup_all.find_all('title')\n    soup_title = BeautifulSoup(str(title[0]),\"html.parser\")\n    for line in soup_title.stripped_strings:  # extract the text inside the fetched title tag\n        result.append(line)\n    result_str=\" | Fresh:\"\n\n    fresh=soup_all.find_all('span', class_=\"meter-value superPageFontColor\")\n    soup_fresh=BeautifulSoup(str(fresh[0]),\"html.parser\")\n    for line in soup_fresh.stripped_strings:\n        result_str=result_str+line\n\n    rating=soup_all.find_all('div', class_=\"superPageFontColor\")\n    soup_rating=BeautifulSoup(str(rating[0]),\"html.parser\")\n    for line in soup_rating.stripped_strings:\n        result_str=result_str+line\n    result_str=result_str+\" | Actor:\"\n\n    actor=soup_all.find_all('a', class_=\"unstyled articleLink\")\n    for it_actor in actor:\n        soup_actor = BeautifulSoup(str(it_actor),\"html.parser\")\n        for line in soup_actor.stripped_strings:\n            result_str = result_str + line + \" \"\n\n    info=soup_all.find_all('li' ,class_=\"meta-row clearfix\")\n    for it_info in info:\n        soup_info=BeautifulSoup(str(it_info),\"html.parser\")\n        for line in soup_info.stripped_strings:\n            result_str=result_str+line+\" \"\n\n\n    result.append(result_str)\n\n\n\n    return result  # return the extracted result\ndef save_file(text, filename): # append text to a file\n    f= open(filename,'ab')\n    f.write(bytes(text))\n    f.close()\ndef read_file(filename): # read a file back in\n    f = open(filename,'r')\n    text = f.read()\n    f.close()\n    return text\ndef work():\n    try:\n        f = open('RottenTomatoes_by_TV.txt', 'r')\n\n        name = f.read()\n    finally:\n        if f:\n            f.close()\n    w = open('RottenTomatoes_by_TV.txt', 'w')\n    w.truncate()\n    w.close()\n    url = 'https://www.rottentomatoes.com/tv/'+name\n    html = get_html(url)\n    movie_list = get_movie_all(html)\n    for movie in movie_list:  # pass each show block on the page to the extractor\n        result = get_movie_one(movie)\n        text = '' + 'TV:' + str(result[0]) + str(result[1]) + '\\n' + '\\t'\n        save_file(text, 'RottenTomatoes_by_TV.txt')\n\n\nif __name__=='__main__':\n    
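# A minimal usage sketch (added note; 'gameofthrones' is a hypothetical slug): seed the input file before running, e.g.\n    # with open('RottenTomatoes_by_TV.txt', 'w') as f:\n    #     f.write('gameofthrones')\n    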
work()","sub_path":"Catcher/RottenTomatoes_by_TV.py","file_name":"RottenTomatoes_by_TV.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"266939758","text":"#!/usr/bin/env python3\n\n# import Python modules\nimport os , re, csv, operator\n\n# initialise dictionaries\nerrors = {}\nper_user = {}\n\n# Create a raw-string regex search pattern\n# this will create 3 groups:\n# Group 1 will catch ERROR or INFO: ([A-Z]*)\n# Group 2 will catch entire ERROR message: ([\\w\\S ']*)\n# Group 3 will catch user name (escaping parentheses): \\(([\\Sa-z]*)\\)\nregex = r\"ticky: ([A-Z]*)([\\w\\S ']*)\\(([\\Sa-z]*)\\)\"\n\n# Open the log file to iterate through it and add items to the dictionaries:\nwith open(\"syslog.log\") as file:\n    for log in file.readlines():\n        result = re.search(regex, log)\n        \"\"\" Add a new user key-value pair to the per_user={} dictionary if\n        the user key does not exist in the dictionary. \"\"\"\n        if result != None and result.group(3) not in per_user:\n            per_user.update({result.group(3):[0,0]})\n            # Yes, this creates a list as a value which will create a problem later. See: **** Problem *** \n        else:\n            NotImplemented\n        # When ERROR messages are parsed\n        if result != None and result.group(1) == \"ERROR\": \n            per_user[result.group(3)][1] += 1\n            \"\"\" Add a new error key-value pair to the errors={} dictionary if\n            the error key does not exist in the dictionary. \"\"\"\n            if result.group(2) not in errors:\n                errors.update({result.group(2):1})\n            else:\n                # The error message was seen before; increment its count\n                errors[result.group(2)] += 1\n        else:\n            # When INFO messages are parsed\n            per_user[result.group(3)][0] += 1\n    file.close()\n\n# Sorting dictionaries errors={} and per_user={}\n# The error dictionary should be sorted by the number of errors from most common to least common\n# The user dictionary should be sorted by username\n# NB! 
this changes the data type from dict() to list()\nsorted_errors = sorted(errors.items(), key = operator.itemgetter(1), reverse=True)\nsorted_users = sorted(per_user.items())\n\n# **** Problem *** \n# Since the per_user={} dictionary's value is a list() data type we need to change it to a tuple for CSV writability purposes\nremovelist_list=[]\nfor item in sorted_users:\n    count_list = item[1]\n    new_tuple = (item[0],count_list[0],count_list[1])\n    removelist_list.append(new_tuple)\nsorted_users = removelist_list\n\n# Inserting column names as (\"Error\", \"Count\") at the zero index position of the sorted errors={} list.\n# Inserting column names as (\"Username\", \"INFO\", \"ERROR\") at the zero index position of the sorted per_user={} list.\nsorted_errors.insert(0, (\"Error\", \"Count\")) \nsorted_users.insert(0, (\"Username\", \"INFO\", \"ERROR\")) \n\n# Testing column names and sorting \n# print(sorted_errors)\n# print(sorted_users)\n\n# Storing lists in CSV files: sorted_errors into error_message.csv and sorted_users into user_statistics.csv\nwith open('error_message.csv', 'w') as errors_csv:\n    writer = csv.writer(errors_csv)\n    writer.writerows(sorted_errors)\n\nwith open('user_statistics.csv', 'w') as user_stats_csv:\n    writer = csv.writer(user_stats_csv)\n    writer.writerows(sorted_users)","sub_path":"Qwiklabs/Log Analysis Using Regular Expressions/ticky_check.py","file_name":"ticky_check.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"340449914","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n__author__ = \"Zhong Lu\"\r\n\r\n\r\nimport pika\r\nimport gevent\r\nimport uuid\r\nfrom gevent import monkey\r\nfrom conf import settings\r\nfrom core import logger\r\n\r\n\r\n# By default the program is handed straight to gevent, but gevent cannot tell by itself when the program performs IO, so the program has to be patched so gevent can recognise IO operations\r\n# Declaring the statement below at the top of the program marks every IO operation in the current program\r\nmonkey.patch_all()\r\n\r\n\r\n# Get the logger object for the shell-command execution log\r\nexecute_command_logger = logger.logger_function(\"execute_command\")\r\n# Get the logger object for the command-result viewing log\r\nview_command_logger = logger.logger_function(\"view_command\")\r\n\r\n\r\ncommand_result_dict = {}  # dictionary that holds command results\r\n\r\n\r\nclass RpcClient(object):\r\n    \"\"\"RPC client class\"\"\"\r\n    def call(self, ip_address, command):\r\n        \"\"\"\r\n        Send data to the rabbitmq server queue.\r\n        :param ip_address: ip address of the host that should run the command\r\n        :param command: the command to execute\r\n        :return:\r\n        \"\"\"\r\n        # Declare a random queue and get its queue object.\r\n        # Since this is a broadcast no queue name is given; rabbit then assigns a random name, although the queue could also be named explicitly here.\r\n        # exclusive=True makes the queue exclusive and deletes it automatically once the consumer using it disconnects.\r\n        queue_obj = self.channel.queue_declare(exclusive=True)\r\n        # Get the queue name\r\n        self.callback_queue = queue_obj.method.queue\r\n        print(\"Randomly generated queue name:\", self.callback_queue)\r\n\r\n        self.corr_id = str(uuid.uuid4())  # generate a random UUID and convert it to a string\r\n\r\n        # Publish to the channel with the given exchange, routing_key and body\r\n        self.channel.basic_publish(exchange='',\r\n                                   routing_key=ip_address,  # which queue the message is sent to\r\n                                   properties=pika.BasicProperties(\r\n\r\n                                       # tell the peer which queue to send its response to\r\n                                       reply_to=self.callback_queue,\r\n\r\n                                       # send the locally generated UUID to the peer\r\n                                       correlation_id=self.corr_id,\r\n                                   ),\r\n\r\n                                   body=command  # send the command itself\r\n                                   )\r\n\r\n        print(\"Command %s has been sent to the rabbitmq server queue\" % command)\r\n        print(\"Command %s is to be executed on host %s\" % (command, ip_address))\r\n\r\n        self.get_response()\r\n\r\n        # Put the result of running the command on this single host into the results dictionary\r\n        command_result_dict[self.random_key][ip_address] = self.response\r\n\r\n    def get_response(self):\r\n        \"\"\"\r\n        Receive messages from the rabbitmq server queue.\r\n        :return:\r\n        \"\"\"\r\n        self.response = None  # self.response holds the received message content; it defaults to None\r\n\r\n        # Declare message consumption\r\n        self.channel.basic_consume(self.on_response,  # call self.on_response to handle any received message\r\n                                   queue=self.callback_queue  # which queue to receive messages from\r\n                                   )\r\n\r\n        # Keep receiving messages while self.response is still None\r\n        while self.response is None:\r\n            # Receive messages without blocking, whether or not anything arrives; effectively a non-blocking version of start_consuming()\r\n            self.connection.process_data_events()\r\n\r\n    def on_response(self, ch, method, props, body):\r\n        \"\"\"\r\n        Callback triggered when a message is received.\r\n        :param ch: in-memory channel object\r\n        :param method: delivery information, e.g. which queue and exchange the message went through\r\n        :param props: message properties\r\n        :param body: the received message\r\n        :return:\r\n        \"\"\"\r\n        # If the locally generated UUID equals the UUID the peer sent back, the received message is the right one.\r\n        # Several messages may be sent to the peer in a row; the UUID confirmation pairs each received result with the message that requested it.\r\n        # props.correlation_id is the UUID the peer sent back to this end.\r\n        if self.corr_id == props.correlation_id:\r\n            self.response = body  # store the received message content in self.response\r\n\r\n        ch.basic_ack(delivery_tag=method.delivery_tag)  # send an acknowledgement to the rabbitmq server\r\n\r\n\r\nclass Client(RpcClient):\r\n    \"\"\"Client class, inherits from RpcClient\"\"\"\r\n    def __init__(self):\r\n        # Create the connection object, passing the connection parameters to the connection adapter\r\n        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=settings.ip_address))\r\n\r\n        # Open a channel on the connection\r\n        self.channel = self.connection.channel()\r\n\r\n        self.interactive()\r\n\r\n    def interactive(self):\r\n        \"\"\"\r\n        Interact with the user.\r\n        :return:\r\n        \"\"\"\r\n        self._help()\r\n\r\n        while True:\r\n\r\n            input_command = input(\"Enter a command\\n>>>\").strip()\r\n\r\n            if len(input_command) == 0:  # the user entered an empty command\r\n                continue\r\n\r\n            action = input_command.split()[0]  # get the action type: run, check_task, help, exit\r\n\r\n            if hasattr(self, \"_%s\" % action):\r\n                function = getattr(self, \"_%s\" % action)\r\n                function(input_command)\r\n            else:\r\n                print(\"\\033[1;31mInvalid input, please try again\\033[0m\")\r\n                self._help()\r\n\r\n    def _run(self, input_command):\r\n        \"\"\"\r\n        Parse the user's command and dispatch it to the remote hosts for execution.\r\n        :param input_command: the command entered by the user\r\n        :return:\r\n        \"\"\"\r\n        if input_command.count(\"--hosts\") != 1:\r\n            print(\"\\033[1;31mInvalid command: missing the '--hosts' option, please try again\\033[0m\")\r\n            return False\r\n        elif len(input_command.split(\"\\\"\")) < 4:\r\n            print(\"\\033[1;31mInvalid command: the arguments after 'run' or '--hosts' are not wrapped in double quotes, please try again\\033[0m\")\r\n            return False\r\n\r\n        command = input_command.split(\"\\\"\")[1].strip()  # get the shell command as a string\r\n        host_list = input_command.split(\"\\\"\")[3].strip().split()  # get the hosts that should run the shell command, as a list\r\n\r\n        if len(command) < 1 or len(host_list) < 1:\r\n            print(\"\\033[1;31mInvalid command: missing arguments after 'run' or '--hosts', please try again\\033[0m\")\r\n            return False\r\n\r\n        while True:\r\n            # Generate a UUID from random numbers and convert it to a string.\r\n            # The generated UUID is used as the key of this command's entry in the results dictionary.\r\n            random_key = str(uuid.uuid4())\r\n\r\n            if random_key not in command_result_dict:  # the generated UUID is not yet a key in the results dictionary\r\n                self.random_key = random_key  # attach the generated UUID to the instance\r\n                # Initialise a nested dictionary inside the results dictionary, with the UUID as key (each command maps to exactly one UUID) and the fresh dictionary as value\r\n                command_result_dict[self.random_key] = {}\r\n                break\r\n\r\n        gevent_list = []  # list that holds the greenlets to start\r\n        for ip_address in host_list:\r\n            # Spawn a greenlet per host and collect all of them in the list\r\n            gevent_list.append(gevent.spawn(self.call, ip_address, command))\r\n\r\n        gevent.joinall(gevent_list)  # wait for all greenlets to finish\r\n\r\n        print(\"Command \\033[1;32m%s\\033[0m \\n\"\r\n              \"task id \\033[1;32m%s\\033[0m\" % (command, self.random_key))\r\n\r\n        # Write to the log file\r\n        execute_command_logger.info(\"Executed command on hosts %s: %s, generated id %s\" % (host_list, command, self.random_key))\r\n\r\n        return True\r\n\r\n    def _check_task(self, input_command):\r\n        \"\"\"\r\n        Parse the user's command and display the stored command results.\r\n        :param input_command: the command entered by the user\r\n        :return:\r\n        \"\"\"\r\n        command_list = input_command.split()\r\n        if len(command_list) < 2:\r\n            
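# Usage guard (added note): check_task expects exactly one argument, the task id that a previous run command printed.\r\n            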
print(\"\\033[1;31mInvalid command: the task id is missing, please try again\\033[0m\")\r\n            return False\r\n\r\n        command_id = command_list[1]\r\n        if command_id not in command_result_dict:\r\n            print(\"\\033[1;31mThe given id does not exist, please try again\\033[0m\")\r\n            return False\r\n\r\n        result_dict = command_result_dict[command_id]\r\n        for items in result_dict:\r\n            print(\"-----\\033[1;32m%s\\033[0m\\n\"\r\n                  \"%s\" % (items, result_dict[items].decode(encoding=\"utf-8\")))\r\n\r\n        # Write to the log file\r\n        view_command_logger.info(\"The user viewed the command results for id %s\" % command_id)\r\n\r\n        del command_result_dict[command_id]  # remove the given key from the dictionary\r\n\r\n    def _help(self, *args):\r\n        \"\"\"\r\n        Show usage help.\r\n        :param args: extra arguments\r\n        :return:\r\n        \"\"\"\r\n        help_info = r\"\"\"\r\n\r\n        Program usage help:\r\n\r\n        -- Run a system shell command on remote hosts\r\n           Format: run \"shell_command\" --hosts \"ip_address ...\"\r\n           Note: shell_command and ip_address must be wrapped in double quotes\r\n           Examples:\r\n               run \"ipconfig\" --hosts \"192.168.0.23\": run ipconfig on host 192.168.0.23\r\n               run \"ipconfig\" --hosts \"192.168.0.23 192.168.157.128\": run ipconfig on both 192.168.0.23 and 192.168.157.128\r\n\r\n        -- View the result of a system shell command\r\n           Shows the result of running the command matching the given id on the remote hosts\r\n           Format: check_task id\r\n           Example:\r\n               check_task 26106e37-8f0f-478d-b7b0-c0e598b72719\r\n\r\n        -- Stop and leave the whole program\r\n           Example:\r\n               exit\r\n\r\n        -- Show this usage help\r\n           Example:\r\n               help\r\n        \"\"\"\r\n        print(help_info)\r\n\r\n    def _exit(self, *args):\r\n        \"\"\"\r\n        Exit the program.\r\n        :param args: extra arguments\r\n        :return:\r\n        \"\"\"\r\n        self.connection.close()  # close the connection\r\n        exit(\"Program exited\")\r\n","sub_path":"day11/Host_management_based_on_RabbitMQ_RPC/client/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"59896891","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport json\nfrom Constant import constants\nfrom Models import PDFInfo\n\n\ndef find_events_link(past_events):\n    events_link = []\n    for p in past_events:\n        events_link.append(p.find(\"a\")[\"href\"])\n    return events_link\n\n\ndef find_pdf_info(pdfs, title, date, past_events_info):\n    for pdf in pdfs:\n        pdf_title = pdf.get_text()\n        href = pdf[\"href\"]\n        pdf_info = PDFInfo.PDFInfo(title, date, pdf_title, href)\n        past_events_info.append(pdf_info.__dict__)\n    return past_events_info\n\n\ndef find_past_events_info(events_link):\n    past_events_info = []  # title, date, [{\"pdfTitle\": \"pdfLink\"}]\n\n    for link in events_link:\n        address = requests.get(link)\n        soup_address = bs(address.content)\n        title = soup_address.find(\"div\", attrs={\"class\": \"wd_title wd_event_title detail_header\"}).get_text()\n        date = soup_address.find(\"div\", attrs={\"class\": \"item_date wd_event_sidebar_item wd_event_date\"}).get_text()\n        pdfs = soup_address.select(\"div.wd_event_info a\")\n        pdf_info_list = find_pdf_info(pdfs, title, date, past_events_info)\n        # past_event_info = EventCompleteInfo.EventCompleteInfo(title, date, pdf_info_list)\n        # past_events_info.append(pdf_info_list)\n    return past_events_info\n\n\ndef find_past_events_info_for_fb(json_object):\n    past_events_info = []\n\n    for event in json_object[\"GetEventListResult\"]:\n        attachments = event[\"Attachments\"]\n        for attachment in attachments:\n            pdf_info = PDFInfo.PDFInfo(event[\"Title\"], event[\"EndDate\"], attachment[\"Title\"], attachment[\"Url\"])\n            past_events_info.append(pdf_info.__dict__)\n    return past_events_info\n\n\ndef find_past_events_info_for_informa(json_object):\n    past_events_info = []\n\n    for event in json_object[\"files\"]:\n        pdf_info = PDFInfo.PDFInfo(event[\"category\"], event[\"updated_at\"], 
event[\"title\"], event[\"url\"])\n past_events_info.append(pdf_info.__dict__)\n return past_events_info\n\n\ndef get_data_for_homedot(href):\n headers = {\n \"cookie\": \"ASP.NET_SessionId=54fdvkoft30df3b4v1ego1eq; _ga=GA1.2.187890047.1629194699; _gid=GA1.2.1071652028.1629365143; AWSALB=nYuj3j/TH85GACNKznQvFiSvE/t0lcIUG2BOmMLFVhCcUWndQn3G2MB31kWcd2UxReaQRUzsycMCkLSSMshGN3Uh7li2AZTuYzAYLHKWYJBMzUzrd1E5gL4Ymwbu; AWSALBCORS=nYuj3j/TH85GACNKznQvFiSvE/t0lcIUG2BOmMLFVhCcUWndQn3G2MB31kWcd2UxReaQRUzsycMCkLSSMshGN3Uh7li2AZTuYzAYLHKWYJBMzUzrd1E5gL4Ymwbu; _gat=1; _gat_INVDSitecore=1\",\n \"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36\",\n }\n address = requests.get(href, headers=headers)\n data = bs(address.content)\n return data\n\n\ndef scrap_for_homedepot(href, parent):\n past_events_info = []\n data = get_data_for_homedot(href)\n if parent:\n temp = data.find(\"div\", attrs={\"class\": \"snapdown-container board-container clearfix past-container\"})\n else:\n temp = data\n events = temp.findChildren(\"div\", recursive=False)\n for event in events:\n date = event.find(\"span\", attrs={\"class\": \"event-title\"})\n event_title = event.find(\"span\", attrs={\"class\": \"event-date\"})\n if date is None or event_title is None:\n return past_events_info\n date = date.get_text()\n event_title = event_title.get_text()\n pdfs = event.select(\"div.snapdown-content.member-description.clearfix\")\n for pdf in pdfs:\n if pdf.find(\"a\")[\"href\"]:\n pdf_link = pdf.find(\"a\")[\"href\"]\n pdf_title = pdf.find(\"a\").get_text()\n pdf_info = PDFInfo.PDFInfo(date, event_title, pdf_title, pdf_link)\n past_events_info.append(pdf_info.__dict__)\n return past_events_info\n\n\ndef scrap(event_id, href):\n if event_id == \"id1\":\n return scrap_for_weyerhaeuser(href)\n elif event_id == \"id2\":\n return scrap_for_fb(href)\n elif event_id == \"id3\":\n event_data = []\n for link in constants.events_informa:\n event_info = scrap_for_informa(link)\n for event in event_info:\n event_data.append(event)\n return event_data\n elif event_id == \"id4\":\n past_events_info = []\n past_events = scrap_for_homedepot(href, True)\n for event in past_events:\n past_events_info.append(event)\n data = get_data_for_homedot(href)\n other_pages = data.find_all(\"li\", attrs={\"class\": \"next\"})\n for page in other_pages:\n link = page.find(\"a\")\n if link is None:\n continue\n link = link[\"href\"]\n res = \"https://ir.homedepot.com/\" + link[: 26] + \"async=1&\" + link[26:]\n past_events = scrap_for_homedepot(res, False)\n for event in past_events:\n past_events_info.append(event)\n return past_events_info\n\n\ndef scrap_for_weyerhaeuser(href):\n r = requests.get(href)\n soup = bs(r.content)\n pastEvents = soup.select(\"div.wd_event\")\n eventsLink = find_events_link(pastEvents)\n return find_past_events_info(eventsLink)\n\n\ndef scrap_for_fb(href):\n address = requests.get(href)\n return find_past_events_info_for_fb(address.json())\n\n\ndef scrap_for_informa(href):\n address = requests.get(href)\n s = address.text\n my_prefix = \"jQuery1830500746899123085_1629202578914(\"\n s = s[len(my_prefix):]\n s = s[:-1]\n s = json.loads(s)\n return find_past_events_info_for_informa(s)\n","sub_path":"Service/document_scrap.py","file_name":"document_scrap.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"10006786","text":"import numpy as np\n\n\nclass Feature:\n _keys = 
[\n 'doc_cos_1',\n 'doc_cos_2',\n 'sent_cos_1',\n 'sent_cos_2',\n 'sent_cos_3',\n ]\n\n @classmethod\n def size(cls):\n return len(cls._keys)\n\n def __init__(self):\n super().__setattr__('data', np.zeros((len(Feature._keys),), dtype=np.float64))\n\n def __getattr__(self, item):\n if item in Feature._keys:\n return self.data[Feature._keys.index(item)]\n else:\n raise AttributeError\n\n def __setattr__(self, key, value):\n if key in Feature._keys:\n self.data[Feature._keys.index(key)] = value\n else:\n raise AttributeError\n\n","sub_path":"feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"447624141","text":"\"\"\"\nAuthor: Travis Hammond\nVersion: 1_1_2020\n\"\"\"\n\n\nimport os\nimport datetime\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import model_from_json\n\ntry:\n from utils.neural_network import (\n Trainner, Predictor, dense, conv2d\n )\n from utils.util_funcs import load_directory_dataset, load_h5py\nexcept ImportError:\n from neural_network import (\n Trainner, Predictor, dense, conv2d\n )\n from util_funcs import load_directory_dataset, load_h5py\n\n\nclass GANTrainner(Trainner):\n \"\"\"Generative Adversarial Network Trainner is used for loading, saving,\n and training keras GAN models.\n \"\"\"\n\n def __init__(self, model, dis_model, train_data, file_loader=None,\n conditional=False, normal_distribution=False):\n \"\"\"Initializes train, validation, and test data.\n params:\n model: A compiled keras model, which is the generator\n (loss function does not matter)\n dis_model: A compiled keras model, which is the discriminator\n (loss function does not matter)\n train_data: A dictionary, numpy ndarray, string/path\n containg train data, or a list with x\n and y ndarrays (Ex. 
{'train_x': [...]})\n file_loader: A function for loading each file\n conditional: A boolean, which determines if the GAN is a\n conditional GAN and neededs y data\n normal_distribution: A boolean, which determines if the\n model should be trained with normal\n or uniform random values\n \"\"\"\n assert isinstance(train_data, (str, dict, np.ndarray, list)), (\n 'train_data must be a dictionary, a file/folder path, a ndarray, '\n 'or a list with two ndarrays'\n )\n self.model = model\n self.input_shape = self.model.layers[0].input_shape[0][1:]\n self.optimizer = model.optimizer\n self.dis_model = dis_model\n self.dis_optimizer = dis_model.optimizer\n self.metric = tf.keras.metrics.Mean(name='loss')\n self.dis_metric = tf.keras.metrics.Mean(name='dis_loss')\n self.train_data = train_data\n self.conditional = conditional\n self.normal_distribution = normal_distribution\n\n if (not isinstance(train_data, np.ndarray) and\n (self.conditional and not\n isinstance(train_data[0], np.ndarray))):\n if isinstance(train_data, str):\n if os.path.isdir(train_data):\n assert file_loader is not None\n if self.conditional:\n data = load_directory_dataset(\n train_data, file_loader\n )\n train_data = [data['train_x'], data['train_y']]\n else:\n train_data = load_directory_dataset(\n train_data, file_loader\n )['train_x']\n else:\n assert train_data.split('.')[1] == 'h5'\n train_data = load_h5py(train_data)\n if isinstance(train_data, dict):\n if 'train_x' in train_data:\n if self.conditional:\n self.train_data = [train_data['train_x'],\n train_data['train_y']]\n else:\n self.train_data = train_data['train_x']\n else:\n raise Exception('There must be a train dataset')\n else:\n raise ValueError('Invalid train_data')\n if self.conditional:\n self.train_data[0] = self.train_data[0].astype(\n tf.keras.backend.floatx()\n )\n self.train_data[1] = self.train_data[1].astype(\n tf.keras.backend.floatx()\n )\n else:\n self.train_data = self.train_data.astype(\n tf.keras.backend.floatx()\n )\n\n @tf.function\n def _train_step(self, x):\n \"\"\"Trains the GAN 1 epoch.\n params:\n x: A Tensor\n y: A Tensor\n \"\"\"\n if self.conditional:\n length = x[0].shape[0]\n else:\n length = x.shape[0]\n if self.normal_distribution:\n inputs = tf.random.normal([length,\n *self.input_shape])\n else:\n inputs = tf.random.uniform([length,\n *self.input_shape])\n if self.conditional:\n inputs = [inputs, x[1]]\n with tf.GradientTape() as tape, tf.GradientTape() as dis_tape:\n preds = self.model(inputs, training=True)\n if len(self.model.losses) > 0:\n reg_loss = tf.math.add_n(self.model.losses)\n else:\n reg_loss = 0\n if self.conditional:\n preds = [preds, x[1]]\n dis_preds = self.dis_model(preds, training=True)\n dis_real_preds = self.dis_model(x, training=True)\n if len(self.dis_model.losses) > 0:\n dis_reg_loss = tf.math.add_n(self.dis_model.losses)\n else:\n dis_reg_loss = 0\n loss = tf.nn.sigmoid_cross_entropy_with_logits(\n tf.ones_like(dis_preds), dis_preds\n ) + reg_loss\n dis_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n tf.zeros_like(dis_preds), dis_preds\n )\n dis_real_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n tf.ones_like(dis_real_preds), dis_real_preds\n )\n total_dis_loss = dis_loss + dis_real_loss + dis_reg_loss\n grads = tape.gradient(loss, self.model.trainable_variables)\n dis_grads = dis_tape.gradient(total_dis_loss,\n self.dis_model.trainable_variables)\n\n self.optimizer.apply_gradients(\n zip(grads, self.model.trainable_variables)\n )\n self.dis_optimizer.apply_gradients(\n zip(dis_grads, 
self.dis_model.trainable_variables)\n        )\n\n        self.metric(loss)\n        self.dis_metric(total_dis_loss)\n\n    def train(self, epochs, batch_size=None, verbose=True):\n        \"\"\"Trains the keras GAN models.\n        params:\n            epochs: An integer, which is the number of complete\n                    iterations to train\n            batch_size: An integer, which is the number of samples\n                        per gradient update\n            verbose: A boolean, which determines the verbosity level\n        \"\"\"\n\n        if self.conditional:\n            length = self.train_data[0].shape[0]\n            batches = tf.data.Dataset.from_tensor_slices(\n                (self.train_data[0],\n                 self.train_data[1])\n            ).shuffle(length).batch(batch_size)\n        else:\n            length = self.train_data.shape[0]\n            batches = tf.data.Dataset.from_tensor_slices(\n                self.train_data\n            ).shuffle(length).batch(batch_size)\n        for epoch in range(1, epochs + 1):\n            if verbose:\n                print(f'Epoch {epoch}/{epochs}')\n            count = 0\n            for batch in batches:\n                self._train_step(batch)\n                count += np.minimum(batch_size, length - count)\n                print(f'{count}/{length}', end='\\r')\n            if verbose:\n                print(f'{count}/{length} - '\n                      f'loss: {self.metric.result()} - '\n                      f'dis_loss: {self.dis_metric.result()}')\n            self.metric.reset_states()\n            self.dis_metric.reset_states()\n\n    def load(self, path, optimizer='sgd', dis_optimizer='sgd'):\n        \"\"\"Loads a generator and discriminator model and weights from a file.\n           (overrides the initially provided models)\n        params:\n            path: A string, which is the path to a folder\n                  containing model.json, weights.h5, and note.txt\n            optimizer: A string or optimizer instance, which will be\n                       the optimizer for the loaded generator model\n            dis_optimizer: A string or optimizer instance, which will be\n                           the optimizer for the loaded discriminator model\n        \"\"\"\n        with open(os.path.join(path, 'model.json'), 'r') as file:\n            self.model = model_from_json(file.read())\n        self.model.optimizer = optimizer\n        with open(os.path.join(path, 'dis_model.json'), 'r') as file:\n            self.dis_model = model_from_json(file.read())\n        self.dis_model.optimizer = dis_optimizer\n        self.model.load_weights(os.path.join(path, 'weights.h5'))\n        self.dis_model.load_weights(os.path.join(path, 'dis_weights.h5'))\n        with open(os.path.join(path, 'note.txt'), 'r') as file:\n            print(file.read(), end='')\n\n    def save(self, path, note=None):\n        \"\"\"Saves the generator and discriminator model and weights to a file.\n        params:\n            path: A string, which is the path to create a folder in\n                  containing model.json, weights.h5, note.txt,\n                  dis_model.json, and dis_weights.h5\n            note: A string, which is written to note.txt; when None the\n                  generator summary is written instead\n        return: A string, which is the given path + folder name\n        \"\"\"\n        time = datetime.datetime.now()\n        path = os.path.join(path, time.strftime(r'%Y%m%d_%H%M%S_%f'))\n        os.mkdir(path)\n        self.model.save_weights(os.path.join(path, 'weights.h5'))\n        self.dis_model.save_weights(os.path.join(path, 'dis_weights.h5'))\n        with open(os.path.join(path, 'model.json'), 'w') as file:\n            file.write(self.model.to_json())\n        with open(os.path.join(path, 'dis_model.json'), 'w') as file:\n            file.write(self.dis_model.to_json())\n        with open(os.path.join(path, 'note.txt'), 'w') as file:\n            if note is None:\n                self.model.summary(print_fn=lambda line: file.write(line+'\\n'))\n            else:\n                file.write(note)\n        return path\n\n\nclass GANPredictor(Predictor):\n    \"\"\"Generative Adversarial Network Predictor is used for\n    loading and predicting keras GAN models.\n    \"\"\"\n\n    def predict(self, x, y=None):\n        \"\"\"Predicts on a single sample.\n        params:\n            x: A single model input\n            y: A single model conditional input\n        return: A result from the model output\n        \"\"\"\n        if y is None:\n            return 
self.model.predict(np.expand_dims(x, axis=0))[0]\n return self.model.predict([np.expand_dims(x, axis=0),\n np.expand_dims(y, axis=0)])[0]\n\n def predict_all(self, x, y=None, batch_size=None):\n \"\"\"Predicts on many samples.\n params:\n x: A ndarray of model inputs\n y: A ndarray of model conditional inputs\n return: A result from the model output\n \"\"\"\n if y is None:\n return self.model.predict(x, batch_size=batch_size)\n return self.model.predict([x, y], batch_size=batch_size)\n\n def random_normal_predict(self, y=None):\n \"\"\"Predicts an output with a random normal distribution.\n params:\n y: A single model conditional input\n return: A result from the model output\n \"\"\"\n input_shape = self.model.layers[0].input_shape[0][1:]\n normal = tf.random.normal([1, *input_shape])\n if y is None:\n return self.model.predict(normal)[0]\n return self.model.predict([normal,\n np.expand_dims(y, axis=0)])[0]\n\n def random_uniform_predict(self, y=None):\n \"\"\"Predicts an output with a random uniform distribution.\n params:\n y: A single model conditional input\n return: A result from the model output\n \"\"\"\n input_shape = self.model.layers[0].input_shape[0][1:]\n uniform = tf.random.uniform([1, *input_shape])\n if y is None:\n return self.model.predict(uniform)[0]\n return self.model.predict([uniform,\n np.expand_dims(y, axis=0)])[0]\n\n\nif __name__ == '__main__':\n import image as img\n from time import sleep\n\n training = False\n conditional = True\n path = 'trained_conditional' if conditional else 'trained'\n\n (tx, ty), _ = keras.datasets.fashion_mnist.load_data()\n tx = np.expand_dims((tx - 127.5) / 127.5, axis=-1)\n if conditional:\n labels = ['T-shirt', 'Trouser', 'Pullover', 'Dress',\n 'Coat', 'Sandal', 'Shirt', 'Sneaker',\n 'Bag', 'Ankle boot']\n ty = np.identity(len(labels))[ty]\n tx = [tx, ty]\n\n if training:\n if conditional:\n # Generator Model\n inputs = keras.layers.Input(shape=(100))\n x1 = dense(512)(inputs)\n cond_inputs = keras.layers.Input(shape=(len(labels)))\n x2 = dense(512)(cond_inputs)\n x = keras.layers.Concatenate()([x1, x2])\n x = dense(7*7*32)(x)\n x = keras.layers.Reshape((7, 7, 32))(x)\n x = conv2d(128, 3, strides=1)(x)\n x = conv2d(64, 3, strides=2, transpose=True)(x)\n outputs = conv2d(1, 3, strides=2, \n activation='tanh', batch_norm=False,\n transpose=True)(x)\n model = keras.Model(inputs=[inputs, cond_inputs],\n outputs=outputs)\n model.summary()\n optimizer = tf.keras.optimizers.Adam(.0002, .5)\n model.optimizer = optimizer\n\n # Discriminator Model\n inputs = keras.layers.Input(shape=(28, 28, 1))\n cond_inputs = keras.layers.Input(shape=(len(labels)))\n x = conv2d(64, 3, strides=2, activation=None,\n batch_norm=False)(inputs)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(128, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(256, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(512, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = keras.layers.Flatten()(x)\n x2 = dense(1024, activation=None)(cond_inputs)\n x2 = keras.layers.LeakyReLU(alpha=0.2)(x2)\n x = keras.layers.Concatenate()([x, x2])\n x = dense(1024, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n outputs = dense(1, activation=None, batch_norm=False)(x)\n dis_model = keras.Model(inputs=[inputs, cond_inputs],\n outputs=outputs)\n dis_model.summary()\n dis_optimizer = tf.keras.optimizers.Adam(.0002, .5)\n dis_model.optimizer = dis_optimizer\n 
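# Added design note (an observation, not from the original author): in this conditional GAN the one-hot label vector is fed to both networks, so the discriminator scores (image, label) pairs rather than images alone; Adam(.0002, .5) follows the usual DCGAN settings.\n        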
else:\n # Generator Model\n inputs = keras.layers.Input(shape=(100))\n x = dense(7*7*32)(inputs)\n x = keras.layers.Reshape((7, 7, 32))(x)\n x = conv2d(128, 3, strides=1)(x)\n x = conv2d(64, 3, strides=2, transpose=True)(x)\n outputs = conv2d(1, 3, strides=2,\n activation='tanh', batch_norm=False,\n transpose=True)(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n model.summary()\n optimizer = tf.keras.optimizers.Adam(.0002, .5)\n # model.compile(optimizer=optimizer, loss='mse')\n model.optimizer = optimizer\n\n # Discriminator Model\n inputs = keras.layers.Input(shape=(28, 28, 1))\n x = conv2d(64, 3, strides=2, activation=None,\n batch_norm=False)(inputs)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(128, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(256, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(512, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = keras.layers.Flatten()(x)\n outputs = dense(1, activation=None, batch_norm=False)(x)\n dis_model = keras.Model(inputs=inputs, outputs=outputs)\n dis_model.summary()\n dis_optimizer = tf.keras.optimizers.Adam(.0002, .5)\n dis_model.optimizer = dis_optimizer\n\n gant = GANTrainner(model, dis_model, tx,\n conditional=conditional)\n if path is not None:\n gant.load(path)\n gant.train(50, 512)\n path = gant.save('')\n gant.load(path, optimizer=optimizer, dis_optimizer=dis_optimizer)\n\n del gant\n\n ganp = GANPredictor(path)\n\n ws = img.Windows()\n w = ws.add('Image')\n ws.start()\n\n while True:\n if conditional:\n identity = np.identity(len(labels))\n for ndx in range(len(labels)):\n preds = ganp.random_uniform_predict(identity[ndx])\n preds = np.squeeze(preds * 127.5 + 127.5).astype(np.uint8)\n ws.set(w, preds)\n print(labels[ndx])\n sleep(2)\n else:\n preds = ganp.random_uniform_predict() * 127.5 + 127.5\n preds = np.squeeze(preds).astype(np.uint8)\n ws.set(w, preds)\n sleep(1)\n\n ws.stop()\n","sub_path":"video12/gan_network.py","file_name":"gan_network.py","file_ext":"py","file_size_in_byte":17606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"539165967","text":"import argparse, os, random\n\n\nclass Board:\n def __init__(self, n):\n self.size = n\n self.cells = [[' ' for _ in range(n)] for _ in range(n)]\n self.winning_lines = []\n \n # add winning rows\n for i in range(n):\n self.winning_lines.append([])\n for j in range(n):\n self.winning_lines[-1].append((i, j))\n \n # add winning columns\n for i in range(n):\n self.winning_lines.append([])\n for j in range(n):\n self.winning_lines[-1].append((j, i))\n \n # add winning diagonal lines\n self.winning_lines.append([])\n for i in range(n):\n self.winning_lines[-1].append((i, i))\n self.winning_lines.append([])\n for i in range(n):\n self.winning_lines[-1].append(((n-1)-i, i))\n\n def __str__(self):\n result = ''\n for i in range(self.size):\n # print top part of row\n line = ' '\n for _ in range(self.size):\n line += '+---'\n line += '+\\n'\n result += line\n\n # print middle section of row\n line = str(self.size - 1 - i) + ' '\n for j in range(self.size):\n line += '| ' + self.cells[i][j] + ' '\n line += '|\\n'\n result += line\n\n # print bottom line of bottom row with labels\n line = ' '\n labels = ' '\n for i in range(self.size):\n line += '+---'\n labels += str(i) + ' '\n line += '+\\n'\n result += line + labels\n\n return result\n\n def get_open_cells(self):\n open_cells = []\n for i in 
range(len(self.cells)):\n for j in range(len(self.cells[i])):\n if self.cells[i][j] == ' ':\n open_cells.append((i, j))\n return open_cells\n\n def compute_danger(self):\n # for simplicity, we only compute player x danger\n # = sum of (1/2)^(# of open cells remaining\n # in line that contains no o's)\n danger = 0\n for line in self.winning_lines:\n if 'O' not in line:\n danger += (1/2)**(len(line) - line.count('X'))\n return danger\n\n def get_optimal_move(self):\n # maximize the value we subtract from the \"danger\" function\n open_cells = self.get_open_cells()\n best_move = open_cells[0]\n best_move_val = 0\n\n for pos in open_cells:\n val = 0\n for line in self.winning_lines:\n if pos in line and 'O' not in line:\n val += (1/2)**(len(line) - line.count('X'))\n if val > best_move_val:\n best_move = pos\n best_move_val = val\n\n return best_move\n\n def add_move(self, position, symbol):\n # in all lines, replace position with symbol\n for i in range(len(self.winning_lines)):\n for j in range(len(self.winning_lines[i])):\n if self.winning_lines[i][j] == position:\n self.winning_lines[i][j] = symbol\n\n # remove winning lines that contain both symbols\n temp = list(filter(lambda lst: not('X' in lst and 'O' in lst), self.winning_lines))\n self.winning_lines = temp\n\n # add move to board\n self.cells[position[0]][position[1]] = symbol\n\n def check_for_win(self):\n if len(self.winning_lines) == 0:\n return True, 'No one'\n\n for line in self.winning_lines:\n if line.count('X') == len(line):\n return True, 'Player X'\n elif line.count('O') == len(line):\n return True, 'Player O'\n return False, None\n\n\nclass Player:\n def __init__(self, symbol, board):\n self.symbol = symbol\n self.board = board\n\n def add_move(self, position):\n self.board.add_move(position, self.symbol)\n\n def choose_move(self):\n while True:\n try:\n pos = input('Please enter x,y coordinates: ').split(',')\n pos = convert_to_working(tuple(int(c.strip(\"()[] \")) for c in pos), self.board)\n if pos in self.board.get_open_cells():\n self.board.add_move(pos, self.symbol)\n break\n else:\n print('Cell not available.')\n except:\n print('Cannot read input. 
Please try again.')\n\n    def random_move(self):\n        open_cells = self.board.get_open_cells()\n        random_index = random.randint(0, len(open_cells)-1)\n        position = open_cells[random_index]\n        self.board.add_move(position, self.symbol)\n\n\ndef convert_to_display(coordinates, board):\n    x = coordinates[1]\n    y = (board.size - 1) - coordinates[0]\n    return (x, y)\n\n\ndef convert_to_working(coordinates, board):\n    x = (board.size - 1) - coordinates[1]\n    y = coordinates[0]\n    return (x, y)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('size', type=int, help='size of board (height/width)')\n    args = parser.parse_args()\n    size = args.size\n\n    board = Board(size)\n    player_x = Player('X', board)\n    player_o = Player('O', board)\n    game_over = False\n    winner = None\n\n    while True:\n        os.system('clear') # clear screen before printing updated grid\n        print(board)\n        print('Player X, choose an option:')\n        opt = input('(1) Enter a move\\n(2) Random move\\n(x) Exit\\n').strip()\n        if opt == '1':\n            player_x.choose_move()\n        elif opt == '2':\n            player_x.random_move()\n        elif opt == 'x':\n            exit(0)\n\n        game_over, winner = board.check_for_win()\n        if game_over:\n            break\n\n        potential = board.compute_danger()\n        optimal_move = board.get_optimal_move()\n\n        os.system('clear') # clear screen before printing updated grid\n        print(board)\n        print('Erdos-Selfridge potential: ' + str(potential))\n        print('Optimal move: ' + str(convert_to_display(optimal_move, board)) + '\\n')\n\n        print('Player O, choose an option:')\n        opt = input('(1) Enter a move\\n(2) Random move\\n(3) Potential strategy\\n(x) Exit\\n').strip()\n        if opt == '1':\n            player_o.choose_move()\n        elif opt == '2':\n            player_o.random_move()\n        elif opt == '3':\n            player_o.add_move(optimal_move)\n        elif opt == 'x':\n            exit(0)\n\n        game_over, winner = board.check_for_win()\n        if game_over:\n            break\n\n    os.system('clear') # clear screen before printing updated grid\n    print(board)\n    print('Game over. 
' + str(winner) + ' wins!')\n\n","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"140133466","text":"from itertools import count\nimport json\nimport os\nimport struct\nimport sys\n\ndef main(src_dir, build_dir, out_file):\n index = []\n paths = []\n hidden_deps = set()\n\n src = lambda path: os.path.join(src_dir, path)\n build = lambda path: os.path.join(build_dir, path)\n\n def add(ty, name, path, hide_dep=False):\n size = os.stat(path).st_size\n\n index.append({\n 'name': name,\n 'length': size,\n 'type': ty,\n })\n paths.append(path)\n\n if hide_dep:\n hidden_deps.add(path)\n\n add('image', 'tiles', build('tiles.png'))\n add('image', 'fonts', build('fonts.png'))\n add('image', 'items_img', build('items.png'))\n add('image', 'ui_atlas', build('ui_atlas.png'))\n\n add('binary', 'client_data', build('client_data.bin'))\n\n add('text', 'sprite.vert', src('assets/shaders/sprite.vert'))\n add('text', 'sprite.frag', src('assets/shaders/sprite.frag'))\n add('text', 'app_pony.frag', src('assets/shaders/app_pony.frag'))\n add('text', 'cursor.frag', src('assets/shaders/cursor.frag'))\n add('text', 'cursor.vert', src('assets/shaders/cursor.vert'))\n\n add('text', 'blit_post.frag', src('assets/shaders/blit_post.frag'))\n add('text', 'blit_output.frag', src('assets/shaders/blit_output.frag'))\n add('text', 'blend_layers.frag', src('assets/shaders/blend_layers.frag'))\n add('text', 'blit_fullscreen.vert', src('assets/shaders/blit_fullscreen.vert'))\n\n add('text', 'terrain2.frag', src('assets/shaders/terrain2.frag'))\n add('text', 'terrain2.vert', src('assets/shaders/terrain2.vert'))\n add('text', 'structure2.frag', src('assets/shaders/structure2.frag'))\n add('text', 'structure2.vert', src('assets/shaders/structure2.vert'))\n add('text', 'light2.frag', src('assets/shaders/light2.frag'))\n add('text', 'light2.vert', src('assets/shaders/light2.vert'))\n add('text', 'entity2.frag', src('assets/shaders/entity2.frag'))\n add('text', 'entity2.vert', src('assets/shaders/entity2.vert'))\n add('text', 'slicing.inc', src('assets/shaders/slicing.inc'))\n\n add('text', 'debug_graph.vert', src('assets/shaders/debug_graph.vert'))\n add('text', 'debug_graph.frag', src('assets/shaders/debug_graph.frag'))\n\n add('text', 'ui_blit.vert', src('assets/shaders/ui_blit.vert'))\n add('text', 'ui_blit.frag', src('assets/shaders/ui_blit.frag'))\n add('text', 'ui_blit_tiled.vert', src('assets/shaders/ui_blit_tiled.vert'))\n add('text', 'ui_blit_tiled.frag', src('assets/shaders/ui_blit_tiled.frag'))\n add('text', 'ui_blit2.vert', src('assets/shaders/ui_blit2.vert'))\n add('text', 'ui_blit2.frag', src('assets/shaders/ui_blit2.frag'))\n\n\n with open(build('structures_list.json')) as f:\n structures_list = json.load(f)\n for s in structures_list:\n add('image', s, build(s + '.png'))\n\n with open(build('sprites_list.json')) as f:\n sprites_list = json.load(f)\n for f in sprites_list:\n dest, _ = os.path.splitext(os.path.basename(f))\n add('image', dest, build(os.path.join('sprites', f)))\n\n\n # Generate the pack containing the files added above.\n\n offset = 0\n for entry in index:\n entry['offset'] = offset\n offset += entry['length']\n\n\n index_str = json.dumps(index)\n index_len = len(index_str.encode())\n\n with open(out_file, 'wb') as f:\n f.write(struct.pack(''+temp_id+url)\r\n\r\ndef ajax(request):\r\n if request.method == \"POST\":\r\n 
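# Added note on the assumed request contract: the AJAX POST is expected to carry the actress display name ('name') and her DMM actress id ('id') picked on the client page.\r\n        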
print(request.POST['name'],request.POST['id'])\r\n name = request.POST['name']\r\n av_id = request.POST['id']\r\n # response_data = {'name':name,'av_id':av_id}\r\n # return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\r\n \r\n url = 'https://www.dmm.co.jp/digital/videoa/-/list/narrow/=/article=actress/id='+av_id+'/limit=30/n1=DgRJTglEBQ4GpoD6,YyI,qs_/'\r\n # print(url)\r\n html = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(html, 'html.parser')\r\n src = soup.find_all('p',class_=\"tmb\")\r\n \r\n\r\n pic_url = []\r\n for j in src:\r\n temp = j.find_all('img')[0].get_attribute_list('src')[0]\r\n pic_url.append(temp)\r\n \r\n context = {\r\n 'name' : name,\r\n 'pic_url' : pic_url,\r\n }\r\n return HttpResponse(json.dumps(context), content_type=\"application/json\")","sub_path":"faceApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"394891869","text":"\n\nfrom django.urls import path\nfrom . import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n path('Home/',views.homepage,name='homepage'),\n path('write-story/',views.startStory,name='startStory'),\n path('submit-story/',views.submitStory,name='submitStory'),\n path('imageupload/',views.imageupload,name='imageupload'),\n path('story-list/',views.listOfStories,name='listOfStories'),\n url(r'^login/$', views.LoginFormView.as_view(), name='login'),\n url(r'^register/$', views.UserFormView.as_view(), name='register'),\n url(r'^logout/$', views.logout_user, name='logout_user'),\n path('update_rating/',views.update_rating,name='update_rating'),\n path('submit-review/',views.submit_review,name='submit-review'),\n url(r'^(?P[0-9a-zA-Z\\s]+)/(?P[0-9a-zA-Z\\s]+)/read-story/$', views.readStory,name='readStory'),\n]\n\n\n","sub_path":"storiesandstuff/stories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"532132996","text":"import MySQLdb\nimport os\nimport string\n\ndb = MySQLdb.connect (host=\"localhost\",\n user=\"root\",\n passwd=\"Abc@1234\",\n db=\"email_verify\",\n local_infile = 1) #Grants permission to write to db from an input file. Without this you get sql Error: (1148, 'The used command is not allowed with this MySQL version')\n\nprint(\"\\nConnection to DB established\\n\")\n\n#The statement 'IGNORE 1 LINES' below makes the Python script ignore first line on csv file\n#You can execute the sql below on the mysql bash to test if it works\nsqlLoadData = \"\"\"load data local infile 'series_mbl_vcon_circle.csv' into table Series_Mbl FIELDS TERMINATED BY ',' \n ENCLOSED BY '\"' LINES TERMINATED BY '\\n' IGNORE 1 LINES;\"\"\"\n\ntry:\n curs = db.cursor() \n curs.execute(sqlLoadData)\n db.commit() \n print(\"SQL execution complete\") \n resultSet = curs.fetchall() \nexcept: \n print(\"Error incurred: \") \n db.rollback()\n db.close()\nprint(\"Data loading complete.\\n\")","sub_path":"html-file/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"421162736","text":"\"\"\"\nHernandez-Andres, Lee and Romero (1999) Correlated Colour Temperature\n=====================================================================\n\nDefines the *Hernandez-Andres et al. 
(1999)* correlated colour temperature\n:math:`T_{cp}` computations objects:\n\n- :func:`colour.temperature.xy_to_CCT_Hernandez1999`: Correlated colour\n temperature :math:`T_{cp}` computation of given *CIE xy* chromaticity\n coordinates using *Hernandez-Andres, Lee and Romero (1999)* method.\n- :func:`colour.temperature.CCT_to_xy_Hernandez1999`: *CIE xy* chromaticity\n coordinates computation of given correlated colour temperature\n :math:`T_{cp}` using *Hernandez-Andres, Lee and Romero (1999)* method.\n\nReferences\n----------\n- :cite:`Hernandez-Andres1999a` : Hernández-Andrés, J., Lee, R. L., &\n Romero, J. (1999). Calculating correlated color temperatures across the\n entire gamut of daylight and skylight chromaticities. Applied Optics,\n 38(27), 5703. doi:10.1364/AO.38.005703\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nfrom scipy.optimize import minimize\n\nfrom colour.colorimetry import CCS_ILLUMINANTS\nfrom colour.hints import (\n ArrayLike,\n Dict,\n FloatingOrArrayLike,\n FloatingOrNDArray,\n NDArray,\n Optional,\n)\nfrom colour.utilities import as_float_array, as_float, tsplit, usage_warning\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"colour-developers@colour-science.org\"\n__status__ = \"Production\"\n\n__all__ = [\n \"xy_to_CCT_Hernandez1999\",\n \"CCT_to_xy_Hernandez1999\",\n]\n\n\ndef xy_to_CCT_Hernandez1999(xy: ArrayLike) -> FloatingOrNDArray:\n \"\"\"\n Return the correlated colour temperature :math:`T_{cp}` from given\n *CIE xy* chromaticity coordinates using *Hernandez-Andres et al. (1999)*\n method.\n\n Parameters\n ----------\n xy\n *CIE xy* chromaticity coordinates.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Correlated colour temperature :math:`T_{cp}`.\n\n References\n ----------\n :cite:`Hernandez-Andres1999a`\n\n Examples\n --------\n >>> xy = np.array([0.31270, 0.32900])\n >>> xy_to_CCT_Hernandez1999(xy) # doctest: +ELLIPSIS\n 6500.7420431...\n \"\"\"\n\n x, y = tsplit(xy)\n\n n = (x - 0.3366) / (y - 0.1735)\n CCT = (\n -949.86315\n + 6253.80338 * np.exp(-n / 0.92159)\n + 28.70599 * np.exp(-n / 0.20039)\n + 0.00004 * np.exp(-n / 0.07125)\n )\n\n n = np.where(CCT > 50000, (x - 0.3356) / (y - 0.1691), n)\n\n CCT = np.where(\n CCT > 50000,\n 36284.48953\n + 0.00228 * np.exp(-n / 0.07861)\n + 5.4535e-36 * np.exp(-n / 0.01543),\n CCT,\n )\n\n return as_float(CCT)\n\n\ndef CCT_to_xy_Hernandez1999(\n CCT: FloatingOrArrayLike, optimisation_kwargs: Optional[Dict] = None\n) -> NDArray:\n \"\"\"\n Return the *CIE xy* chromaticity coordinates from given correlated colour\n temperature :math:`T_{cp}` using *Hernandez-Andres et al. (1999)* method.\n\n Parameters\n ----------\n CCT\n Correlated colour temperature :math:`T_{cp}`.\n optimisation_kwargs\n Parameters for :func:`scipy.optimize.minimize` definition.\n\n Returns\n -------\n :class:`numpy.ndarray`\n *CIE xy* chromaticity coordinates.\n\n Warnings\n --------\n *Hernandez-Andres et al. (1999)* method for computing *CIE xy* chromaticity\n coordinates from given correlated colour temperature is not a bijective\n function and might produce unexpected results. It is given for consistency\n with other correlated colour temperature computation methods but should be\n avoided for practical applications. 
The current implementation relies on\n    optimization using :func:`scipy.optimize.minimize` definition and thus has\n    reduced precision and poor performance.\n\n    References\n    ----------\n    :cite:`Hernandez-Andres1999a`\n\n    Examples\n    --------\n    >>> CCT_to_xy_Hernandez1999(6500.7420431786531)  # doctest: +ELLIPSIS\n    array([ 0.3127...,  0.329...])\n    \"\"\"\n\n    usage_warning(\n        '\"Hernandez-Andres et al. (1999)\" method for computing '\n        '\"CIE xy\" chromaticity coordinates from given correlated '\n        \"colour temperature is not a bijective function and \"\n        \"might produce unexpected results. It is given for \"\n        \"consistency with other correlated colour temperature \"\n        \"computation methods but should be avoided for practical \"\n        \"applications.\"\n    )\n\n    CCT = as_float_array(CCT)\n    shape = list(CCT.shape)\n    CCT = np.atleast_1d(CCT.reshape([-1, 1]))\n\n    def objective_function(\n        xy: ArrayLike, CCT: FloatingOrArrayLike\n    ) -> FloatingOrNDArray:\n        \"\"\"Objective function.\"\"\"\n\n        objective = np.linalg.norm(\n            xy_to_CCT_Hernandez1999(xy) - as_float_array(CCT)\n        )\n\n        return as_float(objective)\n\n    optimisation_settings = {\n        \"method\": \"Nelder-Mead\",\n        \"options\": {\n            \"fatol\": 1e-10,\n        },\n    }\n    if optimisation_kwargs is not None:\n        optimisation_settings.update(optimisation_kwargs)\n\n    xy = as_float_array(\n        [\n            minimize(\n                objective_function,\n                x0=CCS_ILLUMINANTS[\"CIE 1931 2 Degree Standard Observer\"][\n                    \"D65\"\n                ],\n                args=(CCT_i,),\n                **optimisation_settings,\n            ).x\n            for CCT_i in as_float_array(CCT)\n        ]\n    )\n\n    return xy.reshape(shape + [2])\n","sub_path":"colour/temperature/hernandez1999.py","file_name":"hernandez1999.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"628362063","text":"import sys\r\nfrom pyspark.sql import *\r\nspark = SparkSession\\\r\n    .builder\\\r\n    .appName(\"customername\")\\\r\n    .getOrCreate() # creating the spark session\r\n \r\nlines = spark.read.text(\"purchase\").rdd.map(lambda r: r[0])\r\nparts = lines.map(lambda l: l.split(\"\\t\"))\r\npurchase = parts.map(lambda p:\r\nRow(year=int(p[0]),cid=p[1],isbn=p[2],seller=p[3],price=int(p[4])))\r\npurchaseTable = spark.createDataFrame(purchase)\r\npurchaseTable.createOrReplaceTempView(\"purchase\") \r\n\r\nlines = spark.read.text(\"book\").rdd.map(lambda r: r[0])\r\nparts = lines.map(lambda l: l.split(\"\\t\"))\r\nbook = parts.map(lambda p: Row(isbn=p[0],name=p[1]))\r\nbookTable = spark.createDataFrame(book)\r\nbookTable.createOrReplaceTempView(\"book\")\r\n\r\nlines = spark.read.text(\"customer\").rdd.map(lambda r: r[0])\r\nparts = lines.map(lambda l: l.split(\"\\t\"))\r\ncustomer = parts.map(lambda p:\r\nRow(cid=p[0],name=p[1],age=int(p[2]),address=p[3],sex=p[4]))\r\ncustomerTable = spark.createDataFrame(customer)\r\ncustomerTable.createOrReplaceTempView(\"customer\")\r\n\r\ncustomername = spark.sql(\"select name from customer where cid IN (select distinct(purch.cid) as cid from purchase as purch INNER JOIN (select pur.cid as cid,pur.isbn as isbn from purchase as pur INNER JOIN (select cid from customer where name like '%Harry%') as harry ON pur.cid=harry.cid) as common ON purch.isbn=common.isbn and purch.cid != common.cid)\")\r\nNames = customername.rdd.map(lambda p: p.name).collect()\r\nf = open('customernames.txt','w') # Creating an output file named customernames in the current working directory\r\nfor name in Names:\r\n\tf.write(name+'\\n') # writing the names to the 
file\r\nf.close()\r\nspark.stop()\r\n","sub_path":"RelationalDataAnalysis.py","file_name":"RelationalDataAnalysis.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"479340455","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport re\nimport subprocess\nimport shutil\nfrom pathlib import Path\nfrom build import Builder\n\npackage_name = Builder.package_name_from_filename(__file__)\ndependencies = ()\n\n\ndef prepare(builder):\n archive_name = package_name + '-src.tgz'\n builder.extract(archive_name)\n return True\n\n\ndef build(builder):\n os.chdir('icu')\n os.chdir('source')\n\n \"\"\"\n List of relevant configure arguments:\n --enable-shared build shared libraries default=yes\n --enable-static build static libraries default=no\n --enable-extras build ICU extras default=yes\n --enable-layoutex build ICU's Paragraph Layout library default=yes.\n icu-le-hb must be installed via pkg-config. See http://harfbuzz.org\n\n --enable-tests build ICU tests default=yes\n --enable-samples build ICU samples default=yes\n\n --with-library-bits=bits specify how many bits to use for the library (32, 64, 64else32, nochange) default=nochange\n --with-data-packaging specify how to package ICU data. Possible values:\n files raw files (.res, etc)\n archive build a single icudtXX.dat file\n library shared library (.dll/.so/etc.)\n static static library (.a/.lib/etc.)\n auto build shared if possible (default)\n See http://userguide.icu-project.org/icudata for more info.\n \"\"\"\n common_configure_args = ['--disable-shared', '--enable-static', '--disable-extras', '--disable-layoutex',\n '--disable-tests', '--disable-samples',\n '--with-library-bits={}'.format(builder.target_platform_bits),\n '--with-data-packaging=static']\n if builder.toolset.startswith('msvc'):\n environment = builder.setup_env()\n # We need to add the /utf-8 flag to make cl treat source files UTF8 encoded.\n environment['CFLAGS'] = '{} /utf-8'.format(environment.get('CFLAGS', ''))\n environment['CXXFLAGS'] = '{} /utf-8'.format(environment.get('CXXFLAGS', ''))\n\n # Convert Windows path to Cygwin path.\n install_prefix = re.sub(r'(.):/(.*)', r'/cygdrive/\\1/\\2', builder.install_prefix.as_posix())\n\n configure_args = [(builder.cygwin / 'bin' / 'bash.exe').as_posix(), 'runConfigureICU', 'Cygwin/MSVC',\n '--prefix={}'.format(install_prefix)]\n configure_args.extend(common_configure_args)\n subprocess.check_call(configure_args, env=environment)\n\n # Replace linker parameter '-o' to '/out' in all Makefiles.\n for filename in Path('.').glob('**/Makefile'):\n print('* Patching compiler flags in file {}'.format(filename))\n with open(filename.as_posix(), 'r') as file:\n file_data = file.read()\n file_data = re.sub(r'(\\$\\(LINK\\.cc\\).*) -o (.*)', r'\\1 /OUT:\\2', file_data)\n with open(filename.as_posix(), 'w') as file:\n file.write(file_data)\n\n builder.make()\n builder.make(install=True)\n\n # The install target places DLL files in the lib folder, even though they actually belong in the bin folder.\n for filename in (builder.install_prefix / 'lib').glob('**/icu*64.dll'):\n shutil.move(filename, builder.install_prefix / 'bin' / filename.name)\n else:\n builder.configure(args=common_configure_args)\n builder.make(install=True)\n\n\ndef cleanup(builder):\n builder.remove_folder('icu')\n\n\nif __name__ == \"__main__\":\n print('You must not call this script directly.')\n 
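# Added note: this module is meant to be driven by the build script, which imports it and calls prepare/build/cleanup; a non-zero exit flags direct invocation as an error.\n    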
sys.exit(1)\n","sub_path":"3rdparty/packages/icu4c-65_1.py","file_name":"icu4c-65_1.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"97354398","text":"import pandas as pd\nimport numpy as np\nimport warnings\n\nfrom ..utils.validation import isfloat\nfrom ..utils.utilities import list_del_indices\n__all__ = [\n 'missing_values',\n]\n\ndef cut_df(col, df, paste_col=False, on_right=False):\n \"\"\" \n To cut one or more columns from a dataframe as seprate dataframe.\n paste_col sets optional columns for the resulted dataframe. Both col and \n paste_col must be lists.\n on_right: select as many columns as length of 'col' from right side of \n dataframe. \n Notice: The order must had been considered in the 'paste_col'\n \"\"\" \n if on_right:\n n = len(col)\n df_paste = df.iloc[:,-n:]\n if paste_col:\n df_paste.columns = paste_col\n df = df.iloc[:,:-n]\n else:\n df_paste = df[col]\n if paste_col:\n df_paste.columns = paste_col\n df.drop(col,axis=1, inplace=True)\n return df, df_paste\n \ndef _check_object_col(df, name):\n \"\"\"\n Goals: \n - check if columns with type 'object' don't have elements that can be \n converted to numeric values.\n - remove columns with all non numeric elements.\n \"\"\"\n object_cols = [df.dtypes.index[i] for i, typ in enumerate(df.dtypes) if typ == \"object\"]\n for col in object_cols:\n for i, value in enumerate(df[col]):\n if isfloat(value):\n raise ValueError(\"column '%s' in '%s' includes both string and float values.\" %(str(col),name))\n # drop object columns\n if len(object_cols)>0:\n df = df.drop(object_cols,1)\n return df\n \nclass missing_values(object):\n \"\"\" Handle all the missing values.\n \n Parameters\n ----------\n strategy: string, optional (default=\"ignore_row\")\n \n list of strategies:\n - interpolate: interpolate based on sorted target values\n - zero: set to the zero\n - ignore_row: remove the entire row in data and target\n - ignore_column: remove the entire column in data and target\n\n string_as_null: boolean, optional (default=True)\n If True non numeric elements are considered to be null in computations.\n \n missing_values: list, optional (default=None)\n where you define specific formats of missing values. It is a list of string, float or integer values.\n\n inf_as_null: boolean, optional (default=True)\n If True inf and -inf elements are considered to be null in computations.\n\n Returns\n -------\n data frame\n mask: Only if strategy = ignore_row. Mask is a binary pandas series which stores the information regarding removed\n \"\"\"\n def __init__(self, strategy=\"ignore_row\", string_as_null = True,\n inf_as_null = True, missing_values = None):\n self.strategy = strategy\n self.string_as_null = string_as_null\n self.inf_as_null = inf_as_null\n self.missing_values = missing_values\n \n def fit_transform(self, df):\n \"\"\"\n use fit_transform for:\n - replace missing values with nan.\n - drop columns with all nan values.\n - fill nan values with the specified strategy.\n\n :param:\n df: pandas data frame\n :attribute:\n mask: binary pandas series, only if strategy = 'ignore_row' or 'ignore_column'\n mask is a binary vector whose length is the number of rows/indices in the df. The index of each bit shows\n if the row/column in the same position has been removed or not.\n The goal is keeping track of removed rows/columns to change the target data frame or other input data frames based\n on that. 
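# Self-contained check of the Makefile patch performed in the ICU build script above:
# the regex rewrites a GNU-style '-o target' on $(LINK.cc) lines into MSVC's '/OUT:'.
import re

line = "\t$(LINK.cc) $(OBJS) $(LIBS) -o $(TARGET)"
patched = re.sub(r"(\$\(LINK\.cc\).*) -o (.*)", r"\1 /OUT:\2", line)
print(patched)  # \t$(LINK.cc) $(OBJS) $(LIBS) /OUT:$(TARGET)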
The mask can later be used in the transform method to change other data frames in the same way.\n \"\"\"\n if self.inf_as_null == True:\n df.replace([np.inf, -np.inf,'inf','-inf'], np.nan, True)\n if self.string_as_null == True:\n df = df.convert_objects(convert_numeric=True)\n if isinstance(self.missing_values, (list, tuple)):\n for pattern in self.missing_values:\n df.replace(pattern, np.nan, True)\n\n df = _check_object_col(df, 'df')\n # drop null columns\n df.dropna(axis=1, how='all', inplace=True)\n\n if self.strategy == 'zero':\n for col in df.columns:\n df[col].fillna(value=0,inplace=True)\n return df\n elif self.strategy == 'ignore_row':\n dfi = df.index\n df.dropna(axis=0, how='any', inplace=True)\n mask=[i in df.index for i in dfi]\n self.mask = pd.Series(mask, index=dfi)\n # self.mask = pd.notnull(df).all(1)\n # df = df[self.mask]\n return df\n elif self.strategy == 'ignore_column':\n dfc = df.columns\n df.dropna(axis=1, how='any', inplace=True)\n mask=[i in df.columns for i in dfc]\n self.mask = pd.Series(mask, index=dfc)\n # self.mask = pd.notnull(df).all(0)\n # df = df.T[self.mask].T\n return df\n elif self.strategy == 'interpolate':\n df = df.interpolate()\n df.fillna(method='ffill',axis=1, inplace=True) # because of nan in the first and last element of column\n return df\n else:\n msg = \"Wrong strategy has been passed\"\n raise TypeError(msg)\n\n def transform(self, df):\n \"\"\"\n Only if the class is fitted with 'ignore_row' or 'ignore_column' strategies.\n\n :param df: pandas dataframe\n :return: transformed data frame based on the mask vector from fit_transform method.\n \"\"\"\n if self.strategy == 'ignore_row':\n return df[self.mask]\n elif self.strategy == 'ignore_column':\n return df.loc[:,self.mask]\n else:\n msg = \"The transform method doesn't change the dataframe if strategy='zero' or 'interpolate'. 
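# Minimal pandas sketch of the row-mask bookkeeping described in the docstrings above:
# record which rows survive dropna() on the feature frame, then apply the same mask to
# a target frame, which is exactly what transform() does for the 'ignore_row' strategy.
import numpy as np
import pandas as pd

data = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [4.0, 5.0, 6.0]})
target = pd.DataFrame({"y": [10, 20, 30]})

kept = data.dropna(axis=0, how="any")
mask = pd.Series([i in kept.index for i in data.index], index=data.index)
print(target[mask])  # rows 0 and 2 only, aligned with the surviving data rows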
You should fit_transform the new dataframe with those methods.\"\n warnings.warn(msg)\n","sub_path":"cheml/preprocessing/handle_missing.py","file_name":"handle_missing.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"134285291","text":"#how to Define tuple\n\ntp=()\n\n#possible to store same and different type of data\n\ntple=(10,\"sibin\",True)\nprint(tple )\n\n#it possibe to store duplicate value\n\ntple1=(10,\"sibin\",True,10,\"sibin\",True)\nprint(tple1)\n\n# insertion order is preserved\n\n#tuple is immutable means that can't update","sub_path":"python collections/Tuple/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"158105759","text":"#!/usr/bin/env python\n# -*-coding:utf_8-*-\n\nimport random\nimport time\n\n\n# Class created to get the mock values.\nclass Sensorslibrary(object):\n \"\"\"Sensorslibrary\"\"\"\n\n def __init__(self):\n super(Sensorslibrary, self).__init__()\n\n @staticmethod\n def nfc():\n time.sleep(1)\n if random.randint(1, 4) == 3:\n return random.choice(['ABCD136468', 'BCDE789514', 'CDEF663247'])\n return None\n\n @staticmethod\n def flow():\n time.sleep(1)\n flow = 0.0\n a = 15\n while a > 0:\n flow += random.random()\n a -= 1\n time.sleep(1)\n return flow\n","sub_path":"sensors/sensorslibrary.py","file_name":"sensorslibrary.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"532262398","text":"import urllib.request\nimport urllib.parse\nimport random\nimport hashlib\nfrom gtk import *\n\ndef __translate(lin, lout, fy, text):\n\tif text == '':\n\t\treturn ''\n\tif fy == 'Baidu':\n\t\tlin = list_b2[lin]\n\t\tlout = list_b2[lout]\n\t\turl = 'http://fanyi.baidu.com/transapi'\n\t\tdata = {\"query\": text, 'from': lin, 'to': lout}\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36', 'method': 'POST'}\n\t\tdata = urllib.parse.urlencode(data).encode('utf-8')\n\t\treq = urllib.request.Request(url, data, headers)\n\t\tresponse = urllib.request.urlopen(req)\n\t\thtml = response.read().decode('utf-8')\n\telif fy == 'Google':\n\t\tlin = list_g2[lin]\n\t\tlout = list_g2[lout]\n\t\turl = 'https://translate.google.cn/translate_a/single'\n\t\tdata = {\"q\": text}\n\t\tparams = {'client': 't', 'sl': lin, 'tl': lout, 'hl': 'en','dt': 'at', 'dt': 'bd', 'dt': 'ex', 'dt': 'ld', 'dt': 'md','dt': 'qca', 'dt': 'rw', 'dt': 'rm', 'dt': 'ss', 'dt': 't','ie': 'UTF-8', 'oe': 'UTF-8', 'source': 'bh', 'ssel': '0','tsel': '0', 'kc': '1', 'tk': ''}\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36','Referer': 'https://translate.google.cn/'}\n\t\tparams['tk'] = TK.get_tk(text)\n\t\t#data = urllib.parse.urlencode(data).encode('utf-8')\n\t\tres = requests.post(url, headers = headers, data = data, params = params)\n\t\t#res.raise_for_status()\n\t\thtml = res.text\n\telse: \n\t\tlin = list_y2[lin]\n\t\tlout = list_y2[lout]\n\t\t#print(lin,lout)\n\t\turl= 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n\t\t# 发送给有道服务器的数据\n\t\tu = 'fanyideskweb'\n\t\tf = str(int(time.time()*1000) + random.randint(1,10))\n\t\tc = 'ebSeFb%=XZ%T[KZ)c(sy!'\n\t\tsign = hashlib.md5((u + text + f 
+ c).encode('utf-8')).hexdigest()\n\t\tdata = {'i': text,'from':lin,'to':lout,'salt': f, 'sign': sign,'client': u, 'doctype': 'json','keyfrom': 'fanyi.web'}\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36', 'method': 'POST','Referer': 'http://fanyi.youdao.com/',}\n\t\tdata = urllib.parse.urlencode(data).encode('utf-8')\n\t\treq = urllib.request.Request(url, data, headers)\t\n\t\tresponse = urllib.request.urlopen(req)\n\t\thtml = response.read().decode('utf-8')\n\tif fy == 'Baidu':\n\t\treturn json.loads(html)['data'][0]['dst']\n\telif fy == 'Google':\n\t\treturn json.loads(html)[0][0][0]\n\telse: \n\t\t#print(json.loads(html))\n\t\treturn json.loads(html)['translateResult'][0][0]['tgt']\n\t\t\nTK = CalcTk()\n\nlist_b1 =['中文','英语','粤语','文言文','日语','韩语','法语','西班牙语','泰语','阿拉伯语',\n'俄语','葡萄牙语','德语','意大利语','希腊语','荷兰语','波兰语','保加利亚语','爱沙尼亚语','丹麦语','芬兰语',\n'捷克语','罗马尼亚语','斯洛语尼亚语','瑞典语','匈牙利语','繁体中文','越南语']\n\nlist_b2 = {'auto':'auto','中文':'zh','英语':'en','粤语':'yue','文言文':'wyw','日语':'jp','韩语':'kor','法语':'fra','西班牙语':'spa','泰语':'th','阿拉伯语':'ara',\n'俄语':'ru','葡萄牙语':'pt','德语':'de','意大利语':'it','希腊语':'el','荷兰语':'nl','波兰语':'pl','保加利亚语':'bul','爱沙尼亚语':'est',\n'丹麦语':'dan','芬兰语':'fin','捷克语':'cs','罗马尼亚语':'rom','斯洛语尼亚语':'slo','瑞典语':'swe','匈牙利语':'hu','繁体中文':'cht','越南语':'vie'}\n\nlist_y1 = ['中文','日语','英语','韩语','法语','阿拉伯语','波兰语','丹麦语','德语','俄语','芬兰语',\n'荷兰语','捷克语','罗马尼亚语','挪威语','葡萄牙语','瑞典语','斯洛伐克语','西班牙语','印地语',\n'印度尼西亚语','意大利语','泰语','土耳其语','希腊语','匈牙利语']\n\nlist_y2 ={'auto':'auto','中文':'zh-CHS','日语':'ja','英语':'EN','韩语':'ko','法语':'fr','阿拉伯语':'ar','波兰语':'pl','丹麦语':'da','德语':'de','俄语':'ru','芬兰语':'fi',\n'荷兰语':'nl','捷克语':'cs','罗马尼亚语':'ro','挪威语':'no','葡萄牙语':'pt','瑞典语':'sv','斯洛伐克语':'sk','西班牙语':'es','印地语':'hi',\n'印度尼西亚语':'id','意大利语':'it','泰语':'th','土耳其语':'tr','希腊语':'el','匈牙利语':'hu'}\n\nlist_g1 = ['中文','中文(简体)','中文(繁体)','英语','南非语','俄语','法语','阿拉伯语','意大利语','日语','丹麦语','德语',\n'希腊语','世界语','西班牙语','爱沙尼亚语','巴士克语','法斯语','芬兰语','法罗语','加里西亚语','古吉拉特语','阿塞拜疆语','比利时语','保加利亚语','加泰隆语','捷克语',\n'希伯来语','印地语','克罗地亚语','匈牙利语','亚美尼亚语','印度尼西亚语','冰岛语','格鲁吉亚语','哈萨克语','卡纳拉语','朝鲜语','孔卡尼语','吉尔吉斯语',\n'立陶宛语','拉脱维亚语','毛利语','马其顿语','蒙古语','马拉地语','马来语','马耳他语','挪威语','荷兰语','北梭托语','威尔士语','第维埃语',\n'旁遮普语','波兰语','葡萄牙语','克丘亚语','罗马尼亚语','梵文','北萨摩斯语','斯洛伐克语','斯洛文尼亚语','阿尔巴尼亚语','瑞典语','斯瓦希里语','叙利亚语',\n'泰米尔语','泰卢固语','泰语','塔加路语','茨瓦纳语','土耳其语','宗加语','鞑靼语','乌克兰语','乌都语','乌兹别克语','越南语',\n'班图语','祖鲁语']\nlist_g2 = 
{'auto':'auto','南非语':'af','阿拉伯语':'ar','阿塞拜疆语':'az','比利时语':'be','保加利亚语':'bg','加泰隆语':'ca','捷克语':'cs','威尔士语':'cy','丹麦语':'da','德语':'de','第维埃语':'dv',\n'希腊语':'el','英语':'en','世界语':'eo','西班牙语':'es','爱沙尼亚语':'et','巴士克语':'eu','法斯语':'fa','芬兰语':'fi','法罗语':'fo','法语':'fr','加里西亚语':'gl','古吉拉特语':'gu',\n'希伯来语':'he','印地语':'hi','克罗地亚语':'hr','匈牙利语':'hu','亚美尼亚语':'hy','印度尼西亚语':'id','冰岛语':'is','意大利语':'it','日语':'ja','格鲁吉亚语':'ka','哈萨克语':'kk','卡纳拉语':'kn','朝鲜语':'ko','孔卡尼语':'kok','吉尔吉斯语':'ky',\n'立陶宛语':'lt','拉脱维亚语':'lv','毛利语':'mi','马其顿语':'mk','蒙古语':'mn','马拉地语':'mr','马来语':'ms','马耳他语':'mt','挪威语':'nb','荷兰语':'nl','北梭托语':'ns',\n'旁遮普语':'pa','波兰语':'pl','葡萄牙语':'pt','克丘亚语':'qu','罗马尼亚语':'ro','俄语':'ru','梵文':'sa','北萨摩斯语':'se','斯洛伐克语':'sk','斯洛文尼亚语':'sl','阿尔巴尼亚语':'sq','瑞典语':'sv','斯瓦希里语':'sw','叙利亚语':'syr',\n'泰米尔语':'ta','泰卢固语':'te','泰语':'th','塔加路语':'tl','茨瓦纳语':'tn','土耳其语':'tr','宗加语':'ts','鞑靼语':'tt','乌克兰语':'uk','乌都语':'ur','乌兹别克语':'uz','越南语':'vi',\n'班图语':'xh','中文':'zh','中文(简体)':'zh-CN','中文(繁体)':'zh-TW','祖鲁语':'zu'}\n","sub_path":"trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"16075380","text":"import statistics\r\ndef make_bin(li):\r\n leng = len(li)\r\n count = 0\r\n final_bin = []\r\n bin_size = int(input(\"Enter bin size : - \"))\r\n rem = leng % bin_size\r\n\r\n if rem != 0:\r\n while(count < leng - rem):\r\n temp_li = []\r\n temp = 0\r\n while temp != bin_size:\r\n temp_li = temp_li + [li[temp + count]]\r\n temp = temp + 1\r\n final_bin.append(temp_li)\r\n count = count + bin_size\r\n\r\n count1 = 0\r\n temp_li = []\r\n\r\n while count1 != rem:\r\n temp_li = temp_li + [li[count]]\r\n\r\n count = count + 1\r\n count1 = count1 + 1\r\n else:\r\n while (count != leng):\r\n temp_li = []\r\n temp = 0\r\n while temp != bin_size:\r\n if li[temp + count] is None:\r\n break\r\n else:\r\n temp_li = temp_li + [li[temp + count]]\r\n temp = temp + 1\r\n final_bin.append(temp_li)\r\n count = count + bin_size\r\n final_bin.append(temp_li)\r\n\r\n return final_bin\r\n\r\ndef mean_bi(bi):\r\n l = len(bi)\r\n count = 0\r\n fi = []\r\n while count != l:\r\n temp = []\r\n le = len(bi[count])\r\n total = 0\r\n count2 = 0\r\n while count2 != le:\r\n total = total + bi[count][count2]\r\n count2 = count2 +1\r\n\r\n me = float(total / le)\r\n count3 = 0\r\n while count3 != le:\r\n temp = temp + [me]\r\n count3 = count3 + 1\r\n fi.append(temp)\r\n count = count + 1\r\n return fi\r\n\r\n\r\ndef median_bi(bi):\r\n l = len(bi)\r\n count = 0\r\n fi = []\r\n while count != l:\r\n temp = []\r\n\r\n le = len(bi[count])\r\n me = statistics.median(bi[count])\r\n count3 = 0\r\n while count3 != le:\r\n temp = temp + [me]\r\n count3 = count3 + 1\r\n fi.append(temp)\r\n count = count + 1\r\n return fi\r\n\r\ndef range_bi(bi):\r\n\r\n l = len(bi)\r\n count = 0\r\n fi = []\r\n while count != l:\r\n temp = []\r\n max_bi = max(bi[count])\r\n min_bi = min(bi[count])\r\n le = len(bi[count])\r\n\r\n count3 = 0\r\n while count3 != le:\r\n mi = abs(bi[count][count3]-min_bi)\r\n ma = abs(max_bi-bi[count][count3])\r\n if mi <= ma :\r\n temp = temp + [min_bi]\r\n elif mi > ma :\r\n temp = temp + [max_bi]\r\n\r\n count3 = count3 + 1\r\n fi.append(temp)\r\n count = count + 1\r\n return fi\r\n\r\n\r\nli = [13, 15, 16, 16, 19, 20, 20, 21, 22, 22, 25, 25, 25, 25, 30, 33, 33, 35, 5, 35, 35, 36, 40, 45, 46, 52, 70 ]\r\nbi = 
make_bin(li)\r\nprint(mean_bi(bi))\r\nprint(median_bi(bi))\r\nprint(range_bi(bi))\r\n\r\n","sub_path":"binning.py","file_name":"binning.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"169679759","text":"\"\"\"Tests for api module.\"\"\"\n\nimport pytest\nimport vcr\nimport os\nfrom shapely.geometry import Polygon\nfrom landsatxplore import api, errors\n\n\nBRUSSELS_AREA = Polygon(\n [(4.25, 50.75), (4.50, 50.75), (4.50, 50.95), (4.25, 50.95), (4.25, 50.75)]\n)\n\n\ndef test__random_string():\n str_a = api._random_string(length=10)\n str_b = api._random_string(length=10)\n assert str_a != str_b\n assert len(str_a) == 10\n assert len(str_b) == 10\n\n\ndef test_coordinate():\n coord = api.Coordinate(4.35, 50.85)\n assert coord == {\"longitude\": 4.35, \"latitude\": 50.85}\n\n\ndef test_geojson():\n geojson = api.GeoJson(BRUSSELS_AREA.__geo_interface__)\n assert geojson[\"type\"] == \"Polygon\"\n assert len(geojson[\"coordinates\"]) == 5\n assert geojson[\"coordinates\"][0] == {\"longitude\": 4.25, \"latitude\": 50.75}\n\n\ndef test_spatial_filter_mbr():\n mbr = api.SpatialFilterMbr(*BRUSSELS_AREA.bounds)\n assert mbr[\"filterType\"] == \"mbr\"\n assert mbr[\"lowerLeft\"] == {\"longitude\": 4.25, \"latitude\": 50.75}\n assert mbr[\"upperRight\"] == {\"longitude\": 4.5, \"latitude\": 50.95}\n\n\ndef test_spatial_filter_geojson():\n sfilter = api.SpatialFilterGeoJSON(BRUSSELS_AREA.__geo_interface__)\n assert sfilter[\"filterType\"] == \"geoJson\"\n\n\ndef test_acquisition_filter():\n afilter = api.AcquisitionFilter(\"2000-01-01\", \"2001-12-31\")\n assert afilter[\"start\"] == \"2000-01-01\"\n assert afilter[\"end\"] == \"2001-12-31\"\n\n\ndef test_cloud_cover_filter():\n cfilter = api.CloudCoverFilter(max=10)\n assert cfilter[\"min\"] == 0\n assert cfilter[\"max\"] == 10\n\n\ndef test_metadata_value():\n mfilter = api.MetadataValue(\n field_id=\"5e83d08fd4594aae\", value=\"LT05_L1GS_173058_20111028_20161005_01_T2\"\n )\n assert mfilter[\"filterType\"] == \"value\"\n assert mfilter[\"filterId\"] == \"5e83d08fd4594aae\"\n assert mfilter[\"value\"] == \"LT05_L1GS_173058_20111028_20161005_01_T2\"\n assert mfilter[\"operand\"] == \"like\"\n\n\n@pytest.fixture(scope=\"module\")\ndef ee_api():\n def _filter_credentials(request):\n if \"password\" in str(request.body):\n request.body = None\n return request\n\n with vcr.use_cassette(\n \"tests/fixtures/vcr_cassettes/api_login.yaml\",\n before_record_request=_filter_credentials,\n ):\n ee = api.API(\n os.getenv(\"LANDSATXPLORE_USERNAME\"), os.getenv(\"LANDSATXPLORE_PASSWORD\")\n )\n return ee\n\n\ndef test_api_login(ee_api):\n assert ee_api.session.headers.get(\"X-Auth-Token\")\n\n\ndef test_api_login_error():\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_login_error.yaml\"):\n with pytest.raises(errors.USGSAuthenticationError):\n api.API(\"bad_username\", \"bad_password\")\n\n\ndef test_api_get_scene_id(ee_api):\n\n # Single Product ID\n PRODUCT_ID = \"LT05_L1GS_173058_20111028_20161005_01_T2\"\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_id.yaml\"):\n scene_id = ee_api.get_scene_id(PRODUCT_ID, dataset=\"landsat_tm_c1\")\n assert scene_id == \"LT51730582011301MLK00\"\n\n # Multiple Product IDs\n PRODUCT_IDS = [\n \"LT05_L1GS_173058_20111028_20161005_01_T2\",\n \"LT05_L1GS_173057_20010407_20171209_01_T2\",\n ]\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_ids.yaml\"):\n scene_ids = 
ee_api.get_scene_id(PRODUCT_IDS, dataset=\"landsat_tm_c1\")\n assert scene_ids == [\"LT51730582011301MLK00\", \"LT51730572001097LBG00\"]\n\n\ndef test_api_metadata(ee_api):\n\n # Collection 1\n SCENE_ID = \"LT51730582011301MLK00\"\n DATASET = \"landsat_tm_c1\"\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_metadata_c1.yaml\"):\n metadata = ee_api.metadata(SCENE_ID, DATASET)\n assert metadata[\"entityId\"] == SCENE_ID\n assert metadata[\"landsat_scene_id\"] == SCENE_ID\n\n # Collection 2\n SCENE_ID = \"LT51730582011301MLK00\"\n DATASET = \"landsat_tm_c2_l1\"\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_metadata_c2.yaml\"):\n metadata = ee_api.metadata(SCENE_ID, DATASET)\n assert metadata[\"entityId\"] == SCENE_ID\n assert metadata[\"collection_number\"] == 2\n\n\ndef test_api_get_product_id(ee_api):\n\n SCENE_ID = \"LT51730582011301MLK00\"\n\n # Collection 1\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_productid_c1.yaml\"):\n product_id = ee_api.get_product_id(SCENE_ID, \"landsat_tm_c1\")\n assert product_id == \"LT05_L1GS_173058_20111028_20161005_01_T2\"\n\n # Collection 2\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_productid_c2.yaml\"):\n product_id = ee_api.get_product_id(SCENE_ID, \"landsat_tm_c2_l2\")\n assert product_id == \"LT05_L2SP_173058_20111028_20200820_02_T1\"\n\n\ndef test_api_search(ee_api):\n\n # Longitude and Latitude\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_search_c1_lonlat.yaml\"):\n scenes = ee_api.search(\n \"landsat_8_c1\",\n longitude=4.38,\n latitude=50.85,\n start_date=\"2018-01-01\",\n end_date=\"2018-01-07\",\n max_results=5,\n )\n assert len(scenes) >= 1\n assert \"cloudCover\" in scenes[0]\n\n # Bounding box\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_search_c1_bbox.yaml\"):\n scenes = ee_api.search(\n \"landsat_8_c1\",\n bbox=BRUSSELS_AREA.bounds,\n start_date=\"2018-01-01\",\n end_date=\"2018-01-07\",\n max_results=5,\n )\n assert len(scenes) >= 1\n assert \"cloudCover\" in scenes[0]\n\n # Collection 2\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_search_c2.yaml\"):\n scenes = ee_api.search(\n \"landsat_ot_c2_l2\",\n longitude=4.38,\n latitude=50.85,\n start_date=\"2018-01-01\",\n end_date=\"2018-01-31\",\n max_results=10,\n )\n assert len(scenes) >= 1\n assert \"cloudCover\" in scenes[0]\n assert scenes[0][\"displayId\"][5:7] == \"L2\"\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"516194848","text":"from 二元文法 import read_txt\n\n\nclass caculate():\n \"\"\"\n 和上次一模一样\n \"\"\"\n def __init__(self, filename):\n #\n # jieba分词的结果\n self.true_correct_txt = read_txt(\"./txt/jieba.txt\")\n self.true_correct_num = len(self.true_correct_txt)\n # 这一步将列表转为字典,因为字典查询较快\n self.true_correct_dic = self.changeIntoDic(self.true_correct_txt)\n # 样本信息条数(除去标点符号,只保留中文)\n self.sample_num_txt = read_txt(filename)\n self.sample_num = len(self.sample_num_txt)\n\n self.my_correct_txt = self.getMyCorrectNum()\n self.my_correct_num = len(self.my_correct_txt)\n\n self.Precision = self.caculatePrecision()\n self.Recall = self.caculateRecall()\n self.FScore = self.caculateFScore()\n pass\n\n def changeIntoDic(self, txt):\n result = {}\n for t in txt:\n result[t] = 1\n return result\n\n def getMyCorrectNum(self):\n result = []\n for s in self.sample_num_txt:\n if s in self.true_correct_dic:\n result.append(s)\n return 
result\n\n def caculatePrecision(self):\n return self.my_correct_num / self.true_correct_num\n\n def caculateRecall(self):\n return self.my_correct_num / self.sample_num\n\n def caculateFScore(self):\n return (2 * self.Precision * self.Recall) / (self.Precision + self.Recall)\n\n\nif __name__ == '__main__':\n print(\"FMM:\")\n fmm = caculate(\"./txt/fmm.txt\")\n print(\"Precision:%f,Recall:%f,FScore:%f\" % (fmm.Precision, fmm.Recall, fmm.FScore))\n print(\"BMM:\")\n bmm = caculate(\"./txt/bmm.txt\")\n print(\"Precision:%f,Recall:%f,FScore:%f\" % (bmm.Precision, bmm.Recall, bmm.FScore))\n print(\"消除歧义后的:\")\n two_way = caculate(\"./txt/消歧结果.txt\")\n print(\"Precision:%f,Recall:%f,FScore:%f\" % (two_way.Precision, two_way.Recall, two_way.FScore))\n # print(\"mmseg:\")\n # mmseg = caculate(\"./data/MMSEG.txt\")\n # print(\"Precision:%f,Recall:%f,FScore:%f\" % (mmseg.Precision, mmseg.Recall, mmseg.FScore))\n","sub_path":"2元文法进行文本分词消歧/其他.py","file_name":"其他.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"99147194","text":"\nimport sys\nimport numpy as np\nimport pickle\nimport pdb\nimport matplotlib.pyplot as plt\nfrom syncUtils import getPhasesFromVoltages\nfrom circularStats import circularVariance\nfrom utils import alignMeasurements\n\ndef main(argv):\n def computeCircularVariances(phases1, phases2):\n cvs = np.empty(len(phases1))\n for i in xrange(len(phases1)):\n cvs[i] = circularVariance(angles=(phases1[i], phases2[i]))\n return cvs\n\n if len(argv)!=2:\n print(\"Usage: %s \"%argv[0])\n sys.exit(1)\n\n selfCouplingStrength = float(argv[1])\n epsilon = 0.1\n i0 = 10\n couplingStartTime = 99.04\n colorNeuron0 = \"blue\"\n colorNeuron1 = \"green\"\n linestyleCoupled = \"-\"\n linestyleUncoupled = \":\"\n integrationFilename = \"results/integrationWCoupledINapIKHighThresholdWithSelfCouplingStrength%.02fI0%.02fEpsilon%.06fCouplingStartTime%.02f.npz\"%(selfCouplingStrength, i0, epsilon, couplingStartTime)\n figFilenamePattern = \"figures/fig10_25INapIKHighThresholdWithSelfCouplingStrength%.02fI0%.02fEpsilon%.06fCouplingStart%.02f.%s\"\n\n figFilename = figFilenamePattern%(selfCouplingStrength, i0, epsilon, \n couplingStartTime, \n \"eps\")\n res = np.load(integrationFilename)\n timesCoupled = res[\"timesCoupled\"]\n voltagesCoupledNeuron0 = res[\"ysCoupled\"][0,:]\n voltagesCoupledNeuron1 = res[\"ysCoupled\"][2,:]\n voltagesUncoupledNeuron0 = res[\"ys0Uncoupled\"][0,:]\n timesUncoupledNeuron0 = res[\"times0Uncoupled\"]\n voltagesUncoupledNeuron1 = res[\"ys1Uncoupled\"][0,:]\n timesUncoupledNeuron1 = res[\"times1Uncoupled\"]\n sampleRate = 1.0/(timesCoupled[1]-timesCoupled[0])\n\n resPhasesNeuron0 = getPhasesFromVoltages(times=timesCoupled, \n voltages=voltagesCoupledNeuron0)\n phasesNeuron0 = resPhasesNeuron0[\"phases\"]\n timePhasesNeuron0 = resPhasesNeuron0[\"times\"]\n resPhasesNeuron1 = getPhasesFromVoltages(times=timesCoupled,\n voltages=voltagesCoupledNeuron1)\n phasesNeuron1 = resPhasesNeuron1[\"phases\"]\n timePhasesNeuron1 = resPhasesNeuron1[\"times\"]\n spikeTimesNeuron0 = resPhasesNeuron0[\"spikeTimes\"]\n uncoupledSpikeTimesNeuron0 = spikeTimesNeuron0[spikeTimesNeuron0[^/]+)/{self.service_path}/?(?P.*)?\"\n\n def add_existing(self, obj):\n return self.model.add_existing(obj)\n\n def create(self, request):\n obj = self._schema_draft().loads(request.body)\n data = self.model.add(obj)\n return create_response(request, json=data)\n\n def get_by_id(self, request, id):\n obj = 
self.model.get_by_id(id)\n if obj:\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def get_by_key(self, request, key):\n obj = self.model.get_by_key(key)\n if obj:\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def query(self, request):\n params = utils.parse_request_params(abstract.AbstractQuerySchema, request)\n results = self.model.query(params.get(\"where\"))\n total_count = len(results)\n if params.get(\"limit\"):\n results = results[: params[\"limit\"]]\n\n data = {\n \"count\": len(results),\n \"total\": total_count,\n \"offset\": 0,\n \"results\": self.model._resource_schema().load(results, many=True),\n }\n content = self._schema_query_response().dumps(data)\n return create_response(request, text=content)\n\n def update_by_id(self, request, id):\n obj = self.model.get_by_id(id)\n return self._update(request, obj)\n\n def update_by_key(self, request, key):\n obj = self.model.get_by_key(key)\n return self._update(request, obj)\n\n def delete_by_id(self, request, id):\n obj = self.model.get_by_id(id)\n if obj:\n response = self._validate_resource_version(request, obj)\n if response is not None:\n return response\n\n obj = self.model.delete_by_id(id)\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def delete_by_key(self, request, key):\n obj = self.model.get_by_key(key)\n if obj:\n response = self._validate_resource_version(request, obj)\n if response is not None:\n return response\n\n obj = self.model.delete_by_key(key)\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def _update(self, request, obj):\n if not obj:\n return create_response(request, status_code=404)\n\n update = self._schema_update().load(request.json())\n if update.actions:\n obj, err = self._apply_update_actions(obj, update)\n if err:\n return create_response(request, json=err, status_code=err[\"statusCode\"])\n return create_response(request, json=obj)\n\n def _validate_resource_version(self, request, obj):\n update_version = self._get_version_from_request(request)\n if update_version != obj[\"version\"]:\n data = self._create_version_error_response(obj[\"version\"])\n return create_response(request, json=data, status_code=409)\n\n def _get_version_from_request(self, request):\n version_data = request.qs.get(\"version\")\n if version_data:\n return int(version_data[0])\n return request.json().get(\"version\")\n\n def _apply_update_actions(self, obj, update):\n original_obj = obj\n\n for action in update.actions:\n func = self._actions.get(action.action)\n if not func:\n print(\"Missing action for\", action.action)\n continue\n try:\n obj = func(self, obj, action)\n except utils.InternalUpdateError as exc:\n return None, self._create_data_error_response(str(exc), obj)\n\n # Save the updated object to the model\n if obj != original_obj:\n if obj[\"version\"] != update.version:\n return None, self._create_version_error_response(obj[\"version\"])\n self.model.save(obj)\n\n # Temporary\n elif not self._actions:\n self.model.save(obj)\n\n return obj, None\n\n def _create_data_error_response(self, message, obj):\n return schemas.ErrorResponseSchema().dump(\n types.ErrorResponse(\n status_code=400,\n message=message,\n errors=[\n types.ConcurrentModificationError(\n message=message,\n current_version=obj['version']\n )\n ],\n )\n )\n\n\n def _create_version_error_response(self, version):\n return 
schemas.ErrorResponseSchema().dump(\n types.ErrorResponse(\n status_code=409,\n message=\"Version mismatch. Concurrent modification.\",\n errors=[\n types.ConcurrentModificationError(\n message=\"Version mismatch. Concurrent modification.\",\n current_version=version,\n )\n ],\n )\n )\n","sub_path":"src/commercetools/testing/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":9242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"291993060","text":"#!/usr/bin/env python\n\nimport json\n\nfrom . import mb\nfrom flask import render_template\nfrom flask import request\n\nimport os\nfrom algorithm_toolkit import app\n\nfrom sarpy.deprecated.tools import FrameGenerator\nfrom sarpy.deprecated.tools import FrameForm\n\ncam = FrameGenerator()\n\n\n@app.route('/taser/')\ndef index():\n form = FrameForm()\n \"\"\"Image Blending home page.\"\"\"\n return render_template('index.html', form=form)\n\n\n@mb.route('/taser/update_image_path', methods=['POST'])\ndef set_image_path():\n\n image_path = os.path.normpath(request.values.get('image_path', ''))\n tnx = int(request.values.get('tnx', ''))\n tny = int(request.values.get('tny', ''))\n\n nx, ny = cam.set_image_path(image_path, tnx, tny)\n\n return json.dumps({'nx': nx, 'ny': ny})\n\n\n@mb.route('/taser/update_image_content', methods=['POST'])\ndef crop_image():\n\n minx = int(round(float((request.values.get('minx', '')))))\n maxx = int(round(float((request.values.get('maxx', '')))))\n miny = int(round(float((request.values.get('miny', '')))))\n maxy = int(round(float((request.values.get('maxy', '')))))\n tnx = int(round(float((request.values.get('tnx', '')))))\n tny = int(round(float((request.values.get('tny', '')))))\n\n cam.crop_image(minx, miny, maxx, maxy, tnx, tny)\n\n return ''\n\n\n@mb.route('/taser/ortho_image', methods=['POST'])\ndef ortho_image():\n\n output_image_path = os.path.normpath(request.values.get('input', ''))\n cam.ortho_image(output_image_path)\n\n return ''\n\n\n@mb.route('/taser/get_frame', methods=['POST'])\ndef get_image():\n return cam.get_frame()\n","sub_path":"sarpy/deprecated/tools/taser_web/views/taser.py","file_name":"taser.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"605470639","text":"# -*-coding:UTF-8-*-\n\nimport heapq\nimport threading\n\nclass PriorityQueue:\n def __init__(self):\n self._queue = []\n self._count = 0\n self._cv = threading.Condition()\n\n def put(self, item, priority):\n with self._cv:\n heapq.heappush(self._queue, (-priority, self._count, item))\n self._count += 1\n self._cv.notify()\n\n def get(self):\n with self._cv:\n while len(self._queue)== 0:\n self._cv.wait()\n return heapq.heappop(self._queue)[-1]\n\nif __name__ == '__main__':\n pQueue = PriorityQueue()\n pQueue.put('a',1)\n print(pQueue.get())\n\n","sub_path":"prioriry_queue_demo.py","file_name":"prioriry_queue_demo.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"440997165","text":"# coding:utf8\n\"\"\"\n 123. 
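# Hypothetical usage of the condition-variable PriorityQueue defined above: items with
# a higher 'priority' pop first (the heap stores -priority), and get() blocks on the
# Condition until a producer thread calls put().
import threading
import time

q = PriorityQueue()  # the class from prioriry_queue_demo.py above
q.put("low", priority=1)
q.put("urgent", priority=5)
print(q.get())  # -> "urgent": higher priority is served first

def consumer():
    print("got:", q.get())  # blocks until the producer puts another item

t = threading.Thread(target=consumer)
t.start()
time.sleep(0.1)
q.put("later", priority=2)
t.join()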
买卖股票的最佳时机 III\n 给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。\n 设计一个算法来计算你所能获取的最大利润。你最多可以完成 两笔 交易。\n 注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。\n\n 示例 1:\n 输入:prices = [3,3,5,0,0,3,1,4]\n 输出:6\n 解释:在第 4 天(股票价格 = 0)的时候买入,在第 6 天(股票价格 = 3)的时候卖出,这笔交易所能获得利润 = 3-0 = 3 。\n 随后,在第 7 天(股票价格 = 1)的时候买入,在第 8 天 (股票价格 = 4)的时候卖出,这笔交易所能获得利润 = 4-1 = 3 。\n 链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-iii\n\"\"\"\nfrom typing import List\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n return self.maxProfit_v1(prices)\n def maxProfit_v1(self, prices: List[int]) -> int:\n \"\"\"\n dp table\n dp[i][k][j] = x 表示第i天, 交易次数为k, 持有股票状态为j的最大利润为x\n 其中 j = {0, 1}, 0 <= k <= K\n ans: max(dp[n - 1][k][0])\n 每次买入作为一笔交易\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k - 1][0] - prices[i], dp[i - 1][k][1])\n base case:\n dp[0][0][0] = 0\n dp[0][1][0] = -inf\n dp[0][0][1] = -inf\n dp[0][1][1] = -prices[0]\n \"\"\"\n if not prices:\n return 0\n k = 2\n dp = [[[0] * 2 for _ in range(k + 1)] for _ in range(len(prices))]\n dp[0][0][0] = 0\n dp[0][1][0] = 0\n dp[0][0][1] = float('-inf')\n dp[0][1][1] = -prices[0]\n dp[0][2][0] = 0\n dp[0][2][1] = -prices[0]\n \n for i in range(1, len(prices)):\n for j in range(1, k + 1):\n dp[i][j][0] = max(dp[i - 1][j][0], dp[i - 1][j][1] + prices[i])\n dp[i][j][1] = max(dp[i - 1][j - 1][0] - prices[i], dp[i - 1][j][1])\n return max(dp[-1][1][0], dp[-1][2][0], 0)\n\n\n\nif __name__ == '__main__':\n prices = [3, 3, 5, 0, 0, 3, 1, 4]\n prices = [1, 2, 3, 4, 5]\n obj = Solution()\n print(obj.maxProfit(prices))\n","sub_path":"suqing/fuckal/python/dp/best-time-to-buy-and-sell-stock-iii.py","file_name":"best-time-to-buy-and-sell-stock-iii.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"403527688","text":"# name.py\n# Walker M. White (wmw2)\n# August 30, 2015\n\"\"\"Module with a single, non-working function.\n\nThe function in this module has a bug (in the sense that it does not satisfy\nits specification). This allows us to show off debugging.\"\"\"\n\ndef last_name_first(n):\n \"\"\"Returns: copy of n but in the form 'last-name, first-name'\n \n Parameter n: the person's name\n Precondition: n is in the form 'first-name last-name'\n with one or more blanks between the two names no spaces\n in or \"\"\"\n \n end_first = n.find(' ')\n first = n[:end_first]\n last = n[end_first+1:]\n return last+', '+first\n\n","sub_path":"Python/lab05/presentation-08/presentation-08/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"428964021","text":"\n# Perovskites octahedral tilting extraction\n# based on Surf.Sci.602 3674 (2008)\n# http://dx.doi.org/10.1016/j.susc.2008.10.002\n# Author: Evgeny Blokhin\n#\n# KNOWN BUG: in some low-symmetry cases (\"batio3_lda_hw12d_160_to.out\"),\n# octahedra are not adjusted with the axes, and their distortion origin is unknown.\n# Even if the rotation is absent (i.e. 
pseudo-cubic structure),\n# an \"artificial\" rotation can be extracted\n\nfrom __future__ import division\nimport math\nfrom functools import reduce\n\nfrom numpy.linalg import norm\n\nfrom ase import Atom\n\nfrom tilde.core.common import ModuleError #, generate_xyz\nfrom tilde.core.constants import Perovskite_Structure\nfrom tilde.core.symmetry import SymmetryFinder\n\n\nclass Perovskite_tilting():\n OCTAHEDRON_BOND_LENGTH_LIMIT = 2.5 # Angstrom\n OCTAHEDRON_ATOMS_Z_DIFFERENCE = 1.6 # Angstrom\n MAX_TILTING_DEGREE = 22.4 # degrees, this is for adjusting, may produce unphysical results\n\n def __init__(self, tilde_calc):\n self.prec_angles = {} # non-rounded, non-unique, all-planes angles\n self.angles = {} # rounded, unique, one-plane angles\n\n symm = SymmetryFinder()\n symm.refine_cell(tilde_calc)\n if symm.error:\n raise ModuleError(\"Cell refinement error: %s\" % symm.error)\n\n # check if the longest axis is Z, rotate otherwise\n lengths = list(map(norm, symm.refinedcell.cell)) # Py3\n if not (lengths[2] - lengths[0] > 1E-6 and lengths[2] - lengths[1] > 1E-6):\n axnames = {0: 'x', 1: 'y'}\n principal_ax = axnames[ lengths.index(max(lengths[0], lengths[1])) ]\n symm.refinedcell.rotate(principal_ax, 'z', rotate_cell = True)\n\n self.virtual_atoms = symm.refinedcell.copy()\n\n #with open('tilting.xyz', 'w') as f:\n # f.write(generate_xyz(self.virtual_atoms))\n\n # translate atoms around octahedra in all directions\n shift_dirs = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (1, 1, 0), (1, -1, 0), (-1, -1, 0), (-1, 1, 0), (0, 0, 1), (0, 0, -1)]\n\n for k, i in enumerate(symm.refinedcell):\n if i.symbol in Perovskite_Structure.C:\n for sdir in shift_dirs:\n self.translate(k, symm.refinedcell.cell, sdir, self.virtual_atoms)\n\n # extract octahedra and their main tilting planes\n for octahedron in self.get_octahedra(symm.refinedcell, symm.refinedcell.periodicity):\n #print 'octahedron:', octahedron[0]+1 #, self.virtual_atoms[octahedron[0]].symbol, self.virtual_atoms[octahedron[0]].x, self.virtual_atoms[octahedron[0]].y, self.virtual_atoms[octahedron[0]].z\n #print 'corners:', [i+1 for i in octahedron[1]]\n\n # Option 1. Extract only one tilting plane, the closest to perpendicular to Z-axis\n '''tiltplane = self.get_tiltplane(octahedron[1])\n if len(tiltplane) == 4:\n t = self.get_tilting(tiltplane)\n #print 'result:', [i+1 for i in tiltplane], t\n self.prec_angles.update( { octahedron[0]: [ t ] } )'''\n\n # Option 2. 
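# Hedged, self-contained sketch of the angle-extraction step used later in
# get_tilting(): two (roughly orthogonal) in-plane bisector vectors define a normal
# via the cross product, and the ZYZ Euler angles follow from atan2 exactly as in
# the code further below. The two bisector vectors here are hypothetical values.
import math
import numpy as np

b1 = np.array([1.0, 0.02, 0.05])   # first bisector of the tilting plane
b2 = np.array([0.03, 1.0, -0.04])  # second bisector of the tilting plane
n = np.cross(b1, b2)               # plays the role of the tilted Z axis

alpha = math.degrees(math.atan2(b2[2], b1[2]))
beta = math.degrees(math.atan2(math.hypot(n[0], n[1]), n[2]))
gamma = math.degrees(math.atan2(n[1], -n[0]))
print(alpha, beta, gamma)  # small angles for a nearly untilted octahedron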
Extract all three possible tilting planes,\n # try to spot the closest to perpendicular to Z-axis\n # and consider the smallest tilting\n plane_tilting = []\n for oplane in self.get_tiltplanes(octahedron[1]):\n t = self.get_tilting(oplane)\n #print \"result:\", [i+1 for i in oplane], t\n plane_tilting.append( t )\n\n self.prec_angles.update( { octahedron[0]: plane_tilting } )\n\n if not self.prec_angles: raise ModuleError(\"Cannot find any main tilting plane!\")\n\n # uniquify and round self.prec_angles to obtain self.angles\n u, todel = [], []\n for o in self.prec_angles:\n self.prec_angles[o] = reduce(lambda x, y: x if sum(x) <= sum(y) else y, self.prec_angles[o]) # only minimal angles are taken if tilting planes vary!\n self.prec_angles[o] = list(map(lambda x: list(map(lambda y: round(y, 2), x)), [self.prec_angles[o]])) # Py3\n for i in self.prec_angles[o]:\n u.append([o] + i)\n\n u = sorted(u, key=lambda x:x[0])\n u.reverse() # to make index of oct.centers minimal\n for i in u:\n for j in range(u.index(i)+1, len(u)):\n if i[1:] == u[j][1:]:\n todel.append(u.index(i))\n continue\n for i in [j for j in u if u.index(j) not in todel]:\n self.angles[ i[0]+1 ] = i[1:] # atomic index is counted from zero!\n\n def translate(self, num_of_atom, cell, components, reference):\n a_component, b_component, c_component = components\n reference.append(Atom(\n reference[num_of_atom].symbol,\n (reference[num_of_atom].x + a_component * cell[0][0] + b_component * cell[1][0] + c_component * cell[2][0],\n reference[num_of_atom].y + a_component * cell[0][1] + b_component * cell[1][1] + c_component * cell[2][1],\n reference[num_of_atom].z + a_component * cell[0][2] + b_component * cell[1][2] + c_component * cell[2][2])\n ))\n\n def get_bisector_point(self, num_of_A, num_of_O, num_of_B, reference):\n xA = reference[num_of_A].x\n yA = reference[num_of_A].y\n zA = reference[num_of_A].z\n xO = reference[num_of_O].x\n yO = reference[num_of_O].y\n zO = reference[num_of_O].z\n xB = reference[num_of_B].x\n yB = reference[num_of_B].y\n zB = reference[num_of_B].z\n m = self.virtual_atoms.get_distance(num_of_O, num_of_A)\n n = self.virtual_atoms.get_distance(num_of_O, num_of_B)\n\n # bisector length\n l = 2 * m * n * math.cos(math.radians(self.virtual_atoms.get_angle(num_of_A, num_of_O, num_of_B) / 2)) / (m + n)\n v = math.sqrt(n**2 - n * l**2 / m)\n u = m * v / n\n A = yA*(zO - zB) + yO*(zB - zA) + yB*(zA - zO)\n B = zA*(xO - xB) + zO*(xB - xA) + zB*(xA - xO)\n C = xA*(yO - yB) + xO*(yB - yA) + xB*(yA - yO)\n if C == 0: C = 1E-10 # prevent zero division\n D = xA*(yO*zB - yB*zO) + xO*(yB*zA - yA*zB) + xB*(yA*zO - yO*zA)\n D *= -1\n\n # from surface analytical equation\n x = (xA + u*xB/v)/(1+u/v)\n y = (yA + u*yB/v)/(1+u/v)\n z = -((A*x + B*y + D) / C)\n return [x, y, z]\n\n def get_octahedra(self, atoms, periodicity=3):\n '''\n Extract octahedra as lists of sequence numbers of corner atoms\n '''\n octahedra = []\n for n, i in enumerate(atoms):\n found = []\n if i.symbol in Perovskite_Structure.B:\n for m, j in enumerate(self.virtual_atoms):\n if j.symbol in Perovskite_Structure.C and self.virtual_atoms.get_distance(n, m) <= self.OCTAHEDRON_BOND_LENGTH_LIMIT:\n found.append(m)\n\n if (periodicity == 3 and len(found) == 6) or (periodicity == 2 and len(found) in [5, 6]):\n octahedra.append([n, found])\n\n if not len(octahedra): raise ModuleError(\"Cannot extract valid octahedra: not enough corner atoms found!\")\n return octahedra\n\n def get_tiltplane(self, sequence):\n '''\n Extract the main tilting plane basing on Z 
coordinate\n '''\n sequence = sorted(sequence, key=lambda x: self.virtual_atoms[ x ].z)\n in_plane = []\n for i in range(0, len(sequence)-4):\n if abs(self.virtual_atoms[ sequence[i] ].z - self.virtual_atoms[ sequence[i+1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n abs(self.virtual_atoms[ sequence[i+1] ].z - self.virtual_atoms[ sequence[i+2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n abs(self.virtual_atoms[ sequence[i+2] ].z - self.virtual_atoms[ sequence[i+3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE:\n in_plane = [sequence[j] for j in range(i, i+4)]\n return in_plane\n\n def get_tiltplanes(self, sequence):\n '''\n Extract tilting planes basing on distance map\n '''\n tilting_planes = []\n distance_map = []\n\n for i in range(1, len(sequence)):\n distance_map.append([ sequence[i], self.virtual_atoms.get_distance( sequence[0], sequence[i] ) ])\n\n distance_map = sorted(distance_map, key=lambda x: x[1])\n\n if len(distance_map) == 4:\n # surface edge case\n # semi-octahedron at surface edge has only one tilting plane to consider\n sorted_dist = [i[0] for i in distance_map]\n if distance_map[-1][1] - distance_map[-2][1] < 0.5:\n # 1st case: max diff < 0.5 Angstrom,\n # meaning all distances to reference atom are similar,\n # therefore the reference atom is above the searched plane\n # and the searched plane consists of other atoms\n tilting_planes.append( [ i[0] for i in distance_map ] )\n else:\n # 2nd case: reference atom belongs to the searched plane,\n # procedure needs to be repeated with the next atom as reference atom\n candidates = [sequence[0], sorted_dist[-1]]\n next_distance_map = []\n next_distance_map.append([ sorted_dist[1], self.virtual_atoms.get_distance( sorted_dist[0], sorted_dist[1] ) ])\n next_distance_map.append([ sorted_dist[2], self.virtual_atoms.get_distance( sorted_dist[0], sorted_dist[2] ) ])\n next_distance_map = sorted(next_distance_map, key=lambda x: x[1])\n next_sorted_dist = [i[0] for i in next_distance_map]\n\n # the next reference atom is taken above the plane (distances are similar)\n if next_distance_map[1][1] - next_distance_map[0][1] < 0.5: candidates.extend([ next_sorted_dist[0], next_sorted_dist[1] ])\n\n # the next reference atom is taken in the plane (distances are different)\n else: candidates.extend([ sorted_dist[0], next_sorted_dist[1] ])\n tilting_planes.append(candidates)\n\n elif len(distance_map) == 5:\n # full octahedron case\n # full octahedron has 3 different tilting planes (perpendicular in ideal case)\n sorted_dist = [i[0] for i in distance_map]\n\n # 1st plane is found as:\n first_plane = sorted_dist[0:4]\n tilting_planes.append(first_plane)\n distance_map_first_plane = []\n for i in range(1, 4):\n distance_map_first_plane.append([ first_plane[i], self.virtual_atoms.get_distance( first_plane[0], first_plane[i] ) ])\n distance_map_first_plane = sorted(distance_map_first_plane, key=lambda x: x[1])\n sorted_first_plane = [i[0] for i in distance_map_first_plane]\n\n # 2nd and 3rd planes are found as:\n tilting_planes.append([ sequence[0], sorted_dist[4], first_plane[0], sorted_first_plane[2] ])\n tilting_planes.append([ sequence[0], sorted_dist[4], sorted_first_plane[0], sorted_first_plane[1] ])\n\n # filter planes by Z according to octahedral spatial compound\n filtered = list(filter(lambda x:\n abs(self.virtual_atoms[ x[0] ].z - self.virtual_atoms[ x[1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n abs(self.virtual_atoms[ x[1] ].z - self.virtual_atoms[ x[2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n 
abs(self.virtual_atoms[ x[2] ].z - self.virtual_atoms[ x[3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE,\n tilting_planes\n )) # Py3\n if len(filtered): tilting_planes = filtered\n\n return tilting_planes\n\n def get_tilting(self, oplane):\n '''\n Main procedure\n '''\n surf_atom1, surf_atom2, surf_atom3, surf_atom4 = oplane\n\n # divide surface atoms into groups by distance between them\n compare = [surf_atom2, surf_atom3, surf_atom4]\n distance_map = []\n\n for i in range(0, 3):\n distance_map.append([ compare[i], self.virtual_atoms.get_distance(surf_atom1, compare[i]) ])\n\n distance_map = sorted(distance_map, key=lambda x: x[1])\n\n distance_map_keys = [i[0] for i in distance_map]\n surf_atom3 = distance_map_keys[2]\n surf_atom2 = distance_map_keys[1]\n surf_atom4 = distance_map_keys[0]\n\n if self.virtual_atoms[surf_atom1].z == self.virtual_atoms[surf_atom2].z and \\\n self.virtual_atoms[surf_atom2].z == self.virtual_atoms[surf_atom3].z and \\\n self.virtual_atoms[surf_atom3].z == self.virtual_atoms[surf_atom4].z:\n # this is done to prevent false zero tilting\n self.virtual_atoms[surf_atom1].z += 1E-10\n self.virtual_atoms[surf_atom2].z += 1E-10\n self.virtual_atoms[surf_atom3].z -= 1E-10\n self.virtual_atoms[surf_atom4].z -= 1E-10\n\n # new axes will be defined simply as vectors standing on 1 - 3 and 2 - 4 (they are moved to the point of origin)\n self.virtual_atoms.append(Atom('X', (self.virtual_atoms[surf_atom1].x - self.virtual_atoms[surf_atom3].x, self.virtual_atoms[surf_atom1].y - self.virtual_atoms[surf_atom3].y, self.virtual_atoms[surf_atom1].z - self.virtual_atoms[surf_atom3].z)))\n self.virtual_atoms.append(Atom('X', (self.virtual_atoms[surf_atom2].x - self.virtual_atoms[surf_atom4].x, self.virtual_atoms[surf_atom2].y - self.virtual_atoms[surf_atom4].y, self.virtual_atoms[surf_atom2].z - self.virtual_atoms[surf_atom4].z)))\n self.virtual_atoms.append(Atom('X', (0, 0, 0)))\n\n # redefine tilted axes\n surf_atom_first = len(self.virtual_atoms)-3\n surf_atom_second = len(self.virtual_atoms)-2\n center = len(self.virtual_atoms)-1\n\n # inverse arbitrary atom\n self.virtual_atoms.append(Atom('X', (-self.virtual_atoms[surf_atom_first].x, -self.virtual_atoms[surf_atom_first].y, -self.virtual_atoms[surf_atom_first].z)))\n inversed_one = len(self.virtual_atoms)-1\n\n # find and add bisectors, silly swapping\n first_bisector = self.get_bisector_point(surf_atom_first, center, surf_atom_second, self.virtual_atoms)\n sec_bisector = self.get_bisector_point(surf_atom_second, center, inversed_one, self.virtual_atoms)\n\n swap = True\n if first_bisector[0] < 0 and sec_bisector[0] < 0:\n swap = False\n if first_bisector[0] < 0:\n first_bisector[0] *= -1\n first_bisector[1] *= -1\n first_bisector[2] *= -1\n if sec_bisector[0] < 0:\n sec_bisector[0] *= -1\n sec_bisector[1] *= -1\n sec_bisector[2] *= -1\n if swap:\n first_bisector, sec_bisector = sec_bisector, first_bisector\n\n swap = False\n if first_bisector[0] < sec_bisector[0] and first_bisector[1] < 0:\n first_bisector[0] *= -1\n first_bisector[1] *= -1\n first_bisector[2] *= -1\n swap = True\n if first_bisector[0] < sec_bisector[0] and first_bisector[1] > 0:\n swap = True\n if first_bisector[0] > sec_bisector[0] and sec_bisector[1] < 0:\n sec_bisector[0] *= -1\n sec_bisector[1] *= -1\n sec_bisector[2] *= -1\n if swap:\n first_bisector, sec_bisector = sec_bisector, first_bisector\n\n self.virtual_atoms.append(Atom('X', (first_bisector[0], first_bisector[1], first_bisector[2])))\n self.virtual_atoms.append(Atom('X', (sec_bisector[0], 
sec_bisector[1], sec_bisector[2])))\n first_bisector = len(self.virtual_atoms)-2\n sec_bisector = len(self.virtual_atoms)-1\n\n # use vector cross product to define normal which will play Z axis role\n self.virtual_atoms.append(Atom('X', (\n self.virtual_atoms[first_bisector].y*self.virtual_atoms[sec_bisector].z - self.virtual_atoms[first_bisector].z*self.virtual_atoms[sec_bisector].y,\n self.virtual_atoms[first_bisector].z*self.virtual_atoms[sec_bisector].x - self.virtual_atoms[first_bisector].x*self.virtual_atoms[sec_bisector].z,\n self.virtual_atoms[first_bisector].x*self.virtual_atoms[sec_bisector].y - self.virtual_atoms[first_bisector].y*self.virtual_atoms[sec_bisector].x\n )))\n tilt_z = len(self.virtual_atoms)-1\n\n # Euler angles ZYZ\n alpha = math.degrees(math.atan2(self.virtual_atoms[sec_bisector].z, self.virtual_atoms[first_bisector].z))\n beta = math.degrees(math.atan2(math.sqrt(self.virtual_atoms[tilt_z].x**2 + self.virtual_atoms[tilt_z].y**2), self.virtual_atoms[tilt_z].z))\n gamma = math.degrees(math.atan2(self.virtual_atoms[tilt_z].y, -self.virtual_atoms[tilt_z].x))\n\n # angles adjusting\n adjust_angles = [45, 90, 135, 180, 225, 270, 315, 360]\n tilting = [alpha, beta, gamma]\n for i in range(0, 3):\n tilting[i] = abs(tilting[i])\n if tilting[i] in adjust_angles:\n tilting[i] = 0.0\n continue\n\n if tilting[i] > self.MAX_TILTING_DEGREE:\n for checkpoint in adjust_angles:\n if checkpoint - self.MAX_TILTING_DEGREE < tilting[i] < checkpoint + self.MAX_TILTING_DEGREE:\n tilting[i] = abs(tilting[i] - checkpoint)\n break\n return tilting\n","sub_path":"tilde/apps/perovskite_tilting/perovskite_tilting.py","file_name":"perovskite_tilting.py","file_ext":"py","file_size_in_byte":17287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"449758878","text":"\"\"\"\nImporting the required libraries\n\"\"\"\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\n\"\"\"\nWhen running from the terminal following arguments have to be specified\n\"\"\"\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n help=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n help=\"minimum probability to filter weak detections\")\nap.add_argument(\"-i\", \"--shape\", type=str, required=True,\n help=\"path to the image that containes shape\")\nap.add_argument(\"-o\", \"--output\", type=str,\n\thelp=\"path to optional output video file\")\nargs = vars(ap.parse_args())\n\"\"\"\nClasses here characterizes the objects that this particular code can recognize\n\"\"\"\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\"\"\"\nLoading the prototext and caffemodel\n\"\"\"\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\"\"\"\ninitialize the video stream, allow the camera sensor to warmup,\nand initialize the FPS counter\n\"\"\"\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\ntime.sleep(2.0)\nfps = FPS().start()\n\ndef 
nothing(x):\n # any operation\n pass\n\n\"\"\"\nDefining the trackers for the frame so as to manually change on which color to detect\n\"\"\"\ncap = cv2.VideoCapture(0)\ncv2.namedWindow(\"Trackbars\")\ncv2.createTrackbar(\"L-H\", \"Trackbars\", 0, 180, nothing)\ncv2.createTrackbar(\"L-S\", \"Trackbars\", 66, 255, nothing)\ncv2.createTrackbar(\"L-V\", \"Trackbars\", 134, 255, nothing)\ncv2.createTrackbar(\"U-H\", \"Trackbars\", 180, 180, nothing)\ncv2.createTrackbar(\"U-S\", \"Trackbars\", 255, 255, nothing)\ncv2.createTrackbar(\"U-V\", \"Trackbars\", 243, 255, nothing)\n\"\"\"\nTaking as an input the shape to be detected.\nAlso finding it contours.\n\"\"\"\npath = args[\"shape\"]\nimg = cv2.imread(path,0)\nret, thresh = cv2.threshold(img, 127,255,0)\ncontours, _ = cv2.findContours(thresh, 2,1)\ncnt1 = contours[0]\n\"\"\"\ninitializing the writer object\n\"\"\"\nwriter = None\nwhile True:\n \"\"\"\n getting the frame from the webcam/ipcam and resizing it to a specific size\n \"\"\"\n frame = vs.read()\n frame = imutils.resize(frame, width=1000)\n \"\"\"\n grab the frame dimensions and convert it to a blob\n \"\"\"\n (h, w) = frame.shape[:2]\n \"\"\"\n specifying the video format\n \"\"\"\n if args[\"output\"] is not None and writer is None:\n fourcc = cv2.VideoWriter_fourcc(*\"MP4V\")\n writer = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n (w, h), True)\n \"\"\"\n coverting the fram from BGR to HSV\n \"\"\"\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \"\"\"\n getting the pointer position from the trackbar\n \"\"\"\n l_h = cv2.getTrackbarPos(\"L-H\", \"Trackbars\")\n l_s = cv2.getTrackbarPos(\"L-S\", \"Trackbars\")\n l_v = cv2.getTrackbarPos(\"L-V\", \"Trackbars\")\n u_h = cv2.getTrackbarPos(\"U-H\", \"Trackbars\")\n u_s = cv2.getTrackbarPos(\"U-S\", \"Trackbars\")\n u_v = cv2.getTrackbarPos(\"U-V\", \"Trackbars\")\n \"\"\"\n specifying the range of yellow color to be detected\n \"\"\"\n lower_red = np.array([20, 110, 110])\n upper_red = np.array([40, 255, 255])\n \"\"\"\n specifying the range of the color dynamically\n \"\"\"\n lower = np.array([l_h,l_s,l_v])\n upper = np.array([u_h,u_s,u_v])\n \"\"\"\n the following snippet will show a blacked out frame.\n This frame highlights the portion of the frame where color is detected.\n \"\"\"\n mask = cv2.inRange(hsv, lower, upper)\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.erode(mask, kernel)\n font = cv2.FONT_HERSHEY_COMPLEX\n \"\"\"\n drawing the line at the center\n \"\"\"\n cv2.line(frame, (0, h//2), (w, h//2), (0, 255, 0), 2)\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),\n 0.007843, (300, 300), 127.5)\n \"\"\"\n pass the blob through the network and obtain the detections and predictions\n \"\"\"\n net.setInput(blob)\n detections = net.forward()\n \"\"\"\n loop over the detections\n \"\"\"\n for i in np.arange(0, detections.shape[2]):\n \"\"\"\n Extracting the confidence\n \"\"\"\n confidence = detections[0, 0, i, 2]\n \"\"\"\n removing the unnecesaary detections based on the confidence\n \"\"\"\n if confidence > args[\"confidence\"]:\n \"\"\"\n extract the index of the class label from the\n detections`, then compute the (x, y)-coordinates of\n the bounding box for the object\n \"\"\"\n idx = int(detections[0, 0, i, 1])\n if CLASSES[idx] != \"bottle\":\n continue\n \"\"\"\n drawing the detection on the fram using a rectangle border.\n tracking this border to check whether the bottle moves\n beyond the specified limits\n \"\"\"\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, 
endY) = box.astype(\"int\") \n label = \"{}: {:.2f}%\".format(CLASSES[idx],\n confidence * 100)\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n COLORS[idx], 2) \n center = startY+endY/2\n if center > h//2:\n cv2.putText(frame, 'BOTTLE CROSSED', (500, h-40), \n cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2) \n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(frame, label, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2) \n\n \"\"\"\n checkng the cv2 version installed in the system\n and getting the contours from the blacked out frame\n \"\"\"\n if int(cv2.__version__[0]) > 3:\n contours, _ = cv2.findContours(mask, 2, 1)\n else:\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \"\"\"\n looping over the contours\n \"\"\"\n for cnt in contours:\n \"\"\"\n calculating the area\n approximating the points to identify the shapes\n \"\"\"\n area = cv2.contourArea(cnt)\n approx = cv2.approxPolyDP(cnt, 0.02*cv2.arcLength(cnt, True), True)\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n \"\"\"\n the shape detected is then compared with the base shape 'star'\n \"\"\"\n dist = cv2.matchShapes(cnt, cnt1, 1, 0.0)\n \"\"\"\n if the shape is matched, it will draw the borders around the shape \n using cv2 and a rectangle box keeping the shape in the center. \n Also shape tracking is done to check whether it moves \n beyond a specified limit or not\n \"\"\"\n if area > 400 and dist < 0.001:\n cv2.drawContours(frame, [approx], 0, (0, 255, 0), 2)\n (a,b,c,d) = cv2.boundingRect(cnt)\n cv2.rectangle(frame, (a,b), (a+c,b+d),(255,0,0),1)\n cv2.putText(frame, \"MATCHED\", (x, y), font, 1, (255, 0, 0))\n center = (b + b + d)/2\n if center > h//2:\n cv2.putText(frame, 'SHAPE CROSSED', (500, h-40),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2)\n \"\"\"\n if the writer object is true, it will write the entire tracking into \n a video file\n \"\"\"\n if writer is not None:\n writer.write(frame)\n \"\"\"\n displaying the frame\n \"\"\"\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n fps.update()\n \nfps.stop()\nprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"obj_shape.py","file_name":"obj_shape.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"63689028","text":"\"\"\"\nProject Euler Problem 9\n=======================\n\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for\nwhich,\n a^2 + b^2 = c^2\n\nFor example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\nFind the product abc.\n\"\"\"\n\n\ndef solution():\n for c in range(997, 2, -1):\n a, b = 1, 999 - c\n target = c ** 2\n while a < b:\n if a ** 2 + b ** 2 == target:\n return a * b * c\n a += 1\n b -= 1\n\n\nprint(solution())\n","sub_path":"009.py","file_name":"009.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"264579256","text":"from flask import Flask, render_template, request, redirect\nfrom flask_login import login_required\nfrom flask_migrate import Migrate\nfrom social_flask.routes import social_auth\nfrom social_flask_sqlalchemy.models import init_social\nimport models\n\n\ndef setup_app_routes(app):\n @app.before_request\n def force_ssl():\n if not app.config['SKIP_SSL'] and request.url.startswith('http://'):\n new = request.url.replace('http://', 'https://', 1)\n return redirect(new, code=301)\n\n # @app.route('/login')\n # def login():\n # return render_template('login.html')\n\n @app.route('/')\n @app.route('/')\n # @login_required\n def index(path=None):\n return 'coming soon'\n # return render_template('app.html')\n\n\ndef register_blueprints(app):\n app.register_blueprint(social_auth)\n\n\ndef init_libraries(app):\n models.db.init_app(app)\n Migrate(app, models.db)\n init_social(app, models.db.session)\n models.user.init_auth(app)\n\n\ndef create_app(config='config'):\n app = Flask(__name__)\n app.config.from_object(config)\n\n init_libraries(app)\n register_blueprints(app)\n setup_app_routes(app)\n\n return app\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"40306762","text":"\"\"\"\nGraph class and functionality\n\"\"\"\nclass Graph:\n routes = []\n \n \n def __init__(self, cities, routes, city_dict, convert):\n \"\"\"\n Constructor for the Graph\n \"\"\"\n self.city_info = cities\n self.route_info = routes\n self.city_dict = city_dict\n self.convert = convert\n \n for route in routes:\n port = route[\"ports\"]\n self.routes.append((port[0].encode(\"utf-8\"),port[1].encode(\"utf-8\")))\n \n def print_cities(self):\n \"\"\"\n List of all cities\n \"\"\"\n for city in self.city_info:\n print(city[\"name\"])\n \n def get_city_info(self, city_name):\n \"\"\"\n Information of a particular city\n \"\"\"\n flag =0\n for check in self.city_info:\n if(check[\"name\"].encode(\"utf-8\") == city_name):\n print(\"Name: \" + check[\"name\"].encode(\"utf-8\"))\n print(\"Code: \" + check[\"code\"].encode(\"utf-8\"))\n print(\"Country: \" + check[\"country\"].encode(\"utf-8\"))\n print(\"Continent: \" + check[\"continent\"].encode(\"utf-8\"))\n print(\"Timezone: {}\" ).format(check[\"timezone\"])\n print(\"Coordinates: {}\" ).format(check[\"coordinates\"])\n print(\"Population: {}\" ).format(check[\"population\"])\n print(\"Region: {}\" 
).format(check[\"region\"])\n                flag=1\n        \n        if(flag==0):\n            print(\"Invalid Input\")\n        \n    def longest_flight(self):\n        \"\"\"\n        Distance and endpoints of the longest flight\n        \"\"\"\n        max =0\n        max_route = None\n        \n        for route in self.route_info:\n            if(route[\"distance\"]>max):\n                max = route[\"distance\"]\n                max_route = route[\"ports\"]\n        \n        print(\"Longest flight is:\")\n        print(\"From: {} to {}\").format(max_route[0].encode(\"utf-8\"), max_route[1].encode(\"utf-8\"))\n        print(\"Distance of: {}\").format(max)\n        return max\n    \n    def shortest_flight(self):\n        \"\"\"\n        Distance and endpoints of the shortest flight\n        \"\"\"\n        min =100000000\n        min_route = None\n        \n        for route in self.route_info:\n            if(route[\"distance\"]<min):\n                min = route[\"distance\"]\n                min_route = route[\"ports\"]\n        \n        print(\"Shortest flight is:\")\n        print(\"From: {} to {}\").format(min_route[0].encode(\"utf-8\"), min_route[1].encode(\"utf-8\"))\n        print(\"Distance of: {}\").format(min)\n        return min\n    \n    def biggest_city(self):\n        \"\"\"\n        Calculates city with the biggest population\n        \"\"\"\n        max = 0\n        max_city = None\n        \n        for city in self.city_info:\n            if(city[\"population\"]>max):\n                max = city[\"population\"]\n                max_city = city[\"name\"]\n        \n        print(\"Biggest City served by CSAir: \")\n        print(\"{}, with a population of {}\").format(max_city, max)\n        return max\n    \n    def smallest_city(self):\n        \"\"\"\n        Calculates city with the least population\n        \"\"\"\n        min = 100000000000000\n        min_city = None\n        \n        for city in self.city_info:\n            if(city[\"population\"]<min):\n                min = city[\"population\"]\n                min_city = city[\"name\"]\n        \n        print(\"Smallest City served by CSAir: \")\n        print(\"{}, with a population of {}\").format(min_city, min)\n        return min\n    \n    def hub_cities(self):\n        \"\"\"\n        Calculates CSAir's hub cities (most incoming flights)\n        \"\"\"\n        hub_cities = []\n        max = 0\n        \n        for key in self.city_dict:\n            if(len((self.city_dict[key]).flights_in)>max):\n                max = len((self.city_dict[key]).flights_in)\n        \n        \n        for key in self.city_dict:\n            if(len((self.city_dict[key]).flights_in)==max): \n                hub_cities.append((self.city_dict[key]).name) \n        \n        \n        print(\"{} are CSAir's main hub cities\").format(hub_cities)\n    \n    def visualize(self):\n        \"\"\"\n        URL to visualize entire network\n        \"\"\"\n        url = \"http://www.gcmap.com/mapui?P=\"\n        \n        for route in self.routes:\n            url = url+route[0] + \"-\" + route[1]\n            url = url + \",+\"\n        \n        url = url[:-2]\n        print(url)\n    \n    \n    ","sub_path":"CSAir2.0/Assignment2.0/src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"153475443","text":"#!/usr/bin/env python3\n\n# Author: Jeffrey Grover\n# Purpose: Annotate the differentially expressed genes from DESeq2 with\n# start/stop and feature information from a gff3. 
This will also work with any\n# gene list where geneID is in the first column of a tsv/csv\n# Created: 10/2017\n\nimport csv\nfrom argparse import ArgumentParser\n\n# Function definitions go here\n\n\ndef parse_deseq2(input_file, file_dialect):\n features_dict = {}\n with open(input_file, 'r') as input_handle:\n if file_dialect == 'csv':\n results = csv.reader(input_handle)\n elif file_dialect == 'tsv':\n results = csv.reader(input_handle, delimiter='\\t')\n field_names = next(results)\n for row in results:\n feature_id = row[0]\n remaining_fields = row[1:]\n features_dict[feature_id] = [remaining_fields]\n return [features_dict, field_names]\n\n\ndef parse_gff(input_gff, gff_feature):\n gff_dict = {}\n with open(input_gff, 'r') as input_handle:\n gff3 = csv.reader(\n (row for row in input_handle if not row.startswith('#')),\n delimiter='\\t')\n for row in gff3:\n if row[2] == gff_feature:\n chromosome = row[0]\n feature = row[2]\n start = int(row[3])\n stop = int(row[4])\n strand = row[6]\n feature_id = int(''.join(\n filter(str.isdigit, str(row[8].split(';')[0])[3:])))\n if chromosome not in gff_dict:\n gff_dict[chromosome] = {}\n if feature_id not in gff_dict[chromosome]:\n gff_dict[chromosome][feature_id] = {}\n gff_dict[chromosome][feature_id] = [\n feature, start, stop, strand\n ]\n return gff_dict\n\n\ndef annotate_results(features_dict, gff3_dict, output_file, header):\n with open(output_file, 'w') as output_handle:\n output_file = csv.writer(output_handle)\n output_file.writerow(header)\n for feature in features_dict:\n feature_digits = int(''.join(filter(str.isdigit, feature)))\n for chromosome in gff3_dict:\n if feature_digits in gff3_dict[chromosome]:\n feature_type = gff3_dict[chromosome][feature_digits][0]\n start = gff3_dict[chromosome][feature_digits][1]\n stop = gff3_dict[chromosome][feature_digits][2]\n strand = gff3_dict[chromosome][feature_digits][3]\n deseq2_info = features_dict[feature][0]\n output_row = [\n chromosome, feature, feature_type, start, stop, strand\n ] + deseq2_info\n output_file.writerow(output_row)\n\n\n# Parse command line options\n\nparser = ArgumentParser(\n description='Annotate the differentially expressed genes from DESeq2 with '\n 'start/stop and feature information from a gff3. 
This will also work with '\n    'any gene/feature list where geneID is in the first column of a tsv/csv')\nparser.add_argument('--gff', help='Input gff3 file', metavar='File')\nparser.add_argument('--deseq', help='Input DESeq2 results file', metavar='File')\nparser.add_argument('-d', '--deseq_dialect', help='tsv or csv')\nparser.add_argument('-f', '--feature', help='String matching a gff feature')\n\ngff_file = parser.parse_args().gff\ndeseq2_file = parser.parse_args().deseq\ndeseq2_dialect = parser.parse_args().deseq_dialect\nfeature = parser.parse_args().feature\noutput_file = deseq2_file.rsplit('.', 1)[0] + '_annotated.csv'  # strip only the extension\n\n# Run the functions to create dictionaries\n\ndeseq2_result = parse_deseq2(deseq2_file, deseq2_dialect)\ndeseq2_dict = deseq2_result[0]\ndeseq2_header = deseq2_result[1]\ngff3_dict = parse_gff(gff_file, feature)\n\n# Create the header for the output file\n\noutput_header = [\n    'chromosome', 'feature_id', 'type', 'start', 'stop', 'strand'\n] + deseq2_result[1][1:]\n\n# Run the function to annotate the file\n\nannotate_results(deseq2_dict, gff3_dict, output_file, output_header)\n","sub_path":"deseq2_results_gff_annotate.py","file_name":"deseq2_results_gff_annotate.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"288134916","text":"import codecs\nfrom os import path\n\n\nfrom preprocessing.language_standardizer import eng\n\nPREPROCESS_SUFFIX = '_PRE'\n\n\nclass Preprocessor(object):\n    \"\"\"\n    Preprocessor\n    \"\"\"\n    def __init__(self, file_name, input_dir, output_dir, standardizer=eng):\n        \"\"\"\n        Parameters:\n            file_name - name of input file\n        \"\"\"\n        self.standardizer = standardizer\n        self.input_dir = input_dir\n        self.file_name = file_name\n        self.output_dir = output_dir\n\n    def process(self):\n        output_name = self.file_name + PREPROCESS_SUFFIX\n        in_file = path.join(self.input_dir, self.file_name)\n        out_file = path.join(self.output_dir, output_name)\n        with codecs.open(in_file, encoding='UTF-8') as f:\n            with codecs.open(out_file, mode='w+', encoding='UTF-8') as o:\n                for line in f:\n                    processed_line = self.standardizer.standardize_line(line)\n                    o.write(processed_line)\n","sub_path":"preprocessing/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"156624508","text":"from BuildMinHeap import MinHeap\nfrom random import randint\n\n# 1. Create Heaps.\n# 2. Check each Heap.\n# 3. 
Run this whole code.\n\ndef create_heap():\n    \"\"\" creates heap for testing \"\"\"\n\n    no_tst = randint(100, 1000)\n    heap_lst = []\n    for i in range(no_tst):\n        size = randint(0, 1000)\n        arr = []\n        for i in range(size):\n            ele = randint(0, 100000000)\n            arr.append(ele)\n        obj = MinHeap(size, arr)\n        obj.buildHeap()\n        heap_lst.append(obj)\n    \n    return heap_lst\n\n\ndef check_min_heap(test_cases):\n    \"\"\" Checks for a min heap \"\"\"\n\n    result = [False for i in range(len(test_cases))]\n    for idx, obj in enumerate(test_cases):\n        for i in range(1, obj.heapSize):\n            if obj.heapCheck == False:\n                result[idx] = False\n                break\n            else:\n                result[idx] = True\n\n    return result\n\n\ndef final_check(result):\n    \"\"\" Displays the final result \"\"\"\n    \n    for flag in result:\n        if flag == False:\n            return False\n    return True\n\n\ndef main():\n    \"\"\" The main function \"\"\"\n\n    test_cases = create_heap()\n    result = check_min_heap(test_cases)\n    pass_res = final_check(result)\n    if pass_res:\n        print('This passes the test cases.')\n    else:\n        print('The code does not work properly')\n\nif __name__ == '__main__':\n    main()","sub_path":"Heaps/001MinHeapTest.py","file_name":"001MinHeapTest.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"86014931","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 14:51:55 2013\n\n@author: cimatori\n\"\"\"\n\n# 1. General\n# Data path\nfrom socket import gethostname\nif gethostname()=='sboron2':\n    BaseDir = '/run/media/sambarluc/Cimatoribus1/Analyses/LIS131/'\nelse:\n    BaseDir = '/media/scratch/Analyses/LIS131/'\n#Output directory\nOutDir = BaseDir+'TemperatureDissipation/'\n# Data dir\nDetailFile = BaseDir+'ProcessData/results/Output_data_stex.chn'\n\n# Are we computing slopes of NoFilter or Taylor data?\ndata_type = 'Taylor'\n\n# Start-end dates\nStart = 104.\nEnd = 224.\n\n# Range of increments in the inertial range for\n# tide up\nif data_type=='NoFilter':\n    limsup = ([8,1.e3], # set 1-36\n              [8,1.e3], # set 37-72\n              [8,1.e3], # set 73-108\n              [8,1.e3]) # set 109-144\n    limsdown=([8,1.e3], # set 1-36\n              [8,1.e3], # set 37-72\n              [8,1.e3], # set 73-108\n              [8,1.e3]) # set 109-144\nelif data_type=='Taylor':\n    limsup = ([2.0,60], # set 1-72\n              [2.5,60]) # set 73-144\n    limsdown=([2,100], # set 1-72\n              [2,18]) # set 73-144\n    limslarge = 300.\nelif data_type=='TaylorHi': # Taylor high passed\n    limsup = ([1,100], # set 1-36\n              [1,100], # set 37-72\n              [1,100], # set 73-108\n              [1,100]) # set 109-144\n    limsdown=([1,100], # set 1-36\n              [1,100], # set 37-72\n              [1,100], # set 73-108\n              [1,100]) # set 109-144\ndTs = (limsup, limsdown)\n\n# Which thermistor sets should be computed?\nsets = ['73-108','109-144']\n\n# Order of moments to compute\nMoms = range(1,11)\nnM = len(Moms)\n\n# Which sets to consider\nsetNames = ('AB','CD')\nnSets = len(setNames)\n\nsetLabels = dict(zip(setNames,('A','B','C','D')))\nsetThms = dict(zip(setNames,(range(0,36),range(36,72),range(72,108),range(108,144))))\n\n# Style for point plots\npStyle = dict(ms=8, alpha=0.5, ls='none', mew=1.2)\npStyle2 = dict(ms=4, alpha=0.8, ls='none')\n# Style for fill_between plots\nfStyle = dict(alpha=0.2, lw=1.)\n\n# Markers for tidal phases\nmarkT = ('^','o')\nnamT = ('up', 'down')\ncolorsT = ('b','r')\n\n# Number of tidal phases\nnT = len(namT)\n\n# Step of Taylor data, in case we are using it\nStepX = 0.2\n\n# Colors for plotting\nfrom matplotlib.pyplot import cm\ncolorsSet = cm.get_cmap('jet',nSets)(range(nSets))\nmarkSet = 
('o','*','^','s')\n\n# Label symbols\nif data_type=='NoFilter':\n delta = '$\\\\Delta \\\\theta_\\\\tau$'\n spacing = '$\\\\tau$ $\\\\mathrm{[s]}$'\n axis = '\\\\tau'\nelif data_type in ('Taylor', 'TaylorHi'):\n delta = '$\\\\Delta \\\\theta_x$'\n spacing = '$r$ $\\\\mathrm{[m]}$'\n axis = 'r'\norder = '$q$'\nexponent= '$\\\\zeta_q$'\n\n# Style for tidal phases\nmarkT = ('^','o')\nnamT = ('up', 'down')\ncolorsT = ('b','r')\n\n# Number of tidal phases\nnT = len(namT)\n\n# File names\nif data_type=='NoFilter':\n files = [OutDir+'results/Moments_{}_NoFilter_day_{}_{}.npz' \\\n .format(sn,Start,End) for sn in setNames]\n fig_file = OutDir+'figures/MomentSlopes_NoFilter_day_{}_{}.pdf' \\\n .format(Start,End)\nelif data_type=='Taylor':\n files = [OutDir+'results/Moments_{}_Taylor_StepX_{}_day_{}_{}.npz' \\\n .format(sn,StepX,Start,End) for sn in setNames]\n fig_file = OutDir+'figures/Taylor_StepX_{}/MomentSlopes_day_{}_{}.tif' \\\n .format(StepX,Start,End)\n fig_file_L = OutDir+'figures/Taylor_StepX_{}/MomentSlopesLarge_day_{}_{}.png' \\\n .format(StepX,Start,End)\nelif data_type=='TaylorHi':\n files = [OutDir+'results/Moments_{}_TaylorHighPass_StepX_{}_day_{}_{}.npz' \\\n .format(sn,StepX,Start,End) for sn in setNames]\n fig_file = OutDir+'figures/Taylor_StepX_{}/MomentSlopes_HighPass_day_{}_{}.pdf' \\\n .format(StepX,Start,End)\n","sub_path":"LIS131/TemperatureDissipation/ConfigMomentSlopes.py","file_name":"ConfigMomentSlopes.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"192431444","text":"from .models import MoonDay, Ritual\nfrom datetime import *\n\n\n\ndef clear_year(year):\n return MoonDay.objects.filter(year=year).delete()\n\ndef fill_default_calendar(year, save=False):\n days = MoonDay.objects.filter(year=year)\n fd = datetime(year, 1, 1)\n ld = datetime(year+1,1,1)\n dd = ld-fd\n \n sdn=0\n sdmn=6 \n \n for i in range(sdn, dd.days):\n nmd = MoonDay(year=year, day_no=i, moon_day_no=sdmn, morning_hural=Ritual.objects.get(pk=13), day_hural=Ritual.objects.get(pk=14))\n if nmd.date().weekday() == 4:\n nmd.day_hural = Ritual.objects.get(pk=16) # юроол\n elif nmd.date().weekday() == 5:\n nmd.day_hural = Ritual.objects.get(pk=17) # банзарагша\n elif nmd.date().weekday() == 6:\n nmd.morning_hural = Ritual.objects.get(pk=15) # намсарай\n nmd.day_hural = Ritual.objects.get(pk=18) # алтэн гэрэл\n tl = [(25, 31),(23, 29),(25, 31),(24, 30),(25, 31),(24, 30),(25, 31),(25, 31), (24, 30),(25, 31),(24, 30),(25, 31)] if dd.days == 366 else [(25, 31),(22, 28),(25, 31),(24, 30),(25, 31),(24, 30),(25, 31),(25, 31), (24, 30),(25, 31),(24, 30),(25, 31)] \n if nmd.date().day in range(tl[nmd.date().month-1][0], tl[nmd.date().month-1][1]):\n nmd.day_hural = Ritual.objects.get(pk=19) # сундуй\n \n sdmn = sdmn + 1 if sdmn < 29 else 1\n \n if save: \n nmd.save()\n print (str(nmd))\n\n return days","sub_path":"kalachakra/saraswati/cal_helpers.py","file_name":"cal_helpers.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"186688900","text":"import unittest\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\n\nfrom avalanche.models import SimpleMLP\nfrom avalanche.training.plugins import ExperienceBalancedStoragePolicy, \\\n ClassBalancedStoragePolicy, ReplayPlugin\nfrom avalanche.training.strategies import Naive\n\nfrom tests.unit_tests_utils import get_fast_scenario\n\n\nclass 
ReplayTest(unittest.TestCase):\n def test_replay_balanced_memory(self):\n mem_size = 25\n policies = [None,\n ExperienceBalancedStoragePolicy({}, mem_size=mem_size),\n ClassBalancedStoragePolicy({}, mem_size=mem_size)]\n for policy in policies:\n self._test_replay_balanced_memory(policy, mem_size)\n\n def _test_replay_balanced_memory(self, storage_policy, mem_size):\n scenario = get_fast_scenario(use_task_labels=True)\n model = SimpleMLP(input_size=6, hidden_size=10)\n replayPlugin = ReplayPlugin(mem_size=mem_size,\n storage_policy=storage_policy)\n cl_strategy = Naive(\n model,\n SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001),\n CrossEntropyLoss(), train_mb_size=32, train_epochs=1,\n eval_mb_size=100, plugins=[replayPlugin]\n )\n\n n_seen_data = 0\n for step in scenario.train_stream:\n n_seen_data += len(step.dataset)\n mem_fill = min(mem_size, n_seen_data)\n cl_strategy.train(step)\n ext_mem = replayPlugin.ext_mem\n lengths = []\n for task_id in ext_mem.keys():\n lengths.append(len(ext_mem[task_id]))\n self.assertEqual(sum(lengths), mem_fill) # Always fully filled\n\n def test_balancing(self):\n p1 = ExperienceBalancedStoragePolicy({}, 100, adaptive_size=True)\n p2 = ClassBalancedStoragePolicy({}, 100, adaptive_size=True)\n\n for policy in [p1, p2]:\n self.assert_balancing(policy)\n\n def assert_balancing(self, policy):\n ext_mem = policy.ext_mem\n scenario = get_fast_scenario(use_task_labels=True)\n replay = ReplayPlugin(mem_size=100, storage_policy=policy)\n model = SimpleMLP(num_classes=scenario.n_classes)\n\n # CREATE THE STRATEGY INSTANCE (NAIVE)\n cl_strategy = Naive(model,\n SGD(model.parameters(), lr=0.001),\n CrossEntropyLoss(), train_mb_size=100,\n train_epochs=0,\n eval_mb_size=100, plugins=[replay], evaluator=None)\n\n for exp in scenario.train_stream:\n cl_strategy.train(exp)\n print(list(ext_mem.keys()), [len(el) for el in ext_mem.values()])\n\n # buffer size should equal self.mem_size if data is large enough\n len_tot = sum([len(el) for el in ext_mem.values()])\n assert len_tot == policy.mem_size\n","sub_path":"tests/training/test_replay.py","file_name":"test_replay.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"545888119","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef remove_keymap_conflicts(new_keys_set):\n for prop in plt.rcParams:\n if prop.startswith('keymap.'):\n keys = plt.rcParams[prop]\n remove_list = set(keys) & new_keys_set\n for key in remove_list:\n keys.remove(key)\n\ndef multi_slice_viewer(volume, view='axial',\n overlay_1=None, overlay_1_cmap='RdYlGn', overlay_1_alpha=0.5, overlay_1_thres=0.5,\n overlay_2=None, overlay_2_cmap='Wistia', overlay_2_alpha=0.5, overlay_2_thres=0.5,\n title=''):\n\n assert view in ['axial', 'sagittal', 'coronal']\n remove_keymap_conflicts({'j', 'k'})\n\n # change view\n rotation = None\n if view == 'axial':\n rotation = lambda img : img.copy()\n elif view == 'sagittal':\n rotation = lambda img : np.rot90(np.rot90(img.copy(), axes=(0,2)), axes=(1,2))\n elif view == 'coronal':\n rotation = lambda img : np.rot90(img.copy(), axes=(1,0))\n\n fig, ax = plt.subplots()\n plt.xticks([], [])\n plt.yticks([], [])\n ax.volume = rotation(volume)\n ax.index = ax.volume.shape[0] // 2\n ax.imshow(ax.volume[ax.index], cmap='gray', vmin=np.min(ax.volume), vmax=np.max(ax.volume), interpolation='bilinear')\n if overlay_1 is not None:\n ax.volume_2 = np.ma.masked_where(rotation(overlay_1) < overlay_1_thres, 
rotation(overlay_1))\n        ax.imshow(ax.volume_2[ax.index], vmin=np.min(ax.volume_2) , vmax=np.max(ax.volume_2), cmap=overlay_1_cmap, alpha=overlay_1_alpha)\n    else:\n        ax.volume_2 = None\n    if overlay_2 is not None:\n        # note: overlay_2 assumes overlay_1 is also shown (it is drawn as image index 2)\n        ax.volume_3 = np.ma.masked_where(rotation(overlay_2) < overlay_2_thres, rotation(overlay_2))\n        ax.imshow(ax.volume_3[ax.index], vmin=np.min(ax.volume_3) , vmax=np.max(ax.volume_3), cmap=overlay_2_cmap, alpha=overlay_2_alpha)\n    else:\n        ax.volume_3 = None\n    plt.title(title)\n    plt.xlabel(ax.index)\n    fig.canvas.mpl_connect('key_press_event', process_key)\n    fig.canvas.mpl_connect('scroll_event', process_scroll)\n\ndef process_key(event):\n    fig = event.canvas.figure\n    ax = fig.axes[0]\n    if event.key == 'j':\n        previous_slice(ax)\n    elif event.key == 'k':\n        next_slice(ax)\n    fig.canvas.draw()\n\ndef process_scroll(event):\n    fig = event.canvas.figure\n    ax = fig.axes[0]\n    if event.button == 'down':\n        previous_slice(ax)\n    elif event.button == 'up':\n        next_slice(ax)\n    fig.canvas.draw()\n\ndef previous_slice(ax):\n    volume = ax.volume\n    ax.index = (ax.index - 1) % volume.shape[0]  # wrap around using %\n    plt.xlabel(ax.index)\n    ax.images[0].set_array(volume[ax.index])\n    if ax.volume_2 is not None:\n        volume_2 = ax.volume_2\n        ax.images[1].set_array(volume_2[ax.index])\n    if ax.volume_3 is not None:\n        volume_3 = ax.volume_3\n        ax.images[2].set_array(volume_3[ax.index])\n\ndef next_slice(ax):\n    volume = ax.volume\n    ax.index = (ax.index + 1) % volume.shape[0]\n    plt.xlabel(ax.index)\n    ax.images[0].set_array(volume[ax.index])\n    if ax.volume_2 is not None:\n        volume_2 = ax.volume_2\n        ax.images[1].set_array(volume_2[ax.index])\n    if ax.volume_3 is not None:\n        volume_3 = ax.volume_3\n        ax.images[2].set_array(volume_3[ax.index])\n","sub_path":"multi_slice_viewer.py","file_name":"multi_slice_viewer.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"294404046","text":"import time\n\ndef insertion_sort(a_list):\n    \"\"\"Sort a_list in ascending order. 
\n \n Use insertion sort with linear search.\n \"\"\"\n for current_position in range(1, len(a_list)):\n current_value = a_list[current_position]\n position = current_position\n while position > 0 and a_list[position - 1] > current_value:\n a_list[position] = a_list[position - 1]\n position = position - 1\n a_list[position] = current_value\n\n# the length of the input list\nsize = 32000 # CHANGE THIS\n\n# Create a list of integers in descending order (worst-case scenario)\ntest_list = []\nfor index in range(size):\n test_list.append(size - index)\n# Time the execution of the sorting algorithm\nstart = time.time()\ninsertion_sort(test_list)\nelapsed = (time.time() - start) * 1000\nprint(size, 'integers sorted in', int(elapsed), 'milliseconds')","sub_path":"insertion_sort/python/with_binary_search/normal_insertion_sort.py","file_name":"normal_insertion_sort.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"152242500","text":"import sqlite3\n\ncon = sqlite3.connect('sqlite.db')\n\ncursor = con.cursor()\n\ncommand = \"\"\"SELECT playlist.id,music.path FROM music JOIN playlist\nON music.id_playlist = playlist.id\nORDER BY playlist.id\"\"\"\n\ncursor.execute(command)\nresult = cursor.fetchall()\nprint(result)","sub_path":"Requetes/Requtes SQL python/recuperer les musique dune playlist.py","file_name":"recuperer les musique dune playlist.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"175444705","text":"import pygame\nfrom pygame.locals import *\nfrom functions.checkpoint_functions import get_highest_checkpoint_reached\n\n\ndef death(screen, player, camera, rooms):\n pygame.image.save(screen, \"data/images/temp.jpg\")\n screen_shot = pygame.image.load(\"data/images/temp.jpg\")\n\n black = (0, 0, 0, 255)\n\n black_screen = pygame.Surface((camera.screen_width, camera.screen_height))\n black_screen.set_alpha(0)\n current_a_value = 0\n\n exit_counter = 0\n\n # \"revives\" the player\n player.health = 3\n player.invulnerability_counter = 0\n player.speed_x = 0\n player.speed_y = 0\n player.direction_x = 0\n\n # move the player to the highest checkpoint\n highest_checkpoint = get_highest_checkpoint_reached(rooms)\n player.x, player.y = highest_checkpoint.x, highest_checkpoint.y\n\n in_animation = True\n while in_animation:\n for event in pygame.event.get():\n if event.type == QUIT:\n in_animation = False\n pygame.quit()\n\n if event.type == KEYDOWN:\n in_animation = False\n\n black_screen.fill(black)\n screen.blit(screen_shot, (0, 0))\n screen.blit(black_screen, (0, 0))\n\n current_a_value += 10\n if current_a_value >= 255:\n current_a_value = 255\n exit_counter += 1\n\n black_screen.set_alpha(current_a_value)\n if exit_counter == 30:\n break\n\n pygame.display.update()\n\n\n\n\n\n\n","sub_path":"functions/death.py","file_name":"death.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"396759855","text":"import re\nx = []\nsum = a = az= 0\nflag = 0\nq = input()\nfor i in range(0, 12):\n x.append([])\n if az == 0:\n az = input()\n try:\n int(flag)\n for j in range(0, 12):\n y = float(az)\n x[i].insert(j, y)\n az = input()\n\n except:\n flag = 'a'\n lst = list(int(k) for k in re.findall(r'(-?\\d+)', az))\n for j in lst:\n x[i].append(j)\n az = 0\n\n\n\"\"\" \n for i in x:\n print(i)\n\"\"\"\nc = 11\nl =0\nfor i in 
range(0, 12):\n for j in range(0, 12):\n if c < j:\n l += 1\n sum += x[i][j]\n c -= 1\n\nif q == 'S':\n print(\"{0:.1f}\".format(round(sum, 1)))\nelif q == 'M':\n print(\"{0:.1f}\".format(round(sum/l, 1)))","sub_path":"1186 - Below the Secundary Diagonal.py","file_name":"1186 - Below the Secundary Diagonal.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"209426402","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport io\nimport os\nimport json\nimport time\nimport zlib\nimport base64\nimport shutil\nimport inspect\n\nfrom collections import OrderedDict, deque\nfrom datetime import date, datetime\nfrom base64 import b64encode, b64decode\n\n\ntry:\n from dateutil.parser import parse\nexcept:\n pass\n\ntry:\n import numpy as np\nexcept:\n pass\n\ntry:\n import pandas as pd\nexcept:\n pass\n\ntry:\n from . import compresslib\n from .comments import strip_comments\n from .warning import logger, WARN_MSG, prt_console\n from .util import write, read\n from .pkg.six import PY2, PY3, add_metaclass, string_types, iteritems\nexcept:\n from superjson import compresslib\n from superjson.comments import strip_comments\n from superjson.warning import logger, WARN_MSG, prt_console\n from superjson.util import write, read\n from superjson.pkg.six import PY2, PY3, add_metaclass, string_types, iteritems\n\n\ndef get_class_name(obj):\n \"\"\"Get class name in dot separete notation\n\n >>> from datetime import datetime\n >>> obj = datetime.datetime(2000, 1, 1)\n >>> get_class_name(obj) -> \"datetime.datetime\"\n\n >>> from collections import deque\n >>> obj = deque([1, 2, 3])\n >>> get_class_name(obj) -> \"collections.deque\"\n \"\"\"\n return obj.__class__.__module__ + \".\" + obj.__class__.__name__\n\n\ndef get_class_name_from_dumper_loader_method(func):\n \"\"\"Get default value of ``class_name`` argument.\n\n Because the third argument of dumper, loader method must be the class name.\n\n \"\"\"\n return inspect.getargspec(func).defaults[0]\n\n\ndef is_dumper_method(func):\n \"\"\"Test if it is a dumper method.\n \"\"\"\n if inspect.getargspec(func).args == [\"self\", \"obj\", \"class_name\"]:\n return True\n else:\n return False\n\n\ndef is_loader_method(func):\n \"\"\"Test if it is a loader method.\n \"\"\"\n if inspect.getargspec(func).args == [\"self\", \"dct\", \"class_name\"]:\n return True\n else:\n return False\n\n\nclass Meta(type):\n\n def __new__(cls, name, bases, attrs):\n klass = super(Meta, cls).__new__(cls, name, bases, attrs)\n\n _dumpers = dict()\n _loaders = dict()\n\n for base in inspect.getmro(klass):\n for attr, value in base.__dict__.items():\n dumper_warning_message = WARN_MSG.format(\n attr=attr,\n method_type=\"dumper\",\n obj_or_dct=\"obj\",\n dump_or_load=\"dump\",\n )\n\n loader_warning_message = WARN_MSG.format(\n attr=attr,\n method_type=\"loader\",\n obj_or_dct=\"dct\",\n dump_or_load=\"load\",\n )\n\n # find dumper method,\n if attr.startswith(\"dump_\"):\n try:\n if is_dumper_method(value):\n class_name = get_class_name_from_dumper_loader_method(\n value)\n _dumpers[class_name] = value\n else:\n logger.warning(dumper_warning_message)\n except TypeError:\n logger.warning(dumper_warning_message)\n\n # find loader method\n if attr.startswith(\"load_\"):\n try:\n if is_loader_method(value):\n class_name = get_class_name_from_dumper_loader_method(\n value)\n _loaders[class_name] = value\n else:\n logger.warning(loader_warning_message)\n except TypeError:\n 
logger.warning(loader_warning_message)\n\n klass._dumpers = _dumpers\n klass._loaders = _loaders\n return klass\n\n\nif PY2:\n bytes_class_name = \"builtins.str\"\n set_class_name = \"__builtin__.set\"\nelif PY3:\n bytes_class_name = \"builtins.bytes\"\n set_class_name = \"builtins.set\"\n\n\ndef is_compressed_json_file(abspath):\n \"\"\"Test a file is a valid json file.\n\n - *.json: uncompressed, utf-8 encode json file\n - *.js: uncompressed, utf-8 encode json file\n - *.gz: compressed, utf-8 encode json file\n \"\"\"\n abspath = abspath.lower()\n fname, ext = os.path.splitext(abspath)\n if ext in [\".json\", \".js\"]:\n is_compressed = False\n elif ext == \".gz\":\n is_compressed = True\n elif ext == \".tmp\":\n return is_compressed_json_file(fname)\n else:\n raise ValueError(\n \"'%s' is not a valid json file. \"\n \"extension has to be '.json' or '.js' for uncompressed, '.gz' \"\n \"for compressed.\" % abspath)\n return is_compressed\n\n\n@add_metaclass(Meta)\nclass SuperJson(object):\n \"\"\"A extensable json encoder/decoder. You can easily custom converter for \n any types.\n \"\"\"\n _dumpers = dict()\n _loaders = dict()\n\n def _dump(self, obj):\n \"\"\"Dump single object to json serializable value.\n \"\"\"\n class_name = get_class_name(obj)\n if class_name in self._dumpers:\n return self._dumpers[class_name](self, obj)\n raise TypeError(\"%r is not JSON serializable\" % obj)\n\n def _json_convert(self, obj):\n \"\"\"Recursive helper method that converts dict types to standard library\n json serializable types, so they can be converted into json.\n \"\"\"\n # OrderedDict\n if isinstance(obj, OrderedDict):\n try:\n return self._dump(obj)\n except TypeError:\n return {k: self._json_convert(v) for k, v in iteritems(obj)}\n\n # nested dict\n elif isinstance(obj, dict):\n return {k: self._json_convert(v) for k, v in iteritems(obj)}\n\n # list or tuple\n elif isinstance(obj, (list, tuple)):\n return list((self._json_convert(v) for v in obj))\n\n # single object\n try:\n return self._dump(obj)\n except TypeError:\n return obj\n\n def _object_hook1(self, dct):\n \"\"\"A function can convert dict data into object. \n\n it's an O(1) implementation. \n \"\"\"\n # {\"$class_name\": obj_data}\n if len(dct) == 1:\n for key, value in iteritems(dct):\n class_name = key[1:]\n if class_name in self._loaders:\n return self._loaders[class_name](self, dct)\n return dct\n return dct\n\n def _object_hook2(self, dct):\n \"\"\"Another object hook implementation.\n\n it's an O(N) implementation.\n \"\"\"\n for class_name, loader in self._loaders.items():\n if (\"$\" + class_name) in dct:\n return loader(self, dct)\n return dct\n\n def dumps(self, obj,\n indent=None,\n sort_keys=None,\n pretty=False,\n float_precision=None,\n ensure_ascii=True,\n compress=False,\n **kwargs):\n \"\"\"Dump any object into json string.\n\n :param pretty: if True, dump json into pretty indent and sorted key\n format.\n :type pretty: bool\n\n :param float_precision: default ``None``, limit floats to \n N-decimal points. \n :type float_precision: integer\n\n :param compress: default ``False. 
If True, then compress encoded string.\n :type compress: bool\n \"\"\"\n if pretty:\n indent = 4\n sort_keys = True\n\n if float_precision is None:\n json.encoder.FLOAT_REPR = repr\n else:\n json.encoder.FLOAT_REPR = lambda x: format(\n x, \".%sf\" % float_precision)\n\n s = json.dumps(\n self._json_convert(obj),\n indent=indent,\n sort_keys=sort_keys,\n ensure_ascii=ensure_ascii,\n **kwargs\n )\n\n if compress:\n s = compresslib.compress(s, return_type=\"str\")\n\n return s\n\n def loads(self, s,\n object_hook=None,\n decompress=False,\n ignore_comments=False,\n **kwargs):\n \"\"\"load object from json encoded string.\n\n :param decompress: default ``False. If True, then decompress string.\n :type decompress: bool\n\n :param ignore_comments: default ``False. If True, then ignore comments.\n :type ignore_comments: bool\n \"\"\"\n if decompress:\n s = compresslib.decompress(s, return_type=\"str\")\n\n if ignore_comments:\n s = strip_comments(s)\n\n if object_hook is None:\n object_hook = self._object_hook1\n\n if \"object_pairs_hook\" in kwargs:\n del kwargs[\"object_pairs_hook\"]\n\n obj = json.loads(\n s,\n object_hook=object_hook,\n object_pairs_hook=None,\n **kwargs\n )\n\n return obj\n\n def dump(self, obj,\n abspath,\n indent=None,\n sort_keys=None,\n pretty=False,\n float_precision=None,\n ensure_ascii=True,\n overwrite=False,\n verbose=True,\n **kwargs):\n \"\"\"Dump any object into file.\n\n :param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,\n then perform compression.\n :type abspath: str\n\n :param pretty: if True, dump json into pretty indent and sorted key\n format.\n :type pretty: bool\n\n :param float_precision: default ``None``, limit floats to \n N-decimal points. \n :type float_precision: integer\n\n :param overwrite: default ``False``, If ``True``, when you dump to \n existing file, it silently overwrite it. If ``False``, an alert \n message is shown. Default setting ``False`` is to prevent overwrite \n file by mistake.\n :type overwrite: boolean\n\n :param verbose: default True, help-message-display trigger.\n :type verbose: boolean\n \"\"\"\n prt_console(\"\\nDump to '%s' ...\" % abspath, verbose)\n\n is_compressed = is_compressed_json_file(abspath)\n\n if os.path.exists(abspath):\n if not overwrite:\n prt_console(\n \" Stop! File exists and overwrite is not allowed\",\n verbose,\n )\n return\n\n st = time.clock()\n\n s = self.dumps(\n obj,\n indent=indent,\n sort_keys=sort_keys,\n pretty=pretty,\n float_precision=float_precision,\n ensure_ascii=ensure_ascii,\n # use uncompressed string, and directly write to file\n compress=False,\n **kwargs\n )\n\n with open(abspath, \"wb\") as f:\n if is_compressed:\n f.write(compresslib.compress(s, return_type=\"bytes\"))\n else:\n f.write(s.encode(\"utf-8\"))\n\n prt_console(\" Complete! Elapse %.6f sec.\" % (time.clock() - st),\n verbose)\n return s\n\n def safe_dump(self, obj,\n abspath,\n indent=None,\n sort_keys=None,\n pretty=False,\n float_precision=None,\n ensure_ascii=True,\n verbose=True,\n **kwargs):\n \"\"\"A stable version of :func:`SuperJson.dump`, this method will \n silently overwrite existing file.\n\n There's a issue with :func:`SuperJson.dump`: If your program is \n interrupted while writing, you got an incomplete file, and you also \n lose the original file. So this method write json to a temporary file \n first, then rename to what you expect, and silently overwrite old one. 
\n        This way guarantees an atomic write operation.\n\n        **Note**\n\n        If the program is interrupted while writing to a file, an incomplete\n        file is left behind. With an overwriting write we neither get the new\n        file nor keep the original one. To guarantee that the write operation\n        is atomic (either it completes entirely or not at all), a better\n        approach is to write to a temporary file first, then rename it over\n        the old file once complete. Even if the program is interrupted\n        midway, only an unfinished temporary file is left behind and the\n        original file is unaffected.\n        \"\"\"\n        abspath_temp = \"%s.tmp\" % abspath\n        s = self.dump(\n            obj,\n            abspath_temp,\n            indent=indent,\n            sort_keys=sort_keys,\n            pretty=pretty,\n            float_precision=float_precision,\n            ensure_ascii=ensure_ascii,\n            overwrite=True,\n            verbose=verbose,\n            **kwargs\n        )\n        shutil.move(abspath_temp, abspath)\n        return s\n\n    def load(self, abspath,\n             object_hook=None,\n             ignore_comments=False,\n             verbose=True,\n             **kwargs):\n        \"\"\"load object from json file.\n\n        :param abspath: if ``*.json, *.js** then do regular load. if ``*.gz``,\n          then perform decompression.\n        :type abspath: str\n\n        :param ignore_comments: default ``False``. If True, then ignore comments.\n        :type ignore_comments: bool\n\n        :param verbose: default True, help-message-display trigger.\n        :type verbose: boolean\n        \"\"\"\n        prt_console(\"\\nLoad from '%s' ...\" % abspath, verbose)\n\n        is_compressed = is_compressed_json_file(abspath)\n\n        if not os.path.exists(abspath):\n            raise ValueError(\"'%s' doesn't exist.\" % abspath)\n\n        st = time.clock()\n\n        with open(abspath, \"rb\") as f:\n            if is_compressed:\n                s = compresslib.decompress(f.read(), return_type=\"str\")\n            else:\n                s = f.read().decode(\"utf-8\")\n\n        obj = self.loads(\n            s,\n            object_hook=object_hook,\n            decompress=False,\n            ignore_comments=ignore_comments,\n        )\n\n        prt_console(\"    Complete! Elapse %.6f sec.\" % (time.clock() - st),\n                    verbose)\n\n        return obj\n\n    def dump_bytes(self, obj, class_name=bytes_class_name):\n        return {\"$\" + class_name: b64encode(obj).decode()}\n\n    def load_bytes(self, dct, class_name=bytes_class_name):\n        return b64decode(dct[\"$\" + class_name].encode())\n\n    def dump_datetime(self, obj, class_name=\"datetime.datetime\"):\n        return {\"$\" + class_name: obj.isoformat()}\n\n    def load_datetime(self, dct, class_name=\"datetime.datetime\"):\n        return parse(dct[\"$\" + class_name])\n\n    def dump_date(self, obj, class_name=\"datetime.date\"):\n        return {\"$\" + class_name: str(obj)}\n\n    def load_date(self, dct, class_name=\"datetime.date\"):\n        return datetime.strptime(dct[\"$\" + class_name], \"%Y-%m-%d\").date()\n\n    def dump_set(self, obj, class_name=set_class_name):\n        return {\"$\" + class_name: [self._json_convert(item) for item in obj]}\n\n    def load_set(self, dct, class_name=set_class_name):\n        return set(dct[\"$\" + class_name])\n\n    def dump_deque(self, obj, class_name=\"collections.deque\"):\n        return {\"$\" + class_name: [self._json_convert(item) for item in obj]}\n\n    def load_deque(self, dct, class_name=\"collections.deque\"):\n        return deque(dct[\"$\" + class_name])\n\n    def dump_OrderedDict(self, obj, class_name=\"collections.OrderedDict\"):\n        return {\n            \"$\" + class_name: [\n                (key, self._json_convert(value)) for key, value in iteritems(obj)\n            ]\n        }\n\n    def load_OrderedDict(self, dct, class_name=\"collections.OrderedDict\"):\n        return OrderedDict(dct[\"$\" + class_name])\n\n    def dump_nparray(self, obj, class_name=\"numpy.ndarray\"):\n        return {\"$\" + class_name: self._json_convert(obj.tolist())}\n\n    def load_nparray(self, dct, class_name=\"numpy.ndarray\"):\n        return np.array(dct[\"$\" + class_name])\n\n\nsuperjson = SuperJson()\n\n\nif __name__ == \"__main__\":\n    from pprint import pprint\n\n    def test_common():\n        data = {\n            \"int\": 1,\n            \"str\": \"Hello\",\n            \"bytes\": \"Hello\".encode(\"utf-8\"),\n            \"date\": 
date.today(),\n \"datetime\": datetime.now(),\n \"set\": set([\n datetime(2000, 1, 1),\n datetime(2000, 1, 2),\n ]),\n \"deque\": deque([\n deque([1, 2]),\n deque([3, 4]),\n ]),\n \"ordereddict\": OrderedDict([\n (\"b\", OrderedDict([(\"b\", 1), (\"a\", 2)])),\n (\"a\", OrderedDict([(\"b\", 1), (\"a\", 2)])),\n ]),\n }\n s = superjson.dumps(data, indent=4)\n# print(s)\n data1 = superjson.loads(s)\n# pprint(data1)\n assert data == data1\n\n s = superjson.dumps(data, compress=True)\n# print(s)\n data1 = superjson.loads(s, decompress=True)\n# pprint(data1)\n assert data == data1\n\n test_common()\n\n def test_numpy():\n data = {\n \"ndarray_int\": np.array([[1, 2], [3, 4]]),\n \"ndarray_float\": np.array([[1.1, 2.2], [3.3, 4.4]]),\n \"ndarray_str\": np.array([[\"a\", \"b\"], [\"c\", \"d\"]]),\n \"ndarray_datetime\": np.array(\n [datetime(2000, 1, 1), datetime(2010, 1, 1)]\n ),\n }\n s = superjson.dumps(data, indent=4)\n# print(s)\n data1 = superjson.loads(s)\n# pprint(data1)\n\n for key in data:\n assert np.array_equal(data[key], data1[key])\n\n test_numpy()\n\n def test_pandas():\n \"\"\"\n\n .. note:: Not supported yet!\n \"\"\"\n data = {\n \"series\": pd.Series([(\"a\", datetime(2000, 1, 1)),\n (\"b\", datetime(2010, 1, 1))]),\n }\n# s = superjson.dumps(data, indent=4)\n# print(s)\n# data1 = superjson.loads(s)\n# pprint(data1)\n\n# test_pandas()\n\n def test_extend():\n \"\"\"Test for extend SuperJson for arbitrary custom types.\n \"\"\"\n from sfm.nameddict import Base as Address\n\n class User(object):\n\n def __init__(self, id=None, name=None):\n self.id = id\n self.name = name\n\n def __repr__(self):\n return \"User(id=%r, name=%r)\" % (self.id, self.name)\n\n def __eq__(self, other):\n return self.id == other.id and self.name == other.name\n\n Address_class_name = \"sfm.nameddict.Base\"\n assert get_class_name(Address()) == \"sfm.nameddict.Base\"\n\n User_class_name = \"__main__.User\"\n assert get_class_name(User()) == \"__main__.User\"\n\n class MySuperJson(SuperJson):\n\n def dump_User(self, obj, class_name=\"__main__.User\"):\n key = \"$\" + class_name\n return {key: {\"id\": obj.id, \"name\": obj.name}}\n\n def load_User(self, dct, class_name=\"__main__.User\"):\n key = \"$\" + class_name\n return User(**dct[key])\n\n def dump_Address(self, obj, class_name=\"sfm.nameddict.Base\"):\n key = \"$\" + class_name\n return {key: {\"street\": obj.street,\n \"city\": obj.city,\n \"state\": obj.state,\n \"zipcode\": obj.zipcode}}\n\n def load_Address(self, dct, class_name=\"sfm.nameddict.Base\"):\n key = \"$\" + class_name\n return Address(**dct[key])\n\n js = MySuperJson()\n data = {\n \"int\": 1,\n \"str\": \"Hello\",\n \"user\": User(id=1, name=\"Alice\"),\n \"address\": Address(\n street=\"123 Main St\", city=\"New York\", state=\"NY\", zipcode=\"10001\",\n ),\n }\n s = js.dumps(data, indent=4)\n# print(s)\n\n data1 = js.loads(s)\n# print(data1)\n\n assert data == data1\n\n test_extend()\n","sub_path":"cazipcode/pkg/superjson/_superjson.py","file_name":"_superjson.py","file_ext":"py","file_size_in_byte":20033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"140912209","text":"lens = {1: 4, 2: 4, 3: 2, 4: 2, 99: 1, 5: 3, 6: 3, 7: 4, 8: 4, 9: 2}\n\n\nclass IntcodeController:\n\n def __init__(self, intcode, buffer=[], pointer=0, extended={}, get_input=None):\n self.intcode = intcode\n self.virgincode = [] + intcode\n self.buffer = buffer\n self.virginbuffer = [] + buffer\n self.ib = 0\n self.pointer = pointer\n self.output = []\n 
self.alive = True\n self.opcode = []\n self.lastopcode = 0\n self.relbase = 0\n self.extended = extended\n self.tracking = False\n self.get_input = get_input\n\n def dump(self):\n return self.intcode.copy(), self.buffer.copy(), self.ib, self.pointer, self.output.copy(), \\\n self.alive, self.opcode.copy(), self.lastopcode, self.relbase, self.extended.copy()\n\n def restore_from_dump(self, dump):\n self.intcode = dump[0]\n self.buffer = dump[1]\n self.ib = dump[2]\n self.pointer = dump[3]\n self.output = dump[4]\n self.alive = dump[5]\n self.opcode = dump[6]\n self.lastopcode = dump[7]\n self.relbase = dump[8]\n self.extended = dump[9]\n\n def reset(self):\n self.__init__(self.virgincode, self.virginbuffer, pointer=0)\n\n def print(self):\n print('pointer=', self.pointer, 'opcode=', self.intcode[self.pointer],\n 'relbase=', self.relbase, self.extended)\n\n def parsecurrentopcode(self):\n n = self.intcode[self.pointer]\n self.opcode = [n % 100, (n // 100) % 10, (n // 1000) % 10,\n (n // 10000) % 10]\n\n def read(self, n):\n if n < len(self.intcode):\n return self.intcode[n]\n else:\n return self.extended[n] if n in self.extended else 0\n\n def write(self, v):\n offset = lens[self.opcode[0]] - 1\n n = self.parameter(offset, mode=1) + (self.relbase if self.opcode[offset] == 2 else 0)\n if n < len(self.intcode):\n self.intcode[n] = v\n else:\n self.extended[n] = v\n\n def parameter(self, offset, mode=-1):\n mode = self.opcode[offset] if mode == -1 else mode\n if mode == 0:\n return self.read(self.intcode[self.pointer+offset])\n if mode == 1:\n return self.intcode[self.pointer+offset]\n if mode == 2:\n return self.read(self.intcode[self.pointer+offset]+self.relbase)\n\n def read_input(self):\n if not self.get_input:\n self.ib += 1\n return self.buffer[self.ib-1]\n else:\n return self.get_input()\n\n def onetick(self):\n self.parsecurrentopcode()\n next_pointer = self.pointer + lens[self.opcode[0]]\n self.lastopcode = self.opcode[0]\n if self.tracking:\n self.print()\n if self.opcode[0] == 1:\n self.write(self.parameter(1) + self.parameter(2))\n if self.opcode[0] == 2:\n self.write(self.parameter(1) * self.parameter(2))\n if self.opcode[0] == 3:\n self.write(self.read_input())\n if self.opcode[0] == 4:\n self.output.append(self.parameter(1))\n if self.opcode[0] == 5:\n if self.parameter(1) != 0:\n next_pointer = self.parameter(2)\n if self.opcode[0] == 6:\n if self.parameter(1) == 0:\n next_pointer = self.parameter(2)\n if self.opcode[0] == 7:\n self.write(1 if self.parameter(1) < self.parameter(2) else 0)\n if self.opcode[0] == 8:\n self.write(1 if self.parameter(1) == self.parameter(2) else 0)\n if self.opcode[0] == 9:\n self.relbase += self.parameter(1)\n if self.opcode[0] == 99:\n self.alive = False\n return next_pointer\n\n def run(self):\n while self.alive:\n self.pointer = self.onetick()\n\n def run2output(self):\n self.lastopcode = 0\n while (self.lastopcode != 4) and self.alive:\n self.pointer = self.onetick()\n return self.alive\n\n def next_triple(self):\n self.run2output()\n self.run2output()\n self.run2output()\n if self.alive:\n return self.output[-3:]\n else:\n return None\n","sub_path":"intcomp.py","file_name":"intcomp.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"510198498","text":"from tibanna_4dn.lambdas.check_task import handler\nfrom tests.tibanna.pony.conftest import valid_env\nimport pytest\nimport boto3\n\n\n@pytest.fixture()\ndef check_task_input():\n return 
{\"config\": {\"log_bucket\": \"tibanna-output\"},\n \"jobid\": \"test_job\",\n \"push_error_to_end\": True\n }\n\n\n@pytest.fixture()\ndef s3(check_task_input):\n bucket_name = check_task_input['config']['log_bucket']\n return boto3.resource('s3').Bucket(bucket_name)\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_check_task_awsem_fails_if_job_error_found(check_task_input, s3):\n jobid = 'hahaha'\n check_task_input_modified = check_task_input\n check_task_input_modified['jobid'] = jobid\n job_started = \"%s.job_started\" % jobid\n s3.put_object(Body=b'', Key=job_started)\n job_error = \"%s.error\" % jobid\n s3.put_object(Body=b'', Key=job_error)\n res = handler(check_task_input_modified, '')\n assert ('error' in res)\n s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})\n s3.delete_objects(Delete={'Objects': [{'Key': job_error}]})\n","sub_path":"tests/tibanna/pony/check_task/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"474473446","text":"# IEEE 488.2 Block Data Format\n\n# transfer size requirement:\n# len(str(data)) <= 9\nMAX_SUPPORTED_SIZE = 10**9 - 1\n\ndef to_block_data_format(data):\n assert type(data) == bytes, 'data must be bytes!'\n size = len(data)\n assert size < MAX_SUPPORTED_SIZE, f'Maximum supported data size is {MAX_SUPPORTED_SIZE}. len(data) => {size}.'\n size_len = len(str(size))\n header = f'#{size_len}{size}'.encode();\n return header + data\n","sub_path":"src/server/test_automation/commands/block_data.py","file_name":"block_data.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"74408502","text":"\"\"\"\nTest if a binary tree is height-balanced\n\nA binary tree is said to be height-balanced if for each node in the tree, \nthe difference in the height of its left and right subtrees is at most one.\n\nWrite a program that takes as input the root of a binary tree and checks whether the tree is height-balanced\n\nVariant 1: Write a program that returns the size of the largest subtree that is complete.\n\nVariant 2: Define a node in a binary tree to be k-balanced if the difference in the number of nodes in left and right subtrees \nis no more than k. Design an algorithm that takes as input a binary tree and positive integer k and returns a node in the binary\ntree such that hte node is not k-balanced but all of its descendants are k-balanced. 
\n\nFor the sake of simplicity - will link to leetcode example for easier testing (https://leetcode.com/problems/balanced-binary-tree/)\n\"\"\"\n\nclass TreeNode:\n    def __init__(self, val = 0, left = None, right = None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass MainSolution_9_1:\n\n    \"\"\"\n    Bottom Up Approaches\n    \"\"\"\n    def dfs_helper(self, root):\n        \"\"\"\n        Bottom-up Recursion: \n        Time Complexity: O(N) - each node gets visited once through recursion\n        Space Complexity: O(N) - if tree is not balanced/is skewed - then recursion stack will reach number of nodes, N\n        \"\"\"\n        if (root is None or self.is_tree_balanced == False):\n            return (-1)\n\n        left_height = self.dfs_helper(root.left)\n        right_height = self.dfs_helper(root.right)\n\n        curr_height = max(left_height, right_height) + 1\n        if (abs(left_height - right_height) > 1):\n            self.is_tree_balanced = False\n        return(curr_height)\n\n    def is_balanced(self, root: TreeNode) -> bool:\n        self.is_tree_balanced = True\n        self.dfs_helper(root)\n        return(self.is_tree_balanced)\n\n    def dfs_helper_alternative(self, root):\n        \"\"\"\n        Alternative Way\n        \"\"\"\n        if (root is None): return (True, -1)\n\n        is_left_tree_balanced, left_height = self.dfs_helper_alternative(root.left) # LEFT\n        is_right_tree_balanced, right_height = self.dfs_helper_alternative(root.right) # RIGHT\n\n        if (is_right_tree_balanced == False or is_left_tree_balanced == False\n                or abs(left_height - right_height) > 1): # VISITED\n            return(False, max(left_height, right_height) + 1)\n        \n        return(True, max(left_height, right_height) + 1)\n\n    def is_balanced_alt_style(self, root: TreeNode) -> bool:\n        \"\"\"\n        Sends back a tuple of values - specifically (is_balanced, height)\n        \"\"\"\n        return self.dfs_helper_alternative(root)[0]\n        \n    \"\"\"\n    Top Down Approach\n    \"\"\"\n\n    def calculate_height(self, root: TreeNode):\n        if (root is None):\n            return (-1)\n        return (max(self.calculate_height(root.left), self.calculate_height(root.right)) + 1)\n    \n    def is_balanced_top_down(self, root: TreeNode):\n        \"\"\"\n        Time Complexity: O(N LOG N)\n        For the root node, it will calculate the height for the root by going down the left and then the right side.\n        Each side will be LOG(N) - height of the tree. So times 2. It will be 2*LOG(N).\n        However we call height for each node in the tree and there are N nodes. So it is N * 2 * LOG(N).\n\n        For a skewed tree this is bounded by O(N), because recursion stops as soon as the heights of a node's children differ by more than 1.\n\n        Space Complexity: O(N) - worst case the tree is skewed and it will hold all nodes in the tree in its stack\n        \"\"\"\n        if (root is None):\n            return True\n        \n        comparison = abs(self.calculate_height(root.left) - self.calculate_height(root.right)) <= 1\n        ans = comparison and self.is_balanced_top_down(root.left) and self.is_balanced_top_down(root.right)\n        return(ans)\n\n\n","sub_path":"Chapter 9 - Binary Trees/9_1 Test if a binary tree is height balanced.py","file_name":"9_1 Test if a binary tree is height balanced.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"281485067","text":"import os\n\nimport click\nimport hcl\nfrom popper import utils as pu\nfrom popper.cli import pass_context\nfrom popper.gha import Workflow\n\n\n@click.option(\n    '--wfile',\n    help=(\n        'File containing the definition of the workflow. 
'\n '[default: ./github/main.workflow OR ./main.workflow]'\n ),\n required=False,\n default=None\n)\n@click.option(\n '--recursive',\n help='Generate .dot file for any.workflow file '\n 'found recursively from current path.',\n required=False,\n is_flag=True\n)\n@click.command('dot', short_help='Generate a graph in the .dot format')\n@pass_context\ndef cli(ctx, wfile, recursive):\n \"\"\"\n Creates a graph in the .dot format representing the workflow\n \"\"\"\n wfile_list = list()\n if recursive:\n wfile_list = pu.find_recursive_wfile()\n else:\n wfile_list.append(pu.find_default_wfile(wfile))\n\n for wfile in wfile_list:\n pipeline = Workflow(wfile, False, False, False, False, False, False)\n\n graph = list()\n\n wf = pipeline.wf\n workflow_name = list(wf['workflow'].keys())[0]\n\n action = wf['resolves'][0]\n last_action = get_first_action(wf)\n\n for act in last_action:\n graph.append(\"\\t{} -> {};\\n\".format(\n workflow_name.replace(' ', '_').replace('-', '_'),\n act.replace(' ', '_').replace('-', '_')))\n\n parent_action = cur_action = action\n graph = add(parent_action, cur_action, wf['action'], graph)\n graph = ''.join(list(set(graph)))\n graph = \"digraph G {\\n\" + graph + \"}\\n\"\n pu.info(graph)\n\n\n# Recursively go through \"needs\" and add corresponding actions to graph\ndef add(parent_action, cur_action, actions, graph):\n\n if 'needs' in actions[cur_action]:\n action_list = actions[cur_action]['needs']\n for act in action_list:\n graph = add(cur_action, act, actions, graph)\n\n # Adds edges to the graph\n if cur_action != parent_action:\n graph.append(\"\\t{} -> {};\\n\".format(\n cur_action.replace(' ', '_').replace('-', '_'),\n parent_action.replace(' ', '_').replace('-', '_')))\n\n return graph\n\n\ndef get_first_action(wf):\n actions = list()\n for act in wf['action']:\n if act in wf['action']:\n if 'needs' not in wf['action'][act]:\n actions.append(act)\n return actions\n","sub_path":"cli/popper/commands/cmd_dot.py","file_name":"cmd_dot.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"130138227","text":"#!/home/cdays/pyweb3/bin/python3\nimport json\nimport web3\nimport time\nimport re\n\nfrom web3 import Web3, HTTPProvider, TestRPCProvider\nfrom solc import compile_source\nfrom web3.contract import ConciseContract\n\ndef wait_for_receipt(w3, tx_hash, poll_interval):\n while True:\n tx_receipt = w3.eth.getTransactionReceipt(tx_hash)\n if tx_receipt:\n return tx_receipt\n time.sleep(poll_interval)\n\nprint(\"Content-Type: application/json\")\nprint()\n\n# web3.py instance\nw3 = Web3(HTTPProvider('http://localhost:8501'))\nw3.middleware_stack.inject(web3.middleware.geth_poa_middleware, layer=0)\n\nabi = [{'constant': False, 'stateMutability': 'nonpayable', 'name': 'setGreeting', 'payable': False, 'type': 'function', 'inputs': [{'type': 'string', 'name': '_greeting'}], 'outputs': []}, {'constant': True, 'stateMutability': 'view', 'name': 'greet', 'payable': False, 'type': 'function', 'inputs': [], 'outputs': [{'type': 'string', 'name': ''}]}, {'constant': True, 'stateMutability': 'view', 'name': 'greeting', 'payable': False, 'type': 'function', 'inputs': [], 'outputs': [{'type': 'string', 'name': ''}]}, {'stateMutability': 'nonpayable', 'type': 'constructor', 'inputs': [], 'payable': False}]\n\ncontract_address= '0x2D2403dc13D5c52dE1dD055624762dadc5c10397'\ncontract_instance = w3.eth.contract(address=contract_address, 
abi=abi,ContractFactoryClass=ConciseContract)\n#tx_hash= contract_instance.setGreeting('Lulu', transact={'from': w3.eth.accounts[0]})\n#wait_for_receipt(w3, tx_hash, 1)\ngetre = re.compile('^greet')\noutput = {}\nfor func in abi:\n try:\n if getre.match(func[\"name\"]):\n output[func[\"name\"]] = eval(\"contract_instance.\"+func[\"name\"]+\"()\")\n except:\n pass\nprint(json.dumps(output))\n\n","sub_path":"cgi-bin/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"113374478","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nsortphotos.py\n\nCreated on 3/2/2013\nCopyright (c) S. Andrew Ning. All rights reserved.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import with_statement\nimport subprocess\nimport os\nimport sys\nimport shutil\ntry:\n import json\nexcept:\n import simplejson as json\nimport filecmp\nfrom datetime import datetime, timedelta\nimport re\nimport locale\nimport exifread\nimport reverse_geocode\n\n# Setting locale to the 'local' value\nlocale.setlocale(locale.LC_ALL, '')\n\nexiftool_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Image-ExifTool', 'exiftool')\n\n\n# -------- convenience methods -------------\n\ndef parse_date_exif(date_string):\n \"\"\"\n extract date info from EXIF data\n YYYY:MM:DD HH:MM:SS\n or YYYY:MM:DD HH:MM:SS+HH:MM\n or YYYY:MM:DD HH:MM:SS-HH:MM\n or YYYY:MM:DD HH:MM:SSZ\n \"\"\"\n\n # split into date and time\n elements = str(date_string).strip().split() # ['YYYY:MM:DD', 'HH:MM:SS']\n\n if len(elements) < 1:\n return None\n\n # parse year, month, day\n date_entries = elements[0].split(':') # ['YYYY', 'MM', 'DD']\n\n # check if three entries, nonzero data, and no decimal (which occurs for timestamps with only time but no date)\n if len(date_entries) == 3 and date_entries[0] > '0000' and '.' 
not in ''.join(date_entries):\n year = int(date_entries[0])\n month = int(date_entries[1])\n day = int(date_entries[2])\n else:\n return None\n\n # parse hour, min, second\n time_zone_adjust = False\n hour = 12 # defaulting to noon if no time data provided\n minute = 0\n second = 0\n\n if len(elements) > 1:\n time_entries = re.split(r'(\\+|-|Z)', elements[1]) # ['HH:MM:SS', '+', 'HH:MM']\n time = time_entries[0].split(':') # ['HH', 'MM', 'SS']\n\n if len(time) == 3:\n hour = int(time[0])\n minute = int(time[1])\n second = int(time[2].split('.')[0])\n elif len(time) == 2:\n hour = int(time[0])\n minute = int(time[1])\n\n # adjust for time-zone if needed\n if len(time_entries) > 2:\n time_zone = time_entries[2].split(':') # ['HH', 'MM']\n\n if len(time_zone) == 2:\n time_zone_hour = int(time_zone[0])\n time_zone_min = int(time_zone[1])\n\n # check if + or -\n if time_entries[1] == '+':\n time_zone_hour *= -1\n\n dateadd = timedelta(hours=time_zone_hour, minutes=time_zone_min)\n time_zone_adjust = True\n\n\n # form date object\n try:\n date = datetime(year, month, day, hour, minute, second)\n except ValueError:\n return None # errors in time format\n\n # try converting it (some \"valid\" dates are way before 1900 and cannot be parsed by strftime later)\n try:\n date.strftime('%Y/%m-%b') # any format with year, month, day, would work here.\n except ValueError:\n return None # errors in time format\n\n # adjust for time zone if necessary\n if time_zone_adjust:\n date += dateadd\n\n return date\n\n\n\ndef get_oldest_timestamp(data, additional_groups_to_ignore, additional_tags_to_ignore, print_all_tags=False):\n \"\"\"data as dictionary from json. Should contain only time stamps except SourceFile\"\"\"\n\n # save only the oldest date\n date_available = False\n oldest_date = datetime.now()\n oldest_keys = []\n\n # save src file\n src_file = data['SourceFile']\n\n # set up tags to ignore\n ignore_groups = ['ICC_Profile'] + additional_groups_to_ignore\n ignore_tags = ['SourceFile', 'XMP:HistoryWhen'] + additional_tags_to_ignore\n\n\n if print_all_tags:\n print('All relevant tags:')\n\n # run through all keys\n for key in data.keys():\n\n # check if this key needs to be ignored, or is in the set of tags that must be used\n if (key not in ignore_tags) and (key.split(':')[0] not in ignore_groups) and 'GPS' not in key:\n\n date = data[key]\n\n if print_all_tags:\n print(str(key) + ', ' + str(date))\n\n # (rare) check if multiple dates returned in a list, take the first one which is the oldest\n if isinstance(date, list):\n date = date[0]\n\n try:\n exifdate = parse_date_exif(date) # check for poorly-formed exif data, but allow continuation\n except Exception:\n exifdate = None\n\n if exifdate and exifdate < oldest_date:\n date_available = True\n oldest_date = exifdate\n oldest_keys = [key]\n\n elif exifdate and exifdate == oldest_date:\n oldest_keys.append(key)\n\n if not date_available:\n oldest_date = None\n\n if print_all_tags:\n print()\n\n return src_file, oldest_date, oldest_keys\n\n\n\ndef check_for_early_morning_photos(date, day_begins):\n \"\"\"check for early hour photos to be grouped with previous day\"\"\"\n\n if date.hour < day_begins:\n print('moving this photo to the previous day for classification purposes (day_begins=' + str(day_begins) + ')')\n date = date - timedelta(hours=date.hour+1) # push it to the day before for classification purposes\n\n return date\n\n# read tags using exifread\n### helper functions, for geocoordinates\n\ndef _convert_to_degrees(value):\n \"\"\"\n Helper 
function to convert the GPS coordinates stored in the EXIF to degrees in float format\n :param value:\n :type value: exifread.utils.Ratio\n :rtype: float\n \"\"\"\n d = float(value.values[0].num) / float(value.values[0].den)\n m = float(value.values[1].num) / float(value.values[1].den)\n s = float(value.values[2].num) / float(value.values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)\n \ndef get_exif_location(exif_data):\n \"\"\"\n Returns the latitude and longitude, if available, from the provided exif_data (as returned by exifread.process_file)\n \"\"\"\n lat = None\n lon = None\n\n gps_latitude = _get_if_exist(exif_data, 'GPS GPSLatitude')\n gps_latitude_ref = _get_if_exist(exif_data, 'GPS GPSLatitudeRef')\n gps_longitude = _get_if_exist(exif_data, 'GPS GPSLongitude')\n gps_longitude_ref = _get_if_exist(exif_data, 'GPS GPSLongitudeRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = _convert_to_degrees(gps_latitude)\n if gps_latitude_ref.values[0] != 'N':\n lat = 0 - lat\n\n lon = _convert_to_degrees(gps_longitude)\n if gps_longitude_ref.values[0] != 'E':\n lon = 0 - lon\n\n return lat, lon\n\ndef _get_if_exist(data, key):\n if key in data:\n return data[key]\n\n return None\n\n###\n\n# Open image file for reading (binary mode)\n\ndef get_exif(file_name):\n\n f = open(file_name, 'rb')\n\n tagjson = {'SourceFile': file_name}\n\n tags = exifread.process_file(f, details=True)\n f.close()\n\n lat, lon = get_exif_location(tags)\n for dt_tag in tags:\n dt_value = '%s' % tags[dt_tag]\n tagjson[dt_tag.replace(\" \",\":\")] = dt_value # tag group separator\n\n tagjson['EXIF:GpsLat'] = lat\n tagjson['EXIF:GpsLon'] = lon\n\n if lat and lon:\n location = reverse_geocode.get([lat,lon])\n tagjson['Location:country_code'] = location[\"country_code\"]\n tagjson['Location:city'] = location[\"city\"]\n tagjson['Location:country'] = location[\"country\"]\n\n return tagjson\n\ndef get_exif_folder(folder_name):\n\n # scan through all folders and subfolders\n tagjson = []\n\n for root, subdirs, files in os.walk(folder_name):\n for filename in files:\n file_path = os.path.join(root, filename)\n tagjson += [get_exif(file_path)]\n\n return tagjson\n\n# ---------------------------------------\n\n\ndef sortPhotos(src_dir, dest_dir, sort_format, rename_format, recursive=False,\n copy_files=False, test=False, remove_duplicates=True, day_begins=0,\n additional_groups_to_ignore=['File'], additional_tags_to_ignore=[],\n use_only_groups=None, use_only_tags=None, verbose=True, keep_filename=False):\n \"\"\"\n This function sorts files based on common usage scenarios for sortphotos.py, reading metadata with exifread\n\n Parameters\n ---------------\n src_dir : str\n directory containing files you want to process\n dest_dir : str\n directory where you want to move/copy the files to\n sort_format : str\n format code for how you want your photos sorted\n (https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior)\n rename_format : str\n format code for how you want your files renamed\n (https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior)\n None to not rename file\n recursive : bool\n True if you want src_dir to be searched recursively for files (False to search only in top-level of src_dir)\n copy_files : bool\n True if you want files to be copied over from src_dir to dest_dir rather than moved\n test : bool\n True if you just want to simulate how the files will be moved without actually doing any moving/copying\n remove_duplicates : 
bool\n True to remove files that have the same name and identical content (compared with filecmp)\n keep_filename : bool\n True to append the original filename in case of duplicates instead of just an increasing number\n day_begins : int\n what hour of the day you want the day to begin (only for classification purposes). Defaults to 0 (midnight).\n Can be used to group early morning photos with the previous day. Must be a number between 0-23\n additional_groups_to_ignore : list(str)\n tag groups that will be ignored when searching for file data. By default File is ignored\n additional_tags_to_ignore : list(str)\n specific tags that will be ignored when searching for file data.\n use_only_groups : list(str)\n a list of groups that will be exclusively searched across for date info\n use_only_tags : list(str)\n a list of tags that will be exclusively searched across for date info\n verbose : bool\n True if you want to see details of file processing\n\n \"\"\"\n\n # some error checking\n if not os.path.exists(src_dir):\n raise Exception('Source directory does not exist')\n\n args = [src_dir]\n\n metadata = get_exif_folder(args[0])\n\n # setup output to screen\n num_files = len(metadata)\n print()\n\n if test:\n test_file_dict = {}\n\n # parse output extracting oldest relevant date\n for idx, data in enumerate(metadata):\n\n # extract timestamp date for photo\n src_file, date, keys = get_oldest_timestamp(data, additional_groups_to_ignore, additional_tags_to_ignore)\n\n if verbose:\n # write out which photo we are at\n ending = ']'\n if test:\n ending = '] (TEST - no files are being moved/copied)'\n print('[' + str(idx+1) + '/' + str(num_files) + ending)\n print('Source: ' + src_file)\n else:\n # progress bar\n numdots = int(20.0*(idx+1)/num_files)\n sys.stdout.write('\\r')\n sys.stdout.write('[%-20s] %d of %d ' % ('='*numdots, idx+1, num_files))\n sys.stdout.flush()\n\n # check if no valid date found\n if not date:\n if verbose:\n print('No valid dates were found using the specified tags. File will remain where it is.')\n print()\n # sys.stdout.flush()\n continue\n\n # ignore hidden files\n if os.path.basename(src_file).startswith('.'):\n print('hidden file. 
will be skipped')\n print()\n continue\n\n if verbose:\n print('Date/Time: ' + str(date))\n print('Corresponding Tags: ' + ', '.join(keys))\n\n # early morning photos can be grouped with previous day (depending on user setting)\n date = check_for_early_morning_photos(date, day_begins)\n\n\n # create folder structure\n\n country = data.get(\"Location:country\",\"\")\n city = data.get(\"Location:city\", '')\n country_code = data.get(\"Location:country_code\", '')\n\n sort_format_geo = sort_format.replace('%country',country)\n sort_format_geo = sort_format_geo.replace('%city',city)\n sort_format_geo = sort_format_geo.replace('%country_code',country_code)\n dir_structure = date.strftime(sort_format_geo)\n dirs = dir_structure.split('/')\n dest_file = dest_dir\n for thedir in dirs:\n dest_file = os.path.join(dest_file, thedir)\n if not test and not os.path.exists(dest_file):\n os.makedirs(dest_file)\n\n # rename file if necessary\n filename = os.path.basename(src_file)\n\n if rename_format is not None and date is not None:\n _, ext = os.path.splitext(filename)\n filename = date.strftime(rename_format) + ext.lower()\n\n # setup destination file\n dest_file = os.path.join(dest_file, filename)\n root, ext = os.path.splitext(dest_file)\n\n if verbose:\n name = 'Destination '\n if copy_files:\n name += '(copy): '\n else:\n name += '(move): '\n print(name + dest_file)\n\n\n # check for collisions\n append = 1\n fileIsIdentical = False\n\n while True:\n\n if (not test and os.path.isfile(dest_file)) or (test and dest_file in test_file_dict.keys()): # check for existing name\n if test:\n dest_compare = test_file_dict[dest_file]\n else:\n dest_compare = dest_file\n if remove_duplicates and filecmp.cmp(src_file, dest_compare): # check for identical files\n fileIsIdentical = True\n if verbose:\n print('Identical file already exists. Duplicate will be ignored.\\n')\n break\n\n else: # name is same, but file is different\n if keep_filename:\n orig_filename = os.path.splitext(os.path.basename(src_file))[0]\n dest_file = root + '_' + orig_filename + '_' + str(append) + ext\n else:\n dest_file = root + '_' + str(append) + ext\n append += 1\n if verbose:\n print('Same name already exists...renaming to: ' + dest_file)\n\n else:\n break\n\n\n # finally move or copy the file\n if test:\n test_file_dict[dest_file] = src_file\n\n else:\n\n if fileIsIdentical:\n continue # ignore identical files\n else:\n if copy_files:\n shutil.copy2(src_file, dest_file)\n else:\n shutil.move(src_file, dest_file)\n\n\n\n if verbose:\n print()\n # sys.stdout.flush()\n\n\n if not verbose:\n print()\n\n\ndef main():\n import argparse\n\n # setup command line parsing\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n description='Sort files (primarily photos and videos) into folders by date\\nusing EXIF and other metadata')\n parser.add_argument('src_dir', type=str, help='source directory')\n parser.add_argument('dest_dir', type=str, help='destination directory')\n parser.add_argument('-r', '--recursive', action='store_true', help='search src_dir recursively')\n parser.add_argument('-c', '--copy', action='store_true', help='copy files instead of move')\n parser.add_argument('-s', '--silent', action='store_true', help='don\\'t display parsing details.')\n parser.add_argument('-t', '--test', action='store_true', help='run a test. 
files will not be moved/copied \\n\\\ninstead you will just see a list of what would happen')\n parser.add_argument('--sort', type=str, default='%Y/%m-%b-%country-%city',\n help=\"choose destination folder structure using datetime format \\n\\\n https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior. \\n\\\n Use forward slashes / to indicate subdirectory(ies) (independent of your OS convention). \\n\\\n The default is '%%Y/%%m-%%b-%%country-%%city', which separates by year then month \\n\\\n with both the month number and name, plus the country and city. \\n\\\n Use %%city, %%country and %%country_code for location.\")\n parser.add_argument('--rename', type=str, default=None,\n help=\"rename file using format codes \\n\\\n https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior. \\n\\\n default is None which just uses original filename\")\n parser.add_argument('--keep-filename', action='store_true',\n help='In case of duplicated output filenames, an increasing number and the original file name will be appended',\n default=False)\n parser.add_argument('--keep-duplicates', action='store_true',\n help='If file is a duplicate keep it anyway (after renaming).')\n parser.add_argument('--day-begins', type=int, default=0, help='hour of day that new day begins (0-23), \\n\\\n defaults to 0 which corresponds to midnight. Useful for grouping pictures with previous day.')\n parser.add_argument('--ignore-groups', type=str, nargs='+',\n default=[],\n help='a list of tag groups that will be ignored for date information.\\n\\\n list of groups and tags here: http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/\\n\\\n by default the group \\'File\\' is ignored which contains file timestamp data')\n parser.add_argument('--ignore-tags', type=str, nargs='+',\n default=[],\n help='a list of tags that will be ignored for date information.\\n\\\n list of groups and tags here: http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/\\n\\\n the full tag name needs to be included (e.g., EXIF:CreateDate)')\n parser.add_argument('--use-only-groups', type=str, nargs='+',\n default=None,\n help='specify a restricted set of groups to search for date information\\n\\\n e.g., EXIF')\n parser.add_argument('--use-only-tags', type=str, nargs='+',\n default=None,\n help='specify a restricted set of tags to search for date information\\n\\\n e.g., EXIF:CreateDate')\n\n # parse command line arguments\n args = parser.parse_args()\n\n sortPhotos(args.src_dir, args.dest_dir, args.sort, args.rename, args.recursive,\n args.copy, args.test, not args.keep_duplicates, args.day_begins,\n args.ignore_groups, args.ignore_tags, args.use_only_groups,\n args.use_only_tags, not args.silent, args.keep_filename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/sortphotos.py","file_name":"sortphotos.py","file_ext":"py","file_size_in_byte":19033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"536478971","text":"import matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy.signal import find_peaks\nimport time\n\n# data = np.loadtxt('Data/2D/2D plot-0.68.dat')\n# data1 = np.loadtxt('Data/2D/2D plot-{0.69}.dat')\n# data2 = np.loadtxt('Data/2D/2D plot-{0.70}.dat')\n# data3 = np.loadtxt('Data/2D/2D plot-{0.71}.dat')\n# x = data[0]\n# y = data[1]/20\n# x1 = data1[0]\n# y1 = data1[1]\n# x2 = data2[0]\n# y2 = data2[1]\n# x3 = data3[0]\n# y3 = data3[1]\n# # x4 = data4[0]\n# # y4 = data4[1]\n# for i in range(len(y1)):\n# y1[i] = 
y1[i] + 0.5\n# for i in range(len(y2)):\n# y2[i] = y2[i] + 1\n# for i in range(len(y3)):\n# y3[i] = y3[i] + 1.5\n# # for i in range(len(y4)):\n# # y4[i] = y4[i] + 1.5\n# plt.plot(x,y)\n# plt.plot(x1,y1)\n# plt.plot(x2,y2)\n# plt.plot(x3,y3)\n# # plt.plot(x4,y4)\n# plt.xlabel('Radius (r)')\n# plt.ylabel('Pair Distribution Function g(r)')\n# plt.legend([\"Area Fraction = 0.68\", \"Area Fraction = 0.69\", \"Area Fraction = 0.70\", \"Area Fraction = 0.71\"], loc='upper right')\n# # plt.savefig(\"3D Mixed Plot(3).jpeg\", dpi = 250)\n# plt.show()\ndef d3():\n N3d = [296,320,344,360,372,384,400]\n y3d = []\n x3d = []\n for i in N3d:\n x3d.append((i*math.pi)/(6*(8**3)))\n af = 0.66\n for i in range(len(N3d)):\n data = np.loadtxt(f\"Data/3D/3D plot-{N3d[i]}.dat\")\n N2d = 100\n diameter = 1\n afL = math.sqrt(N2d*math.pi*(diameter**2)/(4*af))\n L = round(afL,3)\n print(L)\n avg_no_density = N2d/(L**2)\n print(avg_no_density)\n density1d = 0.75\n density3d = N3d[i]/(8**3)\n xvals = data[0]\n yvals = data[1]\n\n troughs,_ = find_peaks(-yvals,height=[-20,-0.001])\n\n # plt.plot(xvals,yvals)\n # plt.plot(xvals[troughs],yvals[troughs],'o')\n # plt.title(\"Area Fraction - 0.71\")\n # plt.savefig('Data/2D/2D_Mixed_Plot.png', dpi =250)\n # plt.show()\n for i in range(len(xvals)):\n yvals[i] = (yvals[i])* (xvals[i]**2)\n # print(xvals[troughs[0]])\n # print(yvals[troughs[0]])\n # ytrough = troughs[0]\n\n cn = np.trapz(yvals[0:32],x=xvals[0:32])\n final = cn*4*math.pi*density3d\n y3d.append(final)\n\n print(y3d)\n plt.plot(x3d,y3d,marker= 'x')\n plt.xlabel('Packing Fraction')\n plt.ylabel('Coordination Number')\n # plt.errorbar(x3d,y3d,yerr=0.1)\n plt.show()\n\ndef d2():\n n2d = [0.66,0.67,0.68,0.69,0.7,0.71,0.72]\n y2d = []\n for i in n2d:\n data = np.loadtxt(f\"Data/2D/2D plot-{i}.dat\")\n N2d = 100\n diameter = 1\n afL = math.sqrt(N2d*math.pi*(diameter**2)/(4*i))\n L = round(afL,3)\n avg_no_density = N2d/(L**2)\n xvals = data[0]\n yvals = data[1]\n\n troughs,_ = find_peaks(-yvals,height=[-20,-0.001])\n\n plt.plot(xvals,yvals)\n plt.plot(xvals[troughs],yvals[troughs],'o')\n # plt.title(\"Area Fraction - 0.71\")\n # plt.savefig('Data/2D/2D_Mixed_Plot.png', dpi =250)\n plt.show()\n for i in range(len(xvals)):\n yvals[i] = (yvals[i])* (xvals[i]**2)\n # print(xvals[troughs[0]])\n # print(yvals[troughs[0]])\n # ytrough = troughs[0]\n # print(troughs)\n cn = np.trapz(yvals[0:27],x=xvals[0:27])\n final = cn*4*math.pi*avg_no_density\n y2d.append(final)\n\n print(y2d)\n plt.plot(n2d,y2d,marker= 'x')\n plt.xlabel('Packing Fraction')\n plt.ylabel('Coordination Number')\n plt.show()\n\ndef d1():\n n2d = [0.75,0.8,0.85]\n y2d = []\n for i in n2d:\n data = np.loadtxt(f\"Data/1D/1d plot-{i}.dat\")\n avg_no_density = i\n xvals = data[0]\n yvals = data[1]\n\n troughs,_ = find_peaks(-yvals,height=[-20,-0.001])\n\n # plt.plot(xvals,yvals)\n # plt.plot(xvals[troughs],yvals[troughs],'o')\n # # plt.title(\"Area Fraction - 0.71\")\n # # plt.savefig('Data/2D/2D_Mixed_Plot.png', dpi =250)\n # plt.show()\n for i in range(len(xvals)):\n yvals[i] = (yvals[i])* (xvals[i]**2)\n # print(xvals[troughs[0]])\n # print(yvals[troughs[0]])\n # ytrough = troughs[0]\n print(troughs)\n cn = np.trapz(yvals[0:7],x=xvals[0:7])\n final = cn*4*math.pi*avg_no_density\n y2d.append(final)\n\n print(y2d)\n plt.plot(n2d,y2d)\n plt.xlabel('Packing Fraction')\n plt.ylabel('Coordination Number')\n plt.show()\n\nd1()","sub_path":"Scripts/Plot overlap.py","file_name":"Plot 
overlap.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"632197780","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys,os,tempfile,shutil,subprocess,glob\nimport argparse\n\nif __name__ == \"__main__\":\n\n # define options\n parser = argparse.ArgumentParser(description=\"Harvest track validation plots\")\n parser.add_argument(\"files\", metavar=\"file\", type=str, nargs=\"+\",\n help=\"files to be harvested (convert edm DQM format to plain ROOT format\")\n parser.add_argument(\"-o\", \"--outputFile\", type=str, default=\"harvest.root\",\n help=\"output file (default: 'harvest.root')\")\n\n opts = parser.parse_args()\n\n # absolute path outputFile\n outputFile = os.path.abspath(opts.outputFile)\n\n # check the input files\n for f in opts.files:\n if not os.path.exists(f):\n parser.error(\"DQM file %s does not exist\" % f)\n\n # compile a file list for cmsDriver\n filelist = \",\".join([\"file:{0}\".format(os.path.abspath(_file)) for _file in opts.files])\n\n # go to a temporary directory\n _cwd = os.getcwd()\n _tempdir = tempfile.mkdtemp()\n os.chdir(_tempdir)\n\n # compile cmsDriver command\n cmsDriverCommand = \"cmsDriver.py harvest --scenario pp --filetype DQM --conditions auto:run2_mc --mc -s HARVESTING:@trackingOnlyValidation+@trackingOnlyDQM+postProcessorHLTtrackingSequence -n -1 --filein {0}\".format(filelist)\n print(\"# running cmsDriver\" + \"\\n\" + cmsDriverCommand)\n \n # run it\n subprocess.call(cmsDriverCommand.split(\" \"))\n\n # find the output and move it to the specified output file path\n ofiles = glob.glob(\"DQM*.root\")\n if len(ofiles) != 1:\n print(\"ERROR: expecting exactly one output file matching DQM*.root\")\n print(\" ls of current directory({0}):\".format(_tempdir))\n os.system(\"ls -lt\")\n sys.exit()\n shutil.move(ofiles[0],outputFile)\n \n # move back to the original directory\n os.chdir(_cwd)\n\n # and get rid of the temporary directory\n shutil.rmtree(_tempdir)\n \n","sub_path":"Validation/RecoTrack/scripts/harvestTrackValidationPlots.py","file_name":"harvestTrackValidationPlots.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"136107514","text":"import datetime\r\nimport urllib.request\r\nimport json\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nimport time\r\n\r\n#Create timestamp, open logfile\r\nprint('Initializing...')\r\ntimestamp = datetime.datetime.now().strftime('%Y-%m-%d @ %H:%M:%S')\r\nlog = open('log.txt', 'a') \r\n\r\n#Create dictionary of record upcs + pricing from file\r\nrecords = {}\r\nwith open('records.txt') as f:\r\n for line in f:\r\n line = line.rstrip()\r\n (key, val) = line.split()\r\n records[int(key)] = val\r\n\r\n#Call iTunes API to check pricing on items\r\nprint('Connecting to iTunes...')\r\n\r\nfor key in records:\r\n\r\n #Build query by concatenating url + record id, trigger api call\r\n query = 'https://itunes.apple.com/lookup?id={}'.format(key)\r\n response = json.loads(urllib.request.urlopen(query).read().decode('utf-8'))['results']\r\n\r\n #Parse JSON, compare price\r\n if response:\r\n title = response[0]['collectionName']\r\n old_price = float(records[key])\r\n new_price = float(response[0]['collectionPrice'])\r\n\r\n if old_price > new_price:\r\n print('Change to {}! 
[{} > {}]'.format(title, old_price, new_price))\r\n log.write(timestamp + ': Change to {} [{} > {}]\\n'.format(title, old_price, new_price))\r\n\r\n## sms = MIMEText('iTunes price drop detected!')\r\n## sms['From'] = 'sender@domain.com'\r\n## sms['To'] = 'recipient@domain.com'\r\n##\r\n## s = smtplib.SMTP('smtp.gmail.com', 587)\r\n## s.ehlo()\r\n## s.starttls()\r\n## s.login('username', 'password')\r\n## s.sendmail('sender@domain.com', 'recipient@domain.com', sms.as_string())\r\n## s.quit()\r\n \r\n else:\r\n print('No change to {}'.format(title))\r\n log.write(timestamp + ': No change to {}\\n'.format(title))\r\n else:\r\n log.write(timestamp + ': No match for {}\\n'.format(key)) \r\n\r\nprint('Finalizing...')\r\nlog.write('-' * 25 + '\\n')\r\nlog.close()\r\n\r\nprint('Complete!')\r\ntime.sleep(2)\r\n","sub_path":"itunes_pricewatcher.py","file_name":"itunes_pricewatcher.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"302532811","text":"# -*- coding: utf-8 -*-\n\"\"\"\nVisualize the ensemble streamflow predictions\n\n@author: Travis Williams\n\"\"\"\nfrom sys import platform\nimport copy\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport glob\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport scipy\n\nif platform == 'win32':\n from flask_cache import Cache\nelse:\n from flask_caching import Cache\n\n\n# In[] Set up application and server\napp = dash.Dash(__name__)\n\n# The stylesheet is based on one of the DASH examples\n# (oil and gas extraction in New York)\napp.css.append_css({'external_url': 'https://rawgit.com/WilliamsTravis/' +\n 'PRF-USDM/master/dash-stylesheet.css'})\n\n# Create server object\nserver = app.server\n\n# Create and initialize a cache for storing data - data pocket\ncache = Cache(config={'CACHE_TYPE': 'simple'})\ncache.init_app(server)\n\n# Create a container for the graphs\nlayout = dict(\n autosize=True,\n height=500,\n font=dict(color='black'),\n titlefont=dict(color='black',\n size='20',\n weight='bold'),\n margin=dict(\n l=35,\n r=35,\n b=65,\n t=55,\n pad=4\n ),\n # hovermode=\"closest\",\n plot_bgcolor=\"white\",\n paper_bgcolor=\"lightblue\",\n legend=dict(font=dict(size=10), orientation='h'))\n\n# In[] Get data - 2 sets\nfiles = glob.glob(os.path.join('data', \"*\"))\nfiles = [f for f in files if \"historical\" not in f]\ndolc_hist = pd.read_csv(os.path.join('data', \"DOLC2_historical.csv\")) # use os.path.join() and \"..\" instead of \"data\\\\\"\nmphc_hist = pd.read_csv(os.path.join('data', \"MPHC2_historical.csv\"))\n\ndolc_files = [f for f in files if \"DOLC2\" in f]\nmphc_files = [f for f in files if \"MPHC2\" in f]\n\ndolc_dfs = {f[-8:-4]: pd.read_csv(f) for f in dolc_files}\nyrs = [f[-8:-4] for f in dolc_files]\nyear_options = [{'label': y, 'value': y} for y in yrs]\n\nmphc_dfs = {f[-8:-4]: pd.read_csv(f) for f in mphc_files}\n\ndf_dict = {'MPHC2': mphc_dfs,\n 'DOLC2': dolc_dfs}\n\nhist_dict = {'MPHC2': mphc_hist,\n 'DOLC2': dolc_hist}\n\nsite_options = [{'label': \"McPhee Reservoir\", 'value': 'MPHC2'},\n {'label': \"McPhee Reservoir Entry Point at Dolores\",\n 'value': 'DOLC2'}]\n\n# In[] Set up HTML structure\n\napp.layout = html.Div([\n\n html.H2(\"Ensemble Streamflow Predictions at the McPhee Reservoir\",\n style={'text-align': 'center'}),\n\n html.Div(className=\"row\",\n 
children=[html.Div([dcc.Dropdown(id='site_choice',\n options=site_options,\n placeholder=\"McPhee Reservoir\")],\n className=\"two columns\"),\n\n html.Div([dcc.Dropdown(id=\"year\",\n options=year_options,\n value='2018')],\n className=\"one column\",\n style={'width': '90'})]),\n\n html.Div(className=\"row\",\n children=[html.Div(children=[dcc.Graph(id='cbrfc_graph')],\n className=\"six columns\"),\n\n html.Div(children=[dcc.Graph(id='cbrfc_history')],\n style={'float': 'right'},\n className='six columns')]),\n\n html.Div(className=\"row\",\n children=[html.Div(children=[dcc.Graph(id='err_evolve')],\n className='six columns'),\n html.Div(children=[dcc.Graph(id='uncrtnty_evolve')],\n className='six columns')]),\n\n html.Div(className=\"row\",\n children=[html.Div(children=[dcc.Graph(id='err_evolve_all')],\n className='six columns'),\n html.Div(children=[dcc.Graph(id='uncrtnty_evolve_all')],\n className='six columns')]),\n\n html.Br(),\n\n html.Div(className=\"twelve columns\",\n children=[dcc.Graph(id='our_graph')]),\n\n html.Div(className=\"row\",\n style={'width': '100%',\n 'margin-bottom': '75'},\n children=[\n html.Div(\n className=\"three columns\",\n style={\n 'height': '200',\n 'margin-right': '10',\n 'margin-left': '150'},\n children=[\n html.P('S.D.'),\n html.P(id='sd_output'),\n dcc.Slider(id='sd',\n min=0,\n max=25,\n step=1,\n value=0,\n updatemode='drag',\n vertical=True,\n marks={0: {'label': '0'},\n 25: {'label': '25'}})]),\n\n html.Div(\n className=\"three columns\",\n style={'height': '200',\n 'margin-right': '10'},\n children=[\n html.P('S.D. 2'),\n html.P(id='sd_output2'),\n dcc.Slider(id='sd2',\n min=0,\n max=25,\n step=1,\n value=0,\n updatemode='drag',\n vertical=True,\n marks={0: {'label': '0'},\n 25: {'label': '25'}})])\n ]),\n html.Hr(),\n ]) # *END\n\n\n# In[]\n@app.callback(Output('cbrfc_graph', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n obs = df['Observed Accumulation'].dropna()\n final = obs.iloc[-1]\n var = round(np.nanvar(df['ESP 50']), 2)\n yaxis = dict(range=[0, 700])\n df['ratio10'] = df['ESP 10'].apply(lambda x: str(round(x/final*100, 2)) + \"%\")\n df['text10'] = df['ESP 10'].astype(str) + \" KAF; \" + df['ratio10']\n df['ratio50'] = df['ESP 50'].apply(lambda x: str(round(x/final*100, 2)) + \"%\")\n df['text50'] = df['ESP 50'].astype(str) + \" KAF; \" + df['ratio50']\n df['ratio90'] = df['ESP 90'].apply(lambda x: str(round(x/final*100, 2)) + \"%\")\n df['text90'] = df['ESP 90'].astype(str) + \" KAF; \" + df['ratio90']\n\n annotation = dict(\n text=\"Forecast Variance: \" + \"{:,}\".format(var) + \"\",\n x=year + '-06-25',\n y=650,\n font=dict(size = 17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='#8ad88d',\n width=2,\n dash=\"dashdot\"),\n x=df.Date,\n y=df['ESP 90'],\n name='p90',\n text=df['text90'],\n hoverinfo='text'),\n dict(type='line',\n line=dict(color='#04a00a',\n width=4),\n x=df.Date,\n y=df['ESP 50'],\n name=\"p50\",\n text=df['text50'],\n hoverinfo='text'\n ),\n dict(type='line',\n line=dict(color='#8ad88d',\n width=2,\n dash=\"dashdot\"),\n x=df.Date,\n y=df['ESP 10'],\n name='p10',\n text=df['text10'],\n hoverinfo='text'),\n dict(type='line',\n line=dict(color='blue',\n width=4),\n x=df.Date,\n y=df['Observed Accumulation'],\n name=\"Observation (KAF)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = 
(\"Colorado Basin River Forecast Center\" +\n \"'s \" + '\"ESP\"' + \" - \" + site_choice + \" \" + year)\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('cbrfc_history', 'figure'),\n [Input('site_choice', 'value')])\ndef makeGraph2(site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n df = hist_dict[site_choice]\n var = round(np.nanvar(df['vol']), 2)\n yaxis = dict(range=[0, 700])\n df['text'] = df['vol'].astype(str) + \"KAF\"\n annotation = dict(\n text=\"Streamflow Variance: \" + \"{:,}\".format(var) + \"\",\n x=2010,\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='blue',\n width=4),\n x=df['year'],\n y=df['vol'],\n text=df['text'],\n hoverinfo='text',\n name=\"Observation (KAF)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" streamflow history \")\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('err_evolve', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph3(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n q = df['Observed Total'].dropna().tolist()[0]\n forecasts = np.array(df['ESP 50'])\n errors = abs(forecasts - q)\n errors = np.round(errors, 2)\n mean_err = round(np.nanmean(errors), 2)\n df['text'] = errors\n df['text'] = df['text'].astype(str) + \" KAF\"\n yaxis = dict(range=[0, 700])\n xaxis = dict(df['Date'])\n annotation = dict(\n text=\"Mean Absolute Error: \" + \"{:,}\".format(mean_err) + \"\",\n x=df.Date.iloc[-15],\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='red',\n width=5),\n x=df['Date'],\n y=errors,\n text=df['text'],\n hoverinfo='text',\n name=\"Error (p50 - q)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 50 Absolute Errors \" + year)\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['xaxis'] = xaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('uncrtnty_evolve', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph4(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n uncertainties = np.array(df['ESP 10']) - np.array(df['ESP 90'])\n uncertainties = np.round(uncertainties, 2)\n df['text'] = uncertainties\n df['text'] = df['text'].astype(str) + \" KAF\"\n mean_uncert = round(np.nanmean(uncertainties), 2)\n yaxis = dict(range=[0, 700])\n annotation = dict(\n text=\"Average Uncertainty: \" + \"{:,}\".format(mean_uncert) + \"\",\n x=df.Date.iloc[-15],\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='#f4d942',\n width=5),\n x=df.Date,\n y=uncertainties,\n text=df['text'],\n hoverinfo='text',\n 
name=\"Uncertainty (p10 - p90)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 90, 10 range \" + year)\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('err_evolve_all', 'figure'),\n [Input('site_choice', 'value')])\ndef makeGraph5(site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n dfs = [dfs[key] for key in dfs.keys()]\n for i in range(len(dfs)):\n dfs[i] = dfs[i][75:295]\n print(len(dfs[i])) # January to August\n\n qs = [df['Observed Total'].dropna().tolist()[0] for df in dfs]\n forecasts = [np.array(df['ESP 50']) for df in dfs]\n errors = [forecasts[i] - qs[i] for i in range(len(qs))]\n errors = np.nanmean(errors, axis=0)\n # errors = abs(forecasts - q)\n errors = np.round(errors, 2)\n mean_err = round(np.nanmean(errors), 2)\n df = dfs[0][['Date', 'Average']]\n # df['Date'] = pd.to_datetime(df['Date'])\n # df['Date'] = df['Date'].map(lambda x: x.strftime('%m-%d'))\n df['day'] = df.index\n df['errors'] = errors\n df['text'] = df['errors'].astype(str) + \" KAF\"\n yaxis = dict(range=[0, 700])\n xaxis = dict(df['Date'])\n annotation = dict(\n text=\"Mean Absolute Error: \" + \"{:,}\".format(mean_err) + \"\",\n x=330,\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='red',\n width=5),\n x=df['day'],\n y=df['errors'],\n text=df['text'],\n hoverinfo='text',\n name=\"Error (p50 - q)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 50 Absolute Errors - All Years\")\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['xaxis'] = xaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('uncrtnty_evolve_all', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph6(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n dfs = [dfs[key] for key in dfs.keys()]\n for i in range(len(dfs)):\n dfs[i] = dfs[i][75:295]\n print(len(dfs[i])) # January to August\n\n uncertainties = [np.array(df['ESP 10']) - np.array(df['ESP 90']) for\n df in dfs]\n uncertainties = np.nanmean(uncertainties, axis=0)\n uncertainties = np.round(uncertainties, 2)\n df = dfs[0][['Date', 'Average']]\n df['uncertainties'] = uncertainties\n df['text'] = df['uncertainties'].astype(str) + \" KAF\"\n df['day'] = df.index\n mean_uncert = round(np.nanmean(uncertainties), 2)\n yaxis = dict(range=[0, 700])\n annotation = dict(\n text=\"Average Uncertainty: \" + \"{:,}\".format(mean_uncert) + \"\",\n x=330,\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='#f4d942',\n width=5),\n x=df.day,\n y=df.uncertainties,\n text=df['text'],\n hoverinfo='text',\n name=\"Uncertainty (p10 - p90)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 90, 10 range - All Years\")\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n\n figure = dict(data=data, 
layout=layout_c)\n\n return figure\n\n# In[] Set up application callbacks for our simulation\n@app.callback(Output('our_graph', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value'),\n Input('sd', 'value')])\ndef makeGraph7(year, site_choice, sd):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n dates = df['Date']\n obs = df['Observed Accumulation'].dropna()\n final = obs.iloc[-1]\n average = list(df['Average'])[0]\n yaxis = dict(range=[0, 700])\n df['ratio50'] = df['ESP 50'].apply(\n lambda x: str(round(x/final*100, 2)) + \"%\")\n\n# Simulation part:\n forecast = np.random.normal(final, sd, len(dates))\n\n df_f = pd.DataFrame({'Date': dates, 'Forecast': forecast})\n data = [dict(type='line',\n line=dict(color='#cc872e',\n width=4),\n x=df_f.Date,\n y=df_f.Forecast,\n text=df['ratio50'],\n hoverinfo='text',\n name='Simulation'),\n dict(\n type='line',\n line=dict(color='blue',\n width=4),\n x=df.Date,\n y=df['Observed Accumulation'],\n name='Observation')]\n\n layout_c = copy.deepcopy(layout)\n layout_c['dragmode'] = 'select'\n layout_c['yaxis'] = yaxis\n layout_c['title'] = 'Our \"ESP\" - ' + site_choice\n layout_c['font'] = dict(color='white')\n layout_c['titlefont'] = dict(color='white',\n size='20',\n weight='bold')\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['paper_bgcolor'] = '#013589'\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n# In[] Slider display callbacks\n\n@app.callback(Output('sd_output', 'children'),\n [Input('sd', 'value')])\ndef displaySD(sd):\n return str(sd)\n\n@app.callback(Output('sd_output2', 'children'),\n [Input('sd2', 'value')])\ndef displaySD2(sd):\n return str(sd)\n\n# In[]\n\n\n\n# In[] Run application\n\nif __name__ == '__main__':\n app.run_server(host='0.0.0.0')\n","sub_path":"scripts/vis_app.py","file_name":"vis_app.py","file_ext":"py","file_size_in_byte":19430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"380910402","text":"\n\n# ## Q1. Preparing Dataset\n\n# In[83]:\n\n\nimport pandas as pd\n\n\n# In[3]:\n\n\nCustomer = pd.read_csv(\"Customer.csv\")\nCustomer\n\n\n# In[4]:\n\n\nProduct_hierarchy = pd.read_csv(\"prod_cat_info.csv\")\nProduct_hierarchy\n\n\n# In[5]:\n\n\nTransaction = pd.read_csv(\"Transactions.csv\")\nTransaction\n\n\n# In[6]:\n\n\n# 1. Merging\nCustomer_Trans = pd.merge(left = Customer,\n right = Transaction,\n left_on = 'customer_Id',\n right_on = 'cust_id',\n how = 'inner',\n indicator = True)\n\n\n# In[7]:\n\n\nCustomer_Trans\n\n\n# In[99]:\n\n\nCustomer_Final = pd.merge(left = Customer_Trans,\n right = Product_hierarchy,\n left_on = 'prod_cat_code',\n right_on = 'prod_cat_code',\n how = 'inner'\n )\n\n\n\n# In[100]:\n\n\nCustomer_Final\n\n\n# ## Q2. 
Summary Report\n\n#\n# ### Column names with their data-types\n#\n\n# In[11]:\n\n\nCustomer_Final.dtypes\n\n\n# ### Top 10 Observations\n\n# In[12]:\n\n\nCustomer_Final.head(10)\n\n\n# ### Bottom 10 Observations\n\n# In[13]:\n\n\nCustomer_Final.tail(10)\n\n\n# ### Five Number Summary\n\n# In[14]:\n\n\nimport numpy as np\nData_min = Customer_Final['total_amt'].min()\nData_max = Customer_Final['total_amt'].max()\nData_q1 = np.percentile(Customer_Final.total_amt,25)\nmedian = np.percentile(Customer_Final.total_amt,50)\nData_q3 = np.percentile(Customer_Final.total_amt,75)\nprint('Min = ',Data_min)\nprint('Max = ',Data_max)\nprint('Median = ',median)\nprint('Q1 = ',Data_q1)\nprint('Q3 = ',Data_q3)\n\n\n# ## Frequency Table :\n#\n# ### Store type\n\n# In[15]:\n\n\n# keep the labels produced by crosstab: it sorts rows and columns alphabetically,\n# so renaming them by hand would mislabel the counts\nfreq_table = pd.crosstab(index = Customer_Final['Gender'],\n columns = Customer_Final['Store_type'])\nfreq_table\n\n\n# ### Prod_cat\n\n# In[16]:\n\n\nfreq_table = pd.crosstab(index = Customer_Final['Gender'],\n columns = Customer_Final['prod_cat'])\nfreq_table\n\n\n# ### Prod_subcat\n\n# In[17]:\n\n\nfreq_table = pd.crosstab(index = Customer_Final['Gender'],\n columns = Customer_Final['prod_subcat'])\nfreq_table\n\n\n# ## Q3. Histograms for all continuous variables and frequency bars for categorical variables\n\n# ### Histogram for continuous variables -\n#\n#\n# ### 1. Tax\n\n# In[20]:\n\n\nimport matplotlib.pyplot as plt\nTax = Customer_Final['Tax']\nplt.hist(Tax,color=['yellow'])\nplt.xlabel('tax')\nplt.ylabel('Frequency')\nplt.show()\n\n\n# ### 2. Total amount\n\n# In[19]:\n\n\nTotal_Amt = Customer_Final['total_amt']\nplt.hist(Total_Amt,color = 'Blue')\nplt.xlabel('Total amount')\nplt.ylabel('Frequency')\nplt.show()\n\n\n# ### Frequency Bar for Categorical variables -\n#\n#\n#\n# ### 1. Gender\n\n# In[21]:\n\n\nCustomer_Final['Gender'].value_counts().plot(kind = 'bar')\n\n\n# ### 2. Store type\n\n# In[22]:\n\n\nCustomer_Final['Store_type'].value_counts().plot(kind = 'bar')\n\n\n# ### 3. Product category\n\n# In[23]:\n\n\nCustomer_Final['prod_cat'].value_counts().plot(kind = 'bar')\n\n\n# ### 4. Product sub category\n\n# In[24]:\n\n\nCustomer_Final['prod_subcat'].value_counts().plot(kind = 'bar')\n\n\n# ## Q4\n#\n# ### A. Time period of the available transaction data\n\n# In[ ]:\n\n\n# time span covered by the transactions\nCustomer_Final['tran_date'] = pd.to_datetime(Customer_Final['tran_date'])\nprint('Transaction data available from {} to {}'.format(\n Customer_Final['tran_date'].min(), Customer_Final['tran_date'].max()))\n\n\n# ### B. Count number of negative total amount\n\n# In[25]:\n\n\ndf = Customer_Final['total_amt']\ncount2 = Customer_Final.loc[(df<0),['total_amt']].count()\ncount2\n\n\n# ## Q5. 
Analyze which product categories are more popular among females vs male customers.\n\n# In[134]:\n\n\n# Popular among Male\nM = Customer_Final.loc[Customer_Final['Gender']=='M']\n\ngroup_prod = M.groupby(['prod_cat'])['total_amt'].sum()\npopular_M = group_prod.nlargest(1)\ndisplay('The most popular product category in Male customers is : ',popular_M)\n\n# Popular among Female\nF = Customer_Final.loc[Customer_Final['Gender']=='F']\ngroup_prod1 = F.groupby(['prod_cat'])['total_amt'].sum()\npopular_F = group_prod1.nlargest(1)\ndisplay('The most popular product category in Female customers is : ',popular_F)\n\n\n# #### Among Male vs Female the most popular product category is Books.\n\n# ## Q6. Which City code has the maximum customers and what was the percentage of customers from that city?\n\n# In[173]:\n\n\nmax_cust = Customer['city_code'].value_counts()\nt = max_cust.nlargest(1)\n\ndisplay(\"City code which has Maximum customers is : \",t)\n\n# percentage of customers from the top city code (use the computed count rather than a hard-coded 595)\ntot_customer = Customer['customer_Id'].count()\npercent = round((t.iloc[0]/tot_customer)*100,2)\nprint(\"Percentage of customers from city code {} is {}%\".format(t.index[0], percent))\n\n\n# ## Q7. Which store type sells the maximum products by value and by quantity?\n\n# In[24]:\n\n\n# aggregate per store type; sorting raw rows would only find the single largest transaction\nstore_totals = Customer_Final.groupby('Store_type')[['total_amt','Qty']].sum()\ndisplay(store_totals.sort_values(['total_amt','Qty'],ascending = False).head(1))\n\n\n# ## Q8. What was the total amount earned from the \"Electronics\" and \"Clothing\" categories from\n#Flagship Stores?\n\n# In[32]:\n\n\ndf = pd.DataFrame(Customer_Final)\ntf = df[df.prod_cat.isin(['Electronics','Clothing']) & (df.Store_type == 'Flagship store')]\ntotal = tf.total_amt.sum()\nprint('Total amount earned',total)\n\n\n# ## Q9. What was the total amount earned from \"Male\" customers under the \"Electronics\" category?\n\n# In[52]:\n\n\ntf1 = df[(df.Gender == 'M') & (df.prod_cat == 'Electronics')]\ntotal = tf1.total_amt.sum()\nprint('Total amount earned',total)\n\n\n# ## Q10. How many customers have more than 10 unique transactions, after removing all transactions which have any negative amounts?\n\n# In[46]:\n\n\ndf1 = df[(df.total_amt > 0)]\n# count unique transactions per customer, then count the customers above 10\ntrans_per_cust = df1.groupby('customer_Id')['transaction_id'].nunique()\nts = (trans_per_cust > 10).sum()\nprint('Total customers having more than 10 unique transactions are - ',ts)\n\n\n# ## Q11. For all customers aged between 25 - 35, find out:\n#\n#\n# ### a. What was the total amount spent for “Electronics” and “Books” product categories?\n\n# In[47]:\n\n\ncurr_year = pd.to_datetime('today').year\ndob_year = pd.DatetimeIndex(df['DOB']).year #extract year from DOB\n\nx = dob_year-100 # correct two-digit years parsed into the wrong century (e.g. the 60's)\nv = curr_year - x\ny = curr_year - dob_year\ndf['age'] = (np.where(dob_year > curr_year,v,y))\ndf\n\n\n# In[174]:\n\n\ntotal = df.loc[((df.age >= 25) & (df.age <= 35)) & ((df.prod_cat=='Books') | (df.prod_cat=='Electronics'))]['total_amt'].sum()\nprint('Total amount spent',total)\n\n\n# ### b. 
What was the total amount spent by these customers between 1st Jan, 2014 to 1st Mar, 2014?\n\n# In[92]:\n\n\n# restrict to the same 25-35 age group ('age' was added to df above)\ndf['tran_date'] = pd.to_datetime(df['tran_date'])\n\nt_date = df[(df['tran_date'] >= '2014-01-01') & (df['tran_date'] <= '2014-03-01') & (df.age >= 25) & (df.age <= 35)]\ntotal_amount = t_date.total_amt.sum()\nprint('Total amount spent by these customers - ',total_amount)\n\n\n","sub_path":"Retail Case study/Retail_Case_Study.py","file_name":"Retail_Case_Study.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"461239526","text":"from torch import nn\nimport torch.nn.functional as F\n\n\nclass MLP(nn.Module):\n \"\"\"\n Linear (256) -> ReLU -> Dropout-> Linear(64) -> ReLU -> Dropout -> Linear(10) -> ReLU-> LogSoftmax\n \"\"\"\n\n def __init__(self, l1=256, l2=64, dr=.25):\n super().__init__()\n self.fc1 = nn.Linear(784, l1)\n self.fc2 = nn.Linear(l1, l2)\n self.fc3 = nn.Linear(l2, 10)\n\n # Define proportion of neurons to dropout\n self.dropout = nn.Dropout(dr)\n\n def forward(self, x):\n x = x.view(x.shape[0], -1)\n x = F.relu(self.fc1(x))\n # Apply dropout\n x = self.dropout(x)\n x = F.relu(self.fc2(x))\n # Apply dropout\n x = self.dropout(x)\n x = F.relu(self.fc3(x))\n x = F.log_softmax(x, dim=1)\n\n return x\n","sub_path":"mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"369952034","text":"from socket import *\n\ndef main():\n host = 'localhost'\n\n sock = socket(AF_INET6, SOCK_STREAM)\n addr = (host,9898)\n sock.connect(addr)\n\n try:\n msg = b\"This was a terrible test!\\n\"\n sock.sendall(msg)\n except OSError as e:\n print(\"Socket error \", e)\n finally:\n sock.close()\n\nmain()","sub_path":"LABS/Socket/client6.py","file_name":"client6.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"186319766","text":"import pytest\r\nfrom flask import Flask\r\nfrom jinja2 import Environment, DictLoader\r\nfrom latexcreator import Api\r\n\r\nclass TestConfig:\r\n TESTING = True\r\n\r\n@pytest.fixture\r\ndef app():\r\n\r\n _app = Flask('test')\r\n _app.config.from_object(TestConfig)\r\n ctx = _app.test_request_context()\r\n ctx.push()\r\n\r\n yield _app\r\n \r\n ctx.pop()\r\n \r\n@pytest.fixture\r\ndef client(app):\r\n with app.test_client() as client:\r\n yield client\r\n \r\n@pytest.fixture\r\ndef api():\r\n return Api()\r\n \r\n@pytest.fixture\r\ndef default_environment():\r\n templates = {'a':'a:{{ a }}','b':'b:{{ b }}, a:{{ a }}'}\r\n return Environment(loader=DictLoader(templates)),templates","sub_path":"tests/test_conf.py","file_name":"test_conf.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"186788950","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------\n# File name : rirakkuma.py\n# Summary : 2019 ROBOCON (qualifier round)\n# - uses move_base\n# - start/goal positions are set in the \"Start_Goal.csv\" file\n# Created : 2019/08/19\n# -----------------------------------------------------------------------\n\n# Import\n# common\nimport rospy\nimport math\n# move_base\nfrom move_base_msgs.msg import MoveBaseActionResult\nfrom geometry_msgs.msg import PoseStamped\n# Twist\nfrom geometry_msgs.msg import Twist\n# 
file\nimport csv # csv file\nimport os # file path\n# euler to quaternion\nimport tf\nfrom geometry_msgs.msg import Quaternion\n\n# Add ImageProcessing --- START ---\n# use LaserScan\nfrom sensor_msgs.msg import LaserScan\n\n# use Camera\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport numpy as np\n\n# Image Process function\nimport imgProc #function\nfrom imgProc import * #class\n\n# Add ImageProcessing --- END ---\n\nimport math\nfrom tf import TransformListener\nfrom geometry_msgs.msg import PointStamped\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nimport actionlib\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\n\n#camera_fov = 50.0\n#camera_width = 640.0\n\n\n# Emulate an Enum in Python\nclass MainState():\n STOP = 0 # stopped\n EXEC_ACTION = 1 # executing an action\n MOVING = 2 # moving\n HUNTING = 3 # hunting\n\nclass RirakkumaBot():\n # class variables\n HUNT_CNT_NUM = 0 # number of cycles to wait before entering the HUNTING state (2020.08.18 wait disabled)\n \n def __init__(self, bot_name=\"NoName\"):\n ### Parameter Settings\n # bot name \n self.name = bot_name\n\n # State\n self.main_state = MainState.STOP # main state\n self.prev_main_state = MainState.STOP # previous main state\n self.next_state = MainState.STOP # next state\n # list holding the data read from the CSV file\n self.c_data = [] # csv data\n self.c_data_cnt = 0 # counter for stepping through the csv data\n # sequence number for simple/goal (required; omitting it causes an error)\n self.goal_seq_no = 0\n # counter for the transition to HUNTING\n self.hunting_cnt = 0\n\n # Flags\n # initialization flag\n self.initialize_flg = False\n # goal-arrival flag\n self.goal_arrival_flg = False\n\n ### Register publishers with the ROS master\n # Velocity\n self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)\n self.pub_goal = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=1, latch=True)\n ### Register subscribers with the ROS master\n self.sub_goal_result = rospy.Subscriber(\"move_base/result\", MoveBaseActionResult, self.result_callback, queue_size=1)\n \n # Add ImageProcessing --- START ---\n # lidar scan subscriber\n self.scan = LaserScan()\n self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)\n\n # camera subscriber\n # for converting the image topic to an opencv object\n self.img = None\n self.camera_preview = True\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)\n #self.image_sub = rospy.Subscriber('/red_bot/image_raw', Image, self.imageCallback)\n\n #cImgProc instance\n self.proc = cImgProc()\n # Add ImageProcessing --- END ---\n\n self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)\n\n def calcTwist_center(self, center, depth, S):\n \"\"\"Set the Twist values so that the target stays centered\"\"\"\n #depth [m]\n if center != -1:\n val = int(center / 16) # quantize center into levels 0-4\n # --- short range --------------------------------\n if 0.3 > depth:\n #if 100 < S: \n x = -0.2\n th = 0.0\n # --- medium range --------------------------------\n elif 0.6 > depth:\n if val == 4:\n x = 0.0\n th = -0.2\n\n elif val == 3:\n x = 0.1\n th = -0.1\n\n elif val == 2:\n x = 0.0\n th = 0.0\n\n elif val == 1:\n x = 0.1\n th = 0.1\n\n else:\n x = 0.0\n th = 0.2\n # --- long range --------------------------------------- \n #elif 1.0 > depth:\n else : \n if val == 4:\n x = 0.0\n th = -0.2\n\n elif val == 3:\n x = 0.1\n th = -0.1\n\n elif val == 2:\n x = 0.15\n th = 0.0\n\n elif val == 1:\n x = 0.1\n th = 0.1\n\n else:\n x = 0.0\n th = 0.2\n # else:\n # x=0.0\n # th=0.0\n # --- no detect green\n else :\n x = 0\n th = 0 \n\n # update\n print(\"green detect x,th=\", x, th)\n twist = Twist()\n twist.linear.x = x; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; 
twist.angular.y = 0; twist.angular.z = th\n return twist\n\n def csv_data(self):\n \"\"\"Read the waypoint coordinates from the CSV file\"\"\"\n # open the csv file\n csv_pass = os.path.dirname(__file__) + \"/position_list.csv\"\n csv_file = open(csv_pass, \"r\")\n # read the data\n pos_data = csv.reader(csv_file, delimiter=\",\", doublequote=True, lineterminator=\"\\r\\n\", quotechar='\"', skipinitialspace=True)\n # treat the first row as the header\n header = next(pos_data)\n # extract the data of each row\n for row in pos_data:\n # store the row read from the csv file in the data list\n # append adds each row as a nested list\n self.c_data.append(row)\n\n def vel_ctrl(self, line_x, line_y, ang_z):\n \"\"\"Publisher for the cmd_vel topic (used for turning)\"\"\"\n vel_msg = Twist()\n vel_msg.linear.x = line_x\n vel_msg.linear.y = line_y\n vel_msg.angular.z = ang_z\n self.vel_pub.publish(vel_msg)\n\n def simple_goal_publish(self,pos_list):\n \"\"\"Publisher for the move_base_simple/goal topic (the argument is passed as a list)\"\"\"\n # Goal Setting\n goal = PoseStamped()\n goal.header.seq = self.goal_seq_no\n goal.header.frame_id = \"map\" # specify coordinates in the map frame\n goal.header.stamp = rospy.Time.now() # timestamp is the current time\n\n self.goal_seq_no += 1 # update the sequence number\n\n # ** position\n goal.pose.position.x = float(pos_list[1])\n goal.pose.position.y = float(pos_list[2])\n goal.pose.position.z = 0\n # ** orientation\n # convert degrees to radians\n degree_val = float(pos_list[3])\n radian_val = math.radians(degree_val)\n # convert the Euler angles to a quaternion\n # RESPECT @hotic06: convert Euler angles to a quaternion and set it\n quate = tf.transformations.quaternion_from_euler(0.0, 0.0, radian_val)\n goal.pose.orientation.x = quate[0]\n goal.pose.orientation.y = quate[1]\n goal.pose.orientation.z = quate[2]\n goal.pose.orientation.w = quate[3]\n # debug\n print(goal)\n # actually publish the topic\n self.pub_goal.publish(goal)\n\n def result_callback(self,goal_result):\n \"\"\"Callback: move_base result (detects arrival at the goal)\"\"\"\n if goal_result.status.status == 3: # arrived at the goal\n self.goal_arrival_flg = True\n\n def lidarCallback(self, data):\n \"\"\"Callback: lidar\"\"\"\n self.scan = data\n\n def imageCallback(self, data):\n \"\"\"Callback: camera image \"\"\"\n # convert image topic to opencv object\n try:\n self.img = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n # image processing\n # handle the IndexError raised when imageCallback is called before lidarCallback\n try:\n self.proc.imageProcess1(self.img, self.scan)\n #print('cwd=', self.proc.cwd)\n except IndexError as e:\n print(e)\n return\n\n # Show camera window\n if self.proc.debug_view == 1: \n cv2.imshow(\"Camera\", self.proc.img_div2) \n cv2.waitKey(1)\n\n # Show debug window\n if self.proc.debug_view == 2:\n #cv2.imshow(\"rila\", self.proc.rila_img)\n #cv2.imshow(\"div2\", self.proc.img_div2) \n #cv2.imshow(\"div8\", self.proc.img_div8)\n #cv2.imshow(\"red\", self.proc.red_img)\n #cv2.imshow(\"green\", self.proc.green_img)\n #cv2.imshow(\"blue\", self.proc.blue_img) \n #cv2.imshow(\"Camera\", self.proc.img) \n cv2.imshow(\"debug1\", self.proc.debug1_img) \n # --- add T.Ishigami 2020.03.15 22:40 ---\n # Add vertical window position for QVGA ( 960 - 260 = 700)\n # cv2.moveWindow(\"debug1\", 0, 700)\n # Add vertical window position for RHC ( 900 - 260 = 640)\n cv2.moveWindow(\"debug1\", 0, 640)\n\n cv2.waitKey(1)\n # green_index = self.proc.green_center\n # if green_index != -1:\n # green_distance = self.proc.depth_img.item(0,green_index,0)\n # else:\n # green_distance = 0\n\n\n def is_start_hunting(self):\n \"\"\"Decide whether to start HUNTING\n\n - Once the enemy is spotted, wait HUNT_CNT_NUM cycles.\n - When the wait is satisfied, save the current state and set the next state to HUNTING.\n \n Returns True to start hunting, False otherwise.\n \"\"\"\n if self.proc.green_center != -1:\n if 
self.hunting_cnt >= RirakkumaBot.HUNT_CNT_NUM:\n self.prev_main_state = self.main_state\n self.next_state = MainState.HUNTING\n return True\n else:\n self.hunting_cnt += 1\n else:\n self.hunting_cnt = 0\n\n return False\n\n def is_finish_hunting(self):\n \"\"\"Decide whether to stop HUNTING\n\n - When the enemy is lost, return to the state that was active before HUNTING.\n \n Returns True to stop hunting, False otherwise.\n \"\"\"\n if self.proc.green_center == -1:\n self.next_state = self.prev_main_state\n return True\n\n return False\n\n def func_state_stop(self):\n \"\"\"State handler: STOP\"\"\"\n # if initialization has not run yet, the next state is EXEC_ACTION\n if self.initialize_flg == False:\n self.initialize_flg = True\n self.next_state = MainState.EXEC_ACTION\n\n def func_state_exec_action(self):\n \"\"\"State handler: EXEC_ACTION\"\"\"\n # decide whether to start HUNTING\n if self.is_start_hunting():\n # when starting, skip the rest of the processing\n return\n\n # read the action list\n pos_info = self.c_data[self.c_data_cnt]\n self.c_data_cnt += 1 \n # act according to the action list\n if pos_info[0] == \"move\":\n # move to the destination (next state is MOVING)\n self.simple_goal_publish(pos_info)\n self.next_state = MainState.MOVING\n elif pos_info[0] == \"turn\": \n # turn (stay in this state)\n # convert degrees to radians\n degree_val = float(pos_info[3])\n radian_val = math.radians(degree_val)\n self.vel_ctrl(0,0,radian_val)\n else:\n # for unexpected actions, move on to the next entry\n pass\n\n def func_state_moving(self):\n \"\"\"State handler: MOVING\"\"\"\n # decide whether to start HUNTING\n if self.is_start_hunting():\n # when starting, skip the rest of the processing\n return\n\n # once the destination is reached, the next state is EXEC_ACTION\n if self.goal_arrival_flg == True:\n self.goal_arrival_flg = False\n self.next_state = MainState.EXEC_ACTION\n\n def func_state_hunting(self):\n \"\"\"State handler: HUNTING\"\"\"\n # decide whether to stop HUNTING\n if self.is_finish_hunting():\n # when stopping, skip the rest of the processing\n return\n\n # chase the enemy\n print(\"detect green\")\n self.client.cancel_goal()\n twist = self.calcTwist_center(self.proc.green_center, self.proc.green_center_depth, self.proc.green_center_S)\n print(\"#################### green_S_depth ####################\")\n print(self.proc.green_center_S, \"-\", self.proc.green_center_depth)\n print(\"#######################################################\") \n self.vel_pub.publish(twist)\n print(\"snipe_enemy\")\n\n def strategy(self):\n \"\"\"Main robot behavior (controlled by a state machine)\"\"\"\n while not rospy.is_shutdown():\n # run the handler for the current main state\n if self.main_state == MainState.STOP:\n # stopped\n self.func_state_stop()\n elif self.main_state == MainState.EXEC_ACTION:\n # executing an action\n self.func_state_exec_action()\n elif self.main_state == MainState.MOVING:\n # moving\n self.func_state_moving()\n elif self.main_state == MainState.HUNTING:\n # hunting\n self.func_state_hunting()\n else:\n pass\n\n # DEBUG Print\n print('main_state = ',self.main_state)\n print('next_state = ',self.next_state)\n\n # advance the main state to the next state\n self.main_state = self.next_state\n # wait 1 second\n rospy.sleep(1)\n\nif __name__ == \"__main__\":\n rospy.init_node('rirakkuma_node')\n bot = RirakkumaBot('rirakkuma')\n bot.csv_data()\n rospy.sleep(1.0) # wait after startup (tuned value)\n bot.strategy()","sub_path":"burger_war_dev/scripts/rirakkuma.py","file_name":"rirakkuma.py","file_ext":"py","file_size_in_byte":15105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"97337782","text":"import numpy as np\nimport scipy.stats as ss\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom sklearn.cluster import KMeans\n\n\n# np.random.seed(6)\n\n\"\"\"initialize randomly distributed points in square\"\"\"\nsl = 2 # side length of square\nrawdata = sl*np.random.random((1000, 2))-np.array([[sl/2., sl/2.]])\nn_pts = 
150\nn_sel = 100\ndof = 2 # number of spatial dimensions\nrawdata = rawdata[:n_pts, :]\n\n\"\"\"plot initial points\"\"\"\nfig = plt.figure()\nplt.scatter(rawdata[:, 0], rawdata[:, 1],\n marker='o', s=20,\n color='k', linewidths=0.0, edgecolors=None, alpha=.3,\n label='original')\n\n\n\"\"\"perform kmeans to identify seeds\"\"\"\nkmeans = KMeans(n_clusters=n_sel).fit(rawdata)\nseeds = kmeans.cluster_centers_\n\nplt.scatter(seeds[:, 0], seeds[:, 1],\n marker='s', s=15,\n color='b', linewidths=0.0, edgecolors=None, alpha=.5,\n label='targets')\n\n\n\"\"\"find the point closest to each seed\"\"\"\nrawdata_ = rawdata\nselected = np.zeros((n_sel, 2))\n\nfor ii in range(n_sel):\n dist = np.sum((rawdata_-seeds[ii, :])**2, 1)\n indx = np.argmin(dist)\n\n selected[ii, :] = rawdata_[indx, :]\n rawdata_ = np.delete(rawdata_, indx, axis=0)\n\n x = np.array([seeds[ii, 0], selected[ii, 0]])\n y = np.array([seeds[ii, 1], selected[ii, 1]])\n plt.plot(x, y, 'r:')\n\n\n\"\"\"plot the selected points\"\"\"\nplt.scatter(selected[:, 0], selected[:, 1],\n marker='x', s=40, c='r', edgecolors=None,\n linewidths=1.0, alpha=0.5,\n label='selected')\n\n\ntgt = 0.5\nplt.axis(tgt*np.array([-sl, sl, -sl, sl]))\n\nplt.gca().set_aspect('equal')\nplt.legend(loc='upper right', shadow=True, fontsize='medium', ncol=1)\nfig.tight_layout()\n\nplt.show()\n","sub_path":"fip_collab/2017_02_08_HCF_pearson/toy_problem/test_select_spread.py","file_name":"test_select_spread.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"281583671","text":"#!/usr/bin/env python\n\nimport sys, logging.config, json\nfrom bottle import route, post, error, run, template, static_file, request, response\nimport constant, config, operation, program\n\nlogger = logging.getLogger(__name__)\n\n####\n\n@error(404)\ndef error404(error):\n\treturn \"404 not found.\"\n\n@route('/static/<filename>')\ndef static(filename):\n\treturn static_file(filename, root='./static')\n\n@route('/')\ndef index():\n\treturn template(\n\t\t'main',\n\t)\n\n####\n\n@route('/getProgramSequence')\ndef getProgramSequence():\n\ttry:\n\t\thtml = ''\n\t\tfor op in program.getSequence():\n\t\t\thtml += template(\n\t\t\t\t'operation/disp/' + op.getType(),\n\t\t\t\toperation = op,\n\t\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/addOperation')\ndef addOperation():\n\ttry:\n\t\ttype_ = request.params['type']\n\t\top = operation.createByType(type_)\n\t\tprogram.addOperation(op)\n\t\thtml = template(\n\t\t\t'operation/edit/' + op.getType(),\n\t\t\toperation = op,\n\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/saveOperation')\ndef saveOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\top = program.operationById(id_)\n\t\top.save(request.params)\n\t\thtml = template(\n\t\t\t'operation/disp/' + op.getType(),\n\t\t\toperation = op,\n\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/editOperation')\ndef editOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\top = program.operationById(id_)\n\t\thtml = template(\n\t\t\t'operation/edit/' + op.getType(),\n\t\t\toperation = op,\n\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/moveOperation')\ndef moveOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\tupDown = 
request.params['upDown']\n\t\tres = program.moveOperationById(id_, upDown)\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/deleteOperation')\ndef deleteOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\top = program.removeOperationById(id_)\n\t\tdel op\n\t\thtml = ''\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n####\n\n@route('/startProgram')\ndef startProgram():\n\ttry:\n\t\tres = program.start()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/stopProgram')\ndef stopProgram():\n\ttry:\n\t\tres = program.stop()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/isProgramRunning')\ndef isProgramRunning():\n\ttry:\n\t\tres = program.isRunning()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/getProgramLog')\ndef getProgramLog():\n\ttry:\n\t\tres = program.getLog()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n@route('/eraseProgramLog')\ndef eraseProgramLog():\n\ttry:\n\t\tres = program.eraseLog()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n####\n\n@route('/dumpProgram')\ndef dumpProgram():\n\ttry:\n\t\tdic = program.dump()\n\t\treturn json.dumps({'result' : dic})\n\texcept Exception as e:\n\t\treturn str(type(e)) + ' ' + str(e)\n\n@post('/restoreProgram')\ndef restoreProgram():\n\ttry:\n\t\tinfo = request.files.get('file')\n\t\tif info.filename.endswith('.json'):\n\t\t\traw = info.file.read()\n\t\t\tdata = json.loads(raw)\n\t\t\tprogram.restore(data)\n\t\telse:\n\t\t\traise RuntimeError('file suffix should be : .json')\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\treturn getProgramSequence()\n\n####\n\ndef __exceptionResponse(e):\n\treturn json.dumps({'error' : str(type(e)) + ' ' + str(e)})\n\n####\n\ndef __startWebServer():\n\trun(server=\"tornado\", host=config.my_host, port=config.my_port, quiet=False, reloader=False)\n\ndef main(argv):\n\tlogger.debug('starting tester ...')\n\t__startWebServer()\n\n####\n\nif __name__ == \"__main__\":\n\tlogging.config.fileConfig(\"conf/logger.conf\", disable_existing_loggers=False)\n\tmain(sys.argv[1:])\n","sub_path":"startTester.py","file_name":"startTester.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"391715890","text":"# Copyright(C) 2013 Huangtao\r\n# Project: Operations Management Platform\r\n# Module: seal-card password export\r\n# Author: 黄涛\r\n# Created: 2013-6-29\r\n\r\nfrom .basefrm import TransFrame\r\nclass SigExport(TransFrame):\r\n\r\n sql=\"select CardNo,Pwd from SigCard where CardNo\"\\\r\n \" between %s and %s order by CardNo\"\r\n \r\n initpath='D:/huangtao/Documents/工作平台/业务报表/防伪系统文件/下发/'\r\n \r\n def init(self):\r\n self['Encrypt']='1'\r\n self.branch_list['values']=\" \".join(\\\r\n self.query_list('select BranchName from branch '\\\r\n 'where Level in(1,2) order by brorder'))\r\n \r\n def process(self,cur):\r\n filename=self.initpath+'%s-(%s-%s).dat'%(self.fh,self.b,self.e)\r\n with open(filename,'w') as f: \r\n f.write('\\n\\n')\r\n if self['Encrypt']=='1':\r\n for CardNo,Pwd in cur:\r\n f.write(\" %s= %s=\\n\"\\\r\n %(encrypt(CardNo),encrypt(Pwd)))\r\n else:\r\n for CardNo,Pwd in cur:\r\n f.write(\"%s%s\\n\"%(CardNo,Pwd))\r\n 
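# NOTE: encrypt() used above is assumed to be defined elsewhere in this module or\r\n # project; it is neither defined nor imported in the code visible here. A purely\r\n # hypothetical stand-in for local testing (not the real cipher) could be:\r\n # encrypt = lambda s: ''.join(chr(ord(c) ^ 0x5A) for c in str(s))\r\n 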
self.aff_rows=cur.rowcount\r\n \r\n def submit(self):\r\n self.b=self['beginno'].zfill(8)\r\n self.e=self['endno'].zfill(8)\r\n self.fh=self['branch']\r\n if self.e\"] = 8310575403 # count of the start token \n\n self.word2_dict = {} # transition probabilities, 2-gram\n self.word2_dict_count = {} # raw counts, 2-gram\n\n\n self.gmax_word_length = 0\n self.all_freq = 0 # total frequency of all 1-gram words\n\n # Estimate the probability of unseen words, following the method in Beautiful Data\n def get_unkonw_word_prob(self, word):\n return math.log(10./(self.all_freq*10**len(word)))\n\n # Probability of a fragment\n def get_word_prob(self, word):\n if word in self.word1_dict: # if the dictionary contains this word\n prob = self.word1_dict[word]\n else:\n prob = self.get_unkonw_word_prob(word)\n return prob\n\n\n # Transition probability between two words\n def get_word_trans_prob(self, first_word, second_word):\n trans_word = first_word + \" \" + second_word\n #print trans_word\n if trans_word in self.word2_dict_count:\n trans_prob = \\\n math.log(self.word2_dict_count[trans_word]/self.word1_dict_count[first_word])\n else:\n trans_prob = self.get_word_prob(second_word)\n return trans_prob\n\n # Find the best predecessor node for a given node\n # by enumerating all possible predecessor fragments\n def get_best_pre_node(self, sequence, node, node_state_list):\n # If node is smaller than the max word length, cap the fragment length at node\n max_seg_length = min([node, self.gmax_word_length])\n pre_node_list = [] # list of predecessor candidates\n # Enumerate all predecessor fragments and record accumulated probabilities\n for segment_length in range(1,max_seg_length+1):\n segment_start_node = node-segment_length\n segment = sequence[segment_start_node:node] # extract the fragment\n\n pre_node = segment_start_node # taking this fragment implies this predecessor node\n\n if pre_node == 0:\n # If the fragment starts at the beginning of the sequence,\n # the probability is that of transitioning from the start token\n #segment_prob = self.get_word_prob(segment)\n segment_prob = \\\n self.get_word_trans_prob(\"<s>\", segment)\n else: # otherwise, use the bigram probability\n # word immediately before the predecessor fragment\n pre_pre_node = node_state_list[pre_node][\"pre_node\"]\n pre_pre_word = sequence[pre_pre_node:pre_node]\n segment_prob = \\\n self.get_word_trans_prob(pre_pre_word, segment)\n\n pre_node_prob_sum = node_state_list[pre_node][\"prob_sum\"] # accumulated probability at the predecessor node\n\n # candidate accumulated probability for the current node\n candidate_prob_sum = pre_node_prob_sum + segment_prob\n\n pre_node_list.append((pre_node, candidate_prob_sum))\n\n # pick the candidate with the highest accumulated probability\n (best_pre_node, best_prob_sum) = \\\n max(pre_node_list,key=lambda d:d[1])\n return (best_pre_node, best_prob_sum)\n\n # Maximum-probability segmentation\n def mp_seg(self, sequence):\n sequence = sequence.strip()\n\n # Initialization\n node_state_list = [] # best predecessor per node; the index is the position\n # the initial node, i.e. node 0\n ini_state = {}\n ini_state[\"pre_node\"] = -1 # previous node\n ini_state[\"prob_sum\"] = 0 # accumulated probability so far\n node_state_list.append( ini_state )\n # The string probability is a bigram probability\n #P(a b c) = P(a|<s>)P(b|a)P(c|b)\n\n # Find the best predecessor for each node in turn\n for node in range(1,len(sequence) + 1):\n # find the best predecessor and record the current best accumulated probability\n (best_pre_node, best_prob_sum) = \\\n self.get_best_pre_node(sequence, node, node_state_list)\n\n # append to the list\n cur_node = {}\n cur_node[\"pre_node\"] = best_pre_node\n cur_node[\"prob_sum\"] = best_prob_sum\n node_state_list.append(cur_node)\n #print \"cur node list\",node_state_list\n\n # step 2, recover the best path, from back to front\n best_path = []\n node = len(sequence) # the last node\n best_path.append(node)\n while True:\n pre_node = node_state_list[node][\"pre_node\"]\n if pre_node == -1:\n break\n node = pre_node\n best_path.append(node)\n best_path.reverse()\n\n # step 3, build the segmentation\n word_list = []\n for i in range(len(best_path)-1):\n left = best_path[i]\n right = best_path[i + 1]\n word = sequence[left:right]\n word_list.append(word)\n\n seg_sequence = DELIMITER.join(word_list)\n return seg_sequence\n\n # Load the dictionaries, format: word\\tcount\n def initial_dict(self, gram1_file, gram2_file):\n # read the 1-gram file\n dict_file = open(gram1_file, \"r\")\n for line in dict_file:\n sequence = line.strip()\n key = 
sequence.split('\\t')[0]\n value = float(sequence.split('\\t')[1])\n self.word1_dict_count[key] = value\n # compute frequencies\n self.all_freq = sum(self.word1_dict_count.values()) # total count of all words\n self.gmax_word_length = max(len(key) for key in self.word1_dict_count.keys())\n self.gmax_word_length = 20\n self.all_freq = 1024908267229.0\n # compute the 1-gram word probabilities\n for key in self.word1_dict_count:\n self.word1_dict[key] = math.log(self.word1_dict_count[key]/self.all_freq)\n\n # read the 2-gram file and compute the transition probabilities at the same time\n dict_file = open(gram2_file, \"r\")\n for line in dict_file:\n sequence = line.strip()\n key = sequence.split('\\t')[0]\n value = float(sequence.split('\\t')[1])\n first_word = key.split(\" \")[0]\n second_word = key.split(\" \")[1]\n self.word2_dict_count[key] = float(value)\n if first_word in self.word1_dict_count:\n self.word2_dict[key] = \\\n math.log(value/self.word1_dict_count[first_word]) # natural log\n else:\n self.word2_dict[key] = self.word1_dict[second_word]\n#test\nif __name__=='__main__':\n myseg = DNASegment()\n myseg.initial_dict(\"./words_counter.txt\",\"./words_counter_2.txt\")\n with open('./mark_no_same.txt', mode = 'r', encoding = 'utf-8') as f:\n for line in f:\n sequence = line\n seg_sequence = myseg.mp_seg(sequence)\n print(\"original sequence: \" + sequence + \"segment result: \" + seg_sequence + '\\n')\n","sub_path":"Python/Tools/ngram/ngram_segment.py","file_name":"ngram_segment.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"88256839","text":"##### Program #2\r\n\r\n# Import the modules that will let us process the data\r\n\r\n# matplotlib to draw the graphs\r\nimport matplotlib.pyplot as pyplot\r\n# csv to read the data files\r\nimport csv\r\n\r\n###############################################################################\"\r\n# Data loading function\r\n# Description: loads a file with the following format:\r\n# - the first line is the header line\r\n# - the first column holds the x-axis data\r\n# input param : file : string : name of the file to load\r\n# output param : dictionary containing the data\r\n# - key: 0 to n-1, n being the number of columns in the file\r\n# - value: list containing the column data;\r\n# the first element of the list holds the column header \r\ndef load_file(fSource) :\r\n dicData = {} # returned dictionary\r\n for strLine in open(fSource,\"r\").readlines(): # loop over the lines of the file \r\n lstLine = strLine.rstrip('\\n').split(';')\r\n for i in range(0,len(lstLine)) : # loop over the columns of the file\r\n try : \r\n dicData[i].append(float(lstLine[i]))\r\n except : # initialize the first element of the list\r\n dicData[i] = [lstLine[i]] # for the first value, the associated type is not known \r\n return dicData\r\n\r\n###############################################################################\"\r\n# Graph display function\r\n# input params : intGraph : integer : graph reference\r\n# dicData : dictionary : dictionary of reference data\r\n# dicMesure : dictionary : dictionary of measured data\r\n# strLab_y : string : y-axis label\r\n# strLab_leg : string : legend label\r\n# strLoc_leg : string : legend position\r\ndef graph(intGraph,dicData,dicMesure,strLab_y,strLab_leg,strLoc_leg) :\r\n pyplot.subplot(1, 3, intGraph) # graph reference for what follows\r\n [pyplot.plot(dicData[0][1:],dicData[i][1:],label=dicData[i][0]+ \" \" + strLab_leg ) for i in range(1,len(dicData))]\r\n 
pyplot.legend( loc = strLoc_leg)\r\n pyplot.scatter(dicMesure[0][1:],dicMesure[intGraph][1:])\r\n pyplot.xlabel('Age en mois') \r\n pyplot.ylabel(strLab_y)\r\n pyplot.grid(True)\r\n\r\n###############################################################################\"\r\n# Start\r\n# \r\n\r\n# load the \"light\" data\r\n# Dictionary of growth norms\r\n# Level 1:\r\n# Key : type (weight, height)\r\n# Data : \r\n# List :\r\n# 1 : graph number\r\n# 2 : dictionary\r\n# Key : gender \r\n# Data : file contents\r\n# 3 : y-axis label\r\n# 4 : legend label\r\n# 5 : legend position \r\ndicNorme ={'W':[1,{'g':load_file('poids-age-garcon-0-60-light.csv'),'f':load_file('poids-age-fille-0-60-light.csv')},'Poids en kg','poids','upper left'],'T':[2,{'g': load_file('taille-age-garcon-0-60-light.csv'),'f':load_file('taille-age-fille-0-60-light.csv')},'Taille en cm','taille','upper left'],'S':[3,{'g': load_file('perim-cra-age-garcon-0-60-light.csv'),'f':load_file('perim-cra-age-fille-0-60-light.csv') },'Périmètre en cm','périmètre','lower right']} \r\n\r\n# load the measurements\r\ndicMesure = load_file('mesures.csv')\r\n\r\n# Gender: input / validation, g or f \r\nstrGenre = ''\r\nwhile True:\r\n strGenre = str(input (\"Entrez le genre de votre nourrisson ('g' pour garçon, 'f' pour fille), ctr-d pour quitter : \"))\r\n strGenre = strGenre.lower() # lowercase\r\n if strGenre == 'g' or strGenre == 'f' : \r\n break\r\n\r\n# Display the graphs\r\n[graph(dicNorme[Mykey][0],dicNorme[Mykey][1][strGenre],dicMesure,dicNorme[Mykey][2],dicNorme[Mykey][3],dicNorme[Mykey][4]) for Mykey in dicNorme.keys()]\r\npyplot.show()","sub_path":"nourrisson-step2.py","file_name":"nourrisson-step2.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"449857962","text":"from gtts import gTTS\nimport os\n\nprint(\"\\nINPUT THE NUMBER OF TEXT FILES THAT YOU WANT TO CONVERT INTO AUDIO: \",end=\"\")\n\nn=int(input())\n\nfor i in range(0,n):\n\n print(\"\\nGIVE THE TEXT FILE NAME(.txt format): \",end=\"\")\n txt=input() \n\n f=open(txt)\n x=f.read()\n\n language='en'\n\n audio=gTTS(text=x,lang=language)\n\n print(\"\\nGIVE THE AUDIO FILE NAME IN WHICH OUTPUT WILL BE SAVED(.wav or .mp3 format): \",end=\"\")\n aud=input()\n\n audio.save(aud)\n os.system(aud)\n","sub_path":"Text to Speech/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"621226883","text":"#!/usr/bin/python\n\n# Import necessary modules\nimport os\nimport csv\nimport datetime\n\n# Gather/manipulate date information for csv naming/decisions\ntimestamp = datetime.datetime.now()\ntimestamp_string = str(timestamp) # Make string to use split()\ndate, t = timestamp_string.split(' ') # Separate into date and time\nyear, month, day = date.split('-') # Split up elements of date\nmon_yr = month + '-' + year # Recombine month/year\n\n\ndef main():\n logfile = ('/home/pi/Documents/Speedtest_Logger/Speedtest_Logfiles/'\n + mon_yr + '.csv')\n outfile = open(logfile, 'a', newline='') # Open file\n writer = csv.writer(outfile) # Create new csv writer\n print('Retrieving speed test data...')\n ping, download, upload = get_speedtest_data() # Store data\n writer.writerow([date, t, ping, download, upload]) # Write data\n outfile.close()\n\n\ndef get_speedtest_data():\n '''New function to pipe in data from the speedtest terminal function'''\n speedtest_output = os.popen('speedtest-cli --simple') # Call terminal command\n\n # Set variables 
to have no data\n ping = download = upload = 0\n\n for line in speedtest_output: # Loop through lines in speedtest\n label, value, unit = line.split(' ') # Split line into three\n\n # Store values in correct variable based on the label in output\n if 'Ping' in label:\n ping = float(value)\n elif 'Download' in label:\n download = float(value)\n elif 'Upload' in label:\n upload = float(value)\n # Return all values IF all values were parsed\n if all((ping, download, upload)):\n print('Data logged successfully!')\n print('Ping: ' + str(ping) + ' ms')\n print('Download: ' + str(download) + ' Mbps')\n print('Upload: ' + str(upload) + ' Mbps')\n return ping, download, upload\n else:\n print('Values not logged.')\n\n# Runs main() function. Used to make python programs similar to C/C++.\nif __name__ == '__main__':\n main()\n","sub_path":"speedtest_logger.py","file_name":"speedtest_logger.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"44608566","text":"\"\"\" Compiled: 2020-09-18 10:38:50 \"\"\"\n\n#__src_file__ = \"extensions/bdp_benchmark_test/./etc/FBDPBenchmarkTest_Perform.py\"\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FBDPBenchmarkTest_Perform - Module which performs BDPBenchmarkTest.\n\n Requirements:\n \n BDP benchmark test creates instruments, prices, counterparties,\n acquirers, and trades, and measures the time taken. It can also clone\n the existing trades and measure performance. All the test data\n can be cleaned after the tests complete.\n\nDESCRIPTION\n This module performs the BDP benchmark test based on the\n parameters passed from the script FBDPBenchmarkTest.\n\n----------------------------------------------------------------------------\"\"\"\n\nimport sys\nimport random\nimport time\nfrom contextlib import contextmanager\nimport acm\nimport ael\nimport FBDPCommon\nfrom FBDPCurrentContext import Summary\nfrom FBDPCurrentContext import Logme\n\nINSTRUMENT_NAME_PREFIX = 'BDPBenchmark_INS_'\nSTOCK_NAME_PREFIX = 'BDPBenchmark_INS_STOCK_'\nPORTFOLIO_NAME_PREFIX = 'BDPBenchmarkPort_'\nCOUNTERPARTY_NAME_PREFIX = 'BDPBenchmarkCP_'\nACQUIRER_NAME_PREFIX = 'BDPBenchmarkACQ_'\nNUMBER_OF_PORTFOLIOS = 4\nNUMBER_OF_COUNTERPARTIES = 10\nNUMBER_OF_ACQUIRERS = 10\nPRICEMARKET = 'SPOT'\n\n\ndef perform_test(execParam):\n e = BenchmarkTest(execParam)\n e.perform()\n Summary().log(execParam)\n Logme()(None, 'FINISH')\n\n\ndef getChangeFactor(minFact=0.8, maxFact=1.25):\n return round(float(random.uniform(minFact, maxFact)), 2)\n\n\ndef createCounterParty(name, *argv):\n cpty = acm.FCounterParty()\n cpty.Name(name)\n cpty.Commit()\n Summary().ok(cpty, Summary().CREATE, cpty.Oid())\n return cpty\n\n\ndef createPhysicalPortfolio(name, *argv):\n prf = acm.FPhysicalPortfolio()\n prf.Name(name)\n prf.AssignInfo(name)\n prf.Currency(acm.FCurrency['EUR'])\n prf.Commit()\n Summary().ok(prf, Summary().CREATE, prf.Oid())\n return prf\n\n\ndef createInternalDepartment(name, *argv):\n acq = acm.FInternalDepartment()\n acq.Name(name)\n acq.Commit()\n Summary().ok(acq, Summary().CREATE, acq.Oid())\n return acq\n\n\ndef getEntities(namePrefix, entityName):\n return acm.GetClass('F' + entityName).Select(\n 'name like %s' % (namePrefix + '*'))\n\n\ndef getEntityNames(namePrefix, entityName):\n return [e.Name() for e in\n getEntities(namePrefix, entityName)]\n \n\ndef getEntityStartIndex(namePrefix, entityName):\n return getEntities(namePrefix, entityName).Size()\n\n\ndef createEntities(namePrefix, entityName, 
numberOfEntities, *argv):\n eList = []\n startIndex = getEntityStartIndex(namePrefix, entityName)\n for i in range(numberOfEntities):\n uName = namePrefix + str(i + 1 + startIndex)\n cpty = getattr(sys.modules[__name__],\n \"create%s\" % entityName)(uName, argv)\n eList.append(cpty)\n return eList\n\n\ndef DeleteEntities(namePrefix, entityName):\n for name in getEntityNames(namePrefix, entityName):\n Logme()(name, \"DEBUG\")\n Summary().ok(acm.GetClass('F' + entityName)[name], Summary().DELETE, name)\n acm.GetClass('F' + entityName)[name].Delete()\n\n\ndef getRandomDate(lastDate, firstDate):\n days = acm.Time.DateDifference(lastDate, firstDate)\n if days < 0:\n raise RuntimeError('lastDate has to be larger than firstDate')\n offset = random.randint(0, days)\n return acm.Time.DateAddDelta(firstDate, 0, 0, offset)\n\n\n@contextmanager \ndef measureTime(title):\n t1 = time.perf_counter()\n yield\n t2 = time.perf_counter()\n Logme()('%s: %0.2f seconds elapsed' % (title, t2 - t1))\n\nclass BenchmarkTest(object):\n \n def createInstruments(self):\n instruments = []\n self.startIndex = getEntityStartIndex(STOCK_NAME_PREFIX, 'Instrument')\n\n for i in range(self.numberOfInstruments):\n insName = STOCK_NAME_PREFIX\\\n + str(i + 1 + self.startIndex)\n ins = Stock(insName)\n instruments.append(ins)\n return instruments\n\n def readArguments(self, execParam):\n self.lastTradeDate = FBDPCommon.toDate(\n execParam.get('lastTradeDate', 'Today'))\n self.firstTradeDate = FBDPCommon.toDate(\n execParam.get('firstTradeDate', '-6m'))\n\n self.numberOfInstruments = execParam.get('numberOfInstruments', 1)\n self.numberOfTradesPerIns = execParam.get('numberOfTradesPerIns', 1000) \n self.clone = execParam.get('simulate', 0)\n self.createNew = execParam.get('createNew', 0)\n self.cleanUp = execParam.get('cleanUp', 0)\n self.simulateIns = execParam.get('Instruments', None)\n self.prfs = execParam.get('TradingPortfolios', None)\n self.numberOfClone = execParam.get('numberOfCloneTrades', 10)\n \n \n def __init__(self, execParam):\n self.readArguments(execParam)\n\n def doCleanUp(self):\n\n for p in getEntities(PORTFOLIO_NAME_PREFIX, 'PhysicalPortfolio'):\n tOids = [t.Oid() for t in p.Trades()]\n for oid in tOids:\n Logme()(oid, \"DEBUG\")\n Summary().ok(acm.FTrade[oid], Summary().DELETE, oid)\n acm.FTrade[oid].Delete()\n\n DeleteEntities(PORTFOLIO_NAME_PREFIX, 'PhysicalPortfolio')\n DeleteEntities(COUNTERPARTY_NAME_PREFIX, 'CounterParty')\n DeleteEntities(ACQUIRER_NAME_PREFIX, 'InternalDepartment')\n\n names = getEntityNames(INSTRUMENT_NAME_PREFIX, 'Instrument')\n for n in names:\n Logme()(n, \"DEBUG\")\n query = ('instrument={0} and currency={1}'.format(\n acm.FInstrument[n].Oid(), acm.FInstrument[n].Currency().Oid()))\n prices = acm.FPrice.Select(query)\n pOids = [p.Oid() for p in prices]\n for p in pOids:\n Logme()(p, \"DEBUG\")\n Summary().ok(acm.FPrice[p], Summary().DELETE, p)\n acm.FPrice[p].Delete()\n Summary().ok(acm.FInstrument[n], Summary().DELETE, n)\n acm.FInstrument[n].Delete()\n \n\n def perform(self):\n if self.cleanUp:\n Logme()('Clean up test data.....', \"DEBUG\")\n self.doCleanUp()\n elif self.createNew:\n Logme()('Create new data.....', \"DEBUG\")\n with measureTime('Portfolios creation time'):\n self.portfolioNames = createEntities(\n PORTFOLIO_NAME_PREFIX, 'PhysicalPortfolio', NUMBER_OF_PORTFOLIOS)\n with measureTime('Counterparties creation time'):\n self.counterparties = createEntities(\n COUNTERPARTY_NAME_PREFIX, 'CounterParty', NUMBER_OF_COUNTERPARTIES)\n with measureTime('Acquirers creation time'):\n 
self.acquirers = createEntities(\n ACQUIRER_NAME_PREFIX, 'InternalDepartment', NUMBER_OF_ACQUIRERS)\n \n with measureTime('Instruments creation time'):\n self.instruments = self.createInstruments() \n\n with measureTime('Prices creation time'):\n for ins in self.instruments:\n ins.createPrice(self.lastTradeDate)\n with measureTime('Trades creation time'):\n for ins in self.instruments:\n ins.createTrades(self.numberOfTradesPerIns,\n self.lastTradeDate, self.firstTradeDate,\n self.portfolioNames, self.counterparties,\n self.acquirers)\n elif self.clone:\n Logme()('Cloning existing data.....', \"DEBUG\")\n \n with measureTime('Instruments clone time'):\n for ins in self.simulateIns:\n insClone = ins.Clone()\n insClone.Name(INSTRUMENT_NAME_PREFIX + ins.Name())\n insClone.Commit()\n with measureTime('Trades clone time'):\n for ins in self.simulateIns:\n count = 0\n for t in ins.Trades():\n tc = t.Clone()\n tc.Instrument(acm.FInstrument[\n INSTRUMENT_NAME_PREFIX + ins.Name()])\n tc.Status('Simulated')\n tc.Commit()\n tc.ConnectedTrade(tc)\n tc.Commit()\n count = count + 1\n if count >= self.numberOfClone:\n break\n\n\nclass InstrumentBase(object):\n def __init__(self, ins_name):\n self.ins.Name(ins_name)\n \n def createPrice(self, price_date, price, price_market=PRICEMARKET):\n ael_date = ael.date(price_date)\n \n ael_market = ael.Party[price_market]\n ael_ins = ael.Instrument[self.ins.Name()]\n try:\n newPrice = ael.Price.new()\n newPrice.insaddr = ael_ins\n newPrice.curr = ael_ins.curr\n newPrice.day = ael_date\n newPrice.ptynbr = ael_market\n newPrice.bid = price\n newPrice.ask = price\n newPrice.last = price\n newPrice.settle = price\n newPrice.commit()\n Summary().ok(newPrice, Summary().CREATE, newPrice.prinbr)\n return price\n\n except Exception as msg:\n if (ael_date == ael.date_today()) and (ael_market.type == 'Market'):\n for p in ael_ins.prices():\n if (ael_market == p.ptynbr):\n thePrice = p\n break\n else:\n for p in ael_ins.historical_prices():\n if (ael_date == p.day) and (ael_market == p.ptynbr):\n thePrice = p\n break\n updPrice = thePrice.clone()\n updPrice.day = ael_date\n updPrice.bid = price\n updPrice.ask = price\n updPrice.last = price\n updPrice.settle = price\n updPrice.commit()\n return price\n\n\nclass Stock(InstrumentBase):\n def __init__(self, ins_name):\n self.ins = acm.FStock()\n super(Stock, self).__init__(ins_name)\n self.ins.Commit()\n Summary().ok(self.ins, Summary().CREATE, self.ins.Oid())\n\n def createPrice(self, price_date, price=None):\n if not price:\n price = 100 * getChangeFactor()\n else:\n price = float(price)\n return super(Stock, self).createPrice(price_date, price)\n \n def createTrades(self, numberOfTrades, lastDate, firstDate,\n portfolioList, cpList, acList):\n\n numberOfPortfolios = len(portfolioList)\n numberOfCP = len(cpList)\n numberOfAcq = len(acList)\n for i in range(numberOfTrades):\n acm.BeginTransaction()\n trd=acm.FTrade()\n trd.Currency(self.ins.Currency())\n trd_time = getRandomDate(lastDate, firstDate)\n trd.TradeTime(trd_time)\n val_day = FBDPCommon.businessDaySpot(self.ins, trd_time)\n trd.ValueDay(val_day)\n trd.AcquireDay(val_day)\n trd.Quantity(100)\n price = 100 * getChangeFactor()\n trd.Price(price)\n trd.Portfolio(portfolioList[random.randint(0, numberOfPortfolios - 1)])\n trd.Counterparty(cpList[random.randint(0, numberOfCP - 1)])\n trd.Acquirer(acList[random.randint(0, numberOfAcq - 1)])\n trd.Instrument(self.ins)\n trd.UpdatePremium(True)\n trd.Status('Simulated')\n trd.Commit()\n Summary().ok(trd, Summary().CREATE, 
trd.Oid())\n acm.CommitTransaction()\n\n","sub_path":"Extensions/BDP Benchmark Test/FPythonCode/FBDPBenchmarkTest_Perform.py","file_name":"FBDPBenchmarkTest_Perform.py","file_ext":"py","file_size_in_byte":11696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"189905593","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport mpl_toolkits.mplot3d.axes3d as p3\r\nfrom matplotlib import animation\r\nimport copy\r\nimport math\r\n\r\nclass integration_and_plot:\r\n \"\"\"\r\n This class will contains two functions. First integrates linear accelation data. \r\n Second function plots the integrated data.\r\n Input is the list of linear acceleration data with timestamps from\r\n data_processor_pandas.py .\r\n \"\"\"\r\n \r\n def __init__(self, AccandTime):\r\n \"\"\"\r\n \"\"\"\r\n self.inputData = AccandTime\r\n \r\n def integration(self): \r\n accelerationVector = []\r\n timesOfAcc = []\r\n velocityVector = []\r\n positionVector = []\r\n accelerationMean = np.array([-0.00089, 0.00118, 0.05586])\r\n #The mean of the static acceleration is subtracted from the acceleration arrays, and all is rounded to 5dp\r\n for i in range(len(self.inputData)):\r\n accelerationVector.append( np.around( np.array(self.inputData[i][1] - accelerationMean), decimals = 5) )\r\n timesOfAcc.append(self.inputData[i][0])\r\n \r\n # low pass filter to remove noise from the LiDAR sensor\r\n\r\n for i in range(len(self.inputData)):\r\n if abs(accelerationVector[i][0]) < 1.4:\r\n accelerationVector[i][0] = 0\r\n if abs(accelerationVector[i][1]) < 1.4:\r\n accelerationVector[i][1] = 0\r\n else:\r\n pass\r\n\r\n\r\n #integration of acceleration to velocity\r\n tempVelocity = np.zeros(3)\r\n for i in range(len(accelerationVector) - 1):\r\n tempVelocity = tempVelocity + (accelerationVector[i+1] + accelerationVector[i])*(timesOfAcc[i+1] - timesOfAcc[i])*0.5 \r\n velocityVector.append(tempVelocity)\r\n\r\n #need to pop the first timestamp so that lists match,\r\n #as we are using trapezium rule to integrate and with a short enough period between values\r\n #this has a negigible affect on precision\r\n timesOfVel = copy.deepcopy(timesOfAcc)\r\n timesOfVel.pop(0)\r\n\r\n #integration of velocity to position\r\n tempPosition = np.zeros(3)\r\n for i in range(len(velocityVector) - 1):\r\n tempPosition = tempPosition + (velocityVector[i+1] + velocityVector[i])*(timesOfVel[i+1] - timesOfVel[i])*0.5\r\n positionVector.append(tempPosition)\r\n\r\n timesOfPos = copy.deepcopy(timesOfVel)\r\n timesOfPos.pop(0)\r\n\r\n #provide the object new parameters so that the plotting method can use them\r\n self.timesOfPos = timesOfPos\r\n self.timesOfVel = timesOfVel\r\n self.timesOfAcc = timesOfAcc\r\n self.accelerationVector = accelerationVector\r\n self.velocityVector = velocityVector\r\n self.positionVector = positionVector\r\n\r\n def plotting(self, fileNameForSaving):\r\n # source for increasing the font size, default font size is 10: https://stackoverflow.com/a/3900167\r\n plt.rcParams.update({'font.size': 11})\r\n\r\n currentAccX = []\r\n currentAccY = []\r\n currentAccZ = []\r\n \r\n for i in range(len(self.accelerationVector)):\r\n currentAccX.append(self.accelerationVector[i][0])\r\n currentAccY.append(self.accelerationVector[i][1])\r\n currentAccZ.append(self.accelerationVector[i][2])\r\n\r\n #This line of code, if activated for a stationary data file, this will print the mean acceleration of the file. 
\r\n #This can be used to find the bias acceleration for the inertial measurement unit.\r\n #print('The mean of the acceleration data is (in order of X,Y,Z):\\n%f\\n%f\\n%f'%(np.mean(currentAccX), np.mean(currentAccY), np.mean(currentAccZ) ))\r\n \r\n plt.plot(self.timesOfAcc, currentAccZ, '-', label = 'Z Acceleration')\r\n plt.plot(self.timesOfAcc, currentAccY, '-', label = 'Y Acceleration')\r\n plt.plot(self.timesOfAcc, currentAccX, '-', label = 'X Acceleration')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Linear Acceleration (ms$^{-2}$)')\r\n plt.minorticks_on()\r\n # plt.title('Acceleration over time of NGIMU')\r\n plt.legend(loc = 2)\r\n plt.savefig(\"%s_acceleration.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n \r\n currentVelX = []\r\n currentVelY = []\r\n currentVelZ = []\r\n \r\n for i in range(len(self.velocityVector)):\r\n currentVelX.append(self.velocityVector[i][0])\r\n currentVelY.append(self.velocityVector[i][1])\r\n currentVelZ.append(self.velocityVector[i][2])\r\n \r\n plt.plot(self.timesOfVel, currentVelX, '-', label = 'X Velocity')\r\n plt.plot(self.timesOfVel, currentVelY, '-', label = 'Y Velocity')\r\n plt.plot(self.timesOfVel, currentVelZ, '-', label = 'Z Velocity')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Velocity (ms$^{-1}$)')\r\n # plt.title('Velocity over time of NGIMU')\r\n plt.legend(loc = 2)\r\n plt.savefig(\"%s_velocity.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n \r\n currentPosX = []\r\n currentPosY = []\r\n currentPosZ = []\r\n \r\n for i in range(len(self.positionVector)):\r\n currentPosX.append(self.positionVector[i][0])\r\n currentPosY.append(-1 * self.positionVector[i][1]) # there is a minus one here because it seems the NGIMU was backwards the whole time\r\n currentPosZ.append(self.positionVector[i][2])\r\n \r\n plt.plot(self.timesOfPos, currentPosX, '-', label = 'X Position')\r\n plt.plot(self.timesOfPos, currentPosY, '-', label = 'Y Position')\r\n plt.plot(self.timesOfPos, currentPosZ, '-', label = 'Z Position')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Position (m)')\r\n #plt.title('Position over time of NGIMU')\r\n plt.legend(loc = 2)\r\n plt.savefig(\"%s_position.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n \r\n # the following section of code will generate a 2d plot of position in x vs position in y.\r\n # It also contains a dummy plot, which will instead return the final magnitude of position \r\n # of the sensor (since we won't be showing corner test diagrams). If we want to add corner\r\n # test diagrams, these won't really work, maybe instead calculate the difference in displacement?\r\n plt.plot(currentPosX, currentPosY, '-')\r\n # dummy plot\r\n plt.plot([], [], ' ', label=\"Vehicle moved %s m overall\"%(np.around(\r\n np.sqrt(currentPosX[-1] * currentPosX[-1] + currentPosY[-1] * currentPosY[-1])\r\n , decimals=2)))\r\n plt.legend(frameon=False)\r\n plt.xlabel('Position over time in x (m)')\r\n plt.ylabel('Position over time in y (m)')\r\n plt.savefig(\"%s_2d_position.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n\r\n \"\"\"at this point, we have the new 3D animation code.\r\n This has been cobbled together from various sources, but\r\n mainly from matplotlibanimator.py, after using matplotlibanimator_3.py \r\n to understand the core concepts.\r\n \"\"\"\r\n \"\"\"\r\n fig = plt.figure()\r\n ax = p3.Axes3D(fig)\r\n\r\n \r\n This section of code will be used to try and reduce the animation and playing time. 
For example, the 30s of real data played out as closer to \r\n 2mins 30s of gif. As a result, we will try selecting fewer of the data points, 1 in 10, 1 in 5 for example, and pass this shortened list to\r\n the animation functions, like gen and FuncAnimation.\r\n \r\n #remainder/modulo operator: https://stackoverflow.com/a/5584604\r\n currentPosXShort = [currentPosX[0]]\r\n currentPosYShort = [currentPosY[0]]\r\n currentPosZShort = [currentPosZ[0]]\r\n timesOfPosShort = [self.timesOfPos[0]]\r\n \r\n for i in range(len(self.timesOfPos)):\r\n if i % 5 == 0:\r\n currentPosXShort.append(currentPosX[i])\r\n currentPosYShort.append(currentPosY[i])\r\n currentPosZShort.append(currentPosZ[i])\r\n timesOfPosShort.append(self.timesOfPos[i])\r\n\r\n def gen():\r\n i = 0\r\n while i < len(timesOfPosShort):\r\n yield np.array([currentPosXShort[i], currentPosYShort[i], currentPosZShort[i]])\r\n i += 1\r\n \r\n def update(num, data, line):\r\n line.set_data(data[:2, :num])\r\n line.set_3d_properties(data[2, :num])\r\n \r\n data = np.array(list(gen())).T\r\n line, = ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1]) #nope, no error here.\r\n \r\n #Setting the axes properties\r\n ax.set_xlim3d([-2, 2])\r\n ax.set_xlabel('X Position (m)')\r\n \r\n ax.set_ylim3d([-2, 2])\r\n ax.set_ylabel('Y Position (m)')\r\n \r\n ax.set_zlim3d([-2, 2])\r\n ax.set_zlabel('Z Position (m)')\r\n \r\n ani = animation.FuncAnimation(fig, update, len(timesOfPosShort), fargs=(data, line), interval=1, blit=False)\r\n #ani.save('matplot003.mp4', writer='ffmpeg')\r\n plt.show()\r\n \r\n #source: https://stackoverflow.com/a/38121759\r\n\r\n #23/11 more animation guidance: https://stackoverflow.com/a/28077104\r\n #change of animation writer to ffmpeg, can only be done on machines that can install software: https://stackoverflow.com/a/31193532\r\n\r\n #and in the comments of that:\r\n \r\n If saving as video instead of .gif then ani.save('test.mp4', writer='ffmpeg', codec='h264') should replace the last line. \r\n If you want to find out which codecs are available then run ffmpeg -codec in the terminal. \r\n Given that you want to use ffmpeg as the writer. \r\n \r\n \"\"\"\r\n \r\n # This code will produce a 2d animation of the position x vs y. 
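\r\n # NOTE: the axis limits below are hard-coded for one particular run; an\r\n # assumed, more general alternative would derive them from the data, e.g.:\r\n # pad = 0.05\r\n # ax = plt.axes(xlim=(min(currentPosX) - pad, max(currentPosX) + pad),\r\n # ylim=(min(currentPosY) - pad, max(currentPosY) + pad))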
\r\n fig = plt.figure()\r\n ax = plt.axes(xlim=(-0.01, 0.3), ylim=(-0.01, 0.15), xlabel= ('x Position over time (m)'), ylabel=('y Position over time (m)'))\r\n\r\n def gen():\r\n i = 0\r\n while i < len(currentPosX):\r\n yield np.array([currentPosX[i], currentPosY[i]])\r\n i += 1\r\n \r\n def update(num, data, line):\r\n line.set_data(data[:2, :num])\r\n \r\n data = np.array(list(gen())).T\r\n line, = ax.plot(data[0, 0:1], data[1, 0:1]) #nope, no error here.\r\n \r\n ani = animation.FuncAnimation(fig, update, len(currentPosX), fargs=(data, line), interval=1, blit=False)\r\n # ani.save('matplot003.gif')\r\n plt.show()\r\n\r\n\r\n\r\n\r\n","sub_path":"NGIMU processing/NGIMU_integration_and_plotting.py","file_name":"NGIMU_integration_and_plotting.py","file_ext":"py","file_size_in_byte":10680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"125097717","text":"class User:\n active_users = 0\n\n def __init__(self, first, last, age):\n self.first = first\n self.last = last\n self.age = age\n User.active_users += 1\n\n @classmethod\n def display_active_users(cls):\n print(f\"There are currently {cls.active_users} active users\")\n\n @classmethod\n def from_string(cls, data_str):\n fst, lst, age = data_str.split()\n return cls(fst, lst, int(age))\n\n def logout(self):\n User.active_users -= 1\n\n def __repr__(self):\n return f\"User {self.first} {self.last} aged {self.age}\"\n\nUser.display_active_users()\njane = User(\"Jane\", \"Doe\", 18)\npaul = User.from_string(\"Paul Rep 19\")\n\nprint(jane)\nprint(paul)\npaul.logout()\nUser.display_active_users()\n\n","sub_path":"py_z_h/oop/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"539854183","text":"import numpy as np \n\ntrain_seq = np.genfromtxt('../data/digits4000_txt/digits4000_trainset.txt').astype(np.uint16) # (2000,2)\ntest_seq = np.genfromtxt('../data/digits4000_txt/digits4000_testset.txt').astype(np.uint16) # (2000,2)\n\n# image and label\ndigits_vec = np.genfromtxt('../data/digits4000_txt/digits4000_digits_vec.txt') # (4000,28,28)\ndigits_vec = digits_vec.reshape(len(digits_vec), 28, 28).astype(np.uint8)\ndigits_labels = np.genfromtxt('../data/digits4000_txt/digits4000_digits_labels.txt').astype(np.uint8) # (4000,)\n\nx_train = digits_vec[train_seq[:,0] - 1]\ny_train = digits_labels[train_seq[:,1] - 1]\n\nx_test = digits_vec[test_seq[:,0] - 1]\ny_test = digits_labels[test_seq[:,1] - 1]\n\n# challenge test image and label\nx_test1 = np.genfromtxt('../data/challenge/cdigits_digits_vec.txt')\nx_test1 = x_test1.reshape(len(x_test1), 28, 28).astype(np.uint8)\ny_test1 = np.genfromtxt('../data/challenge/cdigits_digits_labels.txt').astype(np.uint8)","sub_path":"1-Machine-Learning/Tools/Neural-Network/Image-Classification/Mnist/tensorflow/use_other_data.py","file_name":"use_other_data.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"413896371","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass dataClassCNN:\n def __init__(self, datapath):\n X_train, y_train = load_mnist(datapath, kind='train')\n X_test, y_test = load_mnist(datapath, kind='t10k')\n\n self.numbOfTrainSamples = X_train.shape[0]\n self.numbOfTestSamples = X_test.shape[0]\n\n #reshape to 28x28\n X_train = np.resize(X_train, (self.numbOfTrainSamples, 28, 28))\n X_test = np.resize(X_test, 
(self.numbOfTestSamples, 28, 28))\n\n #add depth channel\n X_train = X_train[:,:,:,np.newaxis]\n X_test = X_test[:, :, :, np.newaxis]\n\n #cast to float32\n X_train = X_train.astype(dtype=np.float32)\n X_test = X_test.astype(dtype=np.float32)\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n\n # plt.figure()\n # plt.imshow(X_train[1,:,:,0])\n # plt.colormaps('gray')\n\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n self.numbOfClasses = 10\n self.numbOfFeatures = [X_train.shape[1], X_train.shape[2], X_train.shape[3]]\n self.label_strings = ['T-shirt / top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n self.testCounter = 0\n self.test_batch_size = 500\n return\n\n def next_training_batch(self, batch_size):\n ind = np.random.randint(self.numbOfTrainSamples, size=batch_size)\n y_onehot = np.zeros((batch_size, self.numbOfClasses))\n y_onehot[np.arange(batch_size), self.y_train[ind]] = 1\n return self.X_train[ind, :,:,:], y_onehot\n\n def get_test_data(self):\n ind = np.linspace(self.testCounter*self.test_batch_size, (self.testCounter+1)*self.test_batch_size-1, num=self.test_batch_size, dtype=np.int32)\n y_onehot = np.zeros((self.test_batch_size, self.numbOfClasses))\n y_onehot[np.arange(self.test_batch_size), self.y_test[ind]] = 1\n self.testCounter = self.testCounter + 1\n if self.testCounter*self.test_batch_size >= self.numbOfTestSamples:\n self.testCounter = 0\n return self.X_test[ind, :,:,:], y_onehot\n\n\ndef load_mnist(path, kind='train'):\n import os\n import gzip\n import numpy as np\n\n \"\"\"Load MNIST data from `path`\"\"\"\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels\n\n","sub_path":"INF5860/convNet_solution/utils/dataClass.py","file_name":"dataClass.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"303768744","text":"import tkinter as tk\nfrom Panels.EditorPanel import *\nfrom Panels.Components.EditTestPanel import *\nfrom Panels.Components.EditGroupPanel import *\nfrom Panels.Helper.constants import *\nfrom Panels.Helper.CustomWidgets import *\n\n# Main UI to hold the EditGroupPanel and EditTestPanel\n# Primary interface for passing specific test groups and tests\n# between the TestPanel and the Edit*Panels\nclass EditorPanel(Panel):\n def __init__(self, root, app):\n Panel.__init__(self, root, app, 'Editor Panel', EDITOR_WIDTH, EDITOR_HEIGHT)\n self._edit_group_panel = EditGroupPanel(self)\n self._edit_test_panel = EditTestPanel(self)\n\n # Execute the given test\n def execute_code(self, test):\n if self._edit_test_panel.update() and \\\n self.app.global_panel.validate_variables():\n self.app.terminal.execute_code(test)\n\n # Toggle/change the group panel\n def toggle_group_panel(self, group):\n if group is self._edit_group_panel.group:\n success = self._edit_group_panel.close()\n if success:\n self.toggle_test_panel(None, all_tests=True)\n else:\n success = self._edit_group_panel.set(group)\n\n if success: 
self._edit_test_panel.close()\n\n # Toggle/change the test panel\n def toggle_test_panel(self, test, all_tests=False):\n if test is self._edit_test_panel.test or all_tests:\n self._edit_test_panel.close()\n else:\n self._edit_test_panel.set(test)\n\n # Save all values currently stored in the UI\n # Returns whether or not the save was successful\n # need to do this separately to not toggle multiple errors\n def save_all(self):\n if not self._edit_test_panel.update():\n return False\n return self._edit_group_panel.update()\n\n # Close the panel\n def close_panel(self):\n if not self._edit_test_panel.close():\n return False\n return self._edit_group_panel.close()","sub_path":"Panels/EditorPanel.py","file_name":"EditorPanel.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"653092183","text":"from django.urls import path\r\nfrom django.contrib.auth import views as auth_views\r\nfrom . import views\r\n\r\napp_name = 'Schedule'\r\n\r\nurlpatterns = [\r\n # /Schedule/\r\n path('', views.index_view.as_view(), name='index'),\r\n \r\n # /Schedule/register\r\n path('register/', views.user_form_view.as_view(), name='register'),\r\n \r\n # /Schedule/71 (SPACES ARE IMPORTANT LOL)\r\n path('/', views.detail_view.as_view(), name='detail'),\r\n \r\n path('apts//', views.detail_view_apt.as_view(), name='detail_apt'),\r\n \r\n \r\n #/Schedule/apt/add/\r\n path('apt/add//', views.apt_create.as_view(),name=\"apt-add\"),\r\n \r\n #/Schedule/apt/#\r\n path('apt/update//', views.apt_update.as_view(), name=\"apt-update\"),\r\n \r\n #/Schedule/apt/#\r\n path('apt//delete/', views.apt_delete.as_view(), name=\"apt-delete\"),\r\n \r\n # /Schedule/login\r\n path('logout/', views.user_logout, name='logout'),\r\n path('login/', auth_views.login, {'template_name': 'Schedule/login.html'}, name='login'),\r\n \r\n #/Schedule/class_list/\r\n path('class_list//', views.user_class_list_detail.as_view(), name='class_list'),\r\n \r\n #===========================================================================\r\n # REST API \r\n #===========================================================================\r\n path('apts/all/', views.ApppointmentList.as_view(), name=\"jsonapt\"),\r\n path('users/all/', views.UserList.as_view(), name=\"jsonuser\"),\r\n path('classes/all/', views.ClassesList.as_view(), name=\"jsonclass\"),\r\n path('class_dates/all/', views.ClassDatesList.as_view(), name=\"jsondate\"),\r\n]","sub_path":"Schedule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"360150334","text":"import time\nimport Adafruit_CharLCD as LCD\nfrom requests import get\nfrom datetime import datetime\nimport socket\n\n# Checking IP addresses\n\ntestIP = \"8.8.8.8\"\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.connect((testIP, 0))\nipaddr = s.getsockname()[0]\neipaddr = get('https://api.ipify.org').text\nmessage = ipaddr + \"\\n\" + eipaddr\n\n# Raspberry Pi pin setup\nlcd_rs = 25\nlcd_en = 24\nlcd_d4 = 23\nlcd_d5 = 17\nlcd_d6 = 18\nlcd_d7 = 22\nlcd_backlight = 2\n\n# Define LCD column and row size for 16x2 LCD.\nlcd_columns = 16\nlcd_rows = 2\n\nlcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)\nlcd.clear()\nlcd.message(message)\n# Wait 5 
seconds\n\n","sub_path":"E-hearing/bootIP/bootIP.py","file_name":"bootIP.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"470192295","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Basic example for a bot that awaits an answer from the user\n# This program is dedicated to the public domain under the CC0 license.\n\nimport logging\nfrom telegram import Emoji, ForceReply, ReplyKeyboardMarkup\nfrom telegram.ext import Updater\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - '\n '%(message)s',\n level=logging.INFO)\n\n# Define the different states a chat can be in\nMENU, AWAIT_CONFIRMATION, AWAIT_INPUT = range(3)\n\n# Python 2 and 3 unicode differences\ntry:\n YES, NO = (Emoji.WHITE_HEAVY_CHECK_MARK.decode('utf-8'),\n Emoji.CROSS_MARK.decode('utf-8'))\nexcept AttributeError:\n YES, NO = (Emoji.THUMBS_UP_SIGN, Emoji.THUMBS_DOWN_SIGN)\n\n# States are saved in a dict that maps chat_id -> state\nstate = dict()\n# Sometimes you need to save data temporarily\ncontext = dict()\n# This dict is used to store the settings value for the chat.\n# Usually, you'd use persistence for this (e.g. sqlite).\nvalues = dict()\n\n\n# Example handler. Will be called on the /set command and on regular messages\ndef set_value(bot, update):\n chat_id = update.message.chat_id\n user_id = update.message.from_user.id\n text = update.message.text\n chat_state = state.get(chat_id, MENU)\n chat_context = context.get(chat_id, None)\n\n # Since the handler will also be called on messages, we need to check if\n # the message is actually a command\n if chat_state == MENU and text[0] == '/':\n state[chat_id] = AWAIT_INPUT # set the state\n context[chat_id] = user_id # save the user id to context\n bot.sendMessage(chat_id,\n text=\"Por favor ingresa tu valor o \"\n \"/cancel para cancelar\",\n reply_markup=ForceReply())\n\n # If we are waiting for input and the right user answered\n elif chat_state == AWAIT_INPUT and chat_context == user_id:\n state[chat_id] = AWAIT_CONFIRMATION\n\n # Save the user id and the answer to context\n context[chat_id] = (user_id, update.message.text)\n reply_markup = ReplyKeyboardMarkup([[YES, NO]], resize_keyboard=True, one_time_keyboard=True)\n bot.sendMessage(chat_id, text=\"Estas seguro?\",\n reply_markup=reply_markup)\n\n # If we are waiting for confirmation and the right user answered\n elif chat_state == AWAIT_CONFIRMATION and chat_context[0] == user_id:\n state[chat_id] = MENU\n context[chat_id] = None\n if text == YES:\n values[chat_id] = chat_context[1]\n bot.sendMessage(chat_id,\n text=\"Valor cambiado a : %s.\" % values[chat_id])\n else:\n bot.sendMessage(chat_id,\n text=\"Valor no cambiado: %s.\"\n % values.get(chat_id, ''))\n\n\n# Handler for the /cancel command.\n# Sets the state back to MENU and clears the context\ndef cancel(bot, update):\n chat_id = update.message.chat_id\n state[chat_id] = MENU\n context[chat_id] = None\n\ndef payments(bot, update):\n chat_id = update.message.chat_id\n state[chat_id] = MENU\n context[chat_id] = None\n reply_markup = ReplyKeyboardMarkup([[\"Deposito\"], [\"Recarga\"], [\"Servicios\"], [\"Consulta\"]], resize_keyboard=True, one_time_keyboard=True)\n bot.sendMessage(chat_id, text=\"Selecciona la transaccion:\",\n reply_markup=reply_markup)\n\n\ndef help(bot, update):\n bot.sendMessage(update.message.chat_id, text=\"Usa /set para configurar\")\n\n\n# Create the Updater and pass it your bot's token.\nupdater = 
Updater(\"202564241:AAGv1eVqFi9goTaNBQL8ZbymLH5xnnoqcHg\")\n\n# The command\nupdater.dispatcher.addTelegramCommandHandler('set', set_value)\n# The answer and confirmation\nupdater.dispatcher.addTelegramMessageHandler(set_value)\nupdater.dispatcher.addTelegramCommandHandler('cancel', cancel)\nupdater.dispatcher.addTelegramCommandHandler('start', help)\nupdater.dispatcher.addTelegramCommandHandler('help', help)\nupdater.dispatcher.addTelegramCommandHandler('payments', payments)\n\n\n# Start the Bot\nupdater.start_polling()\n\n# Run the bot until the user presses Ctrl-C or the process receives SIGINT,\n# SIGTERM or SIGABRT\nupdater.idle()","sub_path":"state_machine.py","file_name":"state_machine.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"558972560","text":"import setuptools\n\nlong_description = '''\nThis package extends the python-mwlinks toolkit released by mediawiki. \n\nExtracts links from MediaWiki with a focus on Wikipedia.\n\nThis library add multi-process utilities for extracting wikilinks from MediaWiki XML database dumps.\n'''\n\nsetuptools.setup(\n name=\"python-mwlinks\",\n version=\"0.0.1\",\n url=\"https://github.com/hunterhector/python-mwlinks\",\n\n description=\"A python MediaWiki Link parser\",\n long_description=long_description,\n\n packages=setuptools.find_packages(),\n platforms='any',\n\n install_requires=[\n 'mwxml',\n 'mwtypes',\n 'docopt',\n 'jsonable',\n ],\n extras_require={\n },\n package_data={\n },\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"198633884","text":"from pytezos.michelson.pack import pack\nfrom pytezos.repl.control import instruction\nfrom pytezos.repl.context import Context\nfrom pytezos.repl.types import assert_stack_type, Option, Pair, String, Bytes, List, BigMap, Map, Set, Or, Bool, Nat, \\\n Unit, StackItem, dispatch_type_map\nfrom pytezos.michelson.pack import unpack\n\n\n@instruction(['CAR', 'CDR'])\ndef do_car(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, Pair)\n idx = {'CAR': 0, 'CDR': 1}\n res = top.get_element(idx[prim])\n ctx.push(res, annots=annots)\n\n\n@instruction('CONCAT')\ndef do_concat(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, [String, Bytes, List])\n if type(top) in [String, Bytes]:\n second = ctx.pop1()\n val_type = dispatch_type_map(top, second, {\n (String, String): str,\n (Bytes, Bytes): bytes\n })\n res = type(top)(val_type(top) + val_type(second))\n elif type(top) == List:\n res_type = top.val_type()\n val_type, sep = {\n String: (str, ''),\n Bytes: (bytes, b'')\n }[res_type]\n res = res_type(sep.join(map(val_type, top)))\n else:\n assert False\n ctx.push(res, annots=annots)\n\n\n@instruction('CONS')\ndef do_cons(ctx: Context, prim, args, annots):\n val, container = ctx.pop2()\n assert_stack_type(container, List)\n res = container.prepend(val)\n ctx.push(res, annots=annots)\n\n\n@instruction('EMPTY_BIG_MAP', args_len=2)\ndef do_empty_big_map(ctx: Context, prim, args, annots):\n res = ctx.big_maps.empty(k_type_expr=args[0], v_type_expr=args[1])\n 
ctx.push(res, annots=annots)\n\n\n@instruction('EMPTY_MAP', args_len=2)\ndef do_empty_map(ctx: Context, prim, args, annots):\n res = Map.empty(k_type_expr=args[0], v_type_expr=args[1])\n ctx.push(res, annots=annots)\n\n\n@instruction('EMPTY_SET', args_len=1)\ndef do_empty_set(ctx: Context, prim, args, annots):\n res = Set.empty(k_type_expr=args[0])\n ctx.push(res, annots=annots)\n\n\n@instruction('GET')\ndef do_get(ctx: Context, prim, args, annots):\n key, container = ctx.pop2()\n assert_stack_type(container, [Map, BigMap])\n\n if type(container) == Map:\n val = container.find(key)\n else:\n val = ctx.big_maps.find(container, key)\n\n if val is not None:\n res = Option.some(val)\n else:\n res = Option.none(container.val_type_expr())\n\n ctx.push(res, annots=annots)\n\n\n@instruction(['LEFT', 'RIGHT'], args_len=1)\ndef do_left(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n if prim == 'LEFT':\n res = Or.left(r_type_expr=args[0], item=top)\n else:\n res = Or.right(l_type_expr=args[0], item=top)\n ctx.push(res, annots=annots)\n\n\n@instruction('MEM')\ndef do_mem(ctx: Context, prim, args, annots):\n key, container = ctx.pop2()\n assert_stack_type(container, [Set, Map, BigMap])\n if type(container) == BigMap:\n res = Bool(ctx.big_maps.contains(container, key))\n else:\n res = Bool(key in container)\n ctx.push(res, annots=annots)\n\n\n@instruction('NIL', args_len=1)\ndef do_nil(ctx: Context, prim, args, annots):\n nil = List.empty(args[0])\n ctx.push(nil, annots=annots)\n\n\n@instruction('NONE', args_len=1)\ndef do_none(ctx: Context, prim, args, annots):\n none = Option.none(args[0])\n ctx.push(none, annots=annots)\n\n\n@instruction('PACK')\ndef do_pack(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n res = Bytes(pack(top.val_expr, top.type_expr))\n ctx.push(res, annots=annots)\n\n\n@instruction('PAIR')\ndef do_pair(ctx: Context, prim, args, annots):\n left, right = ctx.pop2()\n res = Pair.new(left, right)\n ctx.push(res, annots=annots)\n\n\n@instruction('SIZE')\ndef do_size(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, [String, Bytes, List, Set, Map])\n res = Nat(len(top))\n ctx.push(res, annots=annots)\n\n\n@instruction('SLICE')\ndef do_slice(ctx: Context, prim, args, annots):\n offset, length, s = ctx.pop3()\n assert_stack_type(s, [String, Bytes])\n offset, length = int(offset), int(length)\n if offset + length <= len(s):\n res = Option.some(s[offset:offset+length])\n else:\n res = Option.none(type(s)().type_expr)\n ctx.push(res, annots=annots)\n\n\n@instruction('SOME')\ndef do_some(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n res = Option.some(top)\n ctx.push(res, annots=annots)\n\n\n@instruction('UNIT')\ndef do_unit(ctx: Context, prim, args, annots):\n ctx.push(Unit(), annots=annots)\n\n\n@instruction('UNPACK', args_len=1)\ndef do_unpack(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, Bytes)\n try:\n val_expr = unpack(data=bytes(top), type_expr=args[0])\n item = StackItem.parse(val_expr=val_expr, type_expr=args[0])\n res = Option.some(item)\n except Exception as e:\n ctx.print(f'failed: {e}')\n res = Option.none(args[0])\n ctx.push(res, annots=annots)\n\n\n@instruction('UPDATE')\ndef do_update(ctx: Context, prim, args, annots):\n key, val, container = ctx.pop3()\n assert_stack_type(container, [Set, Map, BigMap])\n\n if type(container) == Set:\n assert_stack_type(val, Bool)\n if val:\n res = container.add(key)\n else:\n res = container.remove(key)\n else:\n assert_stack_type(val, Option)\n if 
val.is_none():\n res = container.remove(key)\n else:\n res = container.update(key, val.get_some())\n\n ctx.push(res, annots=annots)\n","sub_path":"pytezos/repl/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"523119545","text":"# Cracking the Coding Interview\n# Problem 1.1\n# string has unique characters?\n\n\ndef is_unique(string):\n # use hash table (dictionary) to store seen chars\n if type(string) is not str:\n raise ValueError(\"is_unique argument must be a string\")\n table = {}\n for letter in string:\n if table.get(letter) is not None:\n return False\n table[letter] = True\n return True\n\n\ndef is_unique_alt(string):\n # use ASCII values as an index to a list to check for repeats\n if type(string) is not str:\n raise ValueError(\"is_unique argument must be a string\")\n table = [None] * 256\n for letter in string:\n if table[ord(letter)] is None:\n table[ord(letter)] = True\n else:\n return False\n return True\n\n\nassert is_unique(\"1234\") is True\nassert is_unique(\"helloworld\") is False\n\nassert is_unique_alt(\"1234\") is True\nassert is_unique_alt(\"helloworld\") is False\n\n","sub_path":"CrackingTheCodingInterview/ArraysAndStrings/is_unique.py","file_name":"is_unique.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"275602598","text":"import pickle\r\nimport socket\r\nfrom _thread import *\r\nfrom JobList import *\r\nfrom JobCreator import *\r\nfrom JobSeeker import *\r\nfrom FileRecord import *\r\nimport sys\r\ntry:\r\n from ip2geotools.databases.noncommercial import DbIpCity\r\nexcept ImportError:\r\n print(\"Need to install ip2geotools to continue\")\r\n sys.exit(0)\r\n\r\nclass Server(object):\r\n\r\n #Message Variables:\r\n initialConnectionMessage = [\"LOGIN \", \"POSITION SELECTION: \", \"\",\r\n \"\"]\r\n\r\n jobCreatorCommandMessage = [\"CREATEJOB \",\r\n \"REMOVEJOB \",\r\n \"VIEWJOBS\",\r\n \"STARTJOB \",\r\n \"JOBTYPE SELECTION: \",\r\n \"\", \"\",\r\n \"\", \"\",\r\n \"\", \"\"]\r\n\r\n jobSeekerCommandMessage = [\"VIEWJOBS\",\r\n \"JOINJOB \",\r\n \"COMPLETEJOB \"]\r\n\r\n def __init__(self):\r\n self.ServerSocket = socket.socket()\r\n self.host = '127.0.0.1'\r\n self.port = 1233\r\n self.ThreadCount = 0\r\n self.jobListOBJ = JobList()\r\n self.fileRecordOBJ = FileRecord()\r\n self.jobCreatorList = []\r\n self.jobSeekerList = []\r\n self.command = \"\"\r\n self.parameterList = []\r\n self.readBackup()\r\n self.count = 0\r\n\r\n\r\n # Bind socket to port\r\n try:\r\n self.ServerSocket.bind((self.host, self.port))\r\n except socket.error as e:\r\n print(str(e))\r\n\r\n print('Waiting for a Connection..')\r\n self.ServerSocket.listen(5)\r\n\r\n def main(self):\r\n while True:\r\n Client, address = self.ServerSocket.accept()\r\n print('Connected to: ' + address[0] + ':' + str(address[1]))\r\n start_new_thread(self.threadedClient, (Client,))\r\n self.ThreadCount += 1\r\n print('Thread Number: ' + str(self.ThreadCount))\r\n\r\n #COMPLETE\r\n def threadedClient(self, connection):\r\n self.connectionMessage(connection)\r\n\r\n while True:\r\n # Limiting to 2048 Bytes\r\n clientMessage = connection.recv(2048)\r\n\r\n # Receiving Message From Client\r\n self.command = pickle.loads(clientMessage)\r\n\r\n self.ParseCommand(self.command)\r\n\r\n self.commandRouting(connection, self.parameterList)\r\n\r\n #COMPLETE\r\n def 
connectionMessage(self, connection):\r\n        connection.send(pickle.dumps(self.initialConnectionMessage))\r\n\r\n    #COMPLETE\r\n    def ParseCommand(self, Command):\r\n        self.parameterList = Command.split(\" \")\r\n\r\n    #COMPLETE\r\n    def login(self, connection, parameterList):\r\n        if parameterList[3] == \"JobCreator\":\r\n            self.jobCreatorList.append(JobCreator(parameterList[1], parameterList[2]))\r\n            connection.send(pickle.dumps(self.jobCreatorCommandMessage))\r\n\r\n        elif parameterList[3] == \"JobSeeker\":\r\n            # seekers belong in jobSeekerList, not jobCreatorList\r\n            self.jobSeekerList.append(JobSeeker(parameterList[1], parameterList[2]))\r\n            connection.send(pickle.dumps(self.jobSeekerCommandMessage))\r\n        else:\r\n            connection.send(pickle.dumps(\"Not a valid position\"))\r\n\r\n    #COMPLETE\r\n    def createJob(self, connection, parameterList):\r\n        connection.send(pickle.dumps(\"Job has been created and added to the Job List\"))\r\n        self.jobListOBJ.updateJobList(parameterList[1], parameterList[2], parameterList[3], parameterList[4],\r\n                                      parameterList[5])\r\n\r\n        self.fileRecordOBJ.updateJobListBackup(self.jobListOBJ.listofjobs)\r\n\r\n    #COMPLETE\r\n    def removeJob(self, connection, parameterList):\r\n        # report \"not found\" only after the whole list has been scanned,\r\n        # not once per non-matching job\r\n        for Job in self.jobListOBJ.listofjobs:\r\n            if Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[2]:\r\n                connection.send(pickle.dumps(Job.FullJob + \" has been removed from the Job List\"))\r\n                self.jobListOBJ.listofjobs.remove(Job)\r\n                break\r\n        else:\r\n            connection.send(pickle.dumps(\"Entered Job Does Not Exist In Job List\"))\r\n\r\n    #COMPLETE\r\n    def viewJobs(self, connection):\r\n        if len(self.jobListOBJ.listofjobs) == 0:\r\n            connection.send(pickle.dumps(\"No Jobs Posted\"))\r\n        else:\r\n            try:\r\n                connection.send(pickle.dumps(self.jobListOBJ.listofjobs))\r\n            except EOFError:\r\n                pass\r\n\r\n    #COMPLETE\r\n    def joinJob(self, connection, parameterList):\r\n        for Job in self.jobListOBJ.listofjobs:\r\n            if Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[2]:\r\n                if Job.NumOfSeekers == \"Job Started\":\r\n                    connection.send(pickle.dumps(\"Job is full\"))\r\n                elif int(Job.NumOfSeekers) != 0:\r\n                    connection.send(pickle.dumps(parameterList[3] + \" has joined: \" + Job.FullJob))\r\n                    Job.JobSeekerList.append(parameterList[3])\r\n                    Job.NumOfSeekers = str(int(Job.NumOfSeekers) - 1)\r\n                break\r\n        else:\r\n            connection.send(pickle.dumps(\"Entered Job Does Not Exist In Job List\"))\r\n\r\n    #COMPLETE\r\n    def startJob(self, connection, parameterList):\r\n        for Job in self.jobListOBJ.listofjobs:\r\n            if Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[2]:\r\n                connection.send(pickle.dumps(Job.FullJob + \" has been started\"))\r\n                Job.setNumOfSeekers(\"Job Started\")\r\n                break\r\n        else:\r\n            connection.send(pickle.dumps(\"Entered Job Does Not Exist In Job List\"))\r\n\r\n    #COMPLETE\r\n    def completeJob(self, connection, parameterList):\r\n\r\n        print(\"Sending Job Type To Client\")\r\n        connection.send(pickle.dumps(parameterList[2]))\r\n        print(\"Sending Target IP To Client (If Needed)\")\r\n        connection.send(pickle.dumps(parameterList[3]))\r\n        print(\"Sending Target Port To Client (If Needed)\")\r\n        connection.send(pickle.dumps(parameterList[4]))\r\n\r\n        print(\"Waiting For Response From 
Client\")\r\n\r\n #Limiting to 2048 Bytes\r\n clientOutput = connection.recv(2048)\r\n\r\n print(\"Received Response From Client\")\r\n\r\n #Receiving Message From Client\r\n clientCompletion = pickle.loads(clientOutput)\r\n\r\n print(\"Response From Client Saved\")\r\n\r\n #Recording Multi Lined Client Output\r\n if type(clientCompletion) == list:\r\n\r\n for hosts in clientCompletion:\r\n self.fileRecordOBJ.recordOutput(hosts)\r\n\r\n #Recording Single Lined Client Output\r\n else:\r\n self.fileRecordOBJ.recordOutput(clientCompletion)\r\n\r\n #COMPLETE\r\n def commandRouting(self, connection, parameterList):\r\n if parameterList[0] == \"LOGIN\":\r\n self.login(connection, parameterList)\r\n elif parameterList[0] == \"CREATEJOB\":\r\n self.createJob(connection, parameterList)\r\n elif parameterList[0] == \"REMOVEJOB\":\r\n self.removeJob(connection, parameterList)\r\n elif parameterList[0] == \"VIEWJOBS\":\r\n self.viewJobs(connection)\r\n elif parameterList[0] == \"JOINJOB\":\r\n self.joinJob(connection, parameterList)\r\n elif parameterList[0] == \"STARTJOB\":\r\n self.startJob(connection, parameterList)\r\n elif parameterList[0] == \"COMPLETEJOB\":\r\n self.completeJob(connection, parameterList)\r\n else:\r\n connection.send(pickle.dumps(\"Invalid Command\"))\r\n\r\n #COMPLETE\r\n def readBackup(self):\r\n try:\r\n backup = open(\"JobBackup.txt\", 'r')\r\n backupList = backup.readlines()\r\n\r\n for lines in backupList:\r\n self.ParseCommand(lines.rstrip('\\n'))\r\n self.jobListOBJ.updateJobList(self.parameterList[0], self.parameterList[1], self.parameterList[2],\r\n self.parameterList[3], self.parameterList[4])\r\n print(\"Jobs Have Been Restored\")\r\n except IOError:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s = Server()\r\n s.main()","sub_path":"Final Project/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"155650648","text":"# Purpose\n#\tCalculate barcode counts from read counts\n\n\n# !! NOTE !!\n# if inputting DNAbc and RNAbc, be sure to enter the DNAbc files first. \n\n\n# Usage \n#\tpython x2.collapdsed2counts.py ... \n\n# Arguments\n#\t\t\tdirectory of input data\n#\t\t\t\tname of sample (is used to name the job)\n# integer. Number of gigabytes of memory requested for computation\n# integer. Maximum computation time requested \n# email to send alerts to \n\n# Inputs\n# directory and filename of _collapsed.txt file (output of x1.collapseSeq.py)\n#\t\tthe next input file\n#\t!!! Note! x3.bc2enhancer.py assumes all DNAbc counts come before RNAbc counts. Therefore, input all DNAbc counts before RNAbc counts when calling this program. 
\t\n# Outputs\n# \tbarcode2reads.txt\t\ttsv file \n#\t\t\t\t\theader line = filenames inputted into analysis\n#\t\t\t\t\tcolumn1 = barcode\n#\t\t\t\t\tcolumn2-n = barcode counts [units = reads per million within each file]\n\n\n\n\n\nimport os\nimport sys\n\n# ensure all arguments passed\ntry:\n\toutputDir=sys.argv[1]+'/' \n\tjobName=sys.argv[2]\n\tmem=sys.argv[3]\n\tjobHours=sys.argv[4]\n\temail=sys.argv[5]\n\tinputList=sys.argv[6:]\n\tinputStr=' '.join(inputList)\nexcept IndexError:\n\tprint(\"Error: Not all arguments passed\")\n\n# Make dirs for submission scripts and batch stderr/stdout files to be saved\nos.system(\"mkdir \"+outputDir+\"stdout 2>/dev/null\")\nos.system(\"mkdir \"+outputDir+\"stderr 2>/dev/null\")\n\n# Script version to use\nRNAseq2reads='1.RNAseq2reads.py'\n\n# Create batch submit script\nline_out=\"#!/bin/bash\\n\"\nline_out+=\"#SBATCH --partition=shared\\n\"\nline_out+=\"#SBATCH --job-name=RNAseq2reads:\"+jobName+\"\\n\"\nline_out+=\"#SBATCH --nodes=1\\n\"\nline_out+=\"#SBATCH --ntasks-per-node=1\\n\"\nline_out+=\"#SBATCH --mem=\"+mem+\"G\\n\"\nline_out+=\"#SBATCH --time=\"+jobHours+\":00:00\\n\"\nline_out+=\"#SBATCH --output=\"+outputDir+\"stdout/\"+jobName+\".out.txt\\n\"\nline_out+=\"#SBATCH --error=\"+outputDir+\"stderr/\"+jobName+\".err.txt\\n\"\nline_out+=\"#SBATCH --export=ALL\\n\"\nline_out+=\"#SBATCH --mail-user=\"+email+\"\\n\"\nline_out+=\"#SBATCH --mail-type=ALL\\n\"\nline_out+=\"module load python\\n\" # load package numpy\nline_out+=\"module load scipy\\n\" # load package numpy\nline_out+=\"module load biopython\\n\" # load package biopython\nline_out+=\" \".join([\"python\",RNAseq2reads,outputDir]+inputList) \n\n# Write and submit batch script\nwith open(\"submit_RNAseq2reads.sh\",\"w\") as fn:\n\tfn.write(line_out)\nos.system(\"sbatch submit_RNAseq2reads.sh\")\n\n# Copy submit script\nos.system(\"mkdir \"+outputDir+\"submit-scripts 2>/dev/null\")\nos.system(\"cp submit_RNAseq2reads.sh \"+outputDir+\"submit-scripts/RNAseq2reads-\"+jobName+\".submit.sh\")\n\n# Copy script to data file\nos.system(\"mkdir \"+outputDir+\"scripts 2>/dev/null\")\nos.system(' '.join([\"cp\",RNAseq2reads,outputDir+\"scripts/\"+RNAseq2reads]))\n","sub_path":"RNA_library/x2.collapsed2counts.py","file_name":"x2.collapsed2counts.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"601161077","text":"from numba import njit\nfrom numba.typed import List\nimport numpy as np\nfrom ..utils.stack_txt_LR import *\n\n# @njit\ndef get_semicircle(txt,deg,x0,y0):\n #make the initialization mesh\n img = 0*txt[...,0].copy()\n color_left_of_line(out=img, x0=x0, y0=y0, deg = deg, value=1.) 
#make left of line mesh 0 or 1\n    color_outside_range(out=img, x0=x0,y0=y0,r=64, val=0.0) #make circle mesh 0 or 1\n    #already done: hadamard product of the two.\n    return img\n\n@njit\ndef color_within_range(x0,y0,r, out, val=1.0, width=512,height=512):\n\tfor x in range(width):\n\t\tdx = x-x0\n\t\tfor y in range(height):\n\t\t\tdy = y-y0\n\t\t\tif np.sqrt(dx**2+dy**2)<=r:\n\t\t\t\tout[y,x] = val\n@njit\ndef color_outside_range(x0,y0,r, out, val=0.0):\n    height = out.shape[0]  # rows are y\n    width = out.shape[1]   # columns are x\n    for x in range(width):\n        dx = x-x0\n        for y in range(height):\n            dy = y-y0\n            if np.sqrt(dx**2+dy**2)>r:\n                out[y,x] = val\n\n\n@njit\ndef make_coordinate_textures(txt):\n    txtx = txt.copy()\n    txty = txt.copy()\n    for y in range(txt.shape[0]):\n        for x in range(txt.shape[1]):\n            txtx[x,y] = x\n            txty[x,y] = y\n    return txtx, txty\n\n# @njit #njit crashes rn\ndef color_left_of_line(out, x0, y0, deg = 45, value=10.):\n    width = out.shape[1]\n    x0 = int(x0)\n    y0 = int(y0)\n    for y in range(out.shape[0]):\n        l = linear_interpolate_row_to_column(y, x0=x0, y0=y0, deg = deg)\n        for x in range(width):\n            if x < l:\n                # fill cells left of the line's column at this row\n                out[y, x] = value\n\n    html = html+\"<p>Summary of keywords matched per province - \"+pdate+\"</p><p>\"+summary+\"</p><p>See attached CSV file for full details.</p>
\"\n page = MIMEText(html, 'html')\n \n\n file = MIMEBase('application', \"octet-stream\")\n file.set_payload(open(\"report.csv\", \"rb\").read())\n encoders.encode_base64(file)\n file.add_header('Content-Disposition', 'attachment; filename=\"report.csv\"')\n\n msg.attach(page)\n msg.attach(file)\n\n maxSend = 10\n \n while True:\n try:\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(user = config.USER, password = config.PASSWORD)\n #s.send_message(msg)\n \n for address in addresses:\n print (\"sending to: \"+ address)\n s.sendmail(me, address, msg.as_string())\n\n s.quit()\n break\n except Exception as e:\n print (e)\n k = 10\n while k > 0: \n print (\"Retrying to send after \"+str(k)+\" second/s...\")\n k-=1\n time.sleep(1)\n maxSend-=1\n if maxSend == 0:\n print(\"Failed to send...\")\n break\n\n\n\n\n\n\n###Innitial Loading of settings\nprint (\"Loading settings..\")\nq = \"select * from settings where settings_id = 1\"\ncur = conn.cursor()\ncur.execute(q)\nsettings = cur.fetchone()\nconn.commit()\ncur.close()\n\nrefreshRate = settings[1] #hours - befores generating new report\nrangeTime = settings[2] #hours - time range of posts which will be included in the report\n\n\nprint (\"Loading provinces' polygons..\") # provinces\n\n## used this to convert data to json format\n##provincesFile = open('provincesPoints.txt','r')\n##jsonProvincesFile = open('jsonProvinces.txt','w')\n##provincesDictionary = {}\n##i = 1\n##for line in iter(provincesFile):\n## line = line.strip().split(',')\n## \n## if int(line[0]) in provincesDictionary:\n## provincesDictionary[int(line[0])].append((float((line[2])),float((line[3]))))\n## else:\n## provincesDictionary[int(line[0])]=[(float((line[2])),float((line[3])))]\n## print(line[0] + \" ----------------------------------\")\n## time.sleep(2)\n## \n## print(i)\n## i+=1\n## \n##jsonProvincesFile.write(json.dumps(provincesDictionary))\n##provincesFile.close()\n##jsonProvincesFile.close()\n\n\njsonCoordsFile = open('jsonProvinces.txt','r')\ncoords = json.loads(jsonCoordsFile.read())\njsonCoordsFile.close()\ni = 1\nprovince = {}\nwhile i<=81:\n province[i] = MultiPoint(coords[str(i)]).convex_hull\n i+= 1\n\nwordProvince = {\n 1:\"Abra\",\n 2:\"Agusan del Norte\",\n 3:\"Agusan del Sur\",\n 4:\"Aklan\",\n 5:\"Albay\",\n 6:\"Antique\",\n 7:\"Apayao\",\n 8:\"Aurora\",\n 9:\"Basilan\",\n 10:\"Bataan\",\n 11:\"Batanes\",\n 12:\"Batangas\",\n 13:\"Benguet\",\n 14:\"Biliran\",\n 15:\"Bohol\",\n 16:\"Bukidnon\",\n 17:\"Bulacan\",\n 18:\"Cagayan\",\n 19:\"Camarines Norte\",\n 20:\"Camarines Sur\",\n 21:\"Camiguin\",\n 22:\"Capiz\",\n 23:\"Catanduanes\",\n 24:\"Cavite\",\n 25:\"Cebu\",\n 26:\"Compostela Valley\",\n 27:\"Davao del Norte\",\n 28:\"Davao del Sur\",\n 29:\"Davao Oriental\",\n 30:\"Dinagat Islands\",\n 31:\"Eastern Samar\",\n 32:\"Guimaras\",\n 33:\"Ifugao\",\n 34:\"Ilocos Norte\",\n 35:\"Ilocos Sur\",\n 36:\"Iloilo\",\n 37:\"Isabela\",\n 38:\"Kalinga\",\n 39:\"La Union\",\n 40:\"Laguna\",\n 41:\"Lanao del Norte\",\n 42:\"Lanao del Sur\",\n 43:\"Leyte\",\n 44:\"Maguindanao\",\n 45:\"Marinduque\",\n 46:\"Masbate\",\n 47:\"Metropolitan Manila\",\n 48:\"Misamis Occidental\",\n 49:\"Misamis Oriental\",\n 50:\"Mountain Province\",\n 51:\"Negros Occidental\",\n 52:\"Negros Oriental\",\n 53:\"North Cotabato\",\n 54:\"Northern Samar\",\n 55:\"Nueva Ecija\",\n 56:\"Nueva Vizcaya\",\n 57:\"Occidental Mindoro\",\n 58:\"Oriental Mindoro\",\n 59:\"Palawan\",\n 60:\"Pampanga\",\n 61:\"Pangasinan\",\n 62:\"Quezon\",\n 63:\"Quirino\",\n 
64:\"Rizal\",\n 65:\"Romblon\",\n 66:\"Samar\",\n 67:\"Sarangani\",\n 68:\"Siquijor\",\n 69:\"Sorsogon\",\n 70:\"South Cotabato\",\n 71:\"Southern Leyte\",\n 72:\"Sultan Kudarat\",\n 73:\"Sulu\",\n 74:\"Surigao del Norte\",\n 75:\"Surigao del Sur\",\n 76:\"Tarlac\",\n 77:\"Tawi-Tawi\",\n 78:\"Zambales\",\n 79:\"Zamboanga del Norte\",\n 80:\"Zamboanga del Sur\",\n 81:\"Zamboanga Sibugay\"\n }\n\n\n\nwhile True: #loop for reporting\n\n\n ####get keywords\n print (\"Loading keywords..\")\n q = \"select * from keywords where active= true\"\n cur = conn.cursor()\n cur.execute(q)\n rows = cur.fetchall()\n conn.commit()\n cur.close()\n keywordSet = []\n for rows in rows:\n keywordSet.append(rows[1].lower())\n\n print (\"Loading Events categories\")\n cat_and_thres = {}\n q = \"select a.word,a.threshold,string_agg(c.name,',') from keywords as a \\\n inner join events_keywords as b on b.keyword_id = a.keyword_id inner join \\\n events as c on c.event_id = b.event_id where a.active = true group by a.word,a.threshold\"\n cur = conn.cursor()\n cur.execute(q)\n rows = cur.fetchall()\n conn.commit()\n cur.close()\n\n for cat in rows:\n cat_and_thres[cat[0]] = [cat[1],cat[2]]\n \n ###Innitial test phrases for keywords\n print (\"Loading test phrases for keywords..\")\n testphrases = {}\n q = \"select b.word,a.text,a.classification from keyword_test_data as a \\\n inner join keywords as b on b.keyword_id = a.keyword_id where b.active = true\"\n cur = conn.cursor()\n cur.execute(q)\n rows = cur.fetchall()\n conn.commit()\n cur.close()\n\n for entry in rows:\n key = entry[0].strip().lower()\n txt = entry[1].strip().lower()\n cls = entry[2].strip().lower()\n \n if testphrases.get(key):\n testphrases[key].append((txt,cls))\n else:\n testphrases[key]= [(txt,cls)]\n \n\n\n #####get included posts\n print (\"Loading tweets..\")\n borderStamp = datetime.now() - timedelta(hours=rangeTime)\n q = \"select * from streams where stamp > '\"+str(borderStamp)+\"'\"\n cur = conn.cursor()\n cur.execute(q)\n rows = cur.fetchall()\n conn.commit()\n cur.close()\n\n\n\n #print (\"Matching keywords and calculating sentiments..\")\n\n #gmap = gmplot.GoogleMapPlotter(12.20139027, 121.0227814, 6.7)\n #gmap.coloricon = \"http://www.googlemapsmarkers.com/v1/%s/\"\n #dots = []\n #matchDots = []\n i = 0\n y = 0\n perProvinceCount = {}\n\n # ----- delete reported tweets table contents and insert new list ---------->\n q = \"delete from reported_post; delete from provincial_summary\"\n cur = conn.cursor()\n cur.execute(q)\n conn.commit()\n cur.close()\n\n\n for rows in rows:\n point = Point(rows[2],rows[3]) # lon,lat\n x = searchPoint(point)\n if x:\n i+=1\n #dots.append((rows[3],rows[2])) #lat,lon\n \n text = rows[1]\n # ----- perform cleaning the text first ----------------->\n text = re.sub(r'http\\S+', '', text.strip()) # remove urls\n text = text.lower() #normalize text to lower case\n text = re.sub(r'[^\\w\\s]','',text) # remove punctuation\n textSet = text.split() #split into lists\n\n \n # ----- perform the text and keyword matching here ---------->\n wordMatch = \",\".join(list(set(keywordSet).intersection(set(textSet)))) #get the matching words if exist\n if len(wordMatch):\n y+=1\n #matchDots.append((rows[3],rows[2])) #lat,lon\n\n #test with naive bayes per word matched\n pos = 0\n neg = 0\n neu = 0\n cat = ''\n \n textwords = wordMatch.split(\",\")\n \n for kw in textwords:\n \n if testphrases.get(kw):\n cl = NaiveBayesClassifier(testphrases[kw])\n\n if cl.classify(text) == 'positive':\n pos+=1\n elif cl.classify(text) == 
'negative':\n neg+=1\n elif cl.classify(text) == 'neutral':\n neu+=1\n else:\n print(\"Failed to classify sentiment...\")\n\n if cat == \"\":\n cat = cat_and_thres[kw][1]\n else:\n cat = cat +\",\"+ cat_and_thres[kw][1]\n \n else:\n print(\"Keyword not found in the test phrases dictionary...\")\n\n\n if perProvinceCount.get(x):\n perProvinceCount[x] += 1\n else:\n perProvinceCount[x] = 1\n \n \n \n cat = cat.split(\",\")\n cat = \",\".join(set(cat))\n \n \n # insert the tweet with matching keywords into reports database (comma separated ang keywords) w/ province number\n q = \"insert into reported_post values('\"+str(rows[0])+\"','\"+rows[1].replace(\"'\",\"''\")+\"','\"+repr(rows[2])+\"','\"+repr(rows[3])+\"'\\\n ,'\"+str(rows[4])+\"','\"+wordMatch+\"','\"+str(x)+\"','\"+str(pos)+\"','\"+str(neg)+\"','\"+str(neu)+\"','\"+str(rows[5])+\"','\"+wordProvince[x]+\"','\"+cat+\"')\"\n cur = conn.cursor()\n cur.execute(q)\n conn.commit()\n cur.close()\n\n \n\n sortedResult = sorted(perProvinceCount.items(), key=operator.itemgetter(1),reverse=True)\n allProvSummery = ''\n allProvSummeryTagged = ''\n postList = ''\n summaryWrite = ''\n eventsList = ''\n send = False\n\n pdate = datetime.now()\n entrypdate = pdate\n pdate = pdate.strftime('%a %b %d %H:%M:%S %Y')\n\n reportFile = open('report.csv','w')\n reportFile.write(\"Tweet ID,Screen Name,Text,Longitude,Latitude,Timestamp,Keywords,Events,Province,Sentiment\\n\")\n reportFile.write(\" , , , , , , , , ,Positive,Negative,Neutral\\n\")\n for row in sortedResult:\n \n # query reported tweets sort by provinces with and write to CSV or text file\n q = \"select * from reported_post where province = \"+str(row[0])\n cur = conn.cursor()\n cur.execute(q)\n rows = cur.fetchall()\n conn.commit()\n cur.close()\n kw = ''\n provsummary = ''\n provPos = 0\n provNeg = 0\n provNeu = 0\n cat = ''\n provcat = ''\n \n for rows in rows:\n\n provPos+=rows[7]\n provNeg+=rows[8]\n provNeu+=rows[9]\n if kw == '':\n kw = rows[5]\n else: \n kw = kw+\",\"+rows[5]\n\n xk = kw.split(\",\")\n\n for kwc in xk:\n if cat == \"\":\n cat = cat_and_thres[kwc][1]\n else:\n cat = cat +\",\"+ cat_and_thres[kwc][1]\n cat = cat.split(\",\")\n cat = \",\".join(set(cat)) \n \n line = str(rows[0])+\",\"+str(rows[10]).replace(\",\",\"\")+\",\"+rows[1].replace('\\n', ' ').replace('\\r', '').replace(',', '')+\",\"+str(rows[2])+\",\"+str(rows[3])+\",\"+str(rows[4])+\",\"+str(rows[5]).replace(\",\",\"|\")+\",\"+cat.replace(\",\",\"|\")+\",\"+wordProvince[rows[6]]+\",\"+str(rows[7])+\",\"+str(rows[8])+\",\"+str(rows[9])+\"\\n\"\n line = line.encode('ascii','replace')\n reportFile.write(line.decode())\n \n\n \n kw = kw.split(\",\")\n kwCounter = Counter(kw)\n \n for kword in kwCounter:\n \n if provsummary == '':\n provsummary = kword+\"(\"+str(kwCounter[kword])+\")\"\n provcat = cat_and_thres[kword][1]\n else:\n provsummary = provsummary+ \",\"+kword+\"(\"+str(kwCounter[kword])+\")\"\n provcat = provcat+\",\"+cat_and_thres[kword][1]\n \n if kwCounter[kword] >= cat_and_thres[kword][0]: #check if the keyword repetition is greater than the keyword threshold and list down the events then get the set\n if eventsList == \"\":\n eventsList = cat_and_thres[kword][1]\n else:\n eventsList = eventsList+\",\"+cat_and_thres[kword][1]\n \n \n provcat = provcat.split(\",\")\n provcat = \",\".join(set(provcat))\n\n if allProvSummery =='':\n allProvSummery = wordProvince[row[0]]+\" -> Total Match: \"+str(perProvinceCount[row[0]])+\" - 
Sentiments(Pos[\"+str(provPos)+\"],Neg[\"+str(provNeg)+\"],Neu[\"+str(provNeu)+\"] - Events[\"+provcat+\"] - Keywords : \"+provsummary\n allProvSummeryTagged = \"\"+wordProvince[row[0]]+\"\"+str(perProvinceCount[row[0]])+\"\"+provsummary +\"\"+provcat+\"\"+str(provPos)+\"\"+str(provNeg)+\"\"+str(provNeu)+\"\"\n\n else:\n allProvSummery = allProvSummery+\"\\n\"+wordProvince[row[0]]+\" -> Total Match: \"+str(perProvinceCount[row[0]])+\" - Sentiments(Pos[\"+str(provPos)+\"],Neg[\"+str(provNeg)+\"],Neu[\"+str(provNeu)+\"] - Events[\"+provcat+\"] - Keywords : \"+provsummary\n allProvSummeryTagged = allProvSummeryTagged +\"\"+wordProvince[row[0]]+\"\"+str(perProvinceCount[row[0]])+\"\"+provsummary +\"\"+provcat+\"\"+str(provPos)+\"\"+str(provNeg)+\"\"+str(provNeu)+\"\"\n\n #insert into summary table\n q = \"insert into provincial_summary values('\"+wordProvince[row[0]]+\"',\"+str(perProvinceCount[row[0]])+\",'\"+provsummary+\"','\"+provcat+\"',\"+str(provPos)+\",\"+str(provNeg)+\",\"+str(provNeu)+\",'\"+str(entrypdate)+\"')\"\n cur = conn.cursor()\n cur.execute(q)\n conn.commit()\n cur.close()\n \n summaryWrite = allProvSummery + \"\\n\"\n \n allProvSummeryTagged = ''+allProvSummeryTagged+\"
ProvinceTotal keywords matchedKeywordsEventsSentiments
PositiveNegativeNeutral
\"\n\n reportFile.close() \n\n print (\"\\n\"+allProvSummery)\n\n print (\"\\nReport Settings..\")\n print (\"Total plots = \"+ str(i))\n print (\"Total match = \"+ str(y))\n print (\"Refresh rate (hour interval)= \"+ str(refreshRate))\n print (\"Range Time (hour)= \"+str(rangeTime))\n\n #print (\"\\nGenerating Map..\")\n #lats, lons = zip(*dots)\n #gmap.scatter(lats, lons, 'red', size=40, marker=False) # uncomment later >>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n #lats, lons = zip(*matchDots)\n #gmap.scatter(lats, lons, '#1ad0f0', size=20, marker=True) # uncomment later >>>>>>>>>>>>>>>>>>>>>>>>>>>>\n## gmap.draw(\"twitterMap.html\") # uncomment later >>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n\n eventsList = eventsList.split(\",\")\n eventsList = set(eventsList)\n addressList = ''\n for ev in eventsList:\n q = \"select string_agg(a.email,',') from subscribers as a\\\n inner join subscribers_events as b on b.sub_id = a.sub_id\\\n inner join events as c on c.event_id = b.event_id\\\n where c.name = '\"+ev+\"' AND a.send_notification = 'true' group by a.email\"\n cur = conn.cursor()\n cur.execute(q)\n addr = cur.fetchall()\n conn.commit()\n\n for e_addr in addr:\n\n if addressList == '':\n addressList = str(\",\".join(set(e_addr)))\n else:\n addressList = addressList+\",\"+str(\",\".join(set(e_addr)))\n send = True\n\n addressList = addressList.split(\",\")\n addressList = set(addressList)\n addressList = \",\".join(addressList)\n\n #send = False # comment later >>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \n if send:\n print (\"\\nSending Emails...\")\n send_email(addressList,allProvSummeryTagged,pdate)\n\n print(\"\\nProcess Done @ \"+ str(datetime.now()))\n\n dlay = refreshRate*3600\n time.sleep(dlay)\n\n ###reload settings for possible changes\n print (\"Reloading settings..\")\n q = \"select * from settings where settings_id = 1\"\n cur = conn.cursor()\n cur.execute(q)\n settings = cur.fetchone()\n conn.commit()\n cur.close()\n\n refreshRate = settings[1] #hours - befores generating new report\n rangeTime = settings[2] #hours - time range of posts which will be included in the report\n \n\n\n \n\n \n\n\n\n\n \n","sub_path":"reportGenerator.py","file_name":"reportGenerator.py","file_ext":"py","file_size_in_byte":17924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"163221919","text":"#!/usr/bin/env python3\nimport sys\nfrom Bio import SeqIO\n\ndef calc_ratio(seq_list):\n seq0 = seq_list[0]\n seq1 = seq_list[1]\n\n transition_cnt = 0\n transverstion_cnt = 0\n\n is_transition = lambda x, y: {x, y} in [{'A', 'G'}, {'C', 'T'}]\n is_transversion = lambda x, y: not is_transition(x, y)\n\n for i, c0 in enumerate(seq0):\n c1 = seq1[i]\n if c0 != c1:\n if is_transition(c0, c1):\n transition_cnt += 1\n if is_transversion(c0, c1):\n transverstion_cnt += 1\n\n print(transition_cnt)\n print(transverstion_cnt)\n print(transition_cnt/transverstion_cnt)\n\nif __name__ == '__main__':\n seq_list = []\n filename = sys.argv[1]\n for seq_record in SeqIO.parse(filename, 'fasta'):\n seq_list.append(seq_record.seq)\n\n calc_ratio(seq_list)","sub_path":"rosalind/stronghold/tran.py","file_name":"tran.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"61165582","text":"import os\nimport pytest\nimport shakedown # required by @sdk_utils.dcos_X_Y_or_higher\n\nimport sdk_install\nimport sdk_utils\nimport sdk_networks\n\n\nfrom tests.config import (\n PACKAGE_NAME,\n 
DEFAULT_TASK_COUNT\n)\n\noverlay_nostrict = pytest.mark.skipif(os.environ.get(\"SECURITY\") == \"strict\",\n                                     reason=\"overlay tests currently broken in strict\")\n\n@pytest.fixture(scope='module', autouse=True)\ndef configure_package(configure_security):\n    try:\n        sdk_install.uninstall(PACKAGE_NAME)\n        sdk_utils.gc_frameworks()\n        sdk_install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT,\n                            additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS)\n\n        yield # let the test session execute\n    finally:\n        sdk_install.uninstall(PACKAGE_NAME)\n\n\n@pytest.mark.sanity\n@pytest.mark.smoke\n@pytest.mark.overlay\n@overlay_nostrict\n@sdk_utils.dcos_1_9_or_higher\ndef test_install():\n    sdk_networks.check_task_network(\"template-0-node\")\n","sub_path":"frameworks/template/tests/test_overlay.py","file_name":"test_overlay.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"458184342","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 20 16:54:13 2018\n\n@author: tianan.lai\n\"\"\"\n\nimport pandas as pd \nimport numpy as np\n\nimport os \n\n\nfrom statsmodels.tsa.arima_model import ARIMA\n\nfrom statsmodels.tsa.stattools import adfuller\n\n\ndef read_data(cut_start_time=None):\n    df=pd.read_csv(\"./xlsx/TIANAN_1220_sum3.csv\",parse_dates=['AD_ORDERED_DAY_LOC'])\n    df=df[['WAREHOUSE_NAME',\"AD_ORDERED_DAY_LOC\",\"ORDER_NUM\"]]\n    df=df.rename({\"AD_ORDERED_DAY_LOC\":\"date\",\"ORDER_NUM\":\"ORDER_UNIT_NUM\"},axis=1)\n    \n    df=df[df['date']>'1900-01-01 00:00:00']\n    df[\"WAREHOUSE_NAME\"]=df[\"WAREHOUSE_NAME\"].map(lambda x:x if x not in [\"USKY2 Warehouse\",\"USKY3 Warehouse\"] else \"USKY3 Warehouse\")\n    drop_ware=[\"UKKM Warehouse\",'UKBH Warehouse','PLSC Warehouse','USSR Warehouse',]\n    df=df[~(df[\"WAREHOUSE_NAME\"].isin(drop_ware))]\n    if cut_start_time is not None:\n        df=df[df['date']<=cut_start_time]\n        print(cut_start_time)\n    df = df[~pd.isnull(df['WAREHOUSE_NAME'])]\n    df=df.groupby(['WAREHOUSE_NAME','date']).agg('sum').reset_index()\n    mintime_cut={\"AUME Warehouse\":\"2018-08-01\",\"BEMO Warehouse\":\"2016-11-1\"\n                ,\"UKGF Warehouse\":\"2017-04-01\"\n                ,\"USKY3 Warehouse\":\"2018-01-01\",\"USKYN Warehouse\":\"2016-02-01\"\n                ,\"USTX Warehouse\":\"2017-01-01\",\"USWC2 Warehouse\":\"2018-04-01\"}\n    for name,mintime in mintime_cut.items():\n        df=df[~((df['WAREHOUSE_NAME']==name)&(df['date']<mintime))]\n\n\t\t\t\t\tif dis > -1e-10:\n\t\t\t\t\t\tdis = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError('The distance is negative.')\n\t\t\t\tdis_mat[i, j] = np.sqrt(dis)\n\t\t\t\tdis_mat[j, i] = dis_mat[i, j]\n\t\tdis_max = np.max(np.max(dis_mat))\n\t\tdis_min = np.min(np.min(dis_mat[dis_mat != 0]))\n\t\tdis_mean = np.mean(np.mean(dis_mat))\n\t\treturn dis_mat, dis_max, dis_min, dis_mean\n\t\t\t\n\t\t\t\n\tdef __compute_gram_matrix(self):\n\t\tstart_time = time.time()\n\t\t\n\t\tif self._parallel == 'imap_unordered':\n\t\t\tgram_matrix = self._compute_gm_imap_unordered()\n\t\telif self._parallel == None:\n\t\t\tgram_matrix = self._compute_gm_series()\n\t\telse:\n\t\t\traise Exception('Parallel mode is not set correctly.')\n\t\t\n\t\tself._run_time = time.time() - start_time\n\t\tif self._verbose:\n\t\t\tprint('Gram matrix of size %d built in %s seconds.'\n\t\t\t % (len(self._graphs), self._run_time))\n\t\t\t\n\t\treturn gram_matrix\n\t\t\t\n\t\t\t\n\tdef _compute_gm_series(self):\n\t\tpass\n\n\n\tdef _compute_gm_imap_unordered(self):\n\t\tpass\n\t\n\t\n\tdef __compute_kernel_list(self, g1, g_list):\n\t\tstart_time = 
time.time()\n\t\t\n\t\tif self._parallel == 'imap_unordered':\n\t\t\tkernel_list = self._compute_kernel_list_imap_unordered(g1, g_list)\n\t\telif self._parallel == None:\n\t\t\tkernel_list = self._compute_kernel_list_series(g1, g_list)\n\t\telse:\n\t\t\traise Exception('Parallel mode is not set correctly.')\n\t\t\n\t\tself._run_time = time.time() - start_time\n\t\tif self._verbose:\n\t\t\tprint('Graph kernel between a graph and a list of %d graphs built in %s seconds.'\n\t\t\t % (len(g_list), self._run_time))\n\t\t\t\n\t\treturn kernel_list\n\t\n\n\tdef _compute_kernel_list_series(self, g1, g_list):\n\t\tpass\n\n\t\n\tdef _compute_kernel_list_imap_unordered(self, g1, g_list):\n\t\tpass\n\t\n\t\n\tdef __compute_single_kernel(self, g1, g2):\n\t\tstart_time = time.time()\n\t\t\n\t\tkernel = self._compute_single_kernel_series(g1, g2)\n\t\t\n\t\tself._run_time = time.time() - start_time\n\t\tif self._verbose:\n\t\t\tprint('Graph kernel between two graphs built in %s seconds.' % (self._run_time))\n\t\t\t\n\t\treturn kernel\n\t\n\t\n\tdef _compute_single_kernel_series(self, g1, g2):\n\t\tpass\n\t\n\t\n\tdef is_graph(self, graph):\n\t\tif isinstance(graph, nx.Graph):\n\t\t\treturn True\n\t\tif isinstance(graph, nx.DiGraph):\n\t\t\treturn True \n\t\tif isinstance(graph, nx.MultiGraph):\n\t\t\treturn True \n\t\tif isinstance(graph, nx.MultiDiGraph):\n\t\t\treturn True \n\t\treturn False\n\t\n\t\n\t@property\n\tdef graphs(self):\n\t\treturn self._graphs\n\t\n\t\n\t@property\n\tdef parallel(self):\n\t\treturn self._parallel\n\t\n\t\n\t@property\n\tdef n_jobs(self):\n\t\treturn self._n_jobs\n\n\n\t@property\n\tdef verbose(self):\n\t\treturn self._verbose\n\t\n\t\n\t@property\n\tdef normalize(self):\n\t\treturn self._normalize\n\t\n\t\n\t@property\n\tdef run_time(self):\n\t\treturn self._run_time\n\t\n\t \n\t@property\n\tdef gram_matrix(self):\n\t\treturn self._gram_matrix\n\t\n\t@gram_matrix.setter\n\tdef gram_matrix(self, value):\n\t\tself._gram_matrix = value\n\t\n\t \n\t@property\n\tdef gram_matrix_unnorm(self):\n\t\treturn self._gram_matrix_unnorm \n\n\t@gram_matrix_unnorm.setter\n\tdef gram_matrix_unnorm(self, value):\n\t\tself._gram_matrix_unnorm = value","sub_path":"gklearn/kernels/graph_kernel.py","file_name":"graph_kernel.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"503324578","text":"import numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom songfp.functions import local_peaks\n\nfrom hypothesis import given, settings\nimport hypothesis.strategies as st\nimport hypothesis.extra.numpy as hnp\n\n\ndef old_local_peaks(log_spectrogram, amp_min, p_nn):\n    \"\"\"\n    Parameters\n    ----------\n    log_spectrogram : numpy.ndarray, shape=(n_freq, n_time)\n        Log-scaled spectrogram. Columns are the periodograms of successive segments of a\n        frequency-time spectrum.\n\n    amp_min : float\n        Amplitude threshold applied to local maxima\n\n    p_nn : int\n        Number of cells around an amplitude peak in the spectrogram in order\n\n    Returns\n    -------\n    List[Tuple[int, int]]\n        Time and frequency values of local peaks in spectrogram. 
Sorted by ascending\n frequency and then time.\"\"\"\n from songfp.functions import generate_binary_structure, iterate_structure\n from scipy.ndimage.filters import maximum_filter\n\n struct = generate_binary_structure(2, 1)\n neighborhood = iterate_structure(struct, p_nn)\n\n # find local maxima using our filter shape\n local_max = (\n maximum_filter(log_spectrogram, footprint=neighborhood) == log_spectrogram\n ) # where spectrogram aligns with local maxes\n foreground = log_spectrogram >= amp_min\n # Boolean mask of S with True at peaks that are in foreground, and are above the threshold\n detected_peaks = local_max & foreground\n\n # Extract peaks; encoded in terms of time and freq bin indices.\n # dt and df are always the same size for the spectrogram that is produced,\n # so the bin indices consistently map to the same physical units:\n # t_n = n*dt, f_m = m*df (m and n are integer indices)\n # Thus we can codify our peaks with integer bin indices instead of their\n # physical (t, f) coordinates. This makes storage and compression of peak\n # locations much simpler.\n\n # take transpose so peaks are ordered by time then frequency\n ts, fs = (i.astype(np.int16) for i in np.where(detected_peaks.T))\n return list(zip(ts, fs))\n\n\n@settings(deadline=None)\n@given(\n array=hnp.arrays(\n shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=5),\n dtype=float,\n elements=st.floats(-1e6, 1e6),\n ),\n data=st.data(),\n)\ndef test_peak_implementations(array: np.ndarray, data: st.DataObject):\n amp_min = data.draw(st.floats(array.min(), array.max()), label=\"amp_min\")\n p_nn = data.draw(st.integers(1, min(array.shape) // 2), label=\"p_nn\")\n\n expected = np.array((old_local_peaks(array, amp_min, p_nn)))\n desired = np.array((local_peaks(array, amp_min, p_nn)))\n\n assert_array_equal(expected, desired)\n","sub_path":"AudioProject-master/tests/test_peaks.py","file_name":"test_peaks.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"420360883","text":"# pylint: disable=wrong-import-position\n\nAPP_NAME = \"fb_post\"\nOPERATION_NAME = \"get_replies_to_comment\"\nREQUEST_METHOD = \"get\"\nURL_SUFFIX = \"comment/{comment_id}/replies/\"\n\nfrom .test_case_01 import TestCase01GetRepliesToCommentAPITestCase\nfrom .test_case_02 import TestCase02GetRepliesToCommentAPITestCase\n\n__all__ = [\n \"TestCase01GetRepliesToCommentAPITestCase\",\n \"TestCase02GetRepliesToCommentAPITestCase\"\n]\n","sub_path":"fb_post/views/get_replies_to_comment/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"346675699","text":"import board, time, math\n\nclass DictionaryWords:\n\n\tdef __init__(self, filename):\n\t\tself.words = {}\n\t\tself.lookupTime = 0\n\t\tdictFile = open(filename, 'r')\n\t\tfor line in dictFile:\n\t\t\tline = line.rstrip()\n\t\t\ttokens = line.split()\n\t\t\tif len(tokens) == 1:\n\t\t\t\tcount = -1\n\t\t\telif len(tokens) == 2:\n\t\t\t\tcount = int(tokens[1])\n\t\t\t\t\n\t\t\tself.words[tokens[0]] = count\n\n\tdef isValid(self, word, vocabulary = -1):\n\t\t\n\t\n\t\tif board.Board.DEBUG_ERRORS:\n\t\t\tstartTime = time.time()\n\t\n\n\t\tif self.words.has_key(word):\n\t\t\tvalue = self.words[word]\n\t\t\tsuccess = True\n\t\t\tif vocabulary > 0:\n\t\t\t\t\n\n\t\t\t\tif value <= 0:\n\t\t\t\t\tvalue = 1\n\t\t\t\t\t\n\t\t\t\tif value < vocabulary:\n\t\t\t\t\tsuccess = 
False\n\t\t\n\t\telse:\n\t\t\tsuccess = False\n\t\t\n\t\t\n\t\t\t\t\t\n\t\tif board.Board.DEBUG_ERRORS:\n\t\t\ttimeSpent = time.time()-startTime\n\t\t\tself.lookupTime += timeSpent\n\t\t\n\t\treturn success\n\n\tdef matchWithBlanks(self, word, vocabulary = -1, assignment=[]):\n\t\t\n\n\t\tif not ' ' in word:\n\t\t\tif self.isValid(word, vocabulary):\n\t\t\t\treturn [assignment]\n\t\t\telse:\n\t\t\t\treturn []\n\t\telse:\n\t\t\ti = word.find(' ')\n\t\t\tblankAssignments = []\n\t\t\tfor code in range(ord('A'), ord('Z')+1):\n\t\t\t\tchar = chr(code)\n\t\t\t\tif i == 0:\n\t\t\t\t\tnewWord = char + word[1:]\n\t\t\t\telif i == len(word)-1:\n\t\t\t\t\tnewWord = word[:-1] + char\n\t\t\t\telse:\n\t\t\t\t\tnewWord = word[:i] + char + word[i+1:]\n\t\t\t\t\n\t\t\t\tnewAssignment = assignment[:]\n\t\t\t\tnewAssignment.append(char)\n\t\t\t\tresults = self.matchWithBlanks(newWord, vocabulary, newAssignment)\n\t\t\t\tfor result in results:\n\t\t\t\t\tblankAssignments.append(result)\n\t\t\t\t\t\t\t\n\t\t\treturn blankAssignments\n\n\tdef setUsage(self, word, usage):\n\t\tword = word.upper()\n\t\tword = word.rstrip()\n\t\tif self.isValid(word):\n\t\t\tself.words[word] = usage\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef saveUsage(self, filename):\n\t\twith open(filename, 'w') as outfile:\n\t\t\tkeylist = self.words.keys()\n\t\t\tkeylist.sort()\n\t\t\tfor w in keylist:\t\n\t\t\t\toutfile.write(w+\"\\t\"+str(self.words[w])+\"\\n\")\t\n\n\tdef resetLookupTime(self):\n\t\tself.lookupTime = 0\n\n\tdef difficultyToUsage(self, difficulty):\n\t\talpha = 10 - (difficulty/10.0)*6.5\n\t\tusage = math.exp(alpha)\n\n\t\tif difficulty >= 9.999:\n\t\t\tusage = -1\n\t\t\t\n\t\treturn usage\n","sub_path":"OOAD_project/dictionarywords.py","file_name":"dictionarywords.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"152597759","text":"#!/usr/bin/python3\n\"\"\"\nStream camera feed from IREC 2109 payload\nUses USB 5.8GHz receiver and the v4l2 library\n\nauthor: Will Merges, RIT Launch Initiative\n\"\"\"\nimport cv2\nimport os\nimport sys\n\ntry:\n id = int(sys.argv[1])\n fps = int(sys.argv[2])\nexcept:\n print(\"receiver ID and fps must be an integer\")\n sys.exit()\n\noutfile = \"out.mp4\"\nif len(sys.argv) > 3:\n outfile = sys.argv[3]\n if(outfile.rfind(\".mp4\") == len(outfile)-4):\n print(\"writing to \"+outfile)\n else:\n print(\"outfile must be a .mp4\")\n outfile = \"out.mp4\"\n print(\"writing to 'out.mp4' instead\")\nelse:\n print(\"no outfile supplied, writing to 'out.mp4'\")\n\n\nos.system(\"mkdir -p videos\")\n\nvc = cv2.VideoCapture(id)\nfourcc = cv2.VideoWriter_fourcc(*'MP4V')\nout = cv2.VideoWriter(\"videos/\"+outfile, fourcc, float(fps), (640, 480))\n\n\nwhile 1:\n _,frame = vc.read()\n\n frame = cv2.flip(frame, 1)\n\n out.write(frame)\n\n frame = cv2.resize(frame, (640*2, 480*2))\n cv2.imshow(\"device: \"+str(id), frame)\n\n if cv2.waitKey(33) == 27: #esc\n break\n\nvc.release()\nout.release()\ncv2.destroyAllWindows()\n","sub_path":"camera_code/rx/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"615372098","text":"#jumpingJack.py\n#Scott Ziegler\n#this will make a stick man\nfrom graphics import *\n\nwin = GraphWin(\"Jack\",500,500)\nwin.setCoords(0,0,100,100)\ncenter = Point(50,75)\ncirc = Circle(center, 10)\ncirc.draw(win)\nline = 
Line(Point(50,65),Point(50,30))\n##left leg\nline2=Line(Point(50,30),Point(35,15))\n##right leg \nline3=Line(Point(50,30),Point(65,15))\n##right arm\nline4=Line(Point(50,55),Point(65,35))\n##left arm\nline5=Line(Point(50,55),Point(35,35))\n##drawing\nline2.draw(win)\nline.draw(win)\nline3.draw(win)\nline4.draw(win)\nline5.draw(win)\n##Buttons\nstartButton = Rectangle(Point(13,90), Point(22,80))\nstartButton.draw(win)\nstopButton = Rectangle(Point(30,90), Point(39,80))\nstopButton.draw(win)\nstart = Text(Point(17, 85), \"Start\")\nstart.draw(win)\nstop = Text(Point(35, 85), \"Stop\")\nstop.draw(win)\nquitButton = Rectangle(Point(75,90), Point(84,80))\nquitButton.draw(win)\nquit = Text(Point(80, 85), \"Quit\")\nquit.draw(win)\n\ndef start():\n    while True:\n        p = win.getMouse()\n        x = p.getX()\n        y = p.getY()\n        if x >= 13 and x <= 22 and y >= 80 and y <= 90: # Check to see if start clicked\n            return True\nwhile start()==True:\n    for i in range(1): # range() needs a count; one jump per Start press\n        line.move(0,-20)\n        line2.move(0,-20)\n        line3.move(0,-20)\n        line4.move(0,-20)\n        line5.move(0,-20)\n        circ.move(0,20)\n        line.move(0,20)\n        line2.move(0,20)\n        line3.move(0,20)\n        line4.move(0,20)\n        line5.move(0,20)\n\n    break\n\n\n# Check for stop while updating something on screen\ndef stop():\n    while True:\n        p = win.checkMouse()\n        if p != None:\n            x = p.getX()\n            y = p.getY()\n            if x >= 30 and x <= 39 and y >= 80 and y <= 90: # Check to see if stop clicked\n                return True\nwhile stop()==True:\n    line.move(0,20)\n    line2.move(0,20)\n    line3.move(0,20)\n    line4.move(0,20)\n    line5.move(0,20)\n    circ.move(0,-20)\n    line.move(0,-20)\n    line2.move(0,-20)\n    line3.move(0,-20)\n    line4.move(0,-20)\n    line5.move(0,-20)\nwhile True:\n    p = win.checkMouse()\n    if p != None:\n        x = p.getX()\n        y = p.getY()\n        if x >= 75 and x <= 84 and y >= 80 and y <= 90: # Check to see if quit clicked\n            win.close()\n            break\n\n\n\n\n\n\n####moves Jack\n##    win.getMouse()\n##    line.move(0,-50)\n##    line2.move(0,-50)\n##    line3.move(0,-50)\n##    line4.move(0,-50)\n##    line5.move(0,-50)\n##    circ.move(0,50)\n##    line.move(0,50)\n##    line2.move(0,50)\n##    line3.move(0,50)\n##    line4.move(0,50)\n##    line5.move(0,50)\n##\n##\n####closes window\n##    win.getMouse()\n##    win.close()\n\n","sub_path":"jumpingJack.py","file_name":"jumpingJack.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"178493633","text":"'''\nTic-tac-toe (\"tris\") is a very popular game. It is played on a square grid of 3x3 cells.\nThe two players take turns choosing an empty cell and drawing their own symbol in it \n(one player's symbol is an \"o\" and the opponent's an 'x'). \nThe winner is the player who manages to place three of their own symbols in a straight \nhorizontal, vertical or diagonal line. If the grid gets filled \nwithout either player having completed a straight line \nof three symbols, the game ends in a tie; in that case the match \nis called a draw (\"patta\"). \nBy convention, on an empty grid the first move always belongs to player 'o'\n\nA configuration of the game is therefore uniquely determined by the contents of the grid.\n\nIn what follows we assume the contents of the grid are represented as a list of lists.\nThe list of lists M has size 3x3 and M[i][j] contains '', 'x', or 'o' depending on \nwhether the grid cell in the i-th row and j-th column is still free, \ncontains the symbol 'x' or contains the symbol 'o'. 
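(Illustrative example, not part of the original assignment text: after 'o' opens in the
centre and 'x' replies in the top-left corner, the grid is represented by
M = [['x', '', ''], ['', 'o', ''], ['', '', '']], so M[0][0] == 'x', M[1][1] == 'o'
and every other cell is still ''.)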
\n\nGiven a game configuration C, the game tree for C is the tree obtained \nrecursively starting from configuration C and assigning as children the configurations \nthat can be reached from C with one further move of the game. The leaves of the tree \nare therefore the possible outcomes of the match, i.e. the different configurations that \ncan be reached starting from C and that represent draws, wins for 'o' or wins for 'x'.\nSee for example the image albero_di_gioco.png, which shows the game tree obtained starting \nfrom the configuration represented by [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\n \n\nConsider the following class of objects:\n\n\nclass NodoTris:\n    def __init__(self, griglia):\n        self.nome = griglia\n        self.lista_figli = [] \n\n\nYou must design the following function \n\ngen_tree(griglia)\nwhich, given the game configuration griglia, builds the game tree obtained starting \nfrom the configuration griglia and returns its root. The nodes of the tree must be \nobjects of the NodoTris class.\n\nTo test the correctness of your implementation of gen_tree() the grader will use four methods \nof the NodoTris class that you must implement in any case: \n\n1)\ntipo(self)\nwhich, given a NodoTris node, returns:\n 'o' if the configuration represented by the node is a winning configuration for player 'o'\n 'x' if the configuration represented by the node is a winning configuration for player 'x'\n '-' if the configuration represented by the node is a draw configuration\n '?' if the configuration represented by the node is a configuration of a game not yet finished\n\n2)\nesiti(self)\nwhich, given the root node of a game tree, returns a triple with the possible \noutcomes of the match whose initial configuration is the one represented by the node. \nMore precisely: the first element of the triple is the number of possible draws, \nthe second is the number of possible wins for player 'o', while the third element \nis the number of possible wins for player 'x'.\n\n3)\nvittorie_livello(self, giocatore, h)\nwhich, given the root node of a game tree, one of the two players and an integer h,\nreturns the number of nodes that represent a win for that player and are \nfound at height h in the tree. In other words, it returns the number of possible wins \nfor the player in exactly h moves, in the match whose initial configuration is \nthe one represented by the root of the tree.\n\n4)\nstrategia_vincente(self,giocatore)\nwhich, given the root node of a game tree and one of the two players, returns True or False. 
\nIt returns True if the player has a winning strategy in the match \nwhose initial configuration is the one represented by the root node, False otherwise.\n\nNote that a player has a winning strategy with respect to a given configuration if, \nwhatever the opponent's moves are, they can always reply in such a way \nthat the match ends with their victory.\n\nYou may of course define further functions and other methods for the NodoTris class \nif you consider them useful for solving the assignment.\n\nYou may assume that the game configurations represented by griglia are always legal \nconfigurations (that is, obtained after some number of moves starting from the empty grid).\n\n\nWARNING: do not use non-ASCII characters, such as accented letters; do not\nimport modules that are not in the standard library.\n\nNOTE: the tests are run with a global timeout of 2*N seconds (if the grader runs N tests).\n'''\nimport copy\n\npatte = 0\n \nvittorie_o = 0\n \nvittorie_x = 0\n\naltezza = 0\n \nclass NodoTris:\n\n    def __init__(self, griglia):\n        \n        self.nome = griglia\n        \n        self.lista_figli = [] # list of child nodes\n        \n    def tipo(self):\n        \n        return controllavincitore(self)\n        \n    def esiti(self):\n        \n        global patte\n        \n        patte = 0\n        \n        global vittorie_o\n        \n        vittorie_o = 0\n        \n        global vittorie_x\n        \n        vittorie_x = 0\n        \n        gen_tree(self.nome)\n        \n        return (patte, vittorie_o, vittorie_x)\n        \n    def vittorie_livello(self, giocatore, h):\n        \n        # a win for the player counts only if it sits exactly h levels below this node\n        if h == 0:\n            \n            return 1 if self.tipo() == giocatore else 0\n        \n        return sum(figlio.vittorie_livello(giocatore, h - 1) for figlio in self.lista_figli)\n        \n    def strategia_vincente(self,giocatore):\n        \n        esito = self.tipo()\n        \n        if esito != '?':\n            \n            return esito == giocatore\n        \n        if scegligiocatore(self.nome) == giocatore:\n            \n            # the player moves: one winning reply is enough\n            return any(figlio.strategia_vincente(giocatore) for figlio in self.lista_figli)\n        \n        # the opponent moves: every reply must stay winning for the player\n        return all(figlio.strategia_vincente(giocatore) for figlio in self.lista_figli)\n        \n    def stampa_griglia(self):\n        \n        print(str(self.nome[0]) + \"\\n\" + str(self.nome[1]) + \"\\n\" + str(self.nome[2]))\n        \ndef gen_tree(griglia):\n    \n    global patte\n    \n    global vittorie_o\n    \n    global vittorie_x\n    \n    global altezza\n    \n    Nodo = NodoTris(griglia)\n\n    altezza +=1\n\n    esito = controllavincitore(Nodo)\n    \n    if esito != '?':\n        \n        if esito == '-':\n            \n            patte += 1\n            \n        elif esito == 'o':\n            \n            vittorie_o += 1\n            \n        else:\n            \n            vittorie_x += 1\n        \n        return Nodo\n    \n    else:\n        \n        # For every empty cell make a move for the player whose turn it is and\n        # attach the subtree generated from the resulting grid as a child of the\n        # node built by this call of gen_tree (building children directly avoids\n        # the duplicated extra level the two-step append produced)\n        \n        giocatore = scegligiocatore(Nodo.nome)\n        \n        for colonna in range(0, 3):\n            \n            for riga in range(0, 3):\n                \n                if Nodo.nome[riga][colonna] == '':\n                    \n                    nuova_griglia = copy.deepcopy(Nodo.nome)\n                    \n                    nuova_griglia[riga][colonna] = giocatore\n                    \n                    Nodo.lista_figli.append(gen_tree(nuova_griglia))\n            \n        return Nodo\n    \ndef scegligiocatore(griglia):\n    \n    conta_x = 0\n    \n    conta_o = 0\n    \n    for colonna in range(0, 3):\n        \n        for riga in range (0, 3):\n            \n            if griglia[riga][colonna] == 'x':\n                \n                conta_x += 1\n                \n            elif griglia[riga][colonna] == 'o':\n                \n                conta_o += 1\n                \n    if conta_x >= conta_o:\n        \n        return 'o'\n    \n    else:\n        \n        return 'x'\n\ndef controllavincitore(NodoDaControllare):\n    \n    '''\n    \n    Check whether the grid is a winning configuration for one of the two players.\n    The function returns:\n    - 'o' for a win by o;\n    - 'x' for a win by x;\n    - '?' 
for a game still in progress;\n    - '-' for a draw (\"patta\").\n    \n    '''\n    \n    griglia = NodoDaControllare.nome\n    \n    # First row  \n    if griglia[0][0] == griglia[0][1] == griglia[0][2]:\n        \n        if griglia[0][0] == 'o' or griglia[0][0] == 'x':\n        \n            return griglia[0][0]\n    \n    # Second row    \n    if griglia[1][0] == griglia[1][1] == griglia[1][2]:\n        \n        if griglia[1][0] == 'o' or griglia[1][0] == 'x':\n        \n            return griglia[1][0]    \n    \n    # Third row    \n    if griglia[2][0] == griglia[2][1] == griglia[2][2]:\n        \n        if griglia[2][0] == 'o' or griglia[2][0] == 'x':\n        \n            return griglia[2][0]\n    \n    # First column    \n    if griglia[0][0] == griglia[1][0] == griglia[2][0]:\n        \n        if griglia[0][0] == 'o' or griglia[0][0] == 'x':\n        \n            return griglia[0][0]    \n    \n    # Second column    \n    if griglia[0][1] == griglia[1][1] == griglia[2][1]:\n        \n        if griglia[0][1] == 'o' or griglia[0][1] == 'x':\n        \n            return griglia[0][1]    \n    \n    # Third column    \n    if griglia[0][2] == griglia[1][2] == griglia[2][2]:\n        \n        if griglia[0][2] == 'o' or griglia[0][2] == 'x':\n        \n            return griglia[0][2]    \n    \n    # Diagonal from top-left to bottom-right\n    if griglia[0][0] == griglia[1][1] == griglia[2][2]:\n        \n        if griglia[0][0] == 'o' or griglia[0][0] == 'x':\n        \n            return griglia[0][0]    \n    \n    # Diagonal from top-right to bottom-left\n    if griglia[0][2] == griglia[1][1] == griglia[2][0]:\n        \n        if griglia[0][2] == 'o' or griglia[0][2] == 'x':\n        \n            return griglia[0][2]    \n    \n    # If we get here nobody has won:\n    # check whether every cell is full, otherwise the game \n    # is still in progress\n    \n    for r in range(0, 3):\n        \n        for c in range(0, 3):\n            \n            if griglia[r][c] == '':\n                \n                return '?'\n    \n    return '-'\n","sub_path":"students/693260/homework04/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":10264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"531930340","text":"#!/usr/bin/python3\n\nimport sys\nimport os.path\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\ndef open_file(path):\n    if os.path.exists(path):\n        return open(path, \"a+\")\n    return open(path, \"w+\")\n    \n\ndef read_file(f):\n    text = f.read(10)\n    return text\n\ndef close(f):\n    f.close()\n\nif __name__ == '__main__':\n    fi = open_file(input(\"Type in the file path\\n\"))\n\n    fi.write(input(\"Enter some text:\\n\"))\n    fi.seek(0)  # rewind so the text just written can be read back\n    \n    print(read_file(fi))\n\n    fi.close()\n\n    \n\n","sub_path":"tmp/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"479628481","text":"# Some common utility functions\r\nimport re\r\nimport IPy\r\nfrom .config_7750 import *\r\n\r\ndef address_check(pool, inside, subnet):\r\n    '''Check the pool, inside and subnet address lists:\r\n    1. check that every private address in inside appears in pool and in subnet\r\n    2. remove those addresses from pool and subnet\r\n    3. check that the remaining pool addresses match subnet'''\r\n\r\n    # first make sure every inside address is in pool and in subnet\r\n    for item in inside:\r\n        if item not in pool:\r\n            return 'inside address {} is not in pool'.format(item)\r\n        else:\r\n            pool.remove(item)\r\n        if item not in subnet:\r\n            return 'inside address {} is not in subnet'.format(item)\r\n        else:\r\n            subnet.remove(item)\r\n\r\n    # check that the remaining pool addresses match subnet\r\n\r\n    if pool != subnet:\r\n        return 'pool addresses do not match subnet'\r\n\r\ndef compare_ip(ip, ip_range):\r\n    '''Check that ip_range lies inside the usable host range of the ip network'''\r\n\r\n    ip_b = IPy.IP(ip_range.split('-')[0])\r\n    ip_e = IPy.IP(ip_range.split('-')[1])\r\n\r\n    ip = IPy.IP(ip)\r\n    ip = ip[2:-1]\r\n    if ip_b not in ip or ip_e not in ip:\r\n        return False\r\n    else:\r\n        return True\r\n\r\n\r\ndef get_pool_ip(config):\r\n    p_dhcp = r'(?s)(echo \"Local DHCP Server \\(Base Router\\) Configuration\"\\n.*?\\n {4}exit)'\r\n 
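    # A reading of these patterns (assumption, inferred from the PAT table and the
    # code below, not documented in the original source): the (?s) dot-all regexes
    # slice the 7750 config by indentation depth, p_dhcp capturing the whole
    # 'Local DHCP Server (Base Router)' echo section and p_pool each nested
    # pool <name> create ... exit block within it.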
p_pool = r'(?s)(pool \"(.*?)\" create.*?\\n {16}exit)'\r\n    p_subnet = PAT['subnet']\r\n    ips = []\r\n    res_dhcp = re.search(p_dhcp, config)\r\n    if res_dhcp:\r\n        res_pool = re.findall(p_pool, res_dhcp.group())\r\n        for item in res_pool:\r\n            res_subnet = re.findall(p_subnet, item[0])\r\n            for subnet in res_subnet:\r\n                ips.append(subnet[1])\r\n\r\n    return ips\r\n\r\ndef get_nat_address(config):\r\n    res = []\r\n    p_nat = generate_pat(2, 'l2-aware', 16)\r\n    res_nat = re.search(p_nat, config)\r\n\r\n    if res_nat:\r\n        res = re.findall(PAT['address'], res_nat.group())\r\n\r\n    return res\r\n\r\ndef get_vprn_pool_ip(config):\r\n    p_dhcp = r'(?s)(echo \"Local DHCP Server \\(Services\\) Configuration\"\\n.*?\\n {4}exit)'\r\n    p_vprn = generate_pat(0, 'vprn', 8)\r\n    p_pool = generate_pat(1, 'pool', 20)\r\n    p_subnet = PAT['subnet']\r\n    res = []\r\n    res_dhcp = re.search(p_dhcp, config)\r\n    if res_dhcp:\r\n        res_vprn = re.findall(p_vprn, res_dhcp.group())\r\n        for vprn in res_vprn:\r\n            subnet = []\r\n            res_pool = re.findall(p_pool, vprn[0])\r\n            for item in res_pool:\r\n                res_subnet = re.findall(p_subnet, item[0])\r\n                subnet += res_subnet\r\n\r\n            res.append((vprn[1], subnet))\r\n\r\n\r\n    return res\r\n\r\ndef get_ies_address(config):\r\n    '''Get the address entries inside the IES subscriber-interfaces'''\r\n\r\n    config_7750 = Config_7750(config)\r\n    p_sub_inter = PAT['subscriber_interface']\r\n    p_ies = r''\r\n    res = []\r\n    ies = config_7750.get_ies()\r\n    for item in ies:\r\n        res_sub_inter = re.findall(p_sub_inter, item.config)\r\n        if res_sub_inter:\r\n            for sub_inter in res_sub_inter:\r\n                res += re.findall(PAT['address'], sub_inter[0])\r\n\r\n    return res\r\n\r\ndef get_vprn_address(config):\r\n    '''Get the address entries inside the VPRN subscriber-interfaces'''\r\n\r\n    config_7750 = Config_7750(config)\r\n    p_sub_inter = PAT['subscriber_interface']\r\n    res = []\r\n    business = config_7750.get_child()\r\n    for item in business:\r\n        if item._type != 'vprn':\r\n            continue\r\n\r\n        address = []\r\n        res_sub_inter = re.findall(p_sub_inter, item.config)\r\n        if res_sub_inter:\r\n            for sub_inter in res_sub_inter:\r\n                address += re.findall(PAT['address'], sub_inter[0])\r\n        if address != []:\r\n            res.append((item.name, address))\r\n\r\n\r\n    return res\r\n\r\n\r\ndef is_include(a, b):\r\n    '''Return the items of a that b does not contain (an empty result means b includes all of a)'''\r\n    res = []\r\n    for item in a:\r\n        if item not in b:\r\n            res.append(item)\r\n\r\n    return res\r\n\r\n\r\ndef get_prefix_list(config):\r\n    ips = []\r\n    res_prefix_list = re.search(PAT['prefix_list'], config)\r\n    if res_prefix_list:\r\n        res_ip = re.findall(PAT['ipv4'], res_prefix_list.group())\r\n\r\n        for ip in res_ip:\r\n            ips.append(ip)\r\n\r\n    return ips\r\n\r\n\r\ndef oct2bin(num):\r\n    res = bin(int(num)).replace('0b', '')\r\n    while len(res) != 8:\r\n        res = '0' + res\r\n\r\n    return res\r\n\r\n\r\ndef address_is_in_one_net(add1, add2):\r\n    add1_ip = add1.split('/')[0]\r\n    ym = add1.split('/')[1]\r\n\r\n    add2_ip = add2.split('/')[0]\r\n    ym2 = add2.split('/')[1]\r\n\r\n    if ym != ym2:\r\n        return False\r\n\r\n    add1_list = add1_ip.split('.')\r\n    add2_list = add2_ip.split('.')\r\n\r\n    add1_bin_str = ''\r\n    add2_bin_str = ''\r\n    for i in add1_list:\r\n        add1_bin_str += oct2bin(int(i))\r\n\r\n    for i in add2_list:\r\n        add2_bin_str += oct2bin(int(i))\r\n\r\n    # compare the first ym (prefix-length) bits; the original [:int(ym)-1] was off by one\r\n    if add1_bin_str[:int(ym)] == add2_bin_str[:int(ym)]:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef address_include_each(addr1, addr2):\r\n    '''Check that the two address lists cover the same networks; return the addresses with no match on the other side'''\r\n\r\n    err_ips = []\r\n\r\n    for addr1_ip in addr1:\r\n        is_in = False\r\n        for addr2_ip in addr2:\r\n            if address_is_in_one_net(addr1_ip, addr2_ip):\r\n                is_in = True\r\n        
if not is_in:\r\n            err_ips.append(addr1_ip)\r\n\r\n\r\n    for addr2_ip in addr2:\r\n        is_in = False\r\n        for addr1_ip in addr1:\r\n            if address_is_in_one_net(addr1_ip, addr2_ip):\r\n                is_in = True\r\n        if not is_in:\r\n            err_ips.append(addr2_ip)\r\n\r\n\r\n    return err_ips\r\n\r\ndef address_is_in_list(addr, ip_list):\r\n    '''Check whether an ip subnet matches any subnet in a list of ips'''\r\n    \r\n    for ip in ip_list:\r\n        if address_is_in_one_net(addr, ip):\r\n            return True\r\n\r\n    return False\r\n\r\ndef nat_check(ip):\r\n    '''Check whether an address falls in the RFC1918 private ranges'''\r\n    nat_a = IPy.IP('10.0.0.0-10.255.255.255', make_net=1)\r\n    nat_b = IPy.IP('172.16.0.0-172.31.255.255', make_net=1)\r\n    nat_c = IPy.IP('192.168.0.0-192.168.255.255', make_net=1)\r\n    ip = IPy.IP(ip, make_net=1)\r\n    is_nat = False\r\n    if ip in nat_a or ip in nat_b or ip in nat_c:\r\n        is_nat = True\r\n\r\n    return is_nat\r\n\r\n\r\ndef get_outside_pool(config):\r\n    '''Get the address-range entries of the outside pool'''\r\n\r\n    p_outside = generate_pat(2, 'outside', 12)\r\n    res_outside = re.search(p_outside, config)\r\n\r\n    res_address_range = []\r\n    if res_outside:\r\n        res_address_range = re.findall(PAT['address_range'], res_outside.group())\r\n\r\n\r\n    return res_address_range\r\n\r\n\r\ndef get_static_route(config):\r\n    '''Get the static routes'''\r\n\r\n    res_static_route = re.findall(PAT['static_route'], config)\r\n    return [item[1] for item in res_static_route]\r\n\r\n\r\ndef remove_right_space(text):\r\n    '''Strip trailing whitespace from every line'''\r\n\r\n    res = ''\r\n    for i in text.splitlines():\r\n        res += i.rstrip() + '\\n'\r\n\r\n    return res\r\n\r\ndef add_space(text, space):\r\n    '''Prepend the given spaces to every line of the text'''\r\n    res = ''\r\n    lines = text.splitlines()\r\n    for i in lines:\r\n        res = res + '\\n{}{}'.format(space, i)\r\n\r\n    return res\r\n\r\n\r\n\r\n","sub_path":"app/check_pool/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"132226470","text":"from flask import request\nfrom flask import render_template\nimport sys\nimport os\nimport csv\nfrom main import getPath\n\nsys.path.insert(1, getPath()+'/Scene/model/')\nfrom model_CSV_scene import DataCSV\nfrom datetime import datetime\n\nclass CSVController():\n    def CSV_controller():\n        start = request.form.get('start', default=0, type=int)\n        limit = request.form.get('limit', default=20, type=int )\n        logstart = datetime.now()\n        # defaults so every branch leaves these defined before rendering\n        result = []\n        file_path = ''\n        row_count = 0\n\n        if request.form.get('import_CSV'): \n            file_path = request.form.get('CSV_file_name')\n            DataCSV.import_data(file_path)\n            csv_data = csv.reader(open(file_path))\n            row_count = (sum(1 for row in csv_data)) - 1 \n            x = str(row_count)\n            DataCSV.update_log('Import CSV - '+x,1,logstart)\n            result = DataCSV.get_data(start,limit)\n        elif request.form.get('Btn_delete'):\n            data = request.form['name']\n            DataCSV.delete_data(data) \n        elif request.form.get('Btn_save'):\n            name = request.form['name']\n            description = request.form['description']\n            filename = request.form['filename']\n            filerow = request.form['filerow']\n            result = DataCSV.get_data(start,limit)\n            DataCSV.save_data(name,description,filename,filerow) \n        elif request.form.get('Btn_search'): \n            text = request.form.get('search')\n            result = DataCSV.search_data(text) \n        elif request.form.get('Btn_search1'): \n            text1 = request.form.get('search1')\n            result = DataCSV.search_data1(text1) \n        else:\n            result = 
DataCSV.get_data(start,limit)\n file_path = ''\n row_count = 0\n\n path, filename = os.path.split(file_path)\n count = DataCSV.count_data()\n\n if request.form.get('Btn_save'):\n return render_template('tmpl_LIST_scene.html', title='List Scene Category',listdata=result, countdata=count) \n else:\n return render_template('tmpl_CSV_scene.html', title='CSV Scene',csvdata=result,countdata=count,filepath=filename,csvrow=row_count) ","sub_path":"Scene/controller/controller_CSV_scene.py","file_name":"controller_CSV_scene.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"117109222","text":"from typing import List\n\n\nclass BIT2D:\n def __init__(self, n1: int, n2: int):\n self.n1 = n1\n self.n2 = n2\n self._tree = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n\n @staticmethod\n def _lowbit(x):\n return x & (-x)\n\n def update(self, i: int, j: int, x: int):\n now = self.query(i, j) - self.query(i - 1, j) - self.query(i, j - 1) + self.query(i - 1, j - 1)\n self.add(i, j, x - now)\n\n def add(self, i: int, j: int, x: int):\n i_lst, j_lst = [], []\n while i <= self.n1:\n i_lst.append(i)\n i += BIT2D._lowbit(i)\n while j <= self.n2:\n j_lst.append(j)\n j += BIT2D._lowbit(j)\n for ii in i_lst:\n for jj in j_lst:\n self._tree[ii][jj] += x\n\n def query(self, i: int, j: int) -> int:\n i_lst, j_lst = [], []\n while i > 0:\n i_lst.append(i)\n i -= BIT2D._lowbit(i)\n while j > 0:\n j_lst.append(j)\n j -= BIT2D._lowbit(j)\n ans = 0\n for ii in i_lst:\n for jj in j_lst:\n ans += self._tree[ii][jj]\n return ans\n\n def range_query(self, i1: int, j1: int, i2: int, j2: int) -> int:\n return self.query(i2, j2) - self.query(i2, j1 - 1) - self.query(i1 - 1, j2) + self.query(i1 - 1, j1 - 1)\n\n\nclass NumMatrix:\n\n def __init__(self, matrix: List[List[int]]):\n if not matrix or not matrix[0]:\n n1, n2 = 0, 0\n else:\n n1, n2 = len(matrix), len(matrix[0])\n self.BIT2D = BIT2D(n1, n2)\n for i in range(n1):\n for j in range(n2):\n self.BIT2D.update(i + 1, j + 1, matrix[i][j])\n\n def update(self, row: int, col: int, val: int) -> None:\n self.BIT2D.update(row + 1, col + 1, val)\n\n def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:\n return self.BIT2D.range_query(row1 + 1, col1 + 1, row2 + 1, col2 + 1)\n\n\nif __name__ == \"__main__\":\n matrix = [\n [3, 0, 1, 4, 2],\n [5, 6, 3, 2, 1],\n [1, 2, 0, 1, 5],\n [4, 1, 0, 1, 7],\n [1, 0, 3, 0, 5]\n ]\n obj = NumMatrix(matrix)\n print(obj.sumRegion(2, 1, 4, 3)) # 8\n obj.update(3, 2, 2)\n print(obj.sumRegion(2, 1, 4, 3)) # 10\n print()\n\n obj = NumMatrix([[]])\n print()\n\n obj = NumMatrix([[1, 2]])\n print(obj.sumRegion(0, 0, 0, 0))\n print(obj.sumRegion(0, 1, 0, 1))\n print(obj.sumRegion(0, 0, 0, 1))\n obj.update(0, 0, 3)\n obj.update(0, 1, 5)\n print(obj.sumRegion(0, 0, 0, 1))\n","sub_path":"0301-0400/0308/0308_Python_1.py","file_name":"0308_Python_1.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"218522305","text":"#-*- coding: utf-8 -*-\nimport copy\nimport datetime\nfrom apps.common import utils\nfrom apps.models.user_compete import UserCompete\nfrom apps.models.compete_rank import get_compete_rank\nfrom apps.models.user_cards import UserCards\nfrom apps.models.user_equipments import UserEquipments\nfrom apps.config.game_config import get_game_config\nfrom apps.models.virtual.card_npc import CardNpc\nfrom apps.models.dungeon import Dungeon\nfrom 
apps.models.user_compete_record import UserCompeteRecord\nfrom apps.models.user_compete_message import UserCompeteMessage\n# NOTE: import path assumed; UserProperty is required by extand_num below\nfrom apps.models.user_property import UserProperty\n\ndef get_compete_info(oc_user,params):\n    \"\"\"Get the user's compete (arena) home page\n    \"\"\"\n    data = {}\n    user_cards_obj = UserCards.get(oc_user.uid)\n    fight_force = user_cards_obj.fight_force()\n    need_user_rating = get_game_config(\"compete_config\").get(\"need_user_rating\",1000)\n    if need_user_rating > fight_force:\n        return 1,{\"msg\":\"fight force not arrive\"}\n    \n    user_compete_obj = UserCompete.get_instance(oc_user.uid)\n    data[\"my_rank\"] = user_compete_obj.my_rank()\n    data[\"my_reward\"] = user_compete_obj.get_my_reward()\n    data[\"compete_num\"] = user_compete_obj.compete_info.get(\"compete_num\",0)\n    return 0,data\n\ndef get_my_enemy(oc_user,params):\n    \"\"\"Get my compete opponents\n    \"\"\"\n    data = {}\n    user_compete_obj = UserCompete.get_instance(oc_user.uid)\n    data[\"compete_list\"] = user_compete_obj.get_my_enemy()\n    data[\"enemy_info\"] = user_compete_obj.compete_info[\"enemy_info\"]\n    return 0,data\n\ndef get_rank_info(oc_user,params):\n    \"\"\"Get the top-20 ranking\n    \"\"\"\n    data = {}\n    compete_rank_obj = get_compete_rank(oc_user.subarea)\n    ranking_list = compete_rank_obj.get(20)\n    data['ranking_list'] = {}\n    # First fill in the ranks held by real users within the top 20\n    for uid,rank in ranking_list:\n        if int(rank) >= 0:\n            temp = {}\n            temp['uid'] = uid\n            temp['name'] = oc_user.username\n            temp['lv'] = oc_user.property_info.property_info[\"lv\"]\n            user_card_obj = UserCards.get(uid)\n            temp[\"cid\"] = user_card_obj.cid\n            user_equipments_obj = UserEquipments.get_instance(uid)\n            equipments = {}\n            for _,v in user_card_obj.equipments.items():\n                if v:\n                    equipments[v] = user_equipments_obj.equipments[v]\n            temp[\"equipments\"] = equipments\n            temp[\"user_equipments\"] = user_card_obj.equipments\n            data['ranking_list'][int(rank)] = temp\n    \n    # If NPCs hold any of the top-20 ranks, read the npc config \n    top_npc = get_game_config(\"compete_npc_config\",'1')[\"top_npc\"]\n    compete_config = get_game_config(\"compete_config\",'1')    \n    for i in range(20):\n        i+=1\n        if (i not in data[\"ranking_list\"]) and (str(i) in top_npc):\n            temp = {}\n            npc_lv = int(compete_config[\"rank_conf\"][str(i)][\"npc_lv_base\"])\n            top_npc_config = top_npc[str(i)]\n            npc_card_obj = CardNpc.get(top_npc_config[\"icon\"],npc_lv,top_npc_config)\n            npc_card_dict = copy.deepcopy(npc_card_obj.__dict__)\n            npc_card_dict.pop(\"card_detail\")\n            npc_card_dict.pop(\"card_category_config\")\n            npc_card_dict.pop(\"subarea\")\n            temp.update(npc_card_dict)\n            temp[\"uid\"] = \"npc\"\n            temp[\"name\"] = top_npc_config[\"name\"]\n            temp[\"lv\"] = npc_lv\n            data['ranking_list'][str(i)] = temp\n    return 0,data\n\ndef extand_num(oc_user,params):\n    \"\"\"Spend diamonds to gain one extra compete attempt\n    \"\"\"\n    compete_config = get_game_config(\"compete_config\",1)\n    cost_diamond = compete_config.get(\"contestNum_cost\",20)\n    user_property_obj = UserProperty.get_instance(oc_user.uid)\n    if user_property_obj.minus_diamond(cost_diamond):\n        user_compete_obj = UserCompete.get_instance(oc_user.uid)\n        user_compete_obj.compete_info[\"compete_num\"] += 1\n        user_compete_obj.put()\n        return 0,{\"compete_num\":user_compete_obj.compete_info[\"compete_num\"]}\n    return 1,{'msg':'not enough diamond'}\n\ndef compete_record(oc_user,params):\n    \"\"\"List my compete battle records\n    \"\"\"\n    data = {}\n    compete_record_obj = UserCompeteRecord.hgetall(oc_user.uid)\n    if not compete_record_obj:\n        return 0,data\n    n = 0\n    for k,v in compete_record_obj.items():\n        n+=1    \n        compete_uid = v[\"compete_uid\"]\n        if compete_uid != \"npc\":\n            try:\n                result = {}\n                # Fetch this user's own profile info\n                compete_uid = oc_user.uid\n                user_equipments_obj = 
UserEquipments.get_instance(compete_uid)\n                user_cards_obj = UserCards.get(compete_uid)\n                equipments = {}\n                for _,v_ in user_cards_obj.equipments.items():\n                    if v_:\n                        equipments[v_] = user_equipments_obj.equipments[v_]\n                \n                user_card_dict = user_cards_obj.card_obj('0',dun_type=\"defense\").__dict__\n                user_card_dict.pop('card_category_config')\n                user_card_dict.pop('card_detail')\n                \n                result.update({'user_equipmenmts':user_cards_obj.equipments,'equipments':equipments})\n                result.update(user_card_dict)\n                v.update({\"user_info\":result})\n            except:\n                pass\n        if n > 10:\n            compete_record = UserCompeteRecord.hget(oc_user.uid,k)\n            compete_record.delete()\n    \n    data[\"result\"] = compete_record_obj\n    return 0,data\n\ndef compete_message(oc_user,params):\n    \"\"\"My message threads\n    \"\"\"\n    data = {}\n    #now = datetime.datetime.now()\n    compete_message_obj = UserCompeteMessage.hgetall(oc_user.uid)\n    message = copy.deepcopy(compete_message_obj)\n    if not message:\n        return 0,data\n    for m in message:\n        message[m].pop(\"uid\")\n        message[m].pop(\"ouid\")\n        message[m].update({\"datetime\":message[m][\"record_info\"][-1][\"create_time\"]})\n#        is_changed = False\n#        for record in message[m][\"record_info\"]:\n#            create_time = datetime.datetime.strptime(record[\"create_time\"],\"%Y-%m-%d %H:%M:%S\")\n#            time_delta = now - create_time\n#            if time_delta.days < 3:\n#                message[m][\"record_info\"].remove(record)\n#        if len(message[m][\"record_info\"]) == 0:\n#            obj = UserCompeteMessage.hget(oc_user.uid,m)\n#            obj.delete() \n#            message.pop(m)    \n#        else:    \n#            is_changed = True\n#        \n#        if is_changed:\n#            obj = UserCompeteMessage.hget(oc_user.uid,m)\n#            obj.record_info = message[m][\"record_info\"]\n#            obj.hput()\n    \n    data[\"result\"] = message\n    return 0,data\n\ndef send_message(oc_user,params):\n    \"\"\"Leave a message for another player\n    \"\"\"\n    to_uid = params.get(\"to_uid\")\n    content = params.get(\"content\",'')\n    if len(content) > 40:\n        return 1,{\"msg\":\"content too long\"}\n    \n    compete_message_obj = UserCompeteMessage.hget(oc_user.uid,to_uid)\n    compete_message_obj.set_message(oc_user.uid,to_uid,content)\n    \n    compete_message_obj = UserCompeteMessage.hget(to_uid,oc_user.uid)\n    compete_message_obj.set_message(oc_user.uid,to_uid,content)\n    return 0,{}\n\ndef delete_message(oc_user,params):\n    \"\"\"Delete a message thread\n    \"\"\"\n    to_uid = params.get(\"to_uid\")\n    compete_message_obj = UserCompeteMessage.hget(oc_user.uid,to_uid)\n    compete_message_obj.delete()\n    return 0,{}    \n\ndef start(oc_user,params):\n    \"\"\"Run a compete battle and return the result\n    \"\"\"\n    data = {}\n    enemy_id = int(params.get(\"enemy_id\",3))\n    user_compete_obj = UserCompete.get_instance(oc_user.uid)\n    enemy_obj = user_compete_obj.compete_info[\"enemy_info\"][enemy_id]\n    dun = Dungeon(oc_user.uid, dun_type=\"compete\")\n    dun.run(dun_type=\"compete\",sk_type=\"boss\",enemy_id=enemy_id)\n    enemy_rank = enemy_obj[\"rank\"]\n    enemy_uid = enemy_obj[\"uid\"]\n    enemy_name = enemy_obj[\"name\"]\n    my_rank = user_compete_obj.my_rank()\n    if dun.is_success:\n        compete_rank_obj = get_compete_rank(oc_user.subarea)\n        # Swap ranks and record the battle\n        if enemy_uid == \"npc\":\n            compete_rank_obj.set(oc_user.uid,enemy_rank)\n            sid = utils.create_gen_id()\n            # NPC opponent: only my side gets a record\n            compete_record_obj = UserCompeteRecord.hget(oc_user.uid,sid)\n            compete_record_obj.set_record(enemy_uid,enemy_name,1,1,my_rank,enemy_rank)\n        else:\n            compete_rank_obj.set(oc_user.uid,enemy_rank)\n            compete_rank_obj.set(enemy_uid,my_rank)\n            sid = utils.create_gen_id()\n            # Add compete records; for a real player both sides are recorded\n            compete_record_obj = UserCompeteRecord.hget(oc_user.uid,sid)\n            compete_record_obj.set_record(enemy_uid,enemy_name,1,1,my_rank,enemy_rank)\n            
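# mirror record for the real opponent, keyed by the same battle sid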
\n            compete_record_obj = UserCompeteRecord.hget(enemy_uid,sid)\n            compete_record_obj.set_record(oc_user.uid,oc_user.baseinfo[\"username\"],2,0,enemy_rank,my_rank)\n    else:\n        sid = utils.create_gen_id()\n        # Add a compete record; on defeat only my own side is recorded\n        compete_record_obj = UserCompeteRecord.hget(oc_user.uid,sid)\n        compete_record_obj.set_record(enemy_uid,enemy_name,1,0)\n        \n    data['monster_list'] = {\"cid\":enemy_obj[\"cid\"],\"name\":enemy_obj[\"name\"],\\\n                            \"hp\":enemy_obj[\"hp\"],\"mp\":enemy_obj[\"mp\"]}    \n    data['result'] = dun.result\n    data['my_deck'] = []\n    for obj in dun.heros:\n        dic = {}\n        dic[\"cid\"] = obj.card.cid\n        dic[\"hp\"] = obj.max_hp\n        dic[\"mp\"] = obj.max_mp\n        data[\"my_deck\"].append(dic)    \n    data[\"is_success\"] = dun.is_success\n    return 0,data\n","sub_path":"python/project/plague/my_plague/apps/logics/compete.py","file_name":"compete.py","file_ext":"py","file_size_in_byte":9716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"644288929","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport time\nfrom werkzeug.utils import secure_filename\nfrom flask import current_app\nfrom qiniu import Auth, put_data\n\nfrom .common import get_random_string, compatmd5\n\nfrom momeet.lib import rdb\n\n\ndef get_qiniu_default_config():\n    field_default_dict = {\n        'access_key': 'QINIU_ACCESS_KEY',\n        'secret_key': 'QINIU_SECRET_KEY',\n        'image_host': 'QINIU_IMAGE_HOST',\n        'bucket_name': 'QINIU_IMAGE_BUCKET',\n        'cut_suffix_format': 'QINIU_IMAGE_CUT_SUFFIX'\n    }\n    default_config = dict()\n    for field, config_name in field_default_dict.iteritems():\n        default_config[field] = current_app.config[config_name]\n    return default_config\n\n\nclass QiniuHelper(object):\n    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\n    def __init__(self, **kwargs):\n        # Fall back to the default config when none is passed in; this also makes the class easy to reuse from tasks.\n        if not kwargs:\n            kwargs = get_qiniu_default_config()\n        self.init_config(kwargs)\n        self.client = Auth(self.access_key, self.secret_key)\n        self.token_key = 'momeet_{bucket_name}_qi_niu_upload_token'.format(\n            bucket_name=self.bucket_name\n        )\n\n    def init_config(self, kwargs):\n        for field, value in kwargs.iteritems():\n            setattr(self, field, value)\n\n    def get_upload_token(self):\n        token = rdb.get(self.token_key)\n        if not token:\n            policy = {\n                'returnBody': '{\"key\": $(key), \"hash\": $(etag), \"image\": $(imageInfo)}'\n            }\n            qn = self.client\n            token = qn.upload_token(self.bucket_name, policy=policy)\n            rdb.setex(self.token_key, 3000, token)\n        return token\n\n    def upload(self, file_name, file_data):\n        image_host = self.image_host\n        token = self.get_upload_token()\n        ret, info = put_data(token, file_name, file_data)\n        if info.status_code == 200:\n            return_url = image_host + ret.get('key')\n            image = ret.get('image', {})\n            if image:\n                cut_suffix = self.cut_suffix_format.format(\n                    width=image.get('width'),\n                    height=image.get('height')\n                )\n                return_url = return_url + cut_suffix\n            return return_url\n\n    def allowed_file(self, filename):\n        return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS\n\n def upload_img(self, file):\n filename = file.filename\n if not self.allowed_file(filename):\n return ''\n file_suffix = os.path.splitext(secure_filename(filename))[-1].lower()\n filename = compatmd5(get_random_string() + secure_filename(filename) + str(time.time())) + file_suffix\n return self.upload(filename, file.stream)\n\n def upload_avator(self, file_name, file_data):\n if not self.allowed_file(file_name):\n return ''\n file_suffix = os.path.splitext(secure_filename(file_name))[-1].lower()\n file_name = compatmd5(get_random_string() + secure_filename(file_name) + str(time.time())) + file_suffix\n return self.upload(file_name, file_data)\n\n\ndef get_qiniu_img_w_h(image_url):\n # http://7vzrwi.com1.z0.glb.clouddn.com/f4b00c5b471014d8103f5457a9fcd43b.gif?imageMogr/v2/thumbnail/130x153\n data = image_url.split('/')[-1]\n w, h = data.split('x')\n return w, h\n","sub_path":"momeet/utils/qnutil.py","file_name":"qnutil.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"219331580","text":"import logging\nimport json\nfrom importers.taxonomy import settings, taxonomy_service\nfrom collections import OrderedDict\nfrom importers.repository import elastic\nfrom valuestore.taxonomy import tax_type\nfrom pkg_resources import resource_string\n\nlogging.basicConfig()\nlogging.getLogger(__name__).setLevel(logging.INFO)\nlog = logging.getLogger(__name__)\n\n\ndef create_valuestore_jobs(taxonomy_jobterms, taxonomy_jobgroups,\n taxonomy_jobfields):\n jobfields = {\n field['LocaleFieldID']: OrderedDict(\n [('id', str(field['LocaleFieldID'])),\n ('type', tax_type['yrkesomrade']),\n ('label', field['Term']), ('description', field['Description']),\n ('num_id', int(field['LocaleFieldID']))])\n for field in taxonomy_jobfields\n }\n jobgroups = {\n field['LocaleCode']: OrderedDict(\n [('id', str(field['LocaleCode'])),\n ('type', tax_type['yrkesgrupp']),\n ('label', field['Term']), ('description', field['Description']),\n ('num_id', int(field['LocaleCode'])),\n ('parent', jobfields[field['LocaleFieldID']])])\n for field in taxonomy_jobgroups\n }\n jobterms = {\n field['OccupationNameID']:\n OrderedDict([('id', str(field['OccupationNameID'])),\n ('type', tax_type['yrkesroll']),\n ('label', field['Term']),\n ('num_id', int(field['LocaleCode'])),\n ('parent', jobgroups[field['LocaleCode']])])\n for field in taxonomy_jobterms\n }\n return (jobterms, jobgroups, jobfields)\n\n\ndef create_valuestore_geo(file_places, taxonomy_municipalities, taxonomy_regions,\n taxonomy_countries):\n countries = {\n field['CountryID']:\n OrderedDict([('id', str(field['CountryID'])),\n ('type', tax_type['land']),\n ('label', field['Term']),\n ('num_id', int(field['CountryID'])),\n ('country_code', field['CountryCode'])])\n for field in taxonomy_countries\n }\n regions = {\n field['NationalNUTSLevel3Code']: OrderedDict(\n [('id', str(field['NationalNUTSLevel3Code'])),\n ('type', tax_type['lan']),\n ('label', field['Term']), ('num_id',\n int(field['NationalNUTSLevel3Code']))])\n for field in taxonomy_regions\n }\n municipalities = {\n field['NationalNUTSLAU2Code']: OrderedDict(\n [('id', str(field['NationalNUTSLAU2Code'])),\n ('type', tax_type['kommun']), ('label', field['Term']),\n ('parent', regions[field['NationalNUTSLevel3Code']]),\n ('num_id', int(field['NationalNUTSLAU2Code']))])\n for field in taxonomy_municipalities\n }\n\n places = {}\n for place 
in file_places:\n identifier = \"%s-%s\" % (place['kommunkod'], _slugify(place['label']))\n municipality = municipalities[place['kommunkod']]\n places[identifier] = dict({'id': identifier}, **place)\n places[identifier]['parent'] = municipality\n\n return (places, municipalities, regions, countries)\n\n\ndef _slugify(string):\n return string.lower().replace('å', 'a') \\\n .replace('ä', 'a').replace('ö', 'o').replace(' ', '_') if string else None\n\n\ndef create_valuestore_skills(taxonomy_skills):\n skills = {\n field['SkillID']:\n OrderedDict([('id', str(field['SkillID'])),\n ('type', tax_type['kompetens']),\n ('label', field['Term']), ('description', field['Term'])])\n for field in taxonomy_skills\n }\n return (skills)\n\n\ndef create_valuestore_work_time_extent(taxonomy_work_time_extent):\n wte = {\n field['WorkTimeExtentID']:\n OrderedDict([('id', str(field['WorkTimeExtentID'])),\n ('type', tax_type['arbetstidsomfattning']),\n ('label', field['Term'])])\n for field in taxonomy_work_time_extent\n }\n return (wte)\n\n\ndef create_valuestore_languages(taxonomy_languages):\n languages = {\n field['LanguageID']:\n OrderedDict([('id', str(field['LanguageID'])),\n ('type', tax_type['sprak']),\n ('label', field['Term']), ('num_id',\n int(field['LanguageID']))])\n for field in taxonomy_languages\n }\n return (languages)\n\n\ndef create_valuestore_employment_types(taxonomy_employmenttypes):\n employment_types = {\n field['EmploymentTypeID']:\n OrderedDict([('id', str(field['EmploymentTypeID'])),\n ('type', tax_type['anstallningstyp']),\n ('label', field['Term']),\n ('num_id', int(field['EmploymentTypeID']))])\n for field in taxonomy_employmenttypes\n }\n return employment_types\n\n\ndef create_valuestore_driving_licence(taxonomy_drivinglicence):\n driving_licence = {\n field['DrivingLicenceID']:\n OrderedDict([('id', str(field['DrivingLicenceID'])),\n ('type', tax_type['korkort']),\n ('label', field['Term']),\n ('description', field['Description']),\n ('num_id', int(field['DrivingLicenceID']))])\n for field in taxonomy_drivinglicence\n }\n return driving_licence\n\n\ndef fetch_full_taxonomy():\n try:\n taxonomy_jobfields = taxonomy_service.get_all_job_fields()\n taxonomy_jobgroups = taxonomy_service.get_all_job_groups()\n taxonomy_jobterms = taxonomy_service.get_all_job_terms()\n taxonomy_countries = taxonomy_service.get_all_countries()\n taxonomy_regions = taxonomy_service.get_all_regions()\n taxonomy_municipalities = taxonomy_service.get_all_municipalities()\n taxonomy_languages = taxonomy_service.get_all_languages()\n taxonomy_work_time_extent = taxonomy_service.get_all_work_time_extent()\n taxonomy_skills = taxonomy_service.get_all_skills()\n taxonomy_employmenttypes = taxonomy_service.get_all_employment_types()\n taxonomy_drivinglicence = taxonomy_service.get_all_driving_licences()\n except Exception as e:\n log.error('Failed to fetch valuesets from Taxonomy Service', e)\n raise\n # Load places\n file_places = json.loads(resource_string('importers.taxonomy.resources',\n 'platser.json'))\n (valuestore_jobterm,\n valuestore_jobgroup, valuestore_jobfield) = create_valuestore_jobs(\n taxonomy_jobterms, taxonomy_jobgroups, taxonomy_jobfields)\n (valuestore_places, valuestore_municipalities,\n valuestore_regions,\n valuestore_countries) = create_valuestore_geo(file_places,\n taxonomy_municipalities,\n taxonomy_regions,\n taxonomy_countries)\n valuestore_languages = create_valuestore_languages(taxonomy_languages)\n valuestore_work_time_extent = create_valuestore_work_time_extent(\n 
taxonomy_work_time_extent)\n valuestore_skills = create_valuestore_skills(taxonomy_skills)\n valuestore_employmenttypes = create_valuestore_employment_types(\n taxonomy_employmenttypes)\n valuestore_drivinglicences = create_valuestore_driving_licence(\n taxonomy_drivinglicence)\n return (\n list(valuestore_jobterm.values())\n + list(valuestore_jobgroup.values())\n + list(valuestore_jobfield.values())\n + list(valuestore_places.values())\n + list(valuestore_municipalities.values())\n + list(valuestore_regions.values())\n + list(valuestore_countries.values())\n + list(valuestore_languages.values())\n + list(valuestore_work_time_extent.values())\n + list(valuestore_skills.values())\n + list(valuestore_employmenttypes.values())\n + list(valuestore_drivinglicences.values())\n )\n\n\ndef check_if_taxonomyversion_already_exists():\n try:\n tax_versions = taxonomy_service.get_taxonomy_version()\n except Exception as e:\n log.error('Failed to get taxonomy version from taxonomy service', e)\n raise\n highest_version = max([v['BastaxonomiId'] for v in tax_versions])\n expected_index_name = settings.ES_TAX_INDEX_BASE + str(highest_version)\n log.info(\n 'Expected index name based on taxonomy service version is {}'.format(\n expected_index_name))\n try:\n index_exists = elastic.index_exists(expected_index_name)\n except Exception as e:\n log.error('Failed to check index existence on elastic', e)\n raise\n return (expected_index_name, index_exists)\n\n\ndef update_search_engine_valuestore(indexname, indexexists, values):\n # Create and/or update valuestore index\n try:\n elastic.create_index(indexname, settings.TAXONOMY_INDEX_CONFIGURATION)\n elastic.bulk_index(values, indexname, ['type', 'id'])\n except Exception as e:\n log.error('Failed to load values into search engine', e)\n raise\n # Create and/or assign index to taxonomy alias and\n # assign old index to archive alias\n try:\n if (elastic.alias_exists(settings.ES_TAX_INDEX_ALIAS)):\n alias = elastic.get_alias(settings.ES_TAX_INDEX_ALIAS)\n elastic.update_alias(\n indexname, list(alias.keys()), settings.ES_TAX_INDEX_ALIAS)\n if (not indexexists):\n if (elastic.alias_exists(settings.ES_TAX_ARCHIVE_ALIAS)):\n elastic.add_indices_to_alias(list(alias.keys()),\n settings.ES_TAX_ARCHIVE_ALIAS)\n else:\n elastic.put_alias(\n list(alias.keys()), settings.ES_TAX_ARCHIVE_ALIAS)\n else:\n elastic.put_alias([indexname], settings.ES_TAX_INDEX_ALIAS)\n except Exception as e:\n log.error('Failed to update aliases', e)\n raise\n\n\ndef start():\n (indexname, indexexist) = check_if_taxonomyversion_already_exists()\n values = fetch_full_taxonomy()\n update_search_engine_valuestore(indexname, indexexist, values)\n\n\nif __name__ == '__main__':\n start()\n","sub_path":"importers/taxonomy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"475407745","text":"#\n# @lc app=leetcode id=116 lang=python3\n#\n# [116] Populating Next Right Pointers in Each Node\n#\n\n# @lc code=start\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\n\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if not root:\n return None\n count = 0\n store = []\n def generator(node, count):\n if not node:\n return\n if count >= len(store):\n store.append([])\n store[count].append(node)\n if 
node.left:\n                generator(node.left, count + 1)\n            if node.right:\n                generator(node.right, count + 1)\n        generator(root, count)\n        for item in store:\n            for i in range(len(item) - 1):\n                item[i].next = item[i + 1]\n            item[-1].next = None\n        return store[0][0]\n# @lc code=end\n\n","sub_path":"Python3/116.populating-next-right-pointers-in-each-node.py","file_name":"116.populating-next-right-pointers-in-each-node.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"274258746","text":"\"\"\"\n    Remove Duplicate Characters From String\n\n    Description: Given an input string, remove all the duplicate characters and\n    return a new string with unique characters\n\"\"\"\n\nfrom src.utils.logger import get_logger\n\nLOGGER = get_logger(__name__)\n\n\ndef remove_duplicates(input_str: str) -> str:\n    \"\"\"\n    Convert an input string into a new string with unique characters\n\n    Arguments:\n        input_str {string} -- input string with repeating/non-repeating characters\n\n    Returns:\n        string -- string with unique characters\n    \"\"\"\n\n    unique_chars_str = \"\"\n    unique_chars = list()\n    chars_map = dict()\n\n    # Iterate through each character in string\n    for character in input_str:  # O(n)\n        if not chars_map.get(character):  # O(1)\n            chars_map[character] = True\n            unique_chars.append(character)\n\n    # Join all the unique characters in the list to form a string\n    unique_chars_str = unique_chars_str.join(unique_chars)\n\n    return unique_chars_str\n\n\ninput_value = \"dsdhsjkkakdjs\"\nLOGGER.info(\"Input String : %s\", input_value)\n\noutput_value = remove_duplicates(input_value)\nLOGGER.info(\"Output Unique String : %s\", output_value)\n\n# Shorthand for the above function\noutput_value = \"\".join({char for char in input_value})\nLOGGER.info(\n    \"Output Unique String (Single Line Solution, but without the original order) : %s\",\n    output_value,\n)\n","sub_path":"src/interviews/strings/duplicates.py","file_name":"duplicates.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"277814137","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Word2Vec - Skip-gram model \n# Finding the best window among the parameters (size, window)\n# Window: varied from 3 to 9 \n\n# Library for Word2Vec \nfrom gensim.models import Word2Vec\n# Korean text processing\nfrom konlpy.tag import Kkma\nfrom konlpy.utils import pprint\n# Plotting (projecting high-dimensional vectors into a low-dimensional graph)\nfrom sklearn.manifold import TSNE\nfrom matplotlib import font_manager, rc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport gensim.models as g\nimport matplotlib.font_manager as fm\n# Suppress warning messages\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport math\nfrom scipy.spatial import distance\n# Set a Korean font so Hangul renders correctly\nfont_location = 'C:\\Windows\\\\Fonts\\\\batang.ttc'\nfont_name = font_manager.FontProperties(fname=font_location).get_name()\nrc('font', family=font_name)\n# Prevent broken glyphs (minus signs)\nmpl.rcParams['axes.unicode_minus'] = False\n\nfile = open(\"tagList_d5.txt\", \"r\", encoding=\"UTF-8\")\n\ndata = []\n\n# Read the file line by line\ntemp = []    \nwhile True:\n    line = file.readline()\n    if not line: break\n    temp.append(line)\n\n# Store the lines in a list \nfor t in temp:\n    data.append(t.split())\n\n\n# Extract the words one by one\nword = []\nfor d in data:\n    for da in d:\n        word.append(da)\nprint(\"Total number of words: \", len(word))\nword = set(word)\nprint(\"Number of unique words: \", len(word))\n\n# Accept candidates whose metric is within a small tolerance of the best\ndef ok(x, list, x_location):\n    
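\"\"\"Vote for the best window (x_location) plus every window whose metric is within 0.03 of the best value x.\"\"\"\n    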
result.append(x_location)\n    for l in list:\n        if (abs(x - l) <= 0.03 and x != l):\n            print(\"\\tw = \", windowDetail[list.index(l)], \" -> \", l)\n            result.append(windowDetail[list.index(l)])\n\n\nprint(\"Skip-gram:\")\n# size: number of dimensions of the embeddings (default = 100)\n# window: maximum distance between the target word and its neighbors (default = 5)\n# min_count: words with a lower frequency are ignored (default = 5) \n# workers: numbers of partitions during training (default = 3)\n\n# Prepare lists to hold the euclidean and cosine distances\neuc1 = []\neuc2 = []\neuc3 = []\neuc4 = []\neuc5 = []\neuc6= []\ncos1 = []\ncos2 = []\ncos3 = []\ncos4 = []\ncos5 = []\ncos6= []\n\nwindowDetail = [] # candidate windows for the majority vote\nresult = [] # vote results for the majority vote \n\n# Vary window from 3 to 9\nfor w in range (3, 10):\n    print(\"\\nwindow = %d\" %w)\n    \n    # euclidean distances start at 0\n    dist1 = 0\n    dist2 = 0\n    dist3 = 0\n    dist4 = 0\n    dist5 = 0\n    dist6 = 0\n    # cosine distances start at 0\n    c_dist1 = 0\n    c_dist2 = 0\n    c_dist3 = 0\n    c_dist4 = 0\n    c_dist5 = 0\n    c_dist6 = 0\n    \n    repeat = 0 # number of repeated runs\n    windowDetail.append(w)\n    \n    # Average over repeated training runs (size is held at 100 here)\n    for r in range(0, 5):\n        repeat = repeat + 1\n        minCount = 20\n        skip_model = Word2Vec(data, sg=1, min_count = minCount, iter = 5, size = 100, window = w)\n\n        skip_model.save('SkipFile')\n        #print(\"\\nsize = %d\" %s)\n        \n        store_model = g.Doc2Vec.load('SkipFile')\n        vocab = list(store_model.wv.vocab)\n        \n        # Tags used for evaluation \n        s1 = '아침'\n        s2 = '모닝콜'\n        s3 = '잔잔한'\n        s4 = '고요한'\n        s5 = '카페'\n        s6 = '휴식'\n        # s7 = '운동'\n        s8 = '클럽'\n        s9 = 'EDM'\n        s10 = '조용한'\n        s11 = '여름'\n        s12 = '추위'\n        \n        # similar tags: 아침(morning) - 모닝콜(wake-up call)\n        dist1 = distance.euclidean(store_model.wv.word_vec(s1),(store_model.wv.word_vec(s2))) + dist1\n        c_dist1 = store_model.similarity(s1, s2) + c_dist1\n        # similar tags: 잔잔한(calm) - 고요한(tranquil)\n        dist2 = distance.euclidean(store_model.wv.word_vec(s3),(store_model.wv.word_vec(s4))) + dist2\n        c_dist2 = store_model.similarity(s3, s4) + c_dist2\n        # similar tags: 카페(cafe) - 휴식(rest)\n        dist3 = distance.euclidean(store_model.wv.word_vec(s5),(store_model.wv.word_vec(s6))) + dist3\n        c_dist3 = store_model.similarity(s5, s6) + c_dist3\n        # contrasting tags: 아침(morning) - 클럽(club)\n        dist4 = distance.euclidean(store_model.wv.word_vec(s1),(store_model.wv.word_vec(s8))) + dist4\n        c_dist4 = store_model.similarity(s1, s8) + c_dist4\n        # contrasting tags: EDM - 조용한(quiet)\n        dist5 = distance.euclidean(store_model.wv.word_vec(s9),(store_model.wv.word_vec(s10))) + dist5\n        c_dist5 = store_model.similarity(s9, s10) + c_dist5\n        # contrasting tags: 여름(summer) - 추위(cold)\n        dist6 = distance.euclidean(store_model.wv.word_vec(s11),(store_model.wv.word_vec(s12))) + dist6\n        c_dist6 = store_model.similarity(s11, s12) + c_dist6\n    \n    # For this window, average the metrics over the repeated runs\n    euc1.append(dist1/repeat)\n    euc2.append(dist2/repeat)\n    euc3.append(dist3/repeat)\n    euc4.append(dist4/repeat)\n    euc5.append(dist5/repeat)\n    cos1.append(c_dist1/repeat)\n    cos2.append(c_dist2/repeat)\n    cos3.append(c_dist3/repeat)\n    cos4.append(c_dist4/repeat)\n    cos5.append(c_dist5/repeat)\n\n    print(s1, \"- \", s2)    \n    print(dist1/repeat)\n    print(c_dist1/repeat)    \n    print(s3, \"- \", s4)    \n    print(dist2/repeat)\n    print(c_dist2/repeat)    \n    print(s5, \"- \", s6)    \n    print(dist3/repeat)\n    print(c_dist3/repeat)    \n    print(s1, \"- \", s8)    \n    print(dist4/repeat)\n    print(c_dist4/repeat)    \n    print(s9, \"- \", s10)    \n    print(dist5/repeat)\n    print(c_dist5/repeat)    \n\n\n\n\n# Print the pairs that should be highly similar\nprint(s1, \"- \", s2)\nprint(\"Euc w = \", windowDetail[euc1.index(min(euc1))], \" -> \", min(euc1))\nok(min(euc1),euc1, windowDetail[euc1.index(min(euc1))])\nprint(\"Cos w = \", windowDetail[cos1.index(max(cos1))], \" -> \", 
max(cos1))\nok(max(cos1),cos1, windowDetail[cos1.index(max(cos1))])\nprint(s3, \"- \", s4)\nprint(\"Euc w = \", windowDetail[euc2.index(min(euc2))], \" -> \", min(euc2))\nok(min(euc2),euc2, windowDetail[euc2.index(min(euc2))])\nprint(\"Cos w = \", windowDetail[cos2.index(max(cos2))], \" -> \", max(cos2))\nok(max(cos2),cos2, windowDetail[cos2.index(max(cos2))])\nprint(s5, \"- \", s6)\nprint(\"Euc w = \", windowDetail[euc3.index(min(euc3))], \" -> \", min(euc3))\nok(min(euc3),euc3, windowDetail[euc3.index(min(euc3))])\nprint(\"Cos w = \", windowDetail[cos3.index(max(cos3))], \" -> \", max(cos3))\nok(max(cos3),cos3, windowDetail[cos3.index(max(cos3))])\n\n# Print the pairs that should have low similarity\nprint(s1, \"- \", s8)\nprint(\"Euc w = \", windowDetail[euc4.index(max(euc4))], \" -> \", max(euc4))\nok(max(euc4), euc4, windowDetail[euc4.index(max(euc4))])\nprint(\"Cos w = \", windowDetail[cos4.index(min(cos4))], \" -> \", min(cos4))\nok(min(cos4), cos4, windowDetail[cos4.index(min(cos4))])\nprint(s9, \"- \", s10)\nprint(\"Euc w = \", windowDetail[euc5.index(max(euc5))], \" -> \", max(euc5))\nok(max(euc5), euc5, windowDetail[euc5.index(max(euc5))])\nprint(\"Cos w = \", windowDetail[cos5.index(min(cos5))], \" -> \", min(cos5))\nok(min(cos5), cos5, windowDetail[cos5.index(min(cos5))])\nfile.close()\n\nresult_count = []\nfinal = {}\n\n# print(result)\nresult.sort()\n\nfor w in windowDetail:\n    #result.count(w)\n    result_count.append(result.count(w))\n    #final[windowDetail(w)] = result_count(w)\nprint(\"[3, 4, 5, 6, 7, 8, 9]\")\nprint(result_count)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"dataProcessing_Submit/FindParameters_SkipGram_Window.py","file_name":"FindParameters_SkipGram_Window.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"116972785","text":"from flask import Flask, render_template, redirect, request, jsonify\nfrom flask_pymongo import PyMongo\nimport xgboost as xgb\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport seaborn as sns\nimport joblib\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder\n\n\nlinear_model = joblib.load(\"models/LR.h5\")\nxgb_model = joblib.load(\"models/xgbmodel.h5\")\n#rf_model = joblib.load(\"models/rf.h5\")\n\n\napp = Flask(__name__)\napp.static_folder = \"templates/static\"\n\n\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/predict\", methods=[\"GET\", \"POST\"])\ndef predict():\n    if request.method == \"POST\":\n        testdf = []\n        boroughtype = int(request.form[\"boroughradio\"])\n        roomtype = int(request.form[\"roomtyperadio\"])\n        review_input = request.form[\"reviewmonth\"]\n        avail_input = request.form[\"availability\"]\n        testdf.append(float(review_input))\n        testdf.append(int(avail_input))\n        borough = [\"Bronx\", \"Brooklyn\", \"Manhattan\", \"Queens\", \"Staten Island\"]\n        room = [\"Entire Home/Apt\", \"Private Room\", \"Shared Room\"]\n        boroughpicked = borough[boroughtype]\n        roompicked = room[roomtype]\n        for x in range(5):\n            if boroughtype == 0:\n                testdf.append(1)\n            else:\n                testdf.append(0)\n            boroughtype = boroughtype - 1\n        for x in range(3):\n            if roomtype == 0:\n                testdf.append(1)\n            else:\n                testdf.append(0)\n            roomtype = roomtype - 1\n        \n        df = pd.DataFrame([testdf], columns=[\n            \"reviews_per_month\",\n            \"availability_365\",\n            \"neighbourhood_group_Bronx\",\n            \"neighbourhood_group_Brooklyn\",\n            \"neighbourhood_group_Manhattan\",\n            
\"neighbourhood_group_Queens\",\n \"neighbourhood_group_Staten Island\",\n \"room_type_Entire home/apt\",\n \"room_type_Private room\",\n \"room_type_Shared room\"])\n xgbtest = xgb.DMatrix(df)\n linear_predict = linear_model.predict(df)\n xgb_predict = xgb_model.predict(xgbtest)\n #rf_predict = rf_model.predict(df)\n #price = [linear_predict[0][0], xgb_predict[0], rf_predict[0]]\n price = [linear_predict[0][0], xgb_predict[0]]\n\n print(price)\n \n minprice = round(min(price), 2)\n maxprice = round(max(price), 2)\n avgprice = round((sum(price) / 2), 2)\n \n return render_template(\"predict.html\", MinPrice=minprice, MaxPrice=maxprice, Average=avgprice, Borough=boroughpicked, RoomType=roompicked, ReviewPerMonth=review_input, Availability=avail_input)\n else:\n return render_template(\"predict.html\")\n\n\n@app.route(\"/infographic\")\ndef infographic():\n return render_template(\"infographic.html\")\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"226218071","text":"from django import template\nfrom django.templatetags.static import static\nfrom datetime import datetime\nregister = template.Library()\n\n# Django incluison tag plays elegant way to separete bootstrap template logic\n# from app template, that separation is need for theme the projects_type\n\n# Pass in kwargs the elements to fill the cards\n\n# Please note that all templates are contained in cards\n# You are free to arrange them in grids or other elements\n\n@register.inclusion_tag('includes/alerts.html')\ndef alerts_dropdown(*args, **kwargs):\n # Obs: this code does not handle date time format or timezone\n # this shoud be handled in your's views\n alerts = {}\n print(args)\n for alert in args:\n alerts.update(alert)\n\n return {\"alerts\":alerts,}\n\n\n@register.simple_tag()\ndef set_alert(*args, **kwargs):\n # Set alert number, badge, read, date, text, fa icon from below options\n # fa-file-alt, fa-donate, fa-exclamation-triangle\n # Obs: this code does not handle date time format or timezone\n # this shoud be handled in your's views\n\n alert = {}\n number = kwargs['number']\n alert['bg'] = kwargs['bg']\n alert['was_read'] = kwargs['was_read']\n alert['date'] = kwargs['date']\n alert['short_text'] = kwargs['short_text']\n alert['fa_icon'] = kwargs['fa_icon']\n\n\n return {number:alert}\n","sub_path":"core/templatetags/alerts_tag.py","file_name":"alerts_tag.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"180252485","text":"import os\nimport sys\nimport gym\nimport torch\nfrom collections import deque\nfrom copy import deepcopy\nfrom dqn_agent import DQNAgent\nfrom utils import *\n\n\nif __name__ == \"__main__\":\n EPISODES = 500000\n HEIGHT = 84\n WIDTH = 84\n HISTORY_SIZE = 4\n PLOT_FOLDER = 'save_graph'\n MODEL_FOLDER = 'save_model'\n plot_path, model_path = check_dirs_exist(PLOT_FOLDER, MODEL_FOLDER)\n\n env = gym.make('BreakoutDeterministic-v4')\n env.reset()\n state_size = env.observation_space.shape\n action_size = env.action_space.n - 1\n\n scores, episodes = [], []\n agent = DQNAgent(action_size, HISTORY_SIZE)\n recent_reward = deque(maxlen=100)\n frame = 0\n\n for e in range(EPISODES):\n done = False\n score = 0\n\n history = np.zeros([5, 84, 
84], dtype=np.uint8)\n        step = 0\n        state = env.reset()\n\n        history = get_init_state(history, state, HISTORY_SIZE, HEIGHT, WIDTH)\n\n        while not done:\n            step += 1\n            frame += 1\n            if agent.render:\n                env.render()\n\n            action = agent.get_action(np.float32(history[:4, :, :]) / 255.)\n\n            next_state, reward, done, info = env.step(action+1)\n\n            pre_proc_next_state = preprocess(next_state, HEIGHT, WIDTH)\n            history[4, :, :] = pre_proc_next_state\n\n            r = np.clip(reward, -1, 1)\n\n            agent.append_sample(deepcopy(pre_proc_next_state), action, r, done)\n\n            if frame >= agent.train_start:\n                agent.train_model(frame)\n                if frame % agent.update_target == 0:\n                    agent.update_target_model()\n\n            score += reward\n            history[:4, :, :] = history[1:, :, :]\n\n            if frame % 50000 == 0:\n                scores.append(score)\n                episodes.append(e)\n                save_plot(episodes, scores, plot_path)\n                print(f'Saved plot to {plot_path}')\n\n            if done:\n                recent_reward.append(score)\n                print(f\"episode: {e}, score: {score}, memory length: {len(agent.memory)}, \"\n                      f\"epsilon: {agent.epsilon}, steps: {step}, recent reward: {np.mean(recent_reward)}\")\n\n                # if the mean score over the last 100 episodes exceeds 50, save the model and stop training\n                if np.mean(recent_reward) > 50:\n                    torch.save(agent.model, os.path.join(model_path, 'breakout_dqn'))\n                    sys.exit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"541520882","text":"\"\"\"\nGiven a binary tree, each node has value 0 or 1. Each root-to-leaf path represents a binary number starting with the most significant bit. For example, if the path is 0 -> 1 -> 1 -> 0 -> 1, then this could represent 01101 in binary, which is 13.\n\nFor all leaves in the tree, consider the numbers represented by the path from the root to that leaf.\n\nReturn the sum of these numbers.\n\nExample 1:\n\nInput: [1,0,1,0,1,0,1]\nOutput: 22\nExplanation: (100) + (101) + (110) + (111) = 4 + 5 + 6 + 7 = 22\n\nNote:\n\n    The number of nodes in the tree is between 1 and 1000.\n    node.val is 0 or 1.\n    The answer will not exceed 2^31 - 1.\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def sumRootToLeaf(self, root: TreeNode) -> int:\n        \n        num_list = []\n        def dfs(root,binary):\n            if root:\n                if not root.left and not root.right:\n                    num_list.append(int(binary+str(root.val),2))\n                    return \n                dfs(root.left,binary+str(root.val))\n                dfs(root.right,binary+str(root.val))\n        dfs(root,\"\")\n        return sum(num_list)","sub_path":"leetcode/September-30-day/week2/sum_of_root_to_leaf_bin_numbers.py","file_name":"sum_of_root_to_leaf_bin_numbers.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"646063415","text":"\"\"\"\n\nCopyright 2015, Institute for Systems Biology\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the 
License.\n\n\"\"\"\n\nfrom sys import argv as cmdline_argv, stdout\nimport logging\n\nfrom scripts.feature_def_gen.feature_def_utils import DataSetConfig, build_bigquery_service, \\\n submit_query_async, poll_async_job, download_query_result, write_tsv, \\\n load_config_json\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n_ch = logging.StreamHandler(stream=stdout)\nlogger.addHandler(_ch)\n\nVALUE_FIELD_NUM_MUTATIONS = 'num_mutations'\nVALUES = frozenset(['variant_classification', 'variant_type', 'sequence_source', VALUE_FIELD_NUM_MUTATIONS])\nFIELDNAMES = ['gene_name', 'protein_name', 'num_search_hits', 'value_field', 'internal_feature_id']\n\n\nclass RPPAFeatureDefConfig(object):\n def __init__(self, project_id, target_config, rppa_table_name, out_path):\n self.project_id = project_id\n self.target_config = target_config\n self.rppa_table_name = rppa_table_name\n self.output_csv_path = out_path\n\n @classmethod\n def from_dict(cls, param):\n project_id = param['project_id']\n target_config = DataSetConfig.from_dict(param['target_config'])\n table_name = param['rppa_table_name']\n output_csv_path = param['output_csv_path']\n\n return cls(project_id, target_config, table_name, output_csv_path)\n\n\ndef build_feature_query(config):\n query_template = (\"SELECT gene_name, protein_name \\\n FROM [{main_project_name}:{main_dataset_name}.{table_name}] \\\n WHERE gene_name IS NOT NULL \\\n GROUP BY gene_name, protein_name\")\n\n query_str = query_template.format(\n main_project_name=config.target_config.project_name,\n main_dataset_name=config.target_config.dataset_name,\n table_name=config.rppa_table_name\n )\n\n feature_type = get_feature_type()\n logger.debug(str(feature_type) + \" SQL: \" + query_str)\n\n return query_str\n\n\n# TODO remove duplicate code\ndef get_feature_type():\n return 'RPPA'\n\n\ndef build_internal_feature_id(feature_type, gene, protein):\n return '{feature_type}:{gene}:{protein}'.format(\n feature_type=feature_type,\n gene=gene,\n protein=protein\n )\n\n\ndef unpack_rows(row_item_array):\n feature_type = get_feature_type()\n result = []\n for row in row_item_array:\n gene_name = row['f'][0]['v']\n protein_name = row['f'][1]['v']\n\n for value_field in VALUES:\n result.append({\n 'num_search_hits': 0,\n 'gene_name': gene_name,\n 'protein_name': protein_name,\n 'value_field': value_field,\n 'internal_feature_id': build_internal_feature_id(feature_type, gene_name, protein_name)\n })\n\n return result\n\n\ndef main():\n config_file_path = cmdline_argv[1]\n config = load_config_json(config_file_path, RPPAFeatureDefConfig)\n\n logger.info(\"Building BigQuery service...\")\n bigquery_service = build_bigquery_service()\n\n result = []\n query = build_feature_query(config)\n\n # Insert BigQuery job\n query_job = submit_query_async(bigquery_service, config.project_id, query)\n\n # Poll for completion of query\n job_id = query_job['jobReference']['jobId']\n logger.info('job_id = \"' + str(job_id) + '\\\"')\n\n poll_async_job(bigquery_service, config, job_id)\n\n query_result = download_query_result(bigquery_service, query_job)\n rows = unpack_rows(query_result)\n result.extend(rows)\n\n write_tsv(config.output_csv_path, result, FIELDNAMES)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"scripts/feature_def_gen/protein_features.py","file_name":"protein_features.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"554879756","text":"# -*- coding: utf-8 
-*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0141_auto_20161010_1254'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BookingOpsAlerts',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('time_to_send', models.DateTimeField()),\n ('alert_status', models.PositiveSmallIntegerField(choices=[(1, b'Pending'), (2, b'Sent'), (3, b'Cancelled')])),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='OpsAlertType',\n fields=[\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('id', models.IntegerField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=1024)),\n ('time_diff', models.IntegerField()),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.RemoveField(\n model_name='packageprice',\n name='dealer_price',\n ),\n migrations.AddField(\n model_name='packageprice',\n name='dealer_labour_price',\n field=models.DecimalField(default=0, help_text=b'Inclusive of Tax', max_digits=10, decimal_places=2),\n ),\n migrations.AddField(\n model_name='packageprice',\n name='dealer_material_price',\n field=models.DecimalField(default=0, help_text=b'Inclusive of Tax', max_digits=10, decimal_places=2),\n ),\n migrations.AddField(\n model_name='packageprice',\n name='dealer_part_price',\n field=models.DecimalField(default=0, help_text=b'Inclusive of Tax', max_digits=10, decimal_places=2),\n ),\n migrations.AddField(\n model_name='packageprice',\n name='show_savings',\n field=models.BooleanField(default=False),\n ),\n migrations.AddField(\n model_name='bookingopsalerts',\n name='alert_type',\n field=models.ForeignKey(to='core.OpsAlertType'),\n ),\n migrations.AddField(\n model_name='bookingopsalerts',\n name='booking',\n field=models.ForeignKey(related_name='booking_opsalert', to='core.Booking'),\n ),\n ]\n","sub_path":"bumper2/core/old_migrations/0142_auto_20161013_1527.py","file_name":"0142_auto_20161013_1527.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"258026600","text":"import sys\nimport numpy as np\nimport pandas as pd\nsys.path.insert(0,'../')\n\nfrom strategy_tester.account import Account\nfrom strategy_tester.asset import Stock\nfrom strategy_tester.strategy import Strategy\nfrom strategy_tester.risk_management import ConstantRate\nfrom strategy_tester.simulate import BackTest\nfrom strategy_tester.utils import generate_data\n\n\nclass CustomAllocation(Strategy):\n def __init__(self,n=5,n_samples=60,max_stocks=10,*args,**kwargs):\n super().__init__(*args,**kwargs)\n self.n = n\n self.n_samples = n_samples\n self.max_stocks = max_stocks\n \n self.n_stocks = 0\n self.past_data = []\n self.last_selected = None\n self.last_t = 0\n \n def filter_stocks(self,past_data,n=5):\n df = pd.DataFrame(past_data)\n df = df.tail(100) # get most 100 of most recent prices\n r = df.diff()/df # calculate returns\n means = r.mean() # calculate means\n return means.sort_values()[-n:] # get top n of them\n \n def decide_long_open(self,spot_price,timestamp,Account,exog):\n output = {}\n t = timestamp\n if t.hour == 0 and t.minute == 0: # run once a day at 00:00\n 
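# once a day, append the latest snapshot (spot_price is assumed to be a dict keyed by asset_id)\n            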
self.past_data.append(spot_price)\n if len(self.past_data) < self.n_samples: # wait for sufficient number of samples\n output = None\n else:\n selected = self.filter_stocks(self.past_data,self.n) # choose best performing n stocks so far\n\n for asset_id in self.on: # for each stock this strategy is registered to work on\n if asset_id in spot_price.keys() and asset_id in selected.index: # if that stock is in the received list of prices and selected for buying\n if self.n_stocks < self.max_stocks:\n # decide order parameters\n args = {\n 'type':'market',\n 'size':self.RiskManagement.order_size(Account),\n 'strike_price':spot_price[asset_id],\n }\n output[asset_id] = args # {'decision':True,'params':args}\n self.n_stocks += 1\n self.last_selected = selected # store currently selected stocks for later comparison\n del self.past_data[0] # remove first item (roll on a constant size)\n else:\n output = None\n return output\n\n def decide_short_open(self,spot_price,timestamp,Account,exog):\n \"No short sells.\"\n return None\n \n def decide_long_close(self,order,spot_price,timestamp,Account,exog):\n t = timestamp\n if self.last_t != t:\n self.last_t = t\n if t.hour == 0 and t.minute == 0:\n self.past_data.append(spot_price)\n if len(self.past_data) < self.n_samples:\n return False\n else:\n selected = self.filter_stocks(self.past_data,self.n) # selected for long (that we would have select for buying)\n\n self.selected_to_close = []\n for stock in self.last_selected.index: # for each last chosen stocks\n if stock not in selected.index:\n self.selected_to_close.append(stock)\n del self.past_data[0]\n return order.asset_id in self.selected_to_close # close if the order is selected for close\n else:\n return False\n else:\n return order.asset_id in self.selected_to_close # close if the order is selected for close\n \n def decide_short_close(self,order,spot_price,timestamp,Account,exog):\n return False\n\n\nstocks = [Stock(generate_data(10000,freq='1h'),name=f'stock_{i}',short_name=f'STCK_{i}') for i in range(20)] # randomly generate stock prices\naccount = Account(initial_balance=1000) # initialize an account with 1000 USD balance\nrisk_man = ConstantRate(0.05) # constant lot size with 5% of balance each time\n\nstrategy = CustomAllocation(RiskManagement=risk_man,id=45450,name='custom_buyer')\nfor stock in stocks:\n strategy = stock.register(strategy) # allow strategy to use all of them, either one or multiple\n\nsim = BackTest(Account=account,Strategy=strategy).run(stocks)\nprint(sim.Account.balances)","sub_path":"examples/portfolio_selection.py","file_name":"portfolio_selection.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"212510238","text":"import datetime\nimport unittest\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom framework.utils import get_epoch_from_datetime\nfrom mcfw.properties import object_factory\nfrom mcfw.rpc import parse_complex_value\nfrom plugins.trash_calendar.bizz import update_service_setting\nfrom plugins.trash_calendar.bizz.common import get_collections, get_activity\nfrom plugins.trash_calendar.consts import TrashActivity\nfrom plugins.trash_calendar.models.common import Street, StreetCollection, \\\n Collection\nfrom plugins.trash_calendar.to import StreetTO, CollectionTO, ActivityTO\nfrom plugins.trash_calendar.to.service import ServiceSettingsTO\n\n\nclass Test(unittest.TestCase):\n\n def setup(self):\n # 
https://cloud.google.com/appengine/docs/standard/python/tools/localunittesting\n from google.appengine.datastore import datastore_stub_util\n from google.appengine.ext import testbed\n\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n\n self.testbed.init_app_identity_stub()\n self.testbed.init_blobstore_stub()\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_files_stub()\n self.testbed.init_memcache_stub()\n self.testbed.init_urlfetch_stub()\n\n\n def test_load_activities(self):\n self.setup()\n \n to = ServiceSettingsTO() \n to.sik = u'test_sik'\n to.name = u'test city'\n to.api_key = u'test_api_key'\n to.backend = u'default'\n to.branding_updated_date = None\n to.params = {\n u'country_code': u'BE',\n u'postal_code': u'test_postal_code'\n }\n \n update_service_setting(to)\n \n l = get_collections(to.backend,\n to.params[u'country_code'],\n to.params[u'postal_code'],\n StreetTO(number=1,\n name=u'test_street'),\n 1)\n self.assertEqual(l, [])\n \n collection_key_1 = u'BE-test_postal_code-2019-route-1'\n day1 = datetime.date.today() + relativedelta(days=1)\n collection_key_2 = u'BE-test_postal_code-2020-route-1'\n day2 = datetime.date.today() + relativedelta(years=1)\n collection_key_3 = u'BE-test_postal_code-2021-route-1'\n day3 = datetime.date.today() + relativedelta(years=2)\n \n Street(\n parent=Street.create_parent_key(to.backend, to.params[u'country_code'], to.params[u'postal_code']),\n name=u'test_street',\n aliases=[u'test_street'],\n collection_keys=[collection_key_1, collection_key_2],\n collections=[StreetCollection(year=day1.year, conditions=None, collection_key=collection_key_1),\n StreetCollection(year=day2.year, conditions=None, collection_key=collection_key_2),\n StreetCollection(year=day3.year, conditions=None, collection_key=collection_key_3)]\n ).put()\n \n activities1 = [TrashActivity.PLASTIC_METAL_CARTONS, TrashActivity.REST]\n epoch1 = get_epoch_from_datetime(day1)\n Collection(\n key=Collection.create_key(to.backend, to.params[u'country_code'], epoch1, collection_key_1),\n activities=activities1,\n epoch=epoch1,\n year=day1.year,\n month=day1.month,\n day=day1.day,\n ).put()\n \n activities2 = [TrashActivity.PAPER_CARDBOARD, TrashActivity.TEXTILE]\n epoch2 = get_epoch_from_datetime(day2)\n Collection(\n key=Collection.create_key(to.backend, to.params[u'country_code'], epoch2, collection_key_2),\n activities=activities2,\n epoch=epoch2,\n year=day2.year,\n month=day2.month,\n day=day2.day,\n ).put()\n \n activities3 = [TrashActivity.PRUNING_WASTE, TrashActivity.PRUNING_WASTE_ON_DEMAND]\n epoch3 = get_epoch_from_datetime(day3)\n Collection(\n key=Collection.create_key(to.backend, to.params[u'country_code'], epoch3, collection_key_3),\n activities=activities3,\n epoch=epoch3,\n year=day3.year,\n month=day3.month,\n day=day3.day,\n ).put()\n \n l = get_collections(to.backend,\n to.params[u'country_code'],\n to.params[u'postal_code'],\n StreetTO(number=1,\n name=u'test_street'),\n 1)\n self.assertEqual(l, [CollectionTO(activity=get_activity(TrashActivity.PLASTIC_METAL_CARTONS), epoch=epoch1, year=day1.year, day=day1.day, month=day1.month),\n CollectionTO(activity=get_activity(TrashActivity.REST), epoch=epoch1, year=day1.year, day=day1.day, month=day1.month),\n CollectionTO(activity=get_activity(TrashActivity.PAPER_CARDBOARD), epoch=epoch2, year=day2.year, day=day2.day, month=day2.month),\n CollectionTO(activity=get_activity(TrashActivity.TEXTILE), epoch=epoch2, year=day2.year, day=day2.day, month=day2.month)])\n\n\n\nif __name__ == '__main__':\n 
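# discover and run all tests in this module\n    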
unittest.main()\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"211378482","text":"# Import packages\nimport random\nimport math\n\n# Game state##\nclass State:\n    # Initialization\n    def __init__(self, pieces=None, enemy_pieces=None, depth=0):\n        # Direction constants (up to 8 squares vertically and horizontally)\n        self.dxy = ((0, -1),(0, -2), (0, -3), (0, -4),(0, -5), (0, -6), (0, -7),(0, -8), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (0, 1),(0, 2), (0, 3), (0, 4),(0, 5), (0, 6), (0, 7),(0, 8), (-1, 0), (-2, 0), (-3, 0), (-4, 0), (-5, 0), (-6, 0), (-7, 0), (-8, 0))\n\n        # Piece placement\n        self.pieces = pieces if pieces != None else [0] * (81)\n        self.enemy_pieces = enemy_pieces if enemy_pieces != None else [0] * (81)\n        self.depth = depth\n\n        # Initial placement: nine pawns on each player's back rank\n        if pieces == None or enemy_pieces == None:\n            self.pieces = [0] * 72 + [1] * 9\n            self.enemy_pieces = [0] * 72 + [1] * 9\n\n    # Whether this state is a loss##\n    def is_lose(self):\n        count = 0\n        pieces0 = self.pieces\n        for i in range(81):\n            if pieces0[i] == 1:\n                count += 1\n        count_enemy = 0\n        pieces1 = self.enemy_pieces\n        for i in range(81):\n            if pieces1[80-i] == 1:\n                count_enemy += 1\n        if count < 2 and count_enemy >= 2:\n            return True\n        elif self.depth >= 300:\n            if count < count_enemy:\n                return True\n            else:\n                return False\n        return False\n\n    # Whether this state is a draw##\n    def is_draw(self):\n        count = 0\n        pieces0 = self.pieces\n        for i in range(81):\n            if pieces0[i] == 1:\n                count += 1\n        count_enemy = 0\n        pieces1 = self.enemy_pieces\n        for i in range(81):\n            if pieces1[80-i] == 1:\n                count_enemy += 1\n        if count == count_enemy:\n            return self.depth >= 300 # 300 moves\n        return False\n\n    # Whether the game is over##\n    def is_done(self):\n        return self.is_lose() or self.is_draw()\n\n    # Get the 2D arrays used as input to the dual network##\n    def pieces_array(self):\n        # Input array for one player\n        def pieces_array_of(pieces):\n            table_list = []\n\n            table = [0] * 81\n            table_list.append(table)\n            for i in range(81):\n                if pieces[i] == 1:\n                    table[i] = 1\n\n            return table_list\n\n        # Input arrays for both players##\n        return [pieces_array_of(self.pieces), pieces_array_of(self.enemy_pieces)]\n\n    # Convert a destination square and direction index into an action\n    def position_to_action(self, position, direction):\n        return position * 32 + direction\n\n    # Convert an action into a destination square and direction index\n    def action_to_position(self, action):\n        return (int(action/32), action%32)\n\n    # Get the list of legal moves\n    def legal_actions(self):\n        actions = []\n        for p in range(81):\n            # Moves of each of our pieces\n            if self.pieces[p] != 0:\n                actions.extend(self.legal_actions_pos(p))\n\n        return actions\n\n    # Check which destination squares are reachable along an unobstructed straight line\n    def move_check(self, position_src):\n        piece_type = self.pieces[position_src]\n        directions = []\n        if piece_type == 1: # pawn (or promoted pawn)\n            directions = list(range(32))\n        root = []\n        for direction in directions:\n            # Classify the step by the coordinate delta: +1..+8 is right, -1..-8 is left, (0, 1..8) is down (+9), (0, -1..-8) is up (-9)\n            if self.dxy[direction][0] >= 1 and self.dxy[direction][1] == 0:\n                dch = 1\n            elif self.dxy[direction][0] <= -1 and self.dxy[direction][1] == 0:\n                dch = -1\n            elif self.dxy[direction][0] == 0 and self.dxy[direction][1] >= 1:\n                dch = 9\n            elif self.dxy[direction][0] == 0 and self.dxy[direction][1] <= -1:\n                dch = -9\n            focused = position_src + dch\n            for i in range(8):\n                if focused > 80 or focused < 0:\n                    continue\n                elif self.pieces[focused] != 1 and self.enemy_pieces[80-focused] != 1:\n                    if dch == 1:\n                        if focused != position_src + self.dxy[direction][0]:\n                            focused = focused + dch\n                        elif focused == position_src + self.dxy[direction][0]:\n                            root.extend([focused])\n                            break\n                    elif dch == -1:\n                        if focused != position_src + self.dxy[direction][0]:\n                            focused = focused + dch\n                        elif focused == position_src + self.dxy[direction][0]:\n                            root.extend([focused])\n                            break\n                    elif dch == 9:\n                        if focused != position_src + 9 * self.dxy[direction][1]:\n                            focused = focused + dch\n                        elif focused == position_src + 9 * self.dxy[direction][1]:\n                            root.extend([focused])\n                            break\n                    elif dch == -9:\n                        if focused != position_src + 9 * self.dxy[direction][1]:\n                            focused = focused + dch\n                        elif focused == position_src + 9 * self.dxy[direction][1]:\n                            root.extend([focused])\n                            break\n                elif self.pieces[focused] == 1:\n                    continue\n                elif self.enemy_pieces[80-focused] == 1:\n                    continue\n        return root\n\n    # Get the list of legal moves for one piece\n    def legal_actions_pos(self, position_src):\n        actions = []\n\n        # Directions the piece can move in\n        piece_type = self.pieces[position_src]\n        directions = []\n        if piece_type == 1: # pawn (or promoted pawn)\n            directions = list(range(32))\n\n        for direction in directions:\n            # Destination square of the piece\n            x = position_src%9 + self.dxy[direction][0]\n            y = int(position_src/9) + self.dxy[direction][1]\n            p = x + y * 9\n\n            # If the move is possible, add it as a legal move\n            if 0 <= x and x <= 8 and 0 <= y and y <= 8 and self.pieces[p] == 0 and self.enemy_pieces[80-p] == 0 and p in self.move_check(position_src):\n                actions.append(self.position_to_action(p, direction))\n\n        return actions\n\n    # Get the next state\n    def next(self, action):\n        # Create the next state\n        state = State(self.pieces.copy(), self.enemy_pieces.copy(), self.depth+1)\n\n        # Convert the action into (destination square, direction index)\n        position_dst, position_src = self.action_to_position(action)\n\n        # Recover the source square from the destination and the direction\n        x = position_dst%9 - self.dxy[position_src][0]\n        y = int(position_dst/9) - self.dxy[position_src][1]\n        position_src = x + y * 9\n\n        # Move the piece\n        state.pieces[position_dst] = state.pieces[position_src]\n        state.pieces[position_src] = 0\n\n        # Capture enemy pieces sandwiched against one of our pieces (Othello-style), checking right/left/down/up\n        remove_check1 = position_dst + 1\n        remove_check2 = position_dst - 1\n        remove_check3 = position_dst + 9\n        remove_check4 = position_dst - 9\n        # Right\n        for i in range(10):\n            if remove_check1 > 80 or remove_check1 < 0:\n                break\n            elif int(remove_check1 / 9) != int(position_dst / 9):\n                break\n            elif self.pieces[remove_check1] == 1:\n                if remove_check1 != position_dst + 1:\n                    for j in range(remove_check1 - position_dst - 1):\n                        state.enemy_pieces[80 - (position_dst + 1 + j)] = 0\n                break\n            elif self.enemy_pieces[80-remove_check1] == 1:\n                remove_check1 += 1\n            else:\n                break\n        # Left\n        for i in range(10):\n            if remove_check2 > 80 or remove_check2 < 0:\n                break\n            elif int(remove_check2 / 9) != int(position_dst / 9):\n                break\n            elif self.pieces[remove_check2] == 1:\n                if remove_check2 != position_dst - 1:\n                    for j in range(position_dst - remove_check2 - 1):\n                        state.enemy_pieces[80 - (position_dst - 1 - j)] = 0\n                break\n            elif self.enemy_pieces[80-remove_check2] == 1:\n                remove_check2 -= 1\n            else:\n                break\n        # Down\n        for i in range(10):\n            if remove_check3 > 80 or remove_check3 < 0:\n                break\n            elif remove_check3 % 9 != position_dst % 9:\n                break\n            elif self.pieces[remove_check3] == 1:\n                if remove_check3 != position_dst + 9:\n                    for j in range(int((remove_check3 - position_dst)/9) - 1):\n                        state.enemy_pieces[80 - (position_dst + 9 * (j + 1))] = 0\n                break\n            elif self.enemy_pieces[80-remove_check3] == 1:\n                remove_check3 += 9\n            else:\n                break\n        # Up\n        for i in range(10):\n            if remove_check4 > 80 or remove_check4 < 0:\n                break\n            elif remove_check4 % 9 != position_dst % 9:\n                break\n            elif self.pieces[remove_check4] == 1:\n                if remove_check4 != position_dst - 9:\n                    for j in range(int((position_dst - remove_check4)/9) - 1):\n                        state.enemy_pieces[80 - (position_dst - 9 * (j + 1))] = 0\n                break\n            elif self.enemy_pieces[80-remove_check4] == 1:\n                remove_check4 -= 9\n            else:\n                break\n\n        # Capture enemy pieces that are fully surrounded\n        # Only corner clusters of up to three pieces are handled here (a Go-style rule would generalize this)\n        # Corner square 0\n        if self.enemy_pieces[80-0] == 1:\n            # enemy piece on 0, surrounded by our pieces on 1 and 9\n            if self.pieces[1]==1 and self.pieces[9]==1:\n                state.enemy_pieces[80-0]=0\n            # enemy pieces on 0 and 1, our piece on 9\n            elif self.enemy_pieces[80-1] ==1 and self.pieces[9]==1:\n                # surrounded by 2, 9, 10\n                if self.pieces[2]==1 and self.pieces[10]==1:\n                    state.enemy_pieces[80-0]=0\n                    state.enemy_pieces[80-1]=0\n                # enemy pieces on 0, 1 and 2\n                elif self.enemy_pieces[80-2] == 1:\n                    # surrounded by 3, 9, 10, 11\n                    if self.pieces[10]==1 and self.pieces[11]==1 and self.pieces[3]==1:\n                        state.enemy_pieces[80-0]=0\n                        state.enemy_pieces[80-1]=0\n                        state.enemy_pieces[80-2]=0\n            # enemy pieces on 0 and 9, our piece on 1\n            elif self.enemy_pieces[80-9] ==1 and self.pieces[1]==1:\n                # surrounded by 1, 10, 18\n                if self.pieces[10]==1 and self.pieces[18]==1:\n                    state.enemy_pieces[80-0]=0\n                    state.enemy_pieces[80-9]=0\n                # enemy pieces on 0, 9 and 18\n                elif self.enemy_pieces[80-18]==1:\n                    if self.pieces[10]==1 and self.pieces[19]==1 and self.pieces[27]==1:\n                        state.enemy_pieces[80-0]=0\n                        state.enemy_pieces[80-9]=0\n                        state.enemy_pieces[80-18]=0\n            elif self.enemy_pieces[80-1] ==1 and self.enemy_pieces[80-9]==1:\n                if self.pieces[2]==1 and self.pieces[10]==1 and self.pieces[18]==1:\n                    state.enemy_pieces[80-0]=0\n                    state.enemy_pieces[80-1]=0\n                    state.enemy_pieces[80-9]=0\n        # Corner square 8\n        if self.enemy_pieces[80-8] ==1:\n            # enemy piece on 8, surrounded by our pieces on 7 and 17\n            if self.pieces[7]==1 and self.pieces[17]==1:\n                state.enemy_pieces[80-8]=0\n            # enemy pieces on 7 and 8, our piece on 17\n            elif self.enemy_pieces[80-7] ==1 and self.pieces[17]==1:\n                # surrounded by 6, 16, 17\n                if self.pieces[6]==1 and self.pieces[16]==1:\n                    state.enemy_pieces[80-7]=0\n                    state.enemy_pieces[80-8]=0\n                # enemy pieces on 6, 7 and 8\n                elif self.enemy_pieces[80-6] == 1:\n                    # surrounded by 5, 15, 16, 17\n                    if self.pieces[15]==1 and self.pieces[16]==1 and self.pieces[5]==1:\n                        state.enemy_pieces[80-6]=0\n                        state.enemy_pieces[80-7]=0\n                        state.enemy_pieces[80-8]=0\n            # enemy pieces on 8 and 17, our piece on 7\n            elif self.enemy_pieces[80-17] ==1 and self.pieces[7]==1:\n                # surrounded by 7, 16, 26\n                if self.pieces[16]==1 and self.pieces[26]==1:\n                    state.enemy_pieces[80-8]=0\n                    state.enemy_pieces[80-17]=0\n                # enemy pieces on 8, 17 and 26\n                elif self.enemy_pieces[80-26]==1:\n                    if self.pieces[16]==1 and self.pieces[25]==1 and self.pieces[35]==1:\n                        state.enemy_pieces[80-8]=0\n                        state.enemy_pieces[80-17]=0\n                        state.enemy_pieces[80-26]=0\n            elif self.enemy_pieces[80-7] ==1 and self.enemy_pieces[80-17]==1:\n                if self.pieces[6]==1 and self.pieces[16]==1 and self.pieces[26]==1:\n                    state.enemy_pieces[80-7]=0\n                    state.enemy_pieces[80-8]=0\n                    state.enemy_pieces[80-17]=0\n\n        # Corner square 72\n        if self.enemy_pieces[80-72] ==1:\n            # surrounded by our pieces on 63 and 73\n            if self.pieces[63]==1 and self.pieces[73]==1:\n                state.enemy_pieces[80-72]=0\n            # enemy piece on 73, our piece on 63\n            elif self.enemy_pieces[80-73] ==1 and self.pieces[63]==1:\n                # surrounded by 63, 64, 74\n                if self.pieces[64]==1 and self.pieces[74]==1:\n                    state.enemy_pieces[80-72]=0\n                    state.enemy_pieces[80-73]=0\n                # enemy pieces on 72, 73 and 74\n                elif self.enemy_pieces[80-74] == 1:\n                    # surrounded by 63, 64, 65, 75\n                    if self.pieces[64]==1 and self.pieces[65]==1 and self.pieces[75]==1:\n                        state.enemy_pieces[80-72]=0\n                        state.enemy_pieces[80-73]=0\n                        state.enemy_pieces[80-74]=0\n            # enemy pieces on 63 and 72, our piece on 73\n            elif self.enemy_pieces[80-63] ==1 and self.pieces[73]==1:\n                # surrounded by 54, 64, 73\n                if self.pieces[54]==1 and self.pieces[64]==1:\n                    state.enemy_pieces[80-63]=0\n                    state.enemy_pieces[80-72]=0\n                # enemy pieces on 54, 63 and 72\n                elif self.enemy_pieces[80-54]==1:\n                    if self.pieces[45]==1 and self.pieces[55]==1 and self.pieces[64]==1:\n                        state.enemy_pieces[80-54]=0\n                        state.enemy_pieces[80-63]=0\n                        state.enemy_pieces[80-72]=0\n            elif self.enemy_pieces[80-63] ==1 and self.enemy_pieces[80-73]==1:\n                if self.pieces[54]==1 and self.pieces[64]==1 and self.pieces[74]==1:\n                    state.enemy_pieces[80-63]=0\n                    state.enemy_pieces[80-72]=0\n                    state.enemy_pieces[80-73]=0\n\n        # Corner square 80\n        if self.enemy_pieces[80-80] ==1:\n            # surrounded by our pieces on 71 and 79\n            if self.pieces[71]==1 and self.pieces[79]==1:\n                state.enemy_pieces[80-80]=0\n            # enemy piece on 79, our piece on 71\n            elif self.enemy_pieces[80-79] ==1 and self.pieces[71]==1:\n                # surrounded by 71, 70, 78\n                if self.pieces[70]==1 and self.pieces[78]==1:\n                    state.enemy_pieces[80-80]=0\n                    state.enemy_pieces[80-79]=0\n                # enemy pieces on 78, 79 and 80\n                elif self.enemy_pieces[80-78] == 1:\n                    # surrounded by 69, 70, 71, 77\n                    if self.pieces[69]==1 and self.pieces[70]==1 and self.pieces[77]==1:\n                        state.enemy_pieces[80-78]=0\n                        state.enemy_pieces[80-79]=0\n                        state.enemy_pieces[80-80]=0\n            # enemy pieces on 71 and 80, our piece on 79\n            elif self.enemy_pieces[80-71] ==1 and self.pieces[79]==1:\n                # surrounded by 62, 70, 79\n                if self.pieces[62]==1 and self.pieces[70]==1:\n                    state.enemy_pieces[80-71]=0\n                    state.enemy_pieces[80-80]=0\n                # enemy pieces on 62, 71 and 80\n                elif self.enemy_pieces[80-62]==1:\n                    if self.pieces[53]==1 and self.pieces[61]==1 and self.pieces[70]==1:\n                        state.enemy_pieces[80-62]=0\n                        state.enemy_pieces[80-71]=0\n                        state.enemy_pieces[80-80]=0\n            elif self.enemy_pieces[80-71] ==1 and self.enemy_pieces[80-79]==1:\n                if self.pieces[62]==1 and self.pieces[70]==1 and self.pieces[78]==1:\n                    state.enemy_pieces[80-80]=0\n                    state.enemy_pieces[80-71]=0\n                    state.enemy_pieces[80-79]=0\n\n        # Swap sides: own and enemy pieces trade places\n        w = state.pieces\n        state.pieces = state.enemy_pieces\n        state.enemy_pieces = w\n        return state\n\n    # Whether it is the first player's turn\n    def is_first_player(self):\n        return self.depth%2 == 0\n\n    # String representation\n    def __str__(self):\n        pieces0 = self.pieces if self.is_first_player() else self.enemy_pieces\n        pieces1 = self.enemy_pieces if self.is_first_player() else self.pieces\n\n        s = ''\n\n        # Board\n        for i in range(81):\n            if pieces0[i] == 1:\n                s += 'F'\n            elif pieces1[80-i] == 1:\n                s += 'T'\n            else:\n                s += '-'\n            if i % 9 == 8:\n                s += '\\n'\n\n        return s\n\n# Choose an action at random\ndef random_action(state):\n    legal_actions = state.legal_actions()\n    return legal_actions[random.randint(0, len(legal_actions)-1)]\n\n# Quick demo\nif __name__ == '__main__':\n    # Create the initial state\n    state = State()\n\n    # Loop until the game ends\n    while True:\n        # Stop when the game is over\n        if state.is_done():\n            break\n\n        # Get the next state\n        state = state.next(random_action(state))\n\n        # Print the board\n        print(state)\n        print()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":19746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"600099138","text":"import numpy as np\r\nimport os\r\nimport cv2\r\nfrom joblib import dump, load\r\nfrom 
sklearn.svm import SVC\r\nfrom random import shuffle\r\nfrom tqdm import tqdm\r\nfrom math import *\r\nimport YOLO\r\n\r\n\r\ndef HOGSVM(test_dir):\r\n face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n GenderClf = load('GenderSVM.joblib')\r\n HOGexpressionClf = load('HOGexpressionSVMClf.joblib')\r\n\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n fontScale = 1\r\n fontColors = [(0,0,255),(0,255,255),(255,0,255),(255,255,0),(255,0,0)]\r\n lineType = 2\r\n expressionLabels = ['Neutral','Anger','Disgust','Fear','Happy','Sadness','Surprise']\r\n GenderLabels = ['Male','Female']\r\n\r\n\r\n hogX = cv2.HOGDescriptor()\r\n cap = cv2.VideoCapture(test_dir)\r\n frameRate = cap.get(5)\r\n preds, gpreds = [0]*7, [0,0]\r\n t = 0\r\n while 1:\r\n frameId = cap.get(1)\r\n ret, frame = cap.read()\r\n if not ret: break\r\n if frameId%floor(frameRate): continue\r\n faces = face_cascade.detectMultiScale(frame, 1.3,5)\r\n\r\n i = 0\r\n for (x,y,w,h) in faces:\r\n if w<100 or h<100: continue\r\n cv2.rectangle(frame,(x,y),(x+w,y+h),fontColors[i%5],4)\r\n roi_gray = frame[y:y+h, x:x+w]\r\n roi_gray = cv2.resize(roi_gray, (64, 128), interpolation = cv2.INTER_AREA)\r\n hog_features = hogX.compute(roi_gray)\r\n hog_features = np.array(hog_features)\r\n hog_features = hog_features.reshape(3780,)\r\n hog_features = hog_features[np.newaxis,:]\r\n\r\n prede = HOGexpressionClf.predict(hog_features)[0]\r\n predg = GenderClf.predict(hog_features)[0]\r\n preds[prede]+=1\r\n gpreds[predg]+=1\r\n\r\n cv2.putText(frame,expressionLabels[prede]+' '+GenderLabels[predg],\r\n (x,y-10),font,fontScale,\r\n fontColors[0],lineType)\r\n i+=1\r\n cv2.imshow('image',frame)\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27: break\r\n t+=1\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n totalPreds = sum(preds)\r\n for i in range(7): print('{}: {} %'.format(expressionLabels[i],round(preds[i]/totalPreds,4)*100))\r\n mxIndx = np.argmax(np.array(preds))\r\n ExDict = dict(zip(expressionLabels,list(range(7))))\r\n Exy_true = ExDict[test_dir.split('\\\\')[-2]]\r\n if preds[Exy_true]==preds[mxIndx] and Exy_true!=mxIndx: mxIndx=Exy_true\r\n print('HOG Winning expression: {} with Accuracy: {} %'.format(expressionLabels[mxIndx],round(preds[mxIndx]/totalPreds,4)*100))\r\n\r\n totalX = sum(gpreds)\r\n mxIndx2 = np.argmax(np.array(gpreds))\r\n GenDict = dict(zip(GenderLabels,[0,1]))\r\n Geny_true = GenDict[test_dir.split('_')[-2]]\r\n if gpreds[Geny_true]==gpreds[mxIndx2] and Geny_true!=mxIndx2: mxIndx2=Geny_true\r\n print('Winning Gender: {} with Accuracy: {} %'.format(GenderLabels[mxIndx2],round(gpreds[mxIndx2]/totalX,4)*100))\r\n\r\n return (preds, gpreds)\r\n\r\n","sub_path":"Code/ModeA_HOG_SVM.py","file_name":"ModeA_HOG_SVM.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"148366199","text":"#!/usr/bin/env python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the aVeryBigSum function below.\ndef aVeryBigSum(ar):\n # Big integers on python are automatically assigned\n big_sum = 0\n for a in ar:\n big_sum += a\n return big_sum\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n ar_count = int(input())\n ar = list(map(int, input().rstrip().split()))\n result = aVeryBigSum(ar)\n fptr.write(str(result) + '\\n')\n 
fptr.close()","sub_path":"solving/math/bigsum.py","file_name":"bigsum.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"609633195","text":"\"\"\"\ntools for random number generation\n\"\"\"\nimport numpy as np\n\ndef seed_initial_1D_state(cell_count, random_mod=0.5):\n \"\"\"\n Make 1D array of 0s, then change some to 1s if random number between 0 and 1\n is less than the random_mod.\n \"\"\"\n initial_state = np.zeros(cell_count, int)\n for i in range(0, cell_count):\n if np.random.random() < random_mod:\n initial_state[i] = 1\n\n return initial_state\n\ndef seed_initial_2D_state(x_dim, y_dim, random_mod=0.45):\n initial_state = np.zeros((y_dim, x_dim), int)\n for y in range(0, y_dim):\n for x in range(0, x_dim):\n if np.random.random() < random_mod:\n initial_state[y][x] = 1\n\n return initial_state","sub_path":"utils/random_utils.py","file_name":"random_utils.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"31900554","text":"from collections import defaultdict\nfrom config import ARTICLE_FOLDER\nimport pandas as pd\nimport os.path\nimport glob\nimport json\n\n\ninteresting = ['AFM', 'PFA', 'IRT', 'MIRTb10', 'MIRTb20', 'KTM(iswf0)', 'KTM(iswf20)', 'KTM(iswfe5)']\nfull = set()\n\narray = defaultdict(dict)\nexperiments = glob.glob('*/*/*/results.json')\ndatasets = set()\nfor experiment in experiments:\n with open(experiment) as f:\n results = json.load(f)\n if 'dataset' not in results['args'] or (results['args']['dataset'][-2:] != '42' and 'kiloboss' not in experiment): # X42\n print('skip', experiment)\n continue\n dataset = os.path.basename(results['args']['dataset']).replace('-', '')\n datasets.add(dataset)\n short_legend = results['legends']['short']\n full_legend = results['legends']['full']\n if ':' in full_legend:\n shortname = full_legend.split(':')[0]\n elif short_legend == 'swf0':\n shortname = 'PFA'\n elif short_legend == 'sa0':\n shortname = 'AFM'\n else:\n shortname = 'KTM({:s})'.format(short_legend)\n if shortname == 'MIRTb':\n shortname += str(results['args']['d'])\n if shortname in interesting:\n full.add(full_legend)\n if 'boss' in dataset:\n print(shortname)\n array[dataset][shortname] = results['metrics']['AUC']\ndf = pd.DataFrame.from_dict(array).round(4)\n# Find winner\nfor dataset in datasets:\n extremum = df[dataset].max()\n winners = df.query('abs({:s} - @extremum) <= 0.00011'.format(dataset))\n df.loc[winners.index, dataset] = winners[dataset].map(lambda entry: r'\\textbf{{{:.4f}}}'.format(entry))\ndf.transpose()[interesting].fillna('--').to_latex(os.path.join(ARTICLE_FOLDER, 'summary-now.tex'), escape=False)\nprint(df)\n\nfor legend in full:\n print(legend)\n","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"543090974","text":"class Solution:\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n n = len(s)\n s_inv = s[::-1] # s inverse\n max_len = 0\n temp_val = \"\"\n if n == 1 or n == 0:\n return s\n # Find longset common string between s and s_inv\n for i in range(n):\n if max_len <= n-i+1:\n for j in range(max_len, n-i+1):\n if s[i:i + j] == s_inv[n-i-j:n-i]:\n temp_val = s[i:i + j]\n max_len = j \n\n return 
temp_val","sub_path":"hw6/leetcode5_longestpalindromicsubstring.py","file_name":"leetcode5_longestpalindromicsubstring.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"636578508","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport sys, os\nsys.path.append('../../..')\nfrom apps.common import nfsprovisioner\nfrom tools import k8s_tools\nfrom metadata import AppInfo\nfrom copy import deepcopy\nfrom apps import replaceDockerRepo\nfrom tools import crypto_tools, ssh_tools\nimport jinja2\nimport yaml\nimport subprocess\nfrom time import sleep\nfrom tools import k8s_tools\nfrom pprint import pprint\nfrom codecs import open as open\nfrom storagenode import datastoragenode, logstoragenode\nfrom apps.storage import getClsObj\nfrom apps import mergeTwoDicts\n\n\n\nclass NacosTool(object):\n def __init__(self, namespace='default', nacosdatapath='nacostool',\n harbor=None, retrytimes=600):\n\n namespace = namespace.strip()\n self.RetryTimes = int(retrytimes)\n\n self.AppInfo = deepcopy(AppInfo)\n\n self.AppInfo['DataStorageAddr'] = datastoragenode['hostname']\n self.AppInfo['DataStorageBasePath'] = datastoragenode['basepath']\n self.AppInfo['LogStorageAddr'] = logstoragenode['hostname']\n self.AppInfo['LogStorageBasePath'] = logstoragenode['basepath']\n\n\n self.AppInfo['NacosDataPath'] = os.path.join(self.AppInfo['DataStorageBasePath'], '-'.join([namespace, nacosdatapath]))\n\n self.AppInfo['Namespace'] = namespace\n\n self.AppInfo['HarborAddr'] = harbor\n self.k8sObj = k8s_tools.K8SClient()\n\n self.DataStorageObj = getClsObj(datastoragenode['type'])(**datastoragenode)\n self.LogStorageObj = getClsObj(logstoragenode['type'])(**logstoragenode)\n\n def setupStorage(self):\n TmpResponse = self.DataStorageObj.installStorage(basedir=self.AppInfo['DataStorageBasePath'])\n if TmpResponse['ret_code'] != 0:\n return TmpResponse\n\n print ('create NacosTool Storage successfully')\n\n self.DataStorageObj.createSubFolder(self.AppInfo['NacosDataPath'])\n\n self.TmpStoragePathDict = dict()\n self.TmpStoragePathDict['NacosDataPath'] = self.DataStorageObj.generateRealPath(self.AppInfo['NacosDataPath'])\n\n\n\n\n print ('setup NacosTool Storage successfully')\n\n return {\n 'ret_code': 0,\n 'result': ''\n }\n\n def generateValues(self):\n self.AppInfo['NaocsToolImage'] = replaceDockerRepo(self.AppInfo['NaocsToolImage'], self.AppInfo['HarborAddr'])\n self.AppInfo['NFSProvisionerImage'] =replaceDockerRepo(self.AppInfo['NFSProvisionerImage'],\n self.AppInfo['HarborAddr'])\n\n\n\n def renderTemplate(self):\n TmpCWDPath = os.path.abspath(__file__)\n TmpCWDPath = os.path.dirname(TmpCWDPath)\n self.AppInfo['TargetNamespaceDIR'] = os.path.join(TmpCWDPath, self.AppInfo['TargetNamespaceDIR'])\n\n if not os.path.isdir(os.path.realpath(self.AppInfo['TargetNamespaceDIR'])):\n os.mkdir(os.path.realpath(self.AppInfo['TargetNamespaceDIR']))\n if not os.path.isdir(os.path.realpath(os.path.join(self.AppInfo['TargetNamespaceDIR'],\n self.AppInfo['Namespace']))):\n os.mkdir(os.path.realpath(os.path.join(self.AppInfo['TargetNamespaceDIR'],\n self.AppInfo['Namespace'])))\n\n TmpTargetNamespaceDIR = os.path.join(self.AppInfo['TargetNamespaceDIR'], self.AppInfo['Namespace'],\n self.AppInfo['AppName'])\n TmpTargetNamespaceDIR = os.path.normpath(os.path.realpath(TmpTargetNamespaceDIR))\n\n\n if not os.path.isdir(TmpTargetNamespaceDIR):\n os.mkdir(TmpTargetNamespaceDIR)\n\n if not os.path.isfile(os.path.join(TmpTargetNamespaceDIR, 
'values.yaml')):\n self.generateValues()\n\n TmpAppInfo = mergeTwoDicts(self.AppInfo, self.TmpStoragePathDict)\n\n with open(os.path.join(TmpTargetNamespaceDIR, 'values.yaml'), mode='wb') as f:\n yaml.safe_dump(self.AppInfo, f)\n\n TmpCWDPath = os.path.abspath(__file__)\n TmpCWDPath = os.path.dirname(TmpCWDPath)\n\n subprocess.Popen('/usr/bin/cp -r %s %s'%(os.path.join(TmpCWDPath, 'resource'),\n TmpTargetNamespaceDIR), shell=True)\n sleep (5)\n\n for basepath, _, files in os.walk(os.path.join(TmpTargetNamespaceDIR, 'resource')):\n for file in files:\n TmpContent = ''\n with open(os.path.join(basepath, file), mode='rb', encoding='utf-8') as f:\n TmpContent = f.read()\n TmpContent = jinja2.Template(TmpContent).render(TmpAppInfo)\n\n with open(os.path.join(basepath, file), mode='wb', encoding='utf-8') as f:\n f.write(TmpContent)\n\n with open(os.path.join(TmpTargetNamespaceDIR, 'values.yaml'), mode='rb', encoding='utf-8') as f:\n self.AppInfo = yaml.safe_load(f)\n\n\n def applyYAML(self):\n print ('Create namespace: '+str(self.AppInfo['Namespace']))\n TmpResponse = self.k8sObj.createNamespace(name=self.AppInfo['Namespace'])\n if TmpResponse['ret_code'] != 0:\n print (TmpResponse)\n return TmpResponse\n\n\n print ('Apply NacosTool ....')\n if True:\n try:\n print ('delete pod....')\n tmp=self.k8sObj.deleteNamespacedPod(name='nacostool', namespace=self.AppInfo['Namespace'])\n print (tmp)\n except Exception as e:\n pass\n\n TmpTargetNamespaceDIR = os.path.join(self.AppInfo['TargetNamespaceDIR'], self.AppInfo['Namespace'],\n self.AppInfo['AppName'])\n TmpTargetNamespaceDIR = os.path.normpath(os.path.realpath(TmpTargetNamespaceDIR))\n\n\n self.k8sObj.createResourceFromYaml(filepath=os.path.join(TmpTargetNamespaceDIR, 'resource', 'nacostool-pv.yaml'),\n namespace=self.AppInfo['Namespace']\n )\n self.k8sObj.createResourceFromYaml(filepath=os.path.join(TmpTargetNamespaceDIR, 'resource', 'nacostool-pvc.yaml'),\n namespace=self.AppInfo['Namespace']\n )\n\n\n TmpResponse = self.k8sObj.createResourceFromYaml(filepath=os.path.join(TmpTargetNamespaceDIR, 'resource',\n 'nacostool-deploy.yaml'),namespace=self.AppInfo['Namespace'])\n if TmpResponse['ret_code'] != 0:\n print (TmpResponse)\n return TmpResponse\n\n isRunning=False\n for itime in range(self.RetryTimes):\n print ('current times: '+str(itime))\n TmpResponse = self.k8sObj.getNamespacedPod(name='nacostool',\n namespace=self.AppInfo['Namespace'])['result'].to_dict()\n\n TmpPodStatus = TmpResponse['status']['phase']\n print (TmpPodStatus)\n\n if TmpPodStatus == 'Failed':\n print ('Pod failed ')\n return {\n 'ret_code': 1,\n 'result': 'Pod failed'\n }\n elif TmpPodStatus == 'Succeeded':\n print ('Pod Succeeded')\n return {\n 'ret_code': 0,\n 'result': 'Pod Succeeded'\n }\n\n print ('Pod is running, waitting...')\n sleep (10)\n\n print ('Timeout waitting for pod')\n return {\n 'ret_code': 1,\n 'result': 'Timeout waitting for pod'\n }\n\n\n\n def start(self):\n TmpResponse = self.setupStorage()\n if TmpResponse['ret_code'] != 0:\n return TmpResponse\n\n self.renderTemplate()\n\n TmpResponse = self.applyYAML()\n return TmpResponse\n\n\n\n\nif __name__ == \"__main__\":\n tmp = NacosTool(namespace='sly2', nfsinfo=dict(hostname='192.168.200.168', port=1022, username='root', password='!QAZ2wsx1234',\n basepath='/TRS/DATA'))\n tmp.start()\n","sub_path":"apps/common/nacostool/nacostool.py","file_name":"nacostool.py","file_ext":"py","file_size_in_byte":8221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} 
+{"seq_id":"638207894","text":"#!/usr/bin/env python\n\nimport logging\n\nimport psutil\n\n\nLANTERN_ARGS = set(\"{{ lantern_args }}\".split())\n\ndef run():\n    any_killed = False\n    try:\n        lantern_pid = int(file('{{ lantern_pid }}').read().strip())\n    except Exception as e:\n        logging.exception(e)\n        lantern_pid = \"\"\n    for proc in psutil.process_iter():\n        if (proc.pid == lantern_pid\n            or (len(LANTERN_ARGS.intersection(proc.cmdline()))\n                > len(LANTERN_ARGS) / 2)):\n            logging.info(\"Terminating %r...\" % proc.cmdline())\n            proc.terminate()\n            any_killed = True\n    if not any_killed:\n        logging.info(\"No Lantern found.\")\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO,\n                        filename='/home/lantern/kill_lantern.log',\n                        format='%(asctime)s %(levelname)-8s %(message)s')\n    run()\n","sub_path":"salt/fallback_proxy/kill_lantern.py","file_name":"kill_lantern.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"308808532","text":"\nfrom airflow import DAG\nfrom airflow.contrib.sensors.file_sensor import FileSensor\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime, timedelta\n\nimport csv\nimport requests\nimport json\n\ndefault_args = {\n    \"owner\": \"airflow\",\n    \"start_date\": datetime(2019, 1, 1),\n    \"depends_on_past\": False,\n    \"email_on_failure\": False,\n    \"email_on_retry\": False,\n    \"email\": \"youremail@host.com\",\n    \"retries\": 1,\n    \"retry_delay\": timedelta(minutes=5)\n    }\n\nwith DAG(dag_id=\"test_kerberos_conn\", schedule_interval=\"0 1 * * *\", default_args = default_args, ) as dag:\n    \n    start = DummyOperator(task_id='start_task', retries=3)\n    \n    start_Bash = BashOperator(\n        task_id='start_Bash',\n        bash_command=\"echo hello BashOperator\",\n        retries=3\n    )\n\n    test_kerberos = BashOperator(\n        task_id=\"test_kerberos\",\n        bash_command=\"\"\"\n        echo $HADOOP_HOME\n        klist\n        hdfs dfs -ls /user\n        echo \"${AIRFLOW_HOME}\"\n        hdfs dfs -put $AIRFLOW_HOME/dags/files/forex_currencies.csv /tmp\n        hdfs dfs -ls /tmp\n        \"\"\",\n        retries=3\n    )\n\n# Flow\nstart >> start_Bash >> test_kerberos\n","sub_path":"airflow-section-3/mnt/airflow/dags/test_kerberos_conn.py","file_name":"test_kerberos_conn.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"353600974","text":"import numpy as np\nimport cv2\nimport sys\nimport math\nfrom matplotlib import pyplot as plt\nimport string\nfrom os import listdir\nfrom os.path import isfile, join\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.utils.data as data_utils \nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom model import *\n\n\nd = dict.fromkeys(string.ascii_uppercase,[])\nd1 = dict.fromkeys(string.digits,[])\nd.update(d1)\ncnt=0\nlength=0\nfor key in d.keys():\n\tmypath = 'Classes_test/'+key+'/';\n\tonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\tlength+=len(onlyfiles)\n\t#print(onlyfiles)\n\td[key] = onlyfiles\nprint(length)\ninp=np.ones((length,1,40,40))\ntarget=np.zeros((length,))\ni=0\nfor key in d.keys():\n\tlst = []\n\tfor element in d[key]:\n\t\ttemp_path='Classes_test/'+key+'/'+ 
element\n\t\tlst.append(temp_path)\n\t\ttemp=cv2.imread(temp_path,0)\n\t\t# temp= cv2.resize(temp,(40,40),interpolation=cv2.INTER_CUBIC)\n\t\t#temp=temp.ravel()\n\t\t#print(type(temp),temp.shape,inp.shape,type(inp))\n\t\t#print(inp.shape)\n\t\tinp[i][0]=temp\n\t\tinp[i][0]=temp\n\t\tif (ord(key)<65):\n\t\t\tnum=ord(key)-48\n\t\telse:\n\t\t\tnum=ord(key)-55\n\t\t# target[i][num]=1\n\t\ttarget[i]=num\n\t\ti+=1\n\t\t\t\t#print(inp.shape,target.shape)\n\tcnt+=1\n\td[key] = lst\n\tprint(key,inp.shape)\n\nfeatures=torch.from_numpy(inp)\ntargets=torch.from_numpy(target)\n#target=target.reshape((target.shape[0],))\n#print(inp.shape,target.shape,type(data_utils.TensorDataset))\ntest = data_utils.TensorDataset(features, targets) \ntest_loader = data_utils.DataLoader(test, batch_size=20, shuffle=True)\n\n\nnet=torch.load('trainmodel_web1.pt')\n\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n# images=images.double()\n# labels=labels.long()\noutputs = net(Variable(images).float())\ncorrect = 0\ntotal = 0\nfor data in test_loader:\n images, labels = data\n outputs = net(Variable(images).float())\n _, predicted = torch.max(outputs.data, 1)\n labels=labels.long()\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\nprint('Training Accuracy: %d %%' % (\n 100 * correct / total))\n","sub_path":"Webmail1/test_web1.py","file_name":"test_web1.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"386229334","text":"##\n## Imprima la suma de la columna 2 por cada letra de la \n## primera columna, ordneados alfabeticamente.\n##\n## A,37\n## B,36\n## C,27\n## D,23\n## E,67\n##\nx = open('data.csv','r').readlines()\nx = [z.replace('\\n', '') for z in x]\nx = [z.split('\\t') for z in x]\ncol = [i[0] for i in x]\nsum_col = [(c, sum([int(i[1]) for i in x if i[0] == c])) for c in set(col)]\nprint(*['{},{}'.format(col,str(s_col)) for col,s_col in sorted(sum_col)], sep='\\n')\n","sub_path":"q03.py","file_name":"q03.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"3957561","text":"import os\nfrom flask import Flask\nfrom flask import request, jsonify, send_from_directory, make_response, current_app\nfrom flask_cors import CORS, cross_origin\nimport json\nimport numpy as np\nfrom PIL import Image\nimport base64\nimport io\nfrom datetime import timedelta\nimport functools\nfrom flask_talisman import Talisman\n\ncnxtSSL = ('./cert/certificate.crt','./cert/private.key')\n\napp = Flask(__name__)\nTalisman(app)\nCORS(app, support_credentials=True)\napp.url_map.strict_slashes = False\n\n\nROOT_DIR = os.getcwd()\n\n# os.chdir({ROOT_DIR+'/TFModels/research'})\n# !protoc object_detection/protos/*.proto --python_out=.\n# !python setup.py build\n# !python setup.py install\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n # use str instead of basestring if using Python 3.x\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n # use str instead of basestring if using Python 3.x\n if not isinstance(origin, str):\n origin = ', 
'.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n \"\"\" Determines which methods are allowed\n \"\"\"\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n \"\"\"The decorator function\n \"\"\"\n def wrapped_function(*args, **kwargs):\n \"\"\"Caries out the actual cross domain code\n \"\"\"\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'] = \\\n \"Origin, X-Requested-With, Content-Type, Accept, Authorization\"\n\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return functools.update_wrapper(wrapped_function, f)\n return decorator\n\n\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\n\nimport tensorflow as tf\nfrom object_detection.utils import config_util\nfrom object_detection.protos import pipeline_pb2\nfrom google.protobuf import text_format\n\n\n# Load pipeline config and build a detection model\nconfigs = config_util.get_configs_from_pipeline_file(ROOT_DIR + '/Model/pipeline.config')\ndetection_model = model_builder.build(model_config=configs['model'], is_training=False)\n\n\n# # Restore checkpoint\nckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\nckpt.restore(os.path.join(ROOT_DIR+'/Model/', 'ckpt-6')).expect_partial()\n\n\n# @tf.function\ndef detect_fn(image):\n\n global detection_model\n \n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections = detection_model.postprocess(prediction_dict, shapes)\n d = {\n \"detection_boxes\": detections['detection_boxes'].numpy().tolist(),\n \"detection_classes\": detections['detection_classes'].numpy().tolist(),\n \"detection_scores\": detections['detection_scores'].numpy().tolist()\n }\n\n return json.dumps(d,cls=NumpyEncoder)\n\n# image_np = Image.open(ROOT_DIR + '/Data/test.jpg')\n# input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n# detections = detect_fn(input_tensor)\n# print(detections)\n\n\n@app.route('/')\n# @crossdomain(origin='*')\n# @cross_origin(supports_credentials=True)\ndef hw():\n return jsonify({\"code\":200,\"data\": 'What\\'s the sqrt of 13325?'})\n\n\n\n@app.route('/')\n# @crossdomain(origin='*')\n# @cross_origin(supports_credentials=True)\ndef hwf(path):\n if path != \"\":\n return send_from_directory('',path)\n else:\n return jsonify({\"code\":404,\"data\": 'file not found'})\n\n\n\n@app.route('/predict', methods = ['GET', 'POST'])\n# @crossdomain(origin='*')\n# @cross_origin(supports_credentials=True)\ndef upload_file():\n print(request)\n if (request.method == 'POST' and request.json['img']):\n img = request.json['img']\n img = img.split('base64,')[1] \n img = base64.b64decode(str(img))\n img = Image.open(io.BytesIO(img)) \n img = img.convert('RGB')\n img = np.asarray(img,dtype=np.float)\n\n # return 
jsonify({\"code\":201,\"data\": img.shape})\n \n input_tensor = tf.convert_to_tensor(np.expand_dims(img, 0), dtype=tf.float32)\n d = detect_fn(input_tensor)\n \n response = jsonify({\"code\":200,\"data\": d})\n return response\n\n else:\n response = jsonify({\"code\":201,\"data\": 'Error'})\n return response\n\n\n@app.after_request\ndef after_request(response):\n # response.headers.add('Access-Control-Allow-Origin', '')\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers.add('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept, Authorization')\n return response\n\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=5000, debug=True, ssl_context = cnxtSSL )\n # app.run(host='0.0.0.0', port=80, debug=True )\n\n","sub_path":"Backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"174119096","text":"# a simple PDF scraper using pyPDF2 and tkinter for the open/save functions.\n# opens up a file and outputs the scrape of every page on a new line in a CSV.\n# poorly commented... I'll work on that\n\nimport PyPDF2\nfrom PyPDF2 import PdfFileMerger\nimport csv\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog\nimport sys\nimport os\n\nroot = Tk()\nroot.withdraw()\n\n# Popup to ask where to save the CSV and what to call the CSV\nfile_save = filedialog.asksaveasfilename(defaultextension='.csv')\n\n# print(file_save)\n\nfile_path = filedialog.askopenfilename()\n\nwith open(file_save, 'w', newline='') as csvfile:\n\n pdfFileObj = open(file_path, 'rb')\n\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pages = pdfReader.numPages\n \n # For debugging\n print(pdfReader.numPages)\n\n # can obviously be whatever you want. 
This was applicable to my situation.\n    fieldnames = ['Testpack', 'PDF_Data']\n    CSVOutput = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n    # Scrape each page in the range and output it on a new line in the CSV\n    for x in range(pages):\n        pageObj = pdfReader.getPage(x)\n\n        print(pdfReader.resolvedObjects)\n        \n        CSVOutput.writerow({'Testpack': file_path, 'PDF_Data': pageObj.extractText()})\n\n    # pageObj = pdfReader.getPage(0)\n    # print(pageObj.extractText())\nroot.deiconify()\ntext = tk.Text(root, height=2, width=30)\ntext.pack()\ntext.insert(tk.END, \"Data scrape complete.\")\nroot.mainloop()\n","sub_path":"PDF_Scraper.py","file_name":"PDF_Scraper.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"554730274","text":"#!usr/bin/env python\n\nimport sys\nver = 'This is Python version ' + sys.version;\nstring1 = 'Python is the best language for String manipulation!';\nstring2 = string1[::-1];\nstring3 = string1[::-2];\nstring4 = string1.swapcase();\nstring5 = string1.upper();\nnuma = string1.count('a');\nnumA = string1.count('A');\n\nspecial = 'The sentence \\'{}\\' contains\\n{} \\'a\\' letter, and\\n{} \\'A\\' letters!'.format(string1, numa, numA);\n\nlist1 = string1.replace(' ', '\\n');\nlist2 = string5.replace(' ', '\\n');\n\nprint('{}\\n\\n{}\\n\\n{}\\n\\n{}\\n\\n{}\\n\\n\\n{}\\n\\n{}\\n\\n{}'.format(ver, string1, string2, string3, string4, special, list1, list2));\n","sub_path":"homework/3/Prob2.py","file_name":"Prob2.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"499035066","text":"from been.core import SiteFeedSource, source_registry\n\nclass Delicious(SiteFeedSource):\n    url_format = 'http://feeds.delicious.com/v2/rss/{username}?count=50'\n    kind = 'delicious'\n    def process_event(self, event):\n        event['author'] = self.config['username']\n        event['summary'] = 'bookmarked ' + event['data']['title']\n        return event\nsource_registry.add(Delicious)\n","sub_path":"been/source/delicious.py","file_name":"delicious.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"348450348","text":"###############################\n# for interactive terminal\nimport __main__ as main\nif not hasattr(main,'__file__'):\n\tfrom kzpy3.utils2 import *\n\tpythonpaths(['kzpy3','kzpy3/Grapher_app'])\n#\n###############################\nfrom Parameters_Module import *\nfrom kzpy3.vis2 import *\n\nimport Display_Graph_Module\n#from Car_Data_app.Names_Module import *\nexec(identify_file_str)\n\"\"\"\n\t* Have playback at a fixed rate, not machine capacity\n\t* Parameterize all those little display constants\n\t* Write out total time\n\t* Print out all topic values at current time\n\t* Show left and right images\n\t* Allow programmatic display, exactly corresponding to network training needs\n\t* Need to display data from hdf5 files or from data extracted from neural network inputs/outputs\n\t* Need to display from ros\n\"\"\"\n_ = dictionary_access\n\nfor a in Args.keys():\n\t_(P,a,equals,_(Args,a))\n\nP[X_PIXEL_SIZE_INIT],P[Y_PIXEL_SIZE_INIT] = P[X_PIXEL_SIZE],P[Y_PIXEL_SIZE]\n\n\n\n\"\"\"\n##########################################\n#\nh5py_run_path = '/home/karlzipser/Desktop/bdd_car_data_July2017_LCR/h5py/direct_Tilden_LCR_23Jul17_10h27m34s_Mr_Yellow'\nl_ = 
opj(h5py_run_path,'original_timestamp_data.h5py')\n#\n##########################################\n# left-timestamp bound data\nL = h5r(l_)\nO = h5r(o_ )\nOO = {}\nfor topic_ in P[TOPICS].keys():\n\tif topic_ in L.keys():\n\t\tOO[topic_] = {}\n\t\tOO[topic_][ts] = L[ts][:]\n\t\tOO[topic_][vals] = L[topic_][:]\nOO[left_image] = {}\nOO[left_image][ts] = L[ts][:]\nOO[left_image][vals] = O[left_image][vals]\n#\n#########################################\n\n\"\"\"\n\n\n\n\n\n\n\"\"\"\nif __name__ == '__main__':\n\n\t\n\tD = Display_Graph_Module.Display_Graph(topics,OO)\n\ttimer=Timer(0)\n\twhile True:\n\t\ttimer.reset()\n\t\tD[show]()\n\t\tprint timer.time()\n\"\"\"\n\n\n\n\n\n\nimport roslib\nimport std_msgs.msg\nimport geometry_msgs.msg\nimport rospy\nimport cv2\nfrom cv_bridge import CvBridge,CvBridgeError\nfrom sensor_msgs.msg import Image\nexec(identify_file_str)\nbridge = CvBridge()\n\n\n\n\n\nR = {}\nfor topic_ in [steer, motor, state, encoder,\n\tacc_x,acc_y,acc_z,\n\tgyro_x,gyro_y,gyro_z,\n\tgyro_heading_x,gyro_heading_y,gyro_heading_z,\n\tleft_image,right_image\n\t]:\n\tR[topic_] = {ts:[],vals:[]}\n\n\ndef steer__callback(msg):\n\tR[steer][ts].append(time.time())\n\tR[steer][vals].append(msg.data)\n\ndef motor__callback(msg):\n\tR[motor][ts].append(time.time())\n\tR[motor][vals].append(msg.data)\n\ndef state__callback(msg):\n\tR[state][ts].append(time.time())\n\tR[state][vals].append(msg.data)\n\ndef encoder__callback(msg):\n\tR[encoder][ts].append(time.time())\n\tR[encoder][vals].append(msg.data)\n\ndef acc__callback(msg):\n\tt_ = time.time()\n\tR[acc_x][ts].append(t_)\n\tR[acc_x][vals].append(msg.x)\n\tR[acc_y][ts].append(t_)\n\tR[acc_y][vals].append(msg.y)\n\tR[acc_z][ts].append(t_)\n\tR[acc_z][vals].append(msg.z)\n\ndef gyro__callback(msg):\n\tt_ = time.time()\n\tR[gyro_x][ts].append(t_)\n\tR[gyro_x][vals].append(msg.x)\n\tR[gyro_y][ts].append(t_)\n\tR[gyro_y][vals].append(msg.y)\n\tR[gyro_z][ts].append(t_)\n\tR[gyro_z][vals].append(msg.z)\n\n\ndef gyro_heading__callback(msg):\n\tt_ = time.time()\n\tR[gyro_heading_x][ts].append(t_)\n\tR[gyro_heading_x][vals].append(msg.x)\n\tR[gyro_heading_y][ts].append(t_)\n\tR[gyro_heading_y][vals].append(msg.y)\n\tR[gyro_heading_z][ts].append(t_)\n\tR[gyro_heading_z][vals].append(msg.z)\n\n\ndef left_image__callback(data):\n\tR[left_image][ts].append(time.time())\n\tR[left_image][vals].append( bridge.imgmsg_to_cv2(data,\"rgb8\") )\n\ndef right_image__callback(data):\n\tR[right_image][ts].append(time.time())\n\tR[right_image][vals].append( bridge.imgmsg_to_cv2(data,\"rgb8\") )\n\n\n\nrospy.init_node('listener',anonymous=True)\n\nrospy.Subscriber('/bair_car/steer', std_msgs.msg.Int32, callback=steer__callback)\nrospy.Subscriber('/bair_car/motor', std_msgs.msg.Int32, callback=motor__callback)\nrospy.Subscriber('/bair_car/state', std_msgs.msg.Int32, callback=state__callback)\nrospy.Subscriber('/bair_car/encoder', std_msgs.msg.Float32, callback=encoder__callback)\nrospy.Subscriber('/bair_car/acc', geometry_msgs.msg.Vector3, callback=acc__callback)\nrospy.Subscriber('/bair_car/gyro', geometry_msgs.msg.Vector3, callback=gyro__callback)\nrospy.Subscriber('/bair_car/gyro_heading', geometry_msgs.msg.Vector3, callback=gyro_heading__callback)\nrospy.Subscriber(\"/bair_car/zed/right/image_rect_color\",Image,right_image__callback,queue_size = 1)\nrospy.Subscriber(\"/bair_car/zed/left/image_rect_color\",Image,left_image__callback,queue_size = 1)\n\n\n\n\n\n\nprint('Make sure this has been done if necessary:\\n\\texport 
ROS_MASTER_URI=http://nvidia@192.168.1.11:11311')\ntimer=Timer(0)\nwhile len(R[steer][ts]) < 100:\n\tprint('waiting for ROS data . . .')\n\tpause(0.5)\nwhile True:\n\ttimer.reset()\n\tfor m_ in [ts,vals]:\n\t\tif left_image in R:\n\t\t\tR[left_image][m_] = R[left_image][m_][-1:]\n\t\t\tR[right_image][m_] = R[left_image][m_][-1:]\n\tfor topic_ in R.keys():\n\t\t#print len(R[topic_][ts]),P[TOPIC_STEPS_LIMIT]\n\t\tif len(R[topic_][ts]) > P[TOPIC_STEPS_LIMIT]:\n\t\t\tfor m_ in [ts,vals]:\n\t\t\t\tR[topic_][m_] = R[topic_][m_][-P[TOPIC_STEPS_LIMIT]:]\n\tD = Display_Graph_Module.Display_Graph(topics,R)\n\tD[show]()#start_time,D[end_time]-10)\n\t#print timer.time()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#EOF\n\n#EOF","sub_path":"Grapher_app/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"253613445","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('df_user', '0001_initial'),\n ('df_goods', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cart',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('create_datetime', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),\n ('update_datetime', models.DateTimeField(auto_now=True, verbose_name='更新时间')),\n ('is_delete', models.BooleanField(verbose_name='删除标记', default=False)),\n ('goods_count', models.ImageField(verbose_name='商品数目', upload_to='')),\n ('goods', models.ForeignKey(verbose_name='商品', to='df_goods.Goods')),\n ('passport', models.ForeignKey(verbose_name='所属用户', to='df_user.Passport')),\n ],\n options={\n 'db_table': 's_cart',\n },\n ),\n ]\n","sub_path":"dailyfresh/df_cart/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"376323691","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_one_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/organizations/enable-aws-service-access.html\nif __name__ == '__main__':\n \"\"\"\n\tdisable-aws-service-access : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/organizations/disable-aws-service-access.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # service-principal : The service principal name of the AWS service for which you want to enable integration with your organization. 
This is typically in the form of a URL, such as `` service-abbreviation .amazonaws.com`` .\n    \"\"\"\n    add_option_dict = {}\n\n    #######################################################################\n    # parameter display string\n    add_option_dict[\"parameter_display_string\"] = parameter_display_string\n    # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n    write_one_parameter(\"organizations\", \"enable-aws-service-access\", \"service-principal\", add_option_dict)\n\n\n\n\n\n","sub_path":"organizations_write_1/aws-service-acces_enable.py","file_name":"aws-service-acces_enable.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"68893966","text":"from flask import Flask, request, redirect, url_for, flash\nfrom werkzeug.utils import secure_filename\nimport requests\nimport os, json\n\napp = Flask(__name__)\napp.secret_key = 'd5581c21abc693c36798ae91b765966c811c3755b95d63eb399df7ec69b5aa52'\n\nconfig = json.loads(open('config.json', 'r').read())\n\napp.config['UPLOAD_FOLDER'] = config['upload_folder']\nALLOWED_EXTENSIONS = set(config['allowed_extensions'])\n\ndef allowed_file(filename):\n    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/uploadHospitalBed', methods=['POST'])\ndef uploadHospitalBed():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        if 'file' not in request.files:\n            flash('No file part')\n            return 'NO_FILE_PART'\n        file = request.files['file']\n        # if the user does not select a file, the browser may\n        # submit an empty part without a filename\n        if file.filename == '':\n            flash('No selected file')\n            return 'NO_FILE'\n        if file and allowed_file(file.filename):\n            \n            url = 'http://localhost:5001/persistHospitalBed'\n            files = {'file': file}\n            r = requests.post(url, files=files)\n            return r.content\n        return 'VOID'\n    else:\n        return 'REQUEST_NOT_POST'","sub_path":"services/acquisition/acquisition.py","file_name":"acquisition.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"647927043","text":"import math\r\nimport numpy as np\r\n\r\n\r\ndef filtro_boxes(boxes, scores, iou_threshold=0.2, dist_eucl_threshold=55, score_threshold=0.25):\r\n    \"\"\"Filters the annotations produced by the prediction, removing those that\r\n    overlap or are superimposed on others according to a set of thresholds. These thresholds\r\n    are the IOU (intersection area / union area) and the Euclidean distance between the centres of two regions.\r\n    Afterwards, regions that do not reach the confidence level set by the\r\n    score_threshold parameter are removed.\r\n\r\n    Args:\r\n        boxes ([type]): List with the coordinates of each region\r\n        scores ([type]): List with the confidence scores of each region\r\n        score_threshold ([type]): confidence threshold\r\n        iou_threshold (float, optional): IOU threshold.\r\n        dist_eucl_threshold (int, optional): Euclidean distance threshold\r\n\r\n    Returns:\r\n        [type]: Returns a list with the coordinates of the new filtered regions.\r\n    \"\"\"\r\n    boxes_filt = boxes\r\n    scores_filt = scores\r\n\r\n    indices_delete_boxes = []\r\n    indice_i = 0\r\n\r\n    for i in boxes:\r\n        x1A = i[0]\r\n        y1A = i[1]\r\n        x2A = i[2]\r\n        y2A = i[3]\r\n\r\n        indice_j = 0\r\n        for j in boxes[indice_i:]:\r\n            iou = 0\r\n            dist_eucl = 1000\r\n            x1B = j[0]\r\n            y1B = j[1]\r\n            x2B = j[2]\r\n            y2B = j[3]\r\n\r\n            # Top-left / bottom-right\r\n            if ((x1A < x1B) and (y1A < y1B) and (x2A < x2B) and (y2A < y2B) and (x1B < x2A) and (y1B < y2A)) or (\r\n                    (x1A > x1B) and (y1A > y1B) and (x2A > x2B) and (y2A > y2B) and (x1A < x2B) and (y1A < y2B)):\r\n                x_left = max(x1A, x1B)\r\n                y_top = max(y1A, y1B)\r\n                x_right = min(x2A, x2B)\r\n                y_bottom = min(y2A, y2B)\r\n\r\n            # Top-right\r\n            elif ((x1A < x1B) and (y1A > y1B) and (x2A < x2B) and (y2A > y2B) and (x1B < x2A) and (y1A < y2B)):\r\n                x_left = x1B\r\n                y_top = y1A\r\n                x_right = x2A\r\n                y_bottom = y2B\r\n\r\n            # Bottom-left\r\n            elif ((x1A > x1B) and (y1A < y1B) and (x2A > x2B) and (y2A < y2B) and (x1A < x2B) and (y1B < y2A)):\r\n                x_left = x1A\r\n                y_top = y1B\r\n                x_right = x2B\r\n                y_bottom = y2A\r\n\r\n            # Top\r\n            elif ((x1A < x1B) and (y1A > y1B) and (x2A > x2B) and (y2A > y2B) and (y1A < y2B) and (x1B < x2A)):\r\n                x_left = x1B\r\n                y_top = y1A\r\n                x_right = x2B\r\n                y_bottom = y2B\r\n\r\n            # Right\r\n            elif ((x1A < x1B) and (y1A < y1B) and (x2A < x2B) and (y2A > y2B) and (y1A < y2B) and (x1B < x2A)):\r\n                x_left = x1B\r\n                y_top = y1B\r\n                x_right = x2A\r\n                y_bottom = y2B\r\n\r\n            # Bottom\r\n            elif ((x1A < x1B) and (y1A < y1B) and (x2A > x2B) and (y2A < y2B) and (y1A < y2B) and (x1B < x2A)):\r\n                x_left = x1B\r\n                y_top = y1B\r\n                x_right = x2B\r\n                y_bottom = y2A\r\n\r\n            # Left\r\n            elif ((x1A > x1B) and (y1A < y1B) and (x2A > x2B) and (y2A > y2B) and (y1A < y2B) and (x1B < x2A)):\r\n                x_left = x1A\r\n                y_top = y1B\r\n                x_right = x2B\r\n                y_bottom = y2B\r\n\r\n            # Top, wide\r\n            elif ((x1A > x1B) and (y1A > y1B) and (x2A < x2B) and (y2A > y2B) and (y1A > y2B) and (x1A < x2B)):\r\n                x_left = x1A\r\n                y_top = y1A\r\n                x_right = x2A\r\n                y_bottom = y2B\r\n\r\n            # Bottom, wide\r\n            elif ((x1A > x1B) and (y1A < y1B) and (x2A < x2B) and (y2A < y2B) and (y1A < y2B) and (x1A < x2B)):\r\n                x_left = x1A\r\n                y_top = y1B\r\n                x_right = x2A\r\n                y_bottom = y2A\r\n\r\n            # Left, wide\r\n            elif ((x1A > x1B) and (y1A > y1B) and (x2A > x2B) and (y2A < y2B) and (y1A < y2B) and (x1A < x2B)):\r\n                x_left = x1A\r\n                y_top = y1A\r\n                x_right = x2B\r\n                y_bottom = y2A\r\n\r\n            # Right, wide\r\n            elif ((x1A < x1B) and (y1A > y1B) and (x2A < x2B) and (y2A < y2B) and (y1A < y2B) and (x1A < x2B)):\r\n                x_left = x1B\r\n                y_top = y1A\r\n                x_right = x2A\r\n                y_bottom = y2A\r\n\r\n            # Horizontal\r\n            elif ((x1A > x1B) and (y1A < y1B) and (x2A < x2B) and (y2A > y2B) and (y1A < y2B) and (x1A < x2B)):\r\n                x_left = x1A\r\n                y_top = y1B\r\n                x_right = x2A\r\n                y_bottom = y2B\r\n\r\n            # Vertical\r\n            elif ((x1A < x1B) and (y1A > y1B) and (x2A > x2B) and (y2A < y2B) and (y1A < y2B) and (x1A < x2B)):\r\n                x_left = x1B\r\n                y_top = y1A\r\n                x_right = x2B\r\n                y_bottom = y2A\r\n\r\n            # Inside\r\n            elif (x1A < x1B and y1A < y1B and x2A > x2B and y2A > y2B) or (\r\n                    (x1A > x1B and y1A > y1B and x2A < x2B and y2A < y2B)):\r\n                centroAx = x1A + (x2A - x1A) / 2\r\n                centroAy = y1A + (y2A - y1A) / 2\r\n                centroBx = x1B + (x2B - x1B) / 2\r\n                centroBy = y1B + (y2B - y1B) / 2\r\n                dist_eucl = math.sqrt(\r\n                    (centroAx - centroBx) ** 2 + (centroAy - centroBy) ** 2)\r\n\r\n                if dist_eucl < dist_eucl_threshold:\r\n                    boxes_filt[indice_i + indice_j][:] = [0, 0, 0, 0]\r\n                    indice_j += 1\r\n                    continue\r\n\r\n                else:\r\n                    indice_j += 1\r\n                    continue\r\n\r\n            else:\r\n                indice_j += 1\r\n                continue\r\n\r\n            # The intersection of two axis-aligned bounding boxes\r\n            intersection_area = (x_right - x_left) * (y_bottom - y_top)\r\n\r\n            # compute the area of both AABBs\r\n            bb1_area = (x2A - x1A) * (y2A - y1A)\r\n            bb2_area = (x2B - x1B) * (y2B - y1B)\r\n\r\n            # compute the intersection over union\r\n            iou = intersection_area / \\\r\n                float(bb1_area + bb2_area - intersection_area)\r\n\r\n            if (iou > iou_threshold):\r\n                boxes_filt[indice_i + indice_j][:] = [0, 0, 0, 0]\r\n\r\n            indice_j += 1\r\n        indice_i += 1\r\n\r\n    for i in range(len(boxes_filt)):\r\n        result = np.all((boxes_filt[i] == 0))\r\n        if result:\r\n            indices_delete_boxes.append(i)\r\n\r\n    boxes_filt = np.delete(boxes_filt, indices_delete_boxes, axis=0)\r\n    scores_filt = np.delete(scores_filt, indices_delete_boxes, axis=0)\r\n\r\n    indices_delete_scores = []\r\n\r\n    for indice, score in enumerate(scores_filt):\r\n        if score < score_threshold:\r\n            indices_delete_scores.append(indice)\r\n\r\n    boxes_filt = np.delete(boxes_filt, indices_delete_scores, axis=0)\r\n    # scores_filt = np.delete(scores_filt, indices_delete_scores, axis=0)\r\n\r\n    return boxes_filt\r\n","sub_path":"inferencia/filtro.py","file_name":"filtro.py","file_ext":"py","file_size_in_byte":7100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"608557340","text":"import math\r\nfrom numpy import array, dot\r\nfrom numpy.random import rand\r\nimport random\r\n\r\n\r\nclass Car:\r\n    MAX_SPEED = 10\r\n    MIN_SPEED = 0\r\n\r\n    MAX_STEER_LEFT = (math.pi / 4)\r\n    MAX_STEER_RIGHT = -1 * (math.pi / 4)\r\n\r\n    def __init__(self, start_x, start_y, car_id):\r\n        self.car_id = car_id\r\n\r\n        self.start_x = start_x\r\n        self.start_y = start_y\r\n\r\n        self.x = start_x\r\n        self.y = start_y\r\n\r\n        self.direction = 0\r\n        self.speed = self.MIN_SPEED\r\n        self.steer = 0\r\n\r\n        self.dist_l = 5\r\n        self.dist_c = 5\r\n        self.dist_r = 5\r\n\r\n        self.view_range = (random.random() * 140) + 10\r\n\r\n        self.acceleration = (random.random() * 4) + 1\r\n        self.deceleration = (random.random() * 4) + 1\r\n        self.speed_threshold = (random.random() * 100) + 50\r\n\r\n        self.soft_steer = math.pi / ((random.random() * 100) + 4)\r\n        self.hard_steer = math.pi / ((random.random() * 50) + 4)\r\n\r\n        self.steer_threshold = (random.random() * 100) + 50\r\n\r\n        self.control_rods = rand(4, 4)\r\n        self.bias = rand(4) * 10\r\n\r\n        self.collisions = 0\r\n        self.checkpoint_num = 0\r\n\r\n    def speed_up(self):\r\n        if self.MIN_SPEED <= self.speed <= self.MAX_SPEED:\r\n            self.speed += self.acceleration\r\n\r\n        if self.speed > self.MAX_SPEED:\r\n            self.speed = self.MAX_SPEED\r\n\r\n    def speed_down(self):\r\n        if self.MIN_SPEED <= self.speed <= self.MAX_SPEED:\r\n            self.speed -= self.deceleration\r\n\r\n        if self.speed < self.MIN_SPEED:\r\n            self.speed = 
self.MIN_SPEED\r\n\r\n def steer_left_soft(self):\r\n if self.MAX_STEER_RIGHT <= self.steer <= self.MAX_STEER_LEFT:\r\n self.steer += self.soft_steer\r\n\r\n if self.steer > self.MAX_STEER_LEFT:\r\n self.steer = self.MAX_STEER_LEFT\r\n\r\n def steer_left_hard(self):\r\n if self.MAX_STEER_RIGHT <= self.steer <= self.MAX_STEER_LEFT:\r\n self.steer += self.hard_steer\r\n\r\n if self.steer > self.MAX_STEER_LEFT:\r\n self.steer = self.MAX_STEER_LEFT\r\n\r\n def steer_right_soft(self):\r\n if self.MAX_STEER_RIGHT <= self.steer <= self.MAX_STEER_LEFT:\r\n self.steer -= self.soft_steer\r\n\r\n if self.steer < self.MAX_STEER_RIGHT:\r\n self.steer = self.MAX_STEER_RIGHT\r\n\r\n def steer_right_hard(self):\r\n if self.MAX_STEER_RIGHT <= self.steer <= self.MAX_STEER_LEFT:\r\n self.steer -= self.hard_steer\r\n\r\n if self.steer < self.MAX_STEER_RIGHT:\r\n self.steer = self.MAX_STEER_RIGHT\r\n\r\n def update_direction(self):\r\n self.direction = (self.direction + (2 * math.pi) + self.steer) % (2 * math.pi)\r\n\r\n def move(self):\r\n inputs = [self.dist_l, self.dist_c, self.dist_r, self.speed]\r\n output = dot(inputs, self.control_rods) + self.bias\r\n\r\n if output[1] > self.speed_threshold:\r\n self.speed_up()\r\n elif output[3] > self.speed_threshold:\r\n self.speed_down()\r\n\r\n if output[2] < output[0] < self.steer_threshold:\r\n self.steer_left_soft()\r\n self.update_direction()\r\n self.steer_right_soft()\r\n\r\n elif output[0] < output[2] < self.steer_threshold:\r\n self.steer_right_soft()\r\n self.update_direction()\r\n self.steer_left_soft()\r\n\r\n elif output[0] < self.steer_threshold < output[2]:\r\n self.steer_right_hard()\r\n self.update_direction()\r\n self.steer_left_hard()\r\n\r\n elif output[2] < self.steer_threshold < output[0]:\r\n self.steer_left_hard()\r\n self.update_direction()\r\n self.steer_right_hard()\r\n\r\n elif output[2] < output[0]:\r\n self.steer_left_soft()\r\n self.update_direction()\r\n self.steer_right_soft()\r\n\r\n elif output[0] < output[2]:\r\n self.steer_right_soft()\r\n self.update_direction()\r\n self.steer_left_soft()\r\n\r\n def reset_stats(self):\r\n self.x = self.start_x\r\n self.y = self.start_y\r\n self.direction = 0\r\n self.speed = self.MIN_SPEED\r\n self.steer = 0\r\n\r\n self.dist_l = 5\r\n self.dist_c = 5\r\n self.dist_r = 5\r\n\r\n self.collisions = 0\r\n self.checkpoint_num = 0\r\n","sub_path":"model/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"384644037","text":"from menu import Menu, MenuItem\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\n\nmoney_machine = MoneyMachine()\ncoffee_maker = CoffeeMaker()\nmenu = Menu()\nis_on = True\n\n\n\nwhile is_on:\n options = menu.get_items()\n choice = input(f\"What would you like to drink? 
{options} \\nif you don't to proceed type Off \" ) .lower()\n if choice == \"off\":\n is_on = False\n elif choice==\"report\":\n coffee_maker.report()\n money_machine.report()\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"570806691","text":"import socket\r\nimport yaml\r\nfrom argparse import ArgumentParser\r\nimport json\r\nfrom protocol import validate_request, form_response\r\nfrom actions import resolve\r\nimport logging\r\n\r\n\r\ndef get_conf(conf_path):\r\n with open(conf_path) as file:\r\n return yaml.load(file, Loader=yaml.Loader)\r\n\r\n\r\nparser = ArgumentParser()\r\n\r\nparser.add_argument('-c', '--config', type=str, required=False, help='Used to specify configuration file path')\r\n\r\nargs = parser.parse_args()\r\n\r\nconfig = {}\r\npath = ''\r\n\r\nif args.config:\r\n path = args.config\r\nelse:\r\n try:\r\n path = 'conf.yml'\r\n config = get_conf(path)\r\n except FileNotFoundError:\r\n config = {'host': 'localhost', 'port': 8000, 'buffer_size': 1024}\r\n\r\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s',\r\n handlers=[logging.FileHandler('main.log'), logging.StreamHandler()])\r\n\r\nhost, port = config.get('host'), config.get('port')\r\ntry:\r\n sock = socket.socket()\r\n sock.bind((host, port))\r\n sock.listen(5)\r\n\r\n logging.info(f'Server initiated at: {host}:{port}')\r\n\r\n while True:\r\n client, (client_host, client_port) = sock.accept()\r\n logging.info(f'Client connected at host: {client_host}, port: {client_port}')\r\n received_request = client.recv(config.get('buffer_size'))\r\n received_request = json.loads(received_request.decode())\r\n if validate_request(received_request):\r\n action_name = received_request.get('action')\r\n controller = resolve(action_name)\r\n if controller:\r\n try:\r\n logging.info('Request OK')\r\n response = controller(received_request)\r\n except Exception as exception:\r\n logging.critical(f'Error encountered: {exception}')\r\n response = form_response(received_request, code=500, data='Server Error')\r\n else:\r\n logging.error('Controller not found')\r\n response = form_response(received_request, code=404, data='Action not found')\r\n else:\r\n logging.error('Invalid request')\r\n response = form_response(received_request, code=404, data='Invalid request')\r\n response = json.dumps(response)\r\n client.send(response.encode())\r\n client.close()\r\nexcept KeyboardInterrupt:\r\n print('Server was shut down')\r\n","sub_path":"messenger/server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"446427625","text":"import logging\n\nlogging.basicConfig(level=logging.WARN)\n\nDEBUG = logging.DEBUG\nWARN = logging.WARN\nERROR = logging.ERROR\nFATAL = logging.FATAL\n\ntry:\n _is_init == None\nexcept NameError:\n _isdoctest=False\n _doctest_level=logging.WARN\n _all_loggers=[]\n _is_init = None\n\n# this gets a new logger for the module\n# changes to the logging format should be made\n# here so they are uniform across the modules\ndef getLogger( name, level ):\n logger = logging.getLogger(name)\n _all_loggers.append(logger)\n if( _isdoctest ):\n logger.setLevel(_doctest_level)\n else:\n logger.setLevel(level)\n return logger\n\n# this is the default level during doctest mode\ndef setDoctest( logger ):\n _isdoctest = 
True\n # set all of the loggers to the doctest level\n for l in _all_loggers:\n l.setLevel(_doctest_level)\n","sub_path":"linux/Utils/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"12355845","text":"#!/usr/bin/env python\n\nimport sys\nimport collections\nimport itertools\n\ninfilename = 'input.txt'\nif len(sys.argv) > 2 and sys.argv[1] == '-i':\n infilename = sys.argv[2]\n\nprint('Using input file: %s' % infilename)\n\nf = open(infilename, 'r')\ndata = f.readlines()\nf.close()\n\ndata = [line.strip() for line in data]\n\nip_reg = 5\nip = 0\ndata = data[1:]\n\nops = ['addr', 'addi', 'mulr','muli','banr','bani','borr','bori','setr','seti','gtir','gtri','gtrr','eqir','eqri','eqrr']\n\nregs = (0,0,0,0,0,0)\n\ndef run_op(inst, regs):\n op,a,b,c = inst\n regs = list(regs)\n if op == 'addr':\n regs[c] = regs[a] + regs[b]\n elif op == 'addi':\n regs[c] = regs[a] + b\n elif op == 'mulr':\n regs[c] = regs[a] * regs[b]\n elif op == 'muli':\n regs[c] = regs[a] * b\n elif op == 'banr':\n regs[c] = regs[a] & regs[b]\n elif op == 'bani':\n regs[c] = regs[a] & b\n elif op == 'borr':\n regs[c] = regs[a] | regs[b]\n elif op == 'bori':\n regs[c] = regs[a] | b\n elif op == 'setr':\n regs[c] = regs[a]\n elif op == 'seti':\n regs[c] = a\n elif op == 'gtir':\n if a > regs[b]:\n regs[c] = 1\n else:\n regs[c] = 0\n elif op == 'gtri':\n if regs[a] > b:\n regs[c] = 1\n else:\n regs[c] = 0\n elif op == 'gtrr':\n if regs[a] > regs[b]:\n regs[c] = 1\n else:\n regs[c] = 0\n elif op == 'eqir':\n if a == regs[b]:\n regs[c] = 1\n else:\n regs[c] = 0\n elif op == 'eqri':\n if regs[a] == b:\n regs[c] = 1\n else:\n regs[c] = 0\n elif op == 'eqrr':\n if regs[a] == regs[b]:\n regs[c] = 1\n else:\n regs[c] = 0\n else:\n print('error', op)\n sys.exit()\n return tuple(regs)\n\nip = regs[ip_reg]\nwhile ip >= 0 and ip < len(data):\n #ip = regs[ip_reg]\n line = data[ip]\n\n op = line.split()[0]\n a,b,c = list(map(int, line.split()[1:]))\n inst = (op, a, b, c)\n \n lregs = list(regs)\n lregs[ip_reg] = ip\n regs = tuple(lregs)\n\n #print ip, regs, \n regs = run_op(inst, regs)\n ip = regs[ip_reg] + 1\n\n #print inst, regs\n \nprint(regs)\n","sub_path":"2018/19/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"149733951","text":"import abc\nimport click\nimport neuro_sdk\nimport operator\nimport sys\nfrom neuro_cli.asyncio_utils import Runner\nfrom typing import Callable, Generic, List, Optional, Sequence, Tuple, TypeVar, cast\n\nfrom neuro_flow.batch_runner import BatchRunner\nfrom neuro_flow.cli.root import Root\nfrom neuro_flow.live_runner import LiveRunner\nfrom neuro_flow.storage import FSStorage, NeuroStorageFS, Storage\n\n\nif sys.version_info >= (3, 7):\n from contextlib import AsyncExitStack\nelse:\n from async_exit_stack import AsyncExitStack\n\n_T = TypeVar(\"_T\")\n\n\nclass AsyncType(click.ParamType, Generic[_T], abc.ABC):\n def convert(\n self, value: str, param: Optional[click.Parameter], ctx: Optional[click.Context]\n ) -> _T:\n assert ctx is not None\n root = cast(Root, ctx.obj)\n with Runner() as runner:\n return runner.run(self.async_convert(root, value, param, ctx))\n\n @abc.abstractmethod\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> _T:\n pass\n\n def complete(\n 
self, ctx: click.Context, args: Sequence[str], incomplete: str\n ) -> List[Tuple[str, Optional[str]]]:\n root = cast(Root, ctx.obj)\n with Runner() as runner:\n return runner.run(self.async_complete(root, ctx, args, incomplete))\n\n @abc.abstractmethod\n async def async_complete(\n self, root: Root, ctx: click.Context, args: Sequence[str], incomplete: str\n ) -> List[Tuple[str, Optional[str]]]:\n pass\n\n\nclass LiveJobType(AsyncType[str]):\n name = \"job\"\n\n def __init__(self, allow_all: bool = False):\n self._allow_all = allow_all\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n async def async_complete( # type: ignore[return]\n self,\n root: Root,\n ctx: click.Context,\n args: Sequence[str],\n incomplete: str,\n ) -> List[Tuple[str, Optional[str]]]:\n async with AsyncExitStack() as stack:\n client = await stack.enter_async_context(neuro_sdk.get())\n storage: Storage = await stack.enter_async_context(\n FSStorage(NeuroStorageFS(client))\n )\n runner = await stack.enter_async_context(\n LiveRunner(root.config_dir, root.console, client, storage)\n )\n variants = list(runner.flow.job_ids)\n if self._allow_all:\n variants += [\"ALL\"]\n return [\n (job_id, None) for job_id in variants if job_id.startswith(incomplete)\n ]\n\n\nLIVE_JOB = LiveJobType(allow_all=False)\nLIVE_JOB_OR_ALL = LiveJobType(allow_all=True)\n\n\nclass LiveJobSuffixType(AsyncType[str]):\n name = \"suffix\"\n\n def __init__(self, *, args_to_job_id: Callable[[Sequence[str]], str]):\n self._args_to_job_id = args_to_job_id\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n async def async_complete( # type: ignore[return]\n self,\n root: Root,\n ctx: click.Context,\n args: Sequence[str],\n incomplete: str,\n ) -> List[Tuple[str, Optional[str]]]:\n job_id = self._args_to_job_id(args)\n async with AsyncExitStack() as stack:\n client = await stack.enter_async_context(neuro_sdk.get())\n storage: Storage = await stack.enter_async_context(\n FSStorage(NeuroStorageFS(client))\n )\n runner = await stack.enter_async_context(\n LiveRunner(root.config_dir, root.console, client, storage)\n )\n return [\n (suffix, None)\n for suffix in await runner.list_suffixes(job_id)\n if suffix.startswith(incomplete)\n ]\n\n\nSUFFIX_AFTER_LIVE_JOB = LiveJobSuffixType(args_to_job_id=operator.itemgetter(-1))\n\n\nclass LiveImageType(AsyncType[str]):\n name = \"image\"\n\n def __init__(self, allow_all: bool = False):\n self._allow_all = allow_all\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n async def async_complete( # type: ignore[return]\n self,\n root: Root,\n ctx: click.Context,\n args: Sequence[str],\n incomplete: str,\n ) -> List[Tuple[str, Optional[str]]]:\n async with AsyncExitStack() as stack:\n client = await stack.enter_async_context(neuro_sdk.get())\n storage: Storage = await stack.enter_async_context(\n FSStorage(NeuroStorageFS(client))\n )\n runner = await stack.enter_async_context(\n LiveRunner(root.config_dir, root.console, client, storage)\n )\n variants = [\n image\n for image, image_ctx in runner.flow.images.items()\n if image_ctx.context is not None\n ]\n if self._allow_all:\n variants += [\"ALL\"]\n return [(image, None) for image in variants if 
image.startswith(incomplete)]\n\n\nLIVE_IMAGE_OR_ALL = LiveImageType(allow_all=True)\n\n\nclass LiveVolumeType(AsyncType[str]):\n name = \"volume\"\n\n def __init__(self, allow_all: bool = False):\n self._allow_all = allow_all\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n async def async_complete( # type: ignore[return]\n self,\n root: Root,\n ctx: click.Context,\n args: Sequence[str],\n incomplete: str,\n ) -> List[Tuple[str, Optional[str]]]:\n async with AsyncExitStack() as stack:\n client = await stack.enter_async_context(neuro_sdk.get())\n storage: Storage = await stack.enter_async_context(\n FSStorage(NeuroStorageFS(client))\n )\n runner = await stack.enter_async_context(\n LiveRunner(root.config_dir, root.console, client, storage)\n )\n variants = [\n volume.id\n for volume in runner.flow.volumes.values()\n if volume.local is not None\n ]\n if self._allow_all:\n variants += [\"ALL\"]\n return [(image, None) for image in variants if image.startswith(incomplete)]\n\n\nLIVE_VOLUME_OR_ALL = LiveVolumeType(allow_all=True)\n\n\nclass BatchType(AsyncType[str]):\n name = \"batch\"\n\n def __init__(self, allow_all: bool = False):\n self._allow_all = allow_all\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n async def async_complete(\n self,\n root: Root,\n ctx: click.Context,\n args: Sequence[str],\n incomplete: str,\n ) -> List[Tuple[str, Optional[str]]]:\n variants = []\n for file in root.config_dir.config_dir.rglob(\"*.yml\"):\n # We are not trying to parse properly to allow autocompletion of\n # broken yaml files\n if \"batch\" in file.read_text():\n variants.append(file.stem)\n if self._allow_all:\n variants += [\"ALL\"]\n return [(batch, None) for batch in variants if batch.startswith(incomplete)]\n\n\nBATCH = BatchType(allow_all=False)\nBATCH_OR_ALL = BatchType(allow_all=True)\n\n\nclass BakeType(AsyncType[str]):\n name = \"bake\"\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n async def async_complete(\n self,\n root: Root,\n ctx: click.Context,\n args: Sequence[str],\n incomplete: str,\n ) -> List[Tuple[str, Optional[str]]]:\n variants = []\n async with AsyncExitStack() as stack:\n client = await stack.enter_async_context(neuro_sdk.get())\n storage: Storage = await stack.enter_async_context(\n FSStorage(NeuroStorageFS(client))\n )\n runner: BatchRunner = await stack.enter_async_context(\n BatchRunner(root.config_dir, root.console, client, storage)\n )\n try:\n async for bake in runner.get_bakes():\n variants.append(bake.bake_id)\n except ValueError:\n pass\n return [(bake, None) for bake in variants if bake.startswith(incomplete)]\n\n\nBAKE = BakeType()\n\n\nclass BakeTaskType(AsyncType[str]):\n name = \"task\"\n\n def __init__(\n self,\n *,\n args_to_bake_id: Callable[[Sequence[str]], str],\n args_to_attempt: Callable[[Sequence[str]], int],\n include_started: bool = True,\n include_finished: bool = True,\n ):\n self._args_to_bake_id = args_to_bake_id\n self._args_to_attempt = args_to_attempt\n self._include_started = include_started\n self._include_finished = include_finished\n\n async def async_convert(\n self,\n root: Root,\n value: str,\n param: Optional[click.Parameter],\n ctx: Optional[click.Context],\n ) -> str:\n return value\n\n 
async def async_complete(\n        self,\n        root: Root,\n        ctx: click.Context,\n        args: Sequence[str],\n        incomplete: str,\n    ) -> List[Tuple[str, Optional[str]]]:\n        variants: List[str] = []\n        bake_id = self._args_to_bake_id(args)\n        attempt_no = self._args_to_attempt(args)\n        async with AsyncExitStack() as stack:\n            client = await stack.enter_async_context(neuro_sdk.get())\n            storage: Storage = await stack.enter_async_context(\n                FSStorage(NeuroStorageFS(client))\n            )\n            runner: BatchRunner = await stack.enter_async_context(\n                BatchRunner(root.config_dir, root.console, client, storage)\n            )\n            attempt = await runner.get_bake_attempt(bake_id, attempt_no=attempt_no)\n            started, finished = await storage.fetch_attempt(attempt)\n            if self._include_finished:\n                variants.extend(\".\".join(parts) for parts in finished.keys())\n            if self._include_started:\n                variants.extend(\n                    \".\".join(parts) for parts in started.keys() if parts not in finished\n                )\n            return [(task, None) for task in variants if task.startswith(incomplete)]\n\n\ndef extract_attempt_no(args: Sequence[str]) -> int:\n    for index, arg in enumerate(args):\n        if arg == \"-a\" or arg == \"--attempt\":\n            try:\n                return int(args[index + 1])\n            except (ValueError, IndexError):\n                pass\n    return -1\n\n\nFINISHED_TASK_AFTER_BAKE = BakeTaskType(\n    include_started=False,\n    include_finished=True,\n    args_to_bake_id=operator.itemgetter(-1),\n    args_to_attempt=extract_attempt_no,\n)\n","sub_path":"neuro_flow/cli/click_types.py","file_name":"click_types.py","file_ext":"py","file_size_in_byte":11365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"60842387","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom word2number import w2n\n\n\ndef get_products_from_page(url):\n    \"\"\"Return data from all books.\"\"\"\n\n    def get_data_from_book(book):\n        \"\"\"Return data from one book.\"\"\"\n        src_img = book.find(\"img\").get(\"src\")\n        src_img = src_img.replace(\"../\", \"\")\n        image = \"http://books.toscrape.com/\" + src_img\n\n        in_stock = False\n        in_stock_or_not = book.find(\"p\", {\"class\": \"instock\"}).text\n        if \"In stock\" in in_stock_or_not:\n            in_stock = True\n\n        name = book.find(\"h3\").find(\"a\").text\n\n        price = book.find(\"p\", {\"class\": \"price_color\"}).text\n        price = price.replace(\"Â\", \"\")\n\n        rating = book.find(\"p\", {\"class\": \"star-rating\"}).get(\"class\")[1]\n        rating = w2n.word_to_num(rating)\n\n        return {\n            \"image\": image,\n            \"in_stock\": in_stock,\n            \"name\": name,\n            \"price\": price,\n            \"rating\": rating,\n        }\n\n    r = requests.get(url)\n    soup = BeautifulSoup(r.text, \"html.parser\")\n    books = soup.find_all(\"article\", {\"class\": \"product_pod\"})\n\n    result = list(map(get_data_from_book, books))\n    return result\n","sub_path":"Task2/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"539443763","text":"# Constants\nDIM = 4  # dimension of the board DIMxDIM\nEMPTYSLOT = 0\nQUIT = 0\n\ndef initialize_board():\n    ''' Creates the initial board according to the user input.\n    The board is a list of lists.\n    The list contains DIM elements (rows), each of which contains DIM elements (columns)'''\n    numbers = input().split()\n    numbers = [int(number) for number in numbers]\n    puzzle_board = []\n    index = 0\n    for _ in range(DIM):\n        row = numbers[index:index + DIM]\n        index += DIM\n        puzzle_board.append(row)\n\n    return puzzle_board\n    \n\ndef display(puzzle_board):\n    ''' Display the 
board, printing it one row in each line '''\n    print()\n    for i in range(DIM):\n        for j in range(DIM):\n            if puzzle_board[i][j] == EMPTYSLOT:\n                print(\"X\\t\", end=\"\")\n            else:\n                print(str(puzzle_board[i][j]) + \"\\t\", end=\"\")\n        print()\n    print()\n\n\ndef move(puzzle_board):\n    \"\"\" Takes the puzzle_board as a parameter and swaps the X and the entered number if they are adjacent \"\"\"\n    place_to_move = int(input())\n    if place_to_move == QUIT:  # End Game\n        return False\n    moving_to = 0\n    for row in puzzle_board:\n        if 0 in row:\n            position = (puzzle_board.index(row), row.index(0))  # The position of the X\n        if place_to_move in row:\n            moving_to = (puzzle_board.index(row), row.index(place_to_move))  # The position of the entered number\n\n    possible_moves = {\n        \"up\":(position[0]-1, position[1]),\n        \"down\":(position[0]+1, position[1]),\n        \"left\":(position[0], position[1]-1),\n        \"right\":(position[0], position[1]+1)\n    }\n\n    if moving_to in possible_moves.values():  # Switching places\n        puzzle_board[moving_to[0]][moving_to[1]] = EMPTYSLOT\n        puzzle_board[position[0]][position[1]] = place_to_move\n    \n    return True\n\n\npuzzle_board = initialize_board()\nplaying = True\nwhile playing:\n    display(puzzle_board)\n    playing = move(puzzle_board)\n\n# Example for a puzzle board input: 5 3 13 7 14 10 0 11 1 4 6 8 12 9 2 15 \n","sub_path":"Tímaverkefni/Practice exam/sliding_puzzle.py","file_name":"sliding_puzzle.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"516485025","text":"import argparse\r\nfrom numpy.random import rand as ra\r\nfrom functools import reduce\r\nfrom time import time\r\n\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.mllib.clustering import KMeans\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--master', help='Spark master URL (default: \"local[*]\")', default=\"local[*]\")\r\nparser.add_argument('--size', help='number of macro-records to generate (default: 10000)', default=10000, type=int)\r\nparser.add_argument('--scale', help='number of records per macro-record to generate (default: 100)', default=100, type=int)\r\nparser.add_argument('--dim', help='number of dimensions in each record (default=128)', type=int, default=128)\r\nparser.add_argument('--partitions', help='number of partitions to operate on (default=64)', type=int, default=64)\r\nparser.add_argument('--iterations', help='number of iterations in each training run (default=32)', type=int, default=32)\r\nparser.add_argument('--runs', help='number of training runs (default=10)', type=int, default=10)\r\nparser.add_argument('--clusters', help='number of cluster centers to find (default=128)', type=int, default=128)\r\nparser.add_argument('--config', metavar=\"KEY=VAL\", help=\"add KEY=VAL to Spark's configuration\", action='append', default=[], dest='config')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    args = parser.parse_args()\r\n    print(args)\r\n    protospark = SparkSession.builder.appName(\"k-means-gen\").master(args.master)\r\n    spark = reduce(lambda x, y: x.config(*y.split(\"=\")), args.config, protospark).getOrCreate()\r\n    rdd = spark.sparkContext.parallelize(range(args.size), args.partitions).flatMap(lambda f: [f] * args.scale).map(lambda x: ra(args.dim))\r\n\r\n    runs = args.runs\r\n    clusters = args.clusters\r\n    iterations = args.iterations\r\n\r\n    sc = spark.sparkContext\r\n    logger = sc._jvm.org.apache.log4j\r\n    logger.LogManager.getLogger(\"org\").
setLevel( logger.Level.ERROR )\r\n\r\n    print(\"data generated, starting timer\")\r\n    \r\n    start_time = time()\r\n\r\n    runtimes = []\r\n    \r\n    for run in range(runs):\r\n        print(\"starting run %d\" % run)\r\n        run_time = time()\r\n        model = KMeans.train(rdd, clusters, iterations)\r\n        runtimes.append(time() - run_time)\r\n        print(\"finished in %r seconds\" % runtimes[-1])\r\n\r\n    end_time = time()\r\n\r\n    sc.stop()\r\n\r\n    print(\"completed %d run%s in %s seconds; times were %r\" % (runs, (runs > 1 and \"s\" or \"\"), end_time - start_time, runtimes))\r\n","sub_path":"kmeans-memory/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"41420419","text":"import base64\nfrom itertools import chain\nfrom io import BytesIO as IO\n\nfrom PIL import Image, ImageChops\n\nfrom rdkit.Chem import AllChem as Chem\nfrom rdkit.Chem import Draw\n\ntry:\n    Draw.DrawingOptions.atomLabelFontFace = \"DejaVu Sans\"\n    Draw.DrawingOptions.atomLabelFontSize = 18\nexcept KeyError:  # Font \"DejaVu Sans\" is not available\n    pass\n\ntry:\n    # Try to import Avalon so it can be used for generation of 2d coordinates.\n    from rdkit.Avalon import pyAvalonTools as pyAv\n    USE_AVALON_2D = True\nexcept ImportError:\n    print(\"* Avalon not available. Using RDKit for 2d coordinate generation.\")\n    USE_AVALON_2D = False\n\n\ndef check_2d_coords(mol, force=False):\n    \"\"\"Check if a mol has 2D coordinates and if not, calculate them.\"\"\"\n    if not force:\n        try:\n            mol.GetConformer()\n        except ValueError:\n            force = True  # no 2D coords... calculate them\n\n    if force:\n        if USE_AVALON_2D:\n            pyAv.Generate2DCoords(mol)\n        else:\n            mol.Compute2DCoords()\n\n\ndef make_transparent(img):\n    img = img.convert(\"RGBA\")\n    pixdata = img.load()\n    width, height = img.size\n    for y in range(height):\n        for x in range(width):\n            if pixdata[x, y] == (255, 255, 255, 255):\n                pixdata[x, y] = (255, 255, 255, 0)\n    return img\n\n\ndef autocrop(im, bgcolor=\"white\"):\n    if im.mode != \"RGB\":\n        im = im.convert(\"RGB\")\n    bg = Image.new(\"RGB\", im.size, bgcolor)\n    diff = ImageChops.difference(im, bg)\n    bbox = diff.getbbox()\n    if bbox:\n        return im.crop(bbox)\n    return None  # no contents\n\n\ndef b64_mol(mol, size=300, hlsss=None):\n    img_file = IO()\n    if isinstance(mol, list):\n        img = autocrop(Draw.MolsToGridImage(mol, size=(size, size)))\n    else:\n        if hlsss is not None:\n            if isinstance(hlsss, str):\n                hlsss = hlsss.split(\",\")\n            atoms = set()\n            for smi in hlsss:\n                m = Chem.MolFromSmiles(smi)\n                if m:\n                    matches = list(chain(*mol.GetSubstructMatches(m)))\n                else:\n                    matches = []\n                if len(matches) > 0:\n                    atoms = atoms.union(set(matches))\n            atoms = list(atoms)\n        else:\n            atoms = []\n        try:\n            img = autocrop(Draw.MolToImage(mol, size=(size, size), highlightAtoms=atoms))\n        except UnicodeEncodeError:\n            print(Chem.MolToSmiles(mol))\n            mol = Chem.MolFromSmiles(\"*\")\n            img = autocrop(Draw.MolToImage(mol, size=(size, size)))\n    img = make_transparent(img)\n    img.save(img_file, format='PNG')\n    b64 = base64.b64encode(img_file.getvalue())\n    b64 = b64.decode()\n    img_file.close()\n    return b64\n\n\ndef mol_img_tag(mol, size=300, options=None):\n    if isinstance(mol, str):  # convert from Smiles on-the-fly, when necessary\n        mol = Chem.MolFromSmiles(mol)\n    check_2d_coords(mol)\n    # inline HTML image tag; the two {} slots take extra attributes and the base64 PNG data\n    tag = \"\"\"<img {} src=\"data:image/png;base64,{}\" alt=\"Mol\"/>\"\"\"\n    if options is None:\n        options = \"\"\n    img_tag = tag.format(options, b64_mol(mol, size=size))\n    return 
img_tag\n","sub_path":"mol_frame/mol_images.py","file_name":"mol_images.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"51192134","text":"# Artificial NEural Network\n\n# Step 1: Data Preprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Deal with categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n#Encoding Country\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\n#Encoding Genders\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_1.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n# Encoding the Dependent Variable\nlabelencoder_y = LabelEncoder()\ny = labelencoder_y.fit_transform(y)\n\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Part 2: Making a ANN\n# Importing libraries\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\n\n# Initializing ANN - sequence of layers\nclassifier = Sequential()\n#\n## Adding the input layer and the first hidden layer with dropout (deals with overfitting)\nclassifier.add(Dense(units=6,kernel_initializer='uniform',activation = 'relu', input_dim=11))\nclassifier.add(Dropout(rate = 0.1))\n## Adding the second hidden layer with dropout\nclassifier.add(Dense(units=6,kernel_initializer='uniform',activation = 'relu'))\nclassifier.add(Dropout(rate=0.1))\n## Adding the output layer\nclassifier.add(Dense(units=1,kernel_initializer='uniform',activation = 'sigmoid'))\n#\n## Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n#\n## Fitting the ANN to the training set\nclassifier.fit(X_train, y_train, batch_size = 10, epochs = 100)\n#\n## Part 3: Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n#\n## Make a single prediction\nnew_prediction = classifier.predict(sc.transform(np.array([[0,0,600,1,40,3,60000,2,1,1,50000]])))\nnew_prediction = (new_prediction>0.5)\n#\n## Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# Part 4: Tuning, evaluating and improving the ANN\n\n# Evaluating the ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier \nfrom sklearn.model_selection import cross_val_score\n\ndef build_classifier():\n classifier = Sequential()\n classifier.add(Dense(units=6,kernel_initializer='uniform' , activation = 'relu', input_dim=11))\n classifier.add(Dense(units=6,kernel_initializer='uniform' , activation = 'relu'))\n classifier.add(Dense(units = 1,kernel_initializer='uniform', activation = 'sigmoid'))\n classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n return classifier\n\nclassifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)\naccuracies = cross_val_score(estimator = 
classifier, X = X_train, y = y_train, cv = 10)\r\nmean = accuracies.mean()\r\nvariance = accuracies.std()\r\n\r\n# Improving the ANN\r\n# Dropout Regularization to reduce overfitting, if needed (see variance)\r\n\r\n# Tuning the ANN\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom keras.wrappers.scikit_learn import KerasClassifier \r\nfrom sklearn.model_selection import cross_val_score\r\n\r\ndef build_classifier(optimizer):\r\n    classifier = Sequential()\r\n    classifier.add(Dense(units=6,kernel_initializer='uniform' , activation = 'relu', input_dim=11))\r\n    classifier.add(Dense(units=6,kernel_initializer='uniform' , activation = 'relu'))\r\n    classifier.add(Dense(units = 1,kernel_initializer='uniform', activation = 'sigmoid'))\r\n    classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])\r\n    return classifier\r\n\r\nclassifier = KerasClassifier(build_fn = build_classifier)\r\nparameters = {'batch_size' : [25, 32],\r\n              'epochs' : [100, 500],\r\n              'optimizer' : ['adam', 'rmsprop'] }\r\n\r\ngrid_search = GridSearchCV(estimator = classifier, \r\n                           param_grid = parameters,\r\n                           scoring = 'accuracy',\r\n                           cv = 10)\r\ngrid_search = grid_search.fit(X_train, y_train)\r\nbest_parameters = grid_search.best_params_\r\nbest_accuracy = grid_search.best_score_\r\nprint(best_parameters)\r\nprint(best_accuracy)\r\n\r\n# Best Parameters = 32, 500, adam\r\n# Best Accuracy = 84,5 %\r\n","sub_path":"ML_alg/ANNs/first_ann.py","file_name":"first_ann.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"641915693","text":"# Author: hao.ren3\n# Date: 2019/10/8 15:10\n# IDE: PyCharm\n\nfrom matplotlib.patches import Ellipse\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Draw an ellipse for a given position and covariance\ndef draw_ellipse(position, covariance, ax=None, **kwargs):\n    ax = ax or plt.gca()\n    # Convert the covariance to principal axes\n    if covariance.shape == (2, 2):\n        U, s, Vt = np.linalg.svd(covariance)\n        angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n        width, height = 2 * np.sqrt(s)\n    else:\n        angle = 0\n        width, height = 2 * np.sqrt(covariance)\n\n    # Draw the ellipse\n    for nsig in range(1, 4):\n        ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n                             angle, **kwargs))\n# Plot the GMM clustering\ndef plot_gmm(gmm, X, label=True, ax=None):\n    ax = ax or plt.gca()\n    labels = gmm.fit(X).predict(X)\n    if label:\n        ax.scatter(X[:, 0], X[:, 1], c=labels, s=4, cmap='viridis', zorder=2)\n    else:\n        ax.scatter(X[:, 0], X[:, 1], s=4, zorder=2)\n    ax.axis('equal')\n    w_factor = 0.2 / gmm.weights_.max()\n    for pos, covar, w in zip(gmm.means_, gmm.covariances_ , gmm.weights_):\n        draw_ellipse(pos, covar, alpha=w * w_factor)\n    plt.show()","sub_path":"Hao_Test/tools/ellipse.py","file_name":"ellipse.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"271602534","text":"#!/usr/bin/env python3\n\n\"\"\"this is a short script that runs findRFI and saves the results to a python pickle file\"\"\"\n\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom LoLIM.utilities import processed_data_dir, logger\nfrom LoLIM.IO.raw_tbb_IO import MultiFile_Dal1, filePaths_by_stationName\nfrom LoLIM.findRFI import FindRFI\n\nfrom os import mkdir\nfrom os.path import isdir\nfrom pickle import dump\n\nfrom LoLIM.definitions import definitions\nD = definitions()\n\n\n## these lines are anachronistic and should be fixed at some point\nfrom LoLIM import utilities\nutilities.default_raw_data_loc = D.default_raw_data_loc\nutilities.default_processed_data_loc = 
D.default_processed_data_loc\n\nlog = logger()\n\nif __name__ == \"__main__\":\n timeID = D.timeID\n output_folder = \"/findRFI\"\n out_fname = \"/findRFI_results\"\n block_size = D.block_size\n initial_block = 3000\n num_blocks = 20\n max_blocks = 500\n\n skip_stations = []#D.bad_stations\n\n\n processed_data_dir = processed_data_dir(timeID)\n\n output_fpath = processed_data_dir + output_folder\n if not isdir(output_fpath):\n mkdir(output_fpath)\n\n log.set(output_fpath+'/log.txt', True)\n log.take_stdout()\n\n\n #### get paths to raw data by station ####\n raw_fpaths = filePaths_by_stationName(timeID)\n\n output = {}\n\n for station in raw_fpaths.keys():\n if station in skip_stations:\n continue\n print(\"station\", station)\n\n path = output_fpath + '/' + station\n if not isdir(path):\n mkdir(path)\n\n TBB_data = MultiFile_Dal1( raw_fpaths[station] )\n out = FindRFI(TBB_data, block_size, initial_block, num_blocks, max_blocks, verbose=True, figure_location=path)\n output[station] = out\n\n with open(output_fpath+out_fname, 'wb') as fout:\n dump(output, fout)\n","sub_path":"LIM_scripts/examples/run_findRFI.py","file_name":"run_findRFI.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"617687410","text":"from django.conf.urls import patterns, include, url\nimport views\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'simple_store.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', views.catalog, name='catalog'),\n url(r'^cart/$', views.cart, name='cart'),\n url(r'^cart/remove/$', views.removefromcart, name='remove'),\n url(r'^cart/checkout/$', views.checkout, name=\"checkout\"),\n url(r'^cart/checkout/complete/$', views.completeOrder, name=\"complete_order\"),\n url(r'^admin-login/$', views.adminLogin, name=\"admin_login\"),\n url(r'^admin-panel/$', views.adminDashboard, name='admin')\n )\n\n","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"551136208","text":"#!/usr/bin/env python\n\nimport unittest\nimport sys\nimport os\nimport wx\nfrom pmagpy import pmag\nfrom dialogs import magic_grid2 as magic_grid\n\n#import wx.lib.inspection\n#import numpy as np\n#import ipmag\n\nWD = pmag.get_test_WD()\n\nclass TestMagicGrid(unittest.TestCase):\n \"\"\"\n testing for MagicGrid class\n \"\"\"\n\n def setUp(self):\n self.app = wx.App()\n self.frame = wx.Frame(None, wx.ID_ANY, 'Title', size=(600, 600))\n self.frame.pnl = wx.Panel(self.frame, name='a panel')\n row_labels = ['alpha', 'bravo', 'charlie', 'whiskey', 'x-ray', 'y', 'z']\n col_labels = ['delta', 'echo', 'foxtrot', 'gamma']\n self.grid = magic_grid.MagicGrid(self.frame.pnl, 'grid', row_labels, col_labels, size=(600, 600))\n self.grid.InitUI()\n self.grid.size_grid()\n\n def tearDown(self):\n #self.frame.Destroy() # this does not work and causes strange errors\n self.app.Destroy()\n os.chdir(WD)\n\n def test_add_row(self):\n label = 'new_label'\n self.grid.add_row(label)\n last_row = self.grid.GetNumberRows() - 1\n self.assertEqual(label, str(self.grid.GetCellValue(last_row, 0)))\n\n def test_add_row_no_label(self):\n self.grid.add_row()\n last_row = self.grid.GetNumberRows() - 1\n self.assertEqual('', self.grid.GetCellValue(last_row, 0))\n self.assertEqual('', self.grid.row_labels[-1])\n\n def test_remove_row(self):\n num_rows = self.grid.GetNumberRows()\n 
last_row_name = self.grid.GetCellValue(num_rows - 1, 0)\n self.grid.remove_row()\n self.assertEqual(num_rows - 1, self.grid.GetNumberRows())\n new_num_rows = self.grid.GetNumberRows()\n new_last_row_name = self.grid.GetCellValue(new_num_rows - 1, 0)\n self.assertNotEqual(new_num_rows, num_rows)\n self.assertNotEqual(new_last_row_name, last_row_name)\n self.assertEqual('y', self.grid.row_labels[-1])\n\n def test_remove_row_charlie(self):\n old_row_name = self.grid.GetCellValue(2, 0)\n self.assertEqual('charlie', old_row_name)\n self.grid.remove_row(2)\n self.assertEqual('whiskey', self.grid.GetCellValue(2, 0))\n self.assertEqual('whiskey', self.grid.row_labels[2])\n\n @unittest.skip('this just hangs')\n def test_add_col(self):\n label = 'new_label'\n self.grid.add_col(label)\n cols = self.grid.GetNumberCols()\n self.assertEqual(label, str(self.grid.GetColLabelValue(cols-1)))\n self.assertEqual(label, self.grid.col_labels[-1])\n\n @unittest.skipIf(sys.platform != 'darwin', 'fails remotely for unknown reason')\n def test_remove_col(self):\n self.grid.add_col('victor')\n num_cols = self.grid.GetNumberCols()\n result = self.grid.remove_col(2)\n new_num_cols = self.grid.GetNumberCols()\n self.assertNotEqual(num_cols, new_num_cols)\n # remove foxtrot, gamma should be in position 2\n self.assertEqual('gamma', self.grid.GetColLabelValue(2))\n self.assertEqual('gamma', self.grid.col_labels[2])\n self.assertNotIn('foxtrot', self.grid.col_labels)\n\n def test_changes_after_row_delete(self):\n self.grid.changes = {1, 3, 6}\n self.grid.remove_row(3)\n correct_changes = {-1, 1, 5}\n self.assertEqual(correct_changes, self.grid.changes)\n\n def test_changes_after_multiple_row_delete(self):\n self.grid.changes = {1, 2, 3, 6}\n self.grid.remove_row(2)\n self.grid.remove_row(3)\n correct_changes = {-1, 1, 2, 4}\n self.assertEqual(correct_changes, self.grid.changes)\n","sub_path":"pmagpy_tests/test_er_magic_dialogs.py","file_name":"test_er_magic_dialogs.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"46501852","text":"import Tkinter\nimport ttk\n\nwindow = Tkinter.Tk()\nnotebook = ttk.Notebook(window)\nnotebook.pack()\nsubframe = Tkinter.Frame(window)\nsubframe.pack()\nnotebook.add(subframe, text=\"tab\", state=\"normal\")\ndef buttonaction():\n notebook.tab(0, state=\"disabled\")\nbutton = Tkinter.Button(subframe, command=buttonaction, text=\"click to disable tab\")\nbutton.pack()\n\nif __name__ == \"__main__\":\n window.mainloop()","sub_path":"view/disabledTABTEST.py","file_name":"disabledTABTEST.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"38360855","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Original image\nimage = cv.imread(\"./Resources/Photos/cats.jpg\")\ncv.imshow(\"Original\", image)\n\n# Gray scale histogram\ngray_image = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\ncv.imshow(\"Gray\", gray_image)\n\ngray_hist = cv.calcHist([gray_image], [0], None, [256], [0, 256])\n\n# Masked histogram\nblank = np.zeros(image.shape[:2], dtype=\"uint8\")\ncircle_mask = cv.circle(\n blank.copy(), (image.shape[1] // 2 + 170, image.shape[0] // 2 + 100), 100, 255, -1\n)\nmasked_gray = cv.bitwise_and(gray_image, gray_image, mask=circle_mask)\nmasked = cv.bitwise_and(image, image, mask=circle_mask)\ncv.imshow(\"Masked Gray\", masked_gray)\ncv.imshow(\"Masked\", masked)\nmasked_gray_hist = 
cv.calcHist([gray_image], [0], circle_mask, [256], [0, 256])\n\nplt.figure()\nplt.title(\"Grayscale Histogram\")\nplt.ylabel(\"# of pixels\")\nplt.xlabel(\"Bins\")\nplt.plot(gray_hist, \"-k\")\nplt.plot(masked_gray_hist, \"--k\")\nplt.legend([\"Grayscale\", \"Masked Grayscale\"])\nplt.xlim([0, 256])\nplt.pause(0.001)  # workaround for non-blocking plot\n\n# BGR histograms\nbgr = cv.split(image)  # split image by channels\n\nb_histo = cv.calcHist(bgr, [0], None, [256], [0, 256])\ng_histo = cv.calcHist(bgr, [1], None, [256], [0, 256])\nr_histo = cv.calcHist(bgr, [2], None, [256], [0, 256])\nmasked_b_histo = cv.calcHist(bgr, [0], circle_mask, [256], [0, 256])\nmasked_g_histo = cv.calcHist(bgr, [1], circle_mask, [256], [0, 256])\nmasked_r_histo = cv.calcHist(bgr, [2], circle_mask, [256], [0, 256])\n\nplt.figure()\nplt.title(\"BGR Histogram\")\nplt.ylabel(\"# of pixels\")\nplt.xlabel(\"Bins\")\nplt.plot(b_histo, \"b\")\nplt.plot(g_histo, \"g\")\nplt.plot(r_histo, \"r\")\nplt.plot(masked_b_histo, \"--b\")\nplt.plot(masked_g_histo, \"--g\")\nplt.plot(masked_r_histo, \"--r\")\nplt.legend([\"Blue\", \"Green\", \"Red\", \"Masked_Blue\", \"Masked_Green\", \"Masked_Red\"])\nplt.xlim([0, 256])\nplt.pause(0.001)  # workaround for non-blocking plot\n\n\ncv.waitKey(0)\ncv.destroyAllWindows()\n","sub_path":"histograms.py","file_name":"histograms.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"468633400","text":"#!/usr/bin/env python\n# HW04_ex00\n\n# Create a program that does the following:\n# - creates a random integer from 1 - 25\n# - asks the user to guess what the number is\n# - validates input is a number\n# - tells the user if they guess correctly\n# - if not: tells them too high/low\n# - only lets the user guess five times\n# - then ends the program\n################################################################################\n# Imports\n\nfrom random import randint\n\n\n# Body\n\ndef fiveGuesses():\n\trandomNumber = randint(1,25)\n\tguesses = 1\n\twhile guesses <= 5:\n\t\tguess = raw_input(\"Guess a number between 1 and 25, please: \\n\")\n\t\t# validate the input before converting; invalid input does not use up a guess\n\t\tif not guess.strip().isdigit():\n\t\t\tprint(\"Hey, that's not a number. Please enter a number.\")\n\t\t\tcontinue\n\t\tguess = int(guess)\n\t\tif guess > randomNumber:\n\t\t\tprint(\"Too high! Try again.\")\n\t\t\tguesses = guesses + 1\n\t\telif guess < randomNumber:\n\t\t\tprint(\"Too low! Try again.\")\n\t\t\tguesses = guesses + 1\n\t\telse:\n\t\t\tprint(\"Good work; you guessed it right!\")\n\t\t\tbreak\n\tif guesses > 5:\n\t\tprint(\"Ope! You've run out of guesses. Better luck next time!\")\n\n\n################################################################################\ndef main():\n\n    fiveGuesses()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"HW04_ex00.py","file_name":"HW04_ex00.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"366969772","text":"# Software License Agreement\n__version__ = \"0.0.1\"\n__status__ = \"Production\"\n__license__ = \"BSD\"\n__copyright__ = \"Copyright (c) 2015, P.A.N.D.O.R.A. Team. 
All rights reserved.\"\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of P.A.N.D.O.R.A. Team nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n__author__ = \"Chamzas Konstantinos\"\n__maintainer__ = \"Chamzas Konstantinos\"\n__email__ = \"chamzask@gmail.com\"\n\nimport os\nimport rospkg\n\nfrom python_qt_binding import loadUi\nfrom python_qt_binding.QtCore import Slot, QTimer\nfrom python_qt_binding.QtGui import QWidget\nfrom pandora_sensor_msgs.msg import BatteryMsg\nfrom .widget_info import WidgetInfo\n\nbattery_topic = \"sensors/battery\"\n\n\nclass BatteryWidget(QWidget):\n\n \"\"\"\n BatteryWidget.start must be called in order to update topic pane.\n \"\"\"\n\n def __init__(self, plugin=None):\n\n super(BatteryWidget, self).__init__()\n\n # Load Ui and name the widget\n self.id_ = \"Battery\"\n rp = rospkg.RosPack()\n ui_file = os.path.join(\n rp.get_path('pandora_rqt_gui'),\n 'resources', 'BatteryWidget.ui')\n loadUi(ui_file, self)\n\n # create the subcribers\n self.widget_info_batteries = WidgetInfo(battery_topic, BatteryMsg)\n\n # create and connect the timer\n self.timer_refresh_widget = QTimer(self)\n self.timer_refresh_widget.timeout.connect(self.refresh_topics)\n\n def start(self):\n self.widget_info_batteries.start_monitoring()\n self.timer_refresh_widget.start(100)\n\n # Connected slot to the timer in order to refresh\n @Slot()\n def refresh_topics(self):\n\n if self.widget_info_batteries.last_message is not None:\n\n self.lcd1.display(\n self.widget_info_batteries.last_message.voltage[0])\n self.lcd2.display(\n self.widget_info_batteries.last_message.voltage[1])\n\n self.PSUBatteryBar.setValue(\n (self.widget_info_batteries.last_message.voltage[0] - 19) * 20)\n self.MotorBatteryBar.setValue(\n (self.widget_info_batteries.last_message.voltage[1] - 19) * 20)\n\n # Method called when the Widget is terminated\n def shutdown(self):\n self.widget_info_batteries.stop_monitoring()\n 
self.timer_refresh_widget.stop()\n","sub_path":"pandora_rqt_gui/src/pandora_rqt_gui/battery_widget.py","file_name":"battery_widget.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"306612418","text":"import pytest\nimport eval\nimport numpy as np\nimport classification\nimport tree\nimport data_loader as dl\n\n\ndef test_count_leaf_nodes():\n leaf = tree.TLeaf(['A','A','B'])\n t = tree.TNode(None, None, leaf, tree.TNode(None,None, leaf, leaf))\n assert(t.count_leaf_nodes() == 3)\n\n\ndef test_prune():\n in_X, in_y = dl.load_data('data/simple2.txt')\n h = (len(in_X) // 2)\n X_valid = np.split(in_X, [h])[0]\n X = np.split(in_X, [h])[1]\n y_valid = np.split(in_y, [h])[0]\n y = np.split(in_y, [h])[1]\n t = classification.DecisionTreeClassifier()\n t.train(X, y)\n assert(t.root.get_depth() == 17)\n assert(t.prune(X_valid, y_valid, alpha=0.01, in_place=False).root.get_depth() == 5)\n assert(t.prune(X_valid, y_valid, alpha=1, in_place=False).root.get_depth() == 1)\n\n\ndef test_stop_level():\n in_X, in_y = dl.load_data('data/simple2.txt')\n for stop_level in range(2, 10):\n t = classification.DecisionTreeClassifier()\n t.train(in_X, in_y, stop_level=stop_level)\n assert(t.root.get_depth() == stop_level)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"44485919","text":"\"\"\"\nThe preferences menu handles:\n - Font Size\n - Background Color\n - Text Color\n - Annotation Color\n - Annotation Size\n - Clipping Min\n - Clipping Max\n\n\"\"\"\nimport os\nimport sys\nimport locale\n\nfrom qtpy.QtCore import Qt\nfrom qtpy import QtGui\nfrom qtpy.QtWidgets import (\n QLabel, QPushButton, QGridLayout, QApplication, QHBoxLayout, QVBoxLayout,\n QTabWidget, QWidget, QScrollArea, QTextEdit)\n\nfrom pyNastran.gui.utils.qt.pydialog import PyDialog\n#from pyNastran.gui.utils.qt.qpush_button_color import QPushButtonColor\n#from pyNastran.gui.utils.qt.dialogs import save_file_dialog\n\nCREDITS = \"\"\"\npyNastran was written by Steve Doyle since 2011. This product contains the following third party modules:\n\n * Python, the programming language, written by Guido van Rossum and many contributors.\n\n * Qt5 cross-platform GUI toolkit, developed by many contributors.\n\n * PyQt5 Python bindings for Qt5, by Riverbank Computing Limited.\n\n * Python Imaging Library, developed by Secret Labs AB and Fredrik Lundh.\n\n * Scintilla, a source code editor widget, written by Neil Hodgson and many contributors.\n\n * Docutils, tools for ReST document conversion, written by David Goodger and contributors.\n\n * Pygments by Georg Brandl, Armin Ronacher, Tim Hatch, and contributors.\n\nWe gratefully acknowledge the efforts of all that have contributed to these and the other open source products and tools that are used in the development of pyNastran.\n\"\"\".replace('\\n', '
')\n\nclass AboutWindow(PyDialog):\n \"\"\"\n +-------------+\n | AboutWindow |\n +------------------------+\n | Origin/P1 cid x y z |\n | P2 cid x y z |\n | z-axis cid x y z |\n | tol cid x y z |\n | |\n | Apply OK Cancel |\n +------------------------+\n \"\"\"\n def __init__(self, data, win_parent=None, show_tol=True):\n \"\"\"\n Saves the data members from data and\n performs type checks\n \"\"\"\n PyDialog.__init__(self, data, win_parent)\n\n self._updated_preference = False\n\n self._default_font_size = data['font_size']\n #self.out_data = data\n\n self.setWindowTitle('About pyNastran GUI')\n self.create_widgets(show_tol)\n self.create_layout()\n #self.set_connections()\n self.on_font(self._default_font_size)\n #self.show()\n\n def create_widgets(self, show_tol):\n \"\"\"creates the display window\"\"\"\n # CORD2R\n #self.origin_label = QLabel(\"Origin:\")\n #self.zaxis_label = QLabel(\"Z Axis:\")\n #self.xz_plane_label = QLabel(\"XZ Plane:\")\n\n #-----------------------------------------------------------------------\n # closing\n self.apply_button = QPushButton('Apply')\n self.ok_button = QPushButton('OK')\n self.cancel_button = QPushButton('Cancel')\n\n def create_layout(self):\n ok_cancel_box = QHBoxLayout()\n ok_cancel_box.addWidget(self.apply_button)\n ok_cancel_box.addWidget(self.ok_button)\n ok_cancel_box.addWidget(self.cancel_button)\n\n #---------------------\n version_tab, len_version = _version_tab(ok_cancel_box)\n package_tab = _package_tab(len_version)\n credits_tab = _credits_tab()\n # --------------------\n tab_widget = QTabWidget()\n tab_widget.addTab(version_tab, 'Version')\n tab_widget.addTab(package_tab, 'Packages')\n tab_widget.addTab(credits_tab, 'Credits')\n\n #---------------------\n vbox_outer = QVBoxLayout()\n vbox_outer.addWidget(tab_widget)\n #---------------------\n\n self.setLayout(vbox_outer)\n #hint = vbox.sizeHint()\n #print(hint)\n\n # PySide2.QtCore.QSize(516, 212)\n #hint.setHeight(hint.height() * 1.3)\n #hint.setWidth(hint.width() * 1.1)\n #self.setFixedSize(hint)\n\n #def set_connections(self):\n #\"\"\"creates the actions for the menu\"\"\"\n #self.method_pulldown.currentIndexChanged.connect(self.on_method)\n #self.zaxis_method_pulldown.currentIndexChanged.connect(self.on_zaxis_method)\n #self.plane_color_edit.clicked.connect(self.on_plane_color)\n\n #self.apply_button.clicked.connect(self.on_apply)\n #self.ok_button.clicked.connect(self.on_ok)\n #self.cancel_button.clicked.connect(self.on_cancel)\n ## closeEvent\n #return\n\n def on_font(self, value=None):\n \"\"\"update the font for the current window\"\"\"\n if value is None:\n value = self.font_size_edit.value()\n font = QtGui.QFont()\n font.setPointSize(value)\n self.setFont(font)\n\n def on_ok(self):\n #passed = self.on_apply()\n #if passed:\n self.close()\n #self.destroy()\n\n def on_cancel(self):\n self.out_data['close'] = True\n self.close()\n\ndef get_packages(len_version=80):\n import sys\n import numpy\n import scipy\n #import matplotlib\n #import pandas\n import vtk\n from pyNastran.gui.qt_version import qt_version\n\n\n if qt_version == 'pyqt5':\n import PyQt5\n qt_name = 'PyQt5'\n _qt_version = PyQt5.__version__\n elif qt_version == 'pyside2':\n import PySide2\n qt_name = 'PySide2'\n _qt_version = PySide2.__version__\n else:\n raise NotImplementedError(qt_version)\n\n import importlib\n\n python = str(sys.version_info)\n packages = {\n 'Python' : python + ' ' * (len_version - len(python) + 15),\n 'numpy' : numpy.__version__,\n 'scipy' : scipy.__version__,\n #'matplotlib' : 
matplotlib.__version__,\n        #'pandas' : pandas.__version__,\n        'matplotlib' : 'N/A',\n        'pandas' : 'N/A',\n        'vtk' : vtk.VTK_VERSION,\n        #'PyQt5':,\n        qt_name : _qt_version,\n    }\n    for name in ['matplotlib', 'pandas', 'docopt']:\n        module = importlib.import_module(name, package=None)\n        packages[name] = module.__version__\n    return packages\n\ndef _version_tab(ok_cancel_box):\n    import pyNastran\n    import platform\n    # keep the sys.platform string under its own name so the 'platform'\n    # module import above is not shadowed\n    platform_name = sys.platform\n    localei, unused_encoding = locale.getdefaultlocale()\n    try:\n        os_version = str(sys.getwindowsversion())\n    except Exception:\n        os_version = '???'\n\n    cpu = platform.processor()\n\n    version_data = {\n        'Product': 'pyNastran GUI',\n        'Version': pyNastran.__version__,\n        'Release Type': 'Final Release',\n        'Release Date': pyNastran.__releaseDate__,\n        #'Cache Directory': ,\n        'OS' : f'win32 (sys.platform={platform_name})',\n        'OS Version' : os_version,\n        'CPU': cpu,\n        'Memory': str(['1000 bytes']),\n        'Locale': localei,\n    }\n    len_version = len(os_version)\n    grid = grid_from_dict(version_data)\n\n    hbox = QHBoxLayout()\n    hbox.addLayout(grid)\n    hbox.addStretch()\n\n    vbox = QVBoxLayout()\n    vbox.addLayout(hbox)\n    vbox.addStretch()\n    vbox.addLayout(ok_cancel_box)\n\n    #---------------------\n    version_tab = QWidget()\n    version_tab.setLayout(vbox)\n\n    return version_tab, len_version\n\ndef _package_tab(len_version=80):\n    \"\"\"makes the packages tab\"\"\"\n    packages = get_packages(len_version=len_version)\n    grid = grid_from_dict(packages)\n\n    vbox = QVBoxLayout()\n    vbox.addLayout(grid)\n    vbox.addStretch()\n\n    package_tab = QWidget()\n    package_tab.setLayout(vbox)\n    return package_tab\n\ndef grid_from_dict(mydict):\n    irow = 0\n    grid = QGridLayout()\n    for key, valuei in mydict.items():\n        label = QLabel(key + ':')\n        label.setAlignment(Qt.AlignRight)\n\n        value = QLabel(valuei)\n        value.setTextInteractionFlags(Qt.TextSelectableByMouse)\n        grid.addWidget(label, irow, 0)\n        grid.addWidget(value, irow, 1)\n        irow += 1\n    return grid\n\n#class Window(QScrollArea):\n    #def __init__(self):\n        #super(Window, self).__init__()\n        #widget = QWidget()\n        #layout = QVBoxLayout(widget)\n        #layout.setAlignment(Qt.AlignTop)\n        #for index in range(100):\n            #layout.addWidget(QLabel('Label %02d' % index))\n        #self.setWidget(widget)\n        #self.setWidgetResizable(True)\n\ndef _credits_tab():\n    #scroll = QScrollArea()\n    #scroll.setWidget(self)\n    #scroll.setWidgetResizable(True)\n    ##scroll.setFixedHeight(400)\n    #layout.addWidget(scroll)\n\n    #vbox = QVBoxLayout()\n    #vbox.addLayout(layout)\n    #vbox.addStretch()\n\n    scrollArea = QScrollArea()\n    scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n    scrollArea.setWidgetResizable(True)\n    #scrollArea->setGeometry( 10, 10, 200, 200 );\n\n    package_tab = QWidget()\n    scrollArea.setWidget(package_tab)\n\n    widget = QWidget(scrollArea)\n\n    vbox = QVBoxLayout(widget)\n    text = QTextEdit(CREDITS)\n    text.setReadOnly(True)\n    vbox.addWidget(text)\n    #vbox.addLayout(scrollArea)\n\n    package_tab = QWidget()\n    package_tab.setLayout(vbox)\n    return package_tab\n\ndef main():\n    # kills the program when you hit Ctrl+C from the command line\n    # doesn't save the current state as presumably there's been an error\n    import signal\n    signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\n    import sys\n    # Someone is launching this directly\n    # Create the QApplication\n    app = QApplication(sys.argv)\n    #The Main window\n    data = {\n        'font_size' : 8,\n        #'cids' : [0, 1, 2, 3],\n        'name' : 'main',\n\n    }\n    main_window = AboutWindow(data, show_tol=True)\n    main_window.show()\n    # Enter the main loop\n    app.exec_()\n\nif __name__ == 
\"__main__\":\n    main()\n","sub_path":"pyNastran/gui/menus/about/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":9476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"136086895","text":"import math\r\n\r\nn = int(input().strip())\r\n\r\ndef nextPermutation(nums):\r\n    i = 0\r\n    flag = False\r\n    iVal = 0\r\n    while (i < len(nums) - 1):\r\n        if nums[i] < nums[i + 1]:\r\n            flag = True\r\n            iVal = max(iVal, i)\r\n        i += 1\r\n        \r\n    if flag:\r\n        j = iVal + 1\r\n        jVal = j\r\n        while (j < len(nums)):\r\n            if nums[j] > nums[iVal]:\r\n                jVal = max(jVal, j)\r\n            j += 1\r\n        \r\n        t = nums[iVal]\r\n        nums[iVal] = nums[jVal]\r\n        nums[jVal] = t\r\n        \r\n        nums[iVal + 1:] = nums[iVal + 1:][::-1]\r\n        \r\n    else:\r\n        nums[:] = nums[:][::-1]\r\n        \r\n    return nums\r\n    \r\ndef convert(a):\r\n    s = \"\"\r\n    for x in a:\r\n        s = s + str(x)\r\n    return int(s)\r\n    \r\na = [x + 1 for x in range(n)]\r\nproductSet = set()\r\nfor _ in range(0, math.factorial(n)):\r\n    for i in range(0, n - 2):\r\n        for j in range(i + 1, n - 1):\r\n            x = convert(a[:i + 1])\r\n            y = convert(a[i + 1: j + 1])\r\n            z = convert(a[j + 1:])\r\n            if x * y == z:\r\n                productSet.add(z)\r\n    a = nextPermutation(a)\r\n    \r\nprint(sum(productSet))\r\n\r\n","sub_path":"32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"121247082","text":"def secondLargestNum(A,n):\r\n\tnumOfComparisons = 0\r\n\tif n == 0 or n == 1:\r\n\t\treturn\r\n\telif n == 2:\r\n\t\treturn min(A[0],A[1])\r\n\telse:\r\n\t\tmaxNum = A[0]\r\n\t\tindexOfMax = 0\r\n\t\tfor i in range(1,n):\r\n\t\t\tif A[i] > maxNum:\r\n\t\t\t\tmaxNum = A[i]\r\n\t\t\t\tindexOfMax = i\r\n\r\n\t\tif indexOfMax == 0:\r\n\t\t\tsecondMax = A[1]\r\n\t\t\tfor i in range(2,n):\r\n\t\t\t\tif A[i] > secondMax:\r\n\t\t\t\t\tsecondMax = A[i]\r\n\r\n\t\telse:\r\n\t\t\tsecondMax = 0\r\n\t\t\tfor i in range(1,n):\r\n\t\t\t\tif i == indexOfMax:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\telif A[i] > secondMax:\r\n\t\t\t\t\tsecondMax = A[i]\r\n\t\treturn secondMax\r\n\r\nnums = [8,7,2,4,1,5,6,3]\r\nprint(secondLargestNum(nums,len(nums)))","sub_path":"secondLargestNum.py","file_name":"secondLargestNum.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"454929648","text":"from epdatapro20082015v2 import *\r\nfrom spatialmode import *\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import cm\r\nmonths =[\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]\r\n\r\n\r\ncrop_path = r\"D:\\Cornell\\EthiopianDrought\\CropType2015\\agg_clip60.tif\"\r\ncrop = gdal.Open(crop_path)\r\ngeo_t = crop.GetGeoTransform()\r\nWidth,Height = crop.RasterXSize,crop.RasterYSize\r\ncrop_raster = crop.ReadAsArray()\r\nmask = np.where(crop_raster == 255)\r\n# crop_raster[crop_raster != 255] = np.nan\r\n# print(mask)\r\nvRow = mask[0]\r\nvCol = mask[1]\r\nind = vRow*Width + vCol\r\n\r\npvipath =r\"D:\\Cornell\\EthiopianDrought\\AData\\PVIDaily\"\r\n\r\n\r\n\r\n# compute the vector boundary of the shapefile\r\n\r\ndaShapefile = r\"D:\\Cornell\\EthiopianDrought\\ETH_outline_SHP\\ETH_outline.shp\"\r\n\r\ndriver = ogr.GetDriverByName(\"ESRI Shapefile\")\r\ndataSource = driver.Open(daShapefile, 0)\r\nlayer = dataSource.GetLayer()\r\nfeature = layer.GetFeature(0)\r\ngeo = feature.GetGeometryRef()\r\ngeo = str(geo).split(\"((\")[1].split(\"))\")[0].split(\",\")\r\nx = []\r\ny = []\r\nfor term in geo:\r\n    x.append(float(term.split(\" \")[0]))\r\n    y.append(float(term.split(\" \")[1]))\r\n\r\nx = np.array(x)\r\ny = np.array(y)\r\nx = (x - geo_t[0]) / 
geo_t[1]\r\ny = (y - geo_t[3]) / geo_t[5]\r\n\r\n# plt.imshow(crop_raster)\r\n# # plt.colorbar()\r\n# plt.plot(x,y)\r\n# plt.show()\r\n\r\nfor year in range(2003,2019):\r\n    yy = str(year)\r\n    short_pvi = gdal.Open(os.path.join(pvipath, \"short_pvi_{}.tif\".format(yy))).ReadAsArray() * (1.0)\r\n    long_pvi = gdal.Open(os.path.join(pvipath, \"long_pvi_{}.tif\".format(yy))).ReadAsArray() * (1.0)\r\n    # short_pvi_list = np.zeros_like(short_pvi,dtype=np.float)\r\n    # long_pvi_list = np.zeros_like(long_pvi, dtype=np.float)\r\n    # np.put(short_pvi_list,ind,np.take(short_pvi,ind))\r\n    # np.put(long_pvi_list,ind,np.take(long_pvi,ind))\r\n\r\n    # print(\"short min max\",short_pvi_list.min(),short_pvi_list.max())\r\n    # print(\"long min max\", long_pvi_list.min(), long_pvi_list.max())\r\n\r\n    short_pvi[crop_raster !=255] = -9999\r\n    long_pvi[crop_raster != 255] = -9999\r\n\r\n    fig = plt.figure(figsize=(8, 3))\r\n    plt.title(\"{} PVI Map \".format(yy) + '\\n', fontsize=16)\r\n    plt.xticks([])\r\n    plt.yticks([])\r\n\r\n    ax1 = fig.add_subplot(1, 2, 1)\r\n    ax1.set_title(\"Short Rains PVI Map\")\r\n    mask1 = np.where(short_pvi > -9999)\r\n    short_pvi[short_pvi == -9999] = np.nan\r\n    vmin = short_pvi[mask1].min()\r\n    vmax = short_pvi[mask1].max()\r\n    # print(\"short maxmin value\", vmin, vmax)\r\n\r\n    cax1 = ax1.imshow(short_pvi, cmap=plt.get_cmap(\"rainbow\"), vmin=0, vmax=1.0)\r\n    cbar1 = plt.colorbar(cax1, ax=ax1, fraction=0.036, pad=0.04)\r\n    ax1.set_xticks([])\r\n    ax1.set_yticks([])\r\n    ax1.plot(x,y)\r\n    ax2 = fig.add_subplot(1, 2, 2)\r\n    ax2.set_title(\"Long Rains PVI Map\")\r\n    mask2 = np.where(long_pvi > -9999)\r\n    long_pvi[long_pvi == -9999] = np.nan\r\n    vmin = long_pvi[mask2].min()\r\n    vmax = long_pvi[mask2].max()\r\n    # print(\"maxmin value\", vmin, vmax)\r\n\r\n    cax2 = ax2.imshow(long_pvi, cmap=plt.get_cmap(\"rainbow\"), vmin=0, vmax=1.0)\r\n    cbar2 = plt.colorbar(cax2, ax=ax2, fraction=0.036, pad=0.04)\r\n    ax2.set_xticks([])\r\n    ax2.set_yticks([])\r\n    ax2.plot(x,y)\r\n    fig.tight_layout()  # adjust overall whitespace\r\n    path2 = os.path.join(r\"D:\\Cornell\\EthiopianDrought\\CropCSV\\PVIMap\", yy + \"PVI60.jpg\")\r\n    plt.savefig(path2)\r\n    plt.close()\r\n    # plt.show()\r\n\r\n\r\n","sub_path":"EthiopiaDrought/PVImap仅显示像素.py","file_name":"PVImap仅显示像素.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"72005744","text":"import tkinter as tk\r\nimport sys\r\nfrom PIL import Image, ImageTk\r\nimport winsound\r\nfrom time import *\r\nfrom random import randint\r\nfrom threading import Thread\r\n\r\n############################# Global functions\r\n\r\ndef openImage(filename): # opens all types of images for further use\r\n    image = Image.open(filename)\r\n    tkImage = ImageTk.PhotoImage(image)\r\n    return tkImage\r\n\r\ndef sound(): \r\n    winsound.PlaySound(\"button.wav\", winsound.SND_ALIAS)\r\n\r\ndef getText(textFile): # opens text file, reads info from it and then closes it\r\n    try:\r\n        f = open(textFile, \"r\")\r\n        text = f.read()\r\n        return text\r\n    \r\n    finally:\r\n        f.close()\r\n\r\ndef combine_funcs(*funcs): # combines multiple functions given for further use when passed as command\r\n    def combined_func(*args, **kwargs):\r\n        for f in funcs:\r\n            f(*args, **kwargs)\r\n    return combined_func\r\n\r\ndef binarySearch(selectedGameList, gameID): #binary search that takes a list and an object and returns True if that object is in the list, otherwise False\r\n    if len(selectedGameList) != 0: \r\n        mid = len(selectedGameList)//2\r\n        if 
selectedGameList[mid]==gameID:\r\n            return True\r\n        elif gameID < selectedGameList[mid]:\r\n            return binarySearch(selectedGameList[:mid], gameID) # recursion until object is found or none is left \r\n        else:\r\n            return binarySearch(selectedGameList[mid+1:], gameID)\r\n    else:\r\n        return False\r\n    \r\n############################## Global variables:\r\n    \r\nglobal sec\r\nsec = 1 \r\nglobal selectedGameList\r\nselectedGameList = []\r\nglobal imageDict, labelDict\r\nimageDict = {}\r\nlabelDict = {}\r\nglobal playerStartPos\r\nplayerStartPos = ()\r\n\r\n############################\r\nclass Game():\r\n    '''class for defining a game'''\r\n    def __init__(self, ID, name, category, price, rating,imageName, labelName, x, y):\r\n        self.ID = ID\r\n        self.name = name\r\n        self.categ = category\r\n        self.price = price\r\n        self.rating = rating\r\n        self.imageName = imageName\r\n        self.labelName = labelName\r\n        self.X = x\r\n        self.Y = y\r\n\r\n        global imageDict, labelDict\r\n        imageDict[ID] = self.imageName #puts game id as a key and game image as a value into a dict\r\n        labelDict[ID] = self.labelName #puts game id as a key and game label used for shopping list into a dict \r\n\r\n    def setCoord(self, x, y): # updates this game's map coordinates (appears to be unused)\r\n        \r\n        self.X = x\r\n        self.Y = y\r\n########################################################################################################\r\n\r\nclass GameShop(tk.Tk):\r\n    '''GUI class for creating the game window and switching between frames'''\r\n    def __init__(self, *args, **kwargs):\r\n\r\n        tk.Tk.__init__(self, *args,**kwargs)\r\n        tk.Tk.iconbitmap(self, default = \"icon.ico\")\r\n        container = tk.Frame(self)\r\n        self.title(\"Game Shop\")\r\n        \r\n        container.pack(side = \"top\", fill = \"both\", expand = True)\r\n\r\n        self.frames = {} #all game frames\r\n        for F in (StartPage, InfoPage, SelectionPage, GamePage): #if new frame is created add it here\r\n            \r\n            frame = F(container, self)\r\n            \r\n            self.frames[F] = frame\r\n\r\n            frame.grid(row = 0, column = 0, sticky = \"nsew\")\r\n\r\n        self.show_frame(StartPage)\r\n\r\n    def show_frame(self, cont):\r\n        \r\n        frame = self.frames[cont]\r\n        frame.tkraise()\r\n\r\n\r\n##############################################################################################################################################\r\nclass StartPage(tk.Frame):\r\n    '''GUI class that sets all widgets in the startpage '''\r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n\r\n        #setting the background \r\n        self.menuBackGround = openImage('menuPage.png')\r\n        canvas1 = tk.Canvas(self, width = 1024, height = 768, bg = \"white\")\r\n        canvas1.create_image(1024/2,768/2,image = self.menuBackGround)\r\n        canvas1.pack()\r\n        #creating game title on screen\r\n        canvas1.create_text(512, 150, fill = 'grey6', font = (\"fixedsys\", 52, 'bold'), text = 'Welcome to the\\nGame Shop',justify = 'center' )\r\n\r\n        #creating start button that will open selection page\r\n        button1 = tk.Button(canvas1, text = \"START GAME\", font = (\"fixedsys\",18), command = lambda: combine_funcs(controller.show_frame(SelectionPage),sound()), cursor = 'hand2',borderwidth=3,foreground= \"white\", bg = \"forest green\", activebackground= \"lime green\", activeforeground= \"white\")\r\n        button1.place(x = 512, y = 500, height = 57, width = 292, anchor = 'n')\r\n        #creating option button that will open info page\r\n        button2 = tk.Button(canvas1, text = \"INFORMATION\",font = (\"fixedsys\",18), command = lambda:combine_funcs( controller.show_frame(InfoPage),sound()), cursor = 'hand2',borderwidth=3, 
foreground= \"white\", bg = \"DodgerBlue3\", activebackground= \"DodgerBlue2\", activeforeground= \"white\")\r\n        button2.place(x = 512, y = 575, height = 57, width = 292, anchor = 'n')\r\n        #creating exit button that will shut down the program\r\n        button3 = tk.Button(canvas1, text = \"EXIT GAME\",font = (\"fixedsys\",18), command = lambda: combine_funcs(sound(), app.destroy()), cursor = 'hand2', borderwidth= 3,foreground= \"white\", bg = \"firebrick4\", activebackground= \"firebrick3\", activeforeground= \"white\")\r\n        button3.place(x = 512, y = 650, height = 57, width = 292, anchor = 'n')\r\n        \r\n\r\n#############################################################################################################################################\r\nclass InfoPage(tk.Frame):\r\n    \"\"\"GUI class that creates all widgets in info page and manages them \"\"\"\r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n        #images that are used in this class, must be created as 'self.image' otherwise they won't be shown\r\n        self.infoBackGround = openImage('menuPage.png')\r\n        self.backIcon = openImage('back.png')\r\n        self.forwardIcon = openImage('for.png')\r\n        #setting the background\r\n        canvas2 = tk.Canvas(self, width = 1024, height = 768, bg = \"white\")\r\n        canvas2.create_image(1024/2,768/2,image = self.infoBackGround)\r\n        canvas2.pack()\r\n        #creating the title and middle text that are permanent\r\n        canvas2.create_text(512, 100, fill = 'grey6', font = (\"fixedsys\", 48, \"bold\"), text = \"INFORMATION\", justify = 'center')\r\n        canvas2.create_text(512, 450, fill = 'grey6', font = (\"fixedsys\", 18), text = getText('text7.txt'),justify = 'center' )\r\n        \r\n        self.pageCount = 0 #page counter to know on which part of the changeable text we are\r\n        #navigation buttons\r\n        buttonBack = tk.Button(canvas2, text = \"<\", font = (\"fixedsys\",18,'bold'),state = 'disabled', command = lambda : goBack(self.pageCount), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\", activebackground= \"purple3\", activeforeground= \"white\")\r\n        buttonBack.place(x = 100,y = 430, height = 35, width = 35)\r\n        buttonForward = tk.Button(canvas2, text = \">\", font = (\"fixedsys\",18,'bold'), command = lambda : goForward(self.pageCount), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\", activebackground= \"purple3\", activeforeground= \"white\")\r\n        buttonForward.place(x = 890,y = 430, height = 35, width = 35)\r\n        #text that will be changed when navigation button pressed\r\n        changeableText = canvas2.create_text(512, 295, fill = 'grey6', font = (\"fixedsys\", 18), text = getText('text1.txt'), justify = 'center')\r\n        #start game button that shows selection page frame\r\n        button4 = tk.Button(canvas2, text = \"START GAME\", font = (\"fixedsys\",18), command = lambda: combine_funcs(controller.show_frame(SelectionPage),sound(), resetPages()), cursor = 'hand2',borderwidth=3,foreground= \"white\", bg = \"forest green\", activebackground= \"lime green\", activeforeground= \"white\")\r\n        button4.place(x = 512, y = 575, height = 57, width = 292, anchor = 'n')\r\n        #button that shows menu frame again\r\n        button5 = tk.Button(canvas2, text = \"BACK\", font = (\"fixedsys\",18), command = lambda :combine_funcs( controller.show_frame(StartPage),sound(), resetPages()), cursor = 'hand2', borderwidth= 3,foreground= \"white\", bg = \"dark orange\", activebackground= \"orange\", activeforeground= \"white\")\r\n        button5.place(x = 512, y = 650, height = 57, width = 292, anchor = 'n')\r\n\r\n        
def resetPages(): # pages should reset when leaving info page\r\n self.pageCount = 0\r\n txt = getText('text1.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n \r\n def goForward(count): # when forward button pressed, change changeable text data file into the further one and add +1 to the counter\r\n\r\n self.pageCount = count + 1\r\n if self.pageCount== 1:\r\n txt = getText('text2.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 2:\r\n txt = getText('text3.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 3:\r\n txt = getText('text4.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 4:\r\n txt = getText('text5.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount== 5:\r\n txt = getText('text6.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n\r\n #this part handles button state - back disabled when on the first page and forward - when on the last one\r\n if self.pageCount >0:\r\n buttonBack.config(state = 'normal')\r\n else:\r\n buttonBack.config(state = 'disabled')\r\n \r\n if self.pageCount < 5:\r\n buttonForward.config(state ='normal')\r\n else:\r\n buttonForward.config(state = 'disabled')\r\n\r\n \r\n def goBack(count):# when back button pressed, change changeable text data file into the previous one and add -1 to the counter\r\n\r\n self.pageCount = count - 1\r\n if self.pageCount == 0:\r\n txt = getText('text1.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 1:\r\n txt = getText('text2.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 2:\r\n txt = getText('text3.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 3:\r\n txt = getText('text4.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n elif self.pageCount == 4:\r\n txt = getText('text5.txt')\r\n canvas2.itemconfigure(changeableText, text = txt)\r\n\r\n if self.pageCount > 0 :\r\n buttonBack.config(state = 'normal')\r\n else:\r\n buttonBack.config(state = 'disabled')\r\n \r\n if self.pageCount < 5:\r\n buttonForward.config(state = 'normal')\r\n else:\r\n buttonForward.config(state = 'disabled')\r\n########################################################################################################################\r\nclass SelectionPage(tk.Frame):\r\n '''GUI class that creates objects in selection page and manages them'''\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n # defining all images that are used by this class\r\n self.selectionBackGround = openImage('selectPage.png')\r\n self.miniShop = openImage('miniShop.png')\r\n self.selectno=openImage('sel0.jpg')\r\n self.selectyes=openImage('sel1.png')\r\n self.menu=openImage('menu.png')\r\n self.game1 = openImage(imageDict[1])\r\n self.game2 = openImage(imageDict[2])\r\n self.game3 = openImage(imageDict[3])\r\n self.game4 = openImage(imageDict[4])\r\n self.game5 = openImage(imageDict[5])\r\n self.game6 = openImage(imageDict[6])\r\n self.game7 = openImage(imageDict[7])\r\n self.game8 = openImage(imageDict[8])\r\n self.game9 = openImage(imageDict[9])\r\n self.game10 = openImage(imageDict[10])\r\n self.game11 = openImage(imageDict[11])\r\n self.game12 = openImage(imageDict[12])\r\n self.game13 = openImage(imageDict[13])\r\n self.game14 = openImage(imageDict[14])\r\n self.game15 = openImage(imageDict[15])\r\n self.game16 = openImage(imageDict[16])\r\n 
self.game17 = openImage(imageDict[17])\r\n\r\n        self.game1L = openImage(labelDict[1])\r\n        self.game2L = openImage(labelDict[2])\r\n        self.game3L = openImage(labelDict[3])\r\n        self.game4L = openImage(labelDict[4])\r\n        self.game5L = openImage(labelDict[5])\r\n        self.game6L = openImage(labelDict[6])\r\n        self.game7L = openImage(labelDict[7])\r\n        self.game8L = openImage(labelDict[8])\r\n        self.game9L = openImage(labelDict[9])\r\n        self.game10L = openImage(labelDict[10])\r\n        self.game11L = openImage(labelDict[11])\r\n        self.game12L = openImage(labelDict[12])\r\n        self.game13L = openImage(labelDict[13])\r\n        self.game14L = openImage(labelDict[14])\r\n        self.game15L = openImage(labelDict[15])\r\n        self.game16L = openImage(labelDict[16])\r\n        self.game17L = openImage(labelDict[17])\r\n\r\n        #variables such as counters and the ones that contain widget states\r\n        self.activatedAction = False\r\n        self.activatedAdventure = False\r\n        self.activatedIndie = False\r\n        self.activatedPuzzle = False\r\n        self.activatedSports = False\r\n        self.posCount = 0\r\n        self.entrypos = 0\r\n        self.listCount =0\r\n        \r\n        def setStart(pos): # sets player position according to which button for setting start on the map was pressed, updates data in Player class\r\n            if pos == 1:\r\n                gamer.startPos = (1,0)\r\n                Player.updatePos(gamer,1, 0)\r\n            elif pos == 2:\r\n                gamer.startPos = (8,25)\r\n                Player.updatePos(gamer,8, 25)\r\n\r\n        #sets background\r\n        canvas3 = tk.Canvas(self, width = 1024, height = 768, bg = \"lightBlue\")\r\n        canvas3.create_image(1024/2,768/2,image = self.selectionBackGround)\r\n        canvas3.pack()\r\n\r\n        #category buttons\r\n        cButton1 = tk.Button(canvas3, text = \"Action\", font = (\"fixedsys\",12), command = lambda: combine_funcs( expandGames(1),createGameButtons(1, self.activatedAction)), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\", activebackground= \"purple3\", activeforeground= \"white\")\r\n        cButton1.place(x = 450, y = 100, height = 35, width = 90)\r\n        cButton2 = tk.Button(canvas3, text = \"Adventure\", font = (\"fixedsys\",12), command = lambda: combine_funcs( expandGames(2),createGameButtons(2, self.activatedAdventure)), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\",activebackground= \"purple3\",activeforeground= \"white\")\r\n        cButton2.place(x = 550, y = 100, height = 35, width = 90)\r\n        cButton3 = tk.Button(canvas3, text = \"Indie\", font = (\"fixedsys\", 12), command = lambda: combine_funcs(expandGames(3), createGameButtons(3, self.activatedIndie)), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\",activebackground= \"purple3\",activeforeground= \"white\")\r\n        cButton3.place(x = 650, y = 100, height = 35, width = 90)\r\n        cButton4 = tk.Button(canvas3, text = \"Puzzle\", font = (\"fixedsys\", 12), command = lambda: combine_funcs(expandGames(4), createGameButtons(4, self.activatedPuzzle)), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\",activebackground= \"purple3\",activeforeground= \"white\")\r\n        cButton4.place(x = 750, y = 100, height = 35, width = 90)\r\n        cButton5 = tk.Button(canvas3, text = \"Sports\", font = (\"fixedsys\",12), command = lambda: combine_funcs(expandGames(5), createGameButtons(5,self.activatedSports)), cursor = 'hand2', borderwidth = 3, foreground= \"white\", bg = \"purple4\",activebackground= \"purple3\",activeforeground= \"white\")\r\n        cButton5.place(x = 850, y = 100, height = 35, width = 90)\r\n\r\n        # labels used in window\r\n        canvas3.create_text(205, 62, fill = 'white', font = (\"fixedsys\", 
24, \"bold\"), text = \"YOUR SHOPPING LIST\" )\r\n        canvas3.create_text(690, 62, fill = 'white', font = (\"fixedsys\", 24, \"bold\"), text = \"SELECT YOUR GAMES\" )\r\n        canvas3.create_text(700, 430, fill = 'white', font = (\"fixedsys\", 24, \"bold\"), text = \"SELECT A STARTING POINT\" )\r\n        canvas3.create_image(1024/3*2+10,768/3*2+85,image = self.miniShop)\r\n\r\n        #player positioning on the map buttons\r\n        pos1Button = tk.Button(canvas3,image = self.selectno ,state = 'normal', command = lambda: combine_funcs(pos1Button.config(image = self.selectyes),pos2Button.config(image = self.selectno),confirm(), setStart(1)), cursor = 'hand2', borderwidth = 3)\r\n        pos2Button = tk.Button(canvas3,image = self.selectno ,state = 'normal', command = lambda: combine_funcs(pos2Button.config(image = self.selectyes),pos1Button.config(image = self.selectno),confirm(), setStart(2)), cursor = 'hand2', borderwidth = 3)\r\n        pos1Button.place(x = 510, y = 498, height = 32, width = 32)\r\n        pos2Button.place(x = 820, y = 614, height = 32, width = 32)\r\n\r\n        #confirmation button that opens game frame and info button that opens information page\r\n        confirmButton = tk.Button(canvas3, text = \"CONFIRM\", font = (\"fixedsys\",18), command = lambda: combine_funcs(sound(),controller.show_frame(GamePage)), state = 'disabled', cursor = 'hand2',borderwidth=3,foreground= \"white\", bg = \"forest green\", activebackground= \"lime green\", activeforeground= \"white\")\r\n        confirmButton.place(x = 250, y = 665, height = 57, width = 230, anchor = 'n')\r\n        infoButton = tk.Button(canvas3,image = self.menu, command = lambda:combine_funcs(sound(), controller.show_frame(InfoPage)), cursor = 'hand2',borderwidth=3)\r\n        infoButton.place(x = 80, y = 665, height = 57, width = 60, anchor = 'n')\r\n\r\n        #creating all available game buttons\r\n        self.gameAc1 = tk.Button(canvas3, image = self.game1 ,command = lambda: combine_funcs(intolist(1),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameAc2 = tk.Button(canvas3, image = self.game2 ,command = lambda: combine_funcs(intolist(2),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameAc3 = tk.Button(canvas3, image = self.game3 ,command = lambda:combine_funcs(intolist(3),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameAc4 = tk.Button(canvas3, image = self.game4 ,command = lambda:combine_funcs(intolist(4),displayList()), cursor = 'hand2', borderwidth = 3)\r\n\r\n        self.gameAd1 = tk.Button(canvas3, image = self.game5,command = lambda: combine_funcs(intolist(5),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameAd2 = tk.Button(canvas3, image = self.game6,command = lambda: combine_funcs(intolist(6),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameAd3 = tk.Button(canvas3, image = self.game7,command = lambda: combine_funcs(intolist(7),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameAd4 = tk.Button(canvas3, image = self.game8,command = lambda: combine_funcs(intolist(8),displayList()), cursor = 'hand2', borderwidth = 3)\r\n\r\n        self.gameI1 = tk.Button(canvas3, image = self.game9,command = lambda: combine_funcs(intolist(9),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameI2 = tk.Button(canvas3, image = self.game10,command = lambda: combine_funcs(intolist(10),displayList()), cursor = 'hand2', borderwidth = 3)\r\n        self.gameI3 = tk.Button(canvas3, image = self.game11,command = lambda: combine_funcs(intolist(11),displayList()), cursor = 'hand2', borderwidth = 3)\r\n\r\n        self.gameP1 = tk.Button(canvas3,image = 
self.game12,command = lambda: combine_funcs(intolist(12),displayList()), cursor = 'hand2', borderwidth = 3)\r\n self.gameP2 = tk.Button(canvas3,image = self.game13,command = lambda: combine_funcs(intolist(13),displayList()), cursor = 'hand2', borderwidth = 3)\r\n self.gameP3 = tk.Button(canvas3, image = self.game14,command = lambda: combine_funcs(intolist(14),displayList()), cursor = 'hand2', borderwidth = 3)\r\n\r\n self.gameS1 = tk.Button(canvas3, image = self.game15,command = lambda: combine_funcs(intolist(15),displayList()), cursor = 'hand2', borderwidth = 3)\r\n self.gameS2 = tk.Button(canvas3, image = self.game16,command = lambda: combine_funcs(intolist(16),displayList()), cursor = 'hand2', borderwidth = 3)\r\n self.gameS3 = tk.Button(canvas3, image = self.game17,command = lambda: combine_funcs(intolist(17),displayList()), cursor = 'hand2', borderwidth = 3)\r\n \r\n def createGameButtons(cat, state): # when button were created and destroyed, creates them again\r\n \r\n if cat == 1 and state == False:\r\n self.gameAc1 = tk.Button(canvas3, image = self.game1 ,command = lambda: intolist(1), cursor = 'hand2', borderwidth = 3)\r\n self.gameAc2 = tk.Button(canvas3, image = self.game2 ,command = lambda: intolist(2), cursor = 'hand2', borderwidth = 3)\r\n self.gameAc3 = tk.Button(canvas3, image = self.game3 ,command = lambda: intolist(3), cursor = 'hand2', borderwidth = 3)\r\n self.gameAc4 = tk.Button(canvas3, image = self.game4 ,command = lambda: intolist(4), cursor = 'hand2', borderwidth = 3)\r\n \r\n elif cat == 2 and state == False:\r\n self.gameAd1 = tk.Button(canvas3, image = self.game5,command = lambda: intolist(5), cursor = 'hand2', borderwidth = 3)\r\n self.gameAd2 = tk.Button(canvas3, image = self.game6,command = lambda: intolist(6), cursor = 'hand2', borderwidth = 3)\r\n self.gameAd3 = tk.Button(canvas3, image = self.game7,command = lambda: intolist(7), cursor = 'hand2', borderwidth = 3)\r\n self.gameAd4 = tk.Button(canvas3, image = self.game8,command = lambda: intolist(8), cursor = 'hand2', borderwidth = 3)\r\n \r\n elif cat == 3 and state == False:\r\n\r\n self.gameI1 = tk.Button(canvas3, image = self.game9,command = lambda: intolist(9), cursor = 'hand2', borderwidth = 3)\r\n self.gameI2 = tk.Button(canvas3, image = self.game10,command = lambda: intolist(10), cursor = 'hand2', borderwidth = 3)\r\n self.gameI3 = tk.Button(canvas3, image = self.game11,command = lambda: intolist(11), cursor = 'hand2', borderwidth = 3)\r\n\r\n elif cat == 4 and state == False:\r\n \r\n self.gameP1 = tk.Button(canvas3, image = self.game12,command = lambda: intolist(12), cursor = 'hand2', borderwidth = 3)\r\n self.gameP2 = tk.Button(canvas3, image = self.game13,command = lambda: intolist(13), cursor = 'hand2', borderwidth = 3)\r\n self.gameP3 = tk.Button(canvas3, image = self.game14,command = lambda: intolist(14), cursor = 'hand2', borderwidth = 3)\r\n\r\n elif cat == 5 and state == False:\r\n\r\n self.gameS1 = tk.Button(canvas3, image = self.game15,command = lambda: intolist(15), cursor = 'hand2', borderwidth = 3)\r\n self.gameS2 = tk.Button(canvas3, image = self.game16,command = lambda: intolist(16), cursor = 'hand2', borderwidth = 3)\r\n self.gameS3 = tk.Button(canvas3, image = self.game17,command = lambda: intolist(17), cursor = 'hand2', borderwidth = 3)\r\n \r\n\r\n def expandGames(category): # if the category button is pressed once places game buttons on screen, when category button is pressed again, game buttons are destroyed\r\n\r\n if category == 1:\r\n \r\n if self.activatedAction == 
False:\r\n self.gameAc1.place(x = 460, y = 150, height = 42, width = 66)\r\n self.gameAc2.place(x = 460, y = 200, height = 42, width = 66)\r\n self.gameAc3.place(x = 460, y = 250, height = 42, width = 66)\r\n self.gameAc4.place(x = 460, y = 300, height = 42, width = 66)\r\n\r\n elif self.activatedAction == True:\r\n self.gameAc1.destroy()\r\n self.gameAc2.destroy()\r\n self.gameAc3.destroy()\r\n self.gameAc4.destroy()\r\n\r\n if self.activatedAction == False:\r\n self.activatedAction = True\r\n else:\r\n self.activatedAction = False\r\n\r\n \r\n\r\n if category == 2:\r\n \r\n if self.activatedAdventure == False:\r\n self.gameAd1.place(x = 560, y = 150, height = 42, width = 66)\r\n self.gameAd2.place(x = 560, y = 200, height = 42, width = 66)\r\n self.gameAd3.place(x = 560, y = 250, height = 42, width = 66)\r\n self.gameAd4.place(x = 560, y = 300, height = 42, width = 66)\r\n \r\n elif self.activatedAdventure == True:\r\n self.gameAd1.destroy()\r\n self.gameAd2.destroy()\r\n self.gameAd3.destroy()\r\n self.gameAd4.destroy()\r\n\r\n if self.activatedAdventure == False:\r\n self.activatedAdventure = True\r\n else:\r\n self.activatedAdventure = False\r\n \r\n\r\n if category == 3:\r\n \r\n if self.activatedIndie == False:\r\n self.gameI1.place(x = 660, y = 150, height = 42, width = 66)\r\n self.gameI2.place(x = 660, y = 200, height = 42, width = 66)\r\n self.gameI3.place(x = 660, y = 250, height = 42, width = 66)\r\n \r\n elif self.activatedIndie == True:\r\n self.gameI1.destroy()\r\n self.gameI2.destroy()\r\n self.gameI3.destroy()\r\n\r\n if self.activatedIndie == False:\r\n self.activatedIndie = True\r\n else:\r\n self.activatedIndie = False\r\n \r\n if category == 4:\r\n \r\n if self.activatedPuzzle == False:\r\n self.gameP1.place(x = 760, y = 150, height = 42, width = 66)\r\n self.gameP2.place(x = 760, y = 200, height = 42, width = 66)\r\n self.gameP3.place(x = 760, y = 250, height = 42, width = 66)\r\n \r\n elif self.activatedPuzzle == True:\r\n self.gameP1.destroy()\r\n self.gameP2.destroy()\r\n self.gameP3.destroy()\r\n\r\n if self.activatedPuzzle == False:\r\n self.activatedPuzzle = True\r\n else:\r\n self.activatedPuzzle = False\r\n \r\n\r\n if category == 5:\r\n\r\n if self.activatedSports == False:\r\n self.gameS1.place(x = 860, y = 150, height = 42, width = 66)\r\n self.gameS2.place(x = 860, y = 200, height = 42, width = 66)\r\n self.gameS3.place(x = 860, y = 250, height = 42, width = 66)\r\n \r\n elif self.activatedSports == True:\r\n self.gameS1.destroy()\r\n self.gameS2.destroy()\r\n self.gameS3.destroy()\r\n\r\n if self.activatedSports == False:\r\n self.activatedSports = True\r\n else:\r\n self.activatedSports = False\r\n\r\n \r\n def intolist(gameID):\r\n #adds selected games into global list for selected games that will be submited for searching\r\n inGameList = binarySearch(selectedGameList, gameID)\r\n if self.listCount<7 and not inGameList:\r\n selectedGameList.append(gameID)\r\n self.listCount+=1\r\n \r\n def displayList():\r\n #prints on screen(shopping list) selected games\r\n global imageDict\r\n posx = 1024/2-450\r\n posy = 800/2-270\r\n alist = []\r\n alist=[self.game1L, \r\n self.game2L,\r\n self.game3L,\r\n self.game4L,\r\n self.game5L, \r\n self.game6L,\r\n self.game7L,\r\n self.game8L,\r\n self.game9L,\r\n self.game10L,\r\n self.game11L,\r\n self.game12L,\r\n self.game13L ,\r\n self.game14L ,\r\n self.game15L,\r\n self.game16L,\r\n self.game17L]\r\n \r\n for game in selectedGameList:\r\n gameLabel = tk.Label(canvas3, image = alist[game-1])\r\n gameLabel.place(x 
= posx, y = posy)\r\n                posy = posy + 70 \r\n\r\n        def confirm():\r\n            #makes 'confirm' button available only when game list is not empty and starting point is selected\r\n            if len(selectedGameList)>0:\r\n                confirmButton.config(state = 'normal')\r\n#############################################################################################################\r\n\r\nclass Node:\r\n    \"\"\"Class used to store the x and y coordinates of each node in the map for the A* algorithm\"\"\"\r\n    def __init__(self, x, y):\r\n        self.x = x\r\n        self.y = y\r\n    \r\n    def __eq__(self, other):\r\n        return ((self.x, self.y) ==\r\n                (other.x, other.y))\r\n    \r\n    def __hash__(self):\r\n        return hash((self.x, self.y))\r\n\r\nclass PathFind:\r\n    \"\"\" class that finds a path between two points on the map (graph), must specify bounds of the map, map should be in 0 and 1's\"\"\"\r\n    \r\n    def __init__(self, array, boundsx, boundsy):\r\n        self.boundsx = boundsx\r\n        self.boundsy = boundsy\r\n        self.array = array\r\n    \r\n    def AStar(self, start, goal):\r\n    \t# takes start and goal positions of the object and creates most optimal path between them, uses lists to store child nodes \r\n\r\n        array = self.array\r\n        closedSet = []\r\n        \r\n        openSet = [start]\r\n\r\n        cameFrom = {}\r\n\r\n        gScore = {}\r\n        gScore[start] = 0\r\n\r\n        fScore = {}\r\n        fScore[start] = self.heuristic_cost_estimate(start, goal)\r\n\r\n        \r\n        while openSet != [] :\r\n            \r\n            current = self.lowestValue(openSet, fScore)\r\n            if current.x == goal.x and current.y == goal.y:\r\n                return self.reconstructPath(cameFrom, goal)\r\n            \r\n            openSet.remove(current)\r\n            closedSet.append(current)\r\n\r\n            neighbourNodes = []\r\n            if current.x > 0:\r\n                if array[current.x -1][current.y] == 0:\r\n                    neighbourNodes.append(Node(current.x -1, current.y))\r\n            \r\n            if current.y > 0:\r\n                if array[current.x][current.y - 1] == 0:\r\n                    neighbourNodes.append(Node(current.x, current.y - 1))\r\n\r\n            if current.x != self.boundsx - 1:\r\n                if array[current.x + 1][current.y] == 0:\r\n                    neighbourNodes.append(Node(current.x + 1, current.y))\r\n\r\n            if current.y != self.boundsy - 1: \r\n                if array[current.x][current.y + 1] == 0:\r\n                    neighbourNodes.append(Node(current.x, current.y + 1))\r\n            \r\n            for neighbour in neighbourNodes:\r\n                if self.linearSearch(closedSet, neighbour):\r\n                    continue\r\n                \r\n                tentative_gScore = gScore[current] + 1\r\n\r\n                if not self.linearSearch(openSet, neighbour):\r\n                    openSet.append(neighbour)\r\n                elif tentative_gScore >= gScore[neighbour]:\r\n                    continue\r\n                \r\n                cameFrom[neighbour] = current\r\n                gScore[neighbour] = tentative_gScore\r\n                fScore[neighbour] = gScore[neighbour] + self.heuristic_cost_estimate(neighbour, goal)\r\n        \r\n        return False\r\n        \r\n        \r\n\r\n    def heuristic_cost_estimate(self, start, end):\r\n        \r\n        x = abs( start.x - end.x)\r\n        y = abs(start.y - end.y)\r\n\r\n        return x + y\r\n    \r\n    def linearSearch(self, array, val):\r\n        pos = 0\r\n        while pos < len (array):\r\n            if array[pos] == val:\r\n                return True\r\n            pos = pos + 1\r\n        return False\r\n\r\n    def lowestValue(self, openSet, dic):\r\n        lowestVal = None\r\n        lowestKey = \"\"\r\n\r\n        for value in openSet:\r\n            score = dic[value]\r\n            if lowestVal == None or score < lowestVal:\r\n                lowestKey = value\r\n                lowestVal = score\r\n        \r\n        return lowestKey\r\n\r\n    def reconstructPath(self, cameFrom, current):\r\n        total_path = [current]\r\n        while current in cameFrom:\r\n            current = cameFrom[current]\r\n            total_path.append(current)\r\n        total_path.reverse()\r\n        return total_path\r\n    \r\n\r\n\r\ndef array():\r\n    \r\n    Matrix =[\t[ 
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n\t\t[ 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1],\r\n\t\t[ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\r\n [ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]\r\n \r\n \r\n return Matrix\r\n \r\n\r\ndef getPath(startx, starty, finishx,finishy):\r\n # returns path as a list created by A*\r\n print(str(startx)+' ' +str(starty)+' ' +str( finishy)+' '+str(finishx))\r\n listcoord = []\r\n c= 0\r\n Path = PathFind(array(),27, 27)\r\n path = Path.AStar(Node(startx,starty), Node(finishy,finishx))\r\n for point in path: \r\n coo = point.x, point.y\r\n listcoord.append(coo)\r\n c+=1\r\n return(listcoord)\r\n\r\n##############################################################################################################\r\nclass GamePage(tk.Frame):\r\n ''' class for creating objects and managing them of the actual game'''\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.gameBackground = openImage('shop1.png')\r\n self.playerIcon = openImage('gamer.jpg')\r\n var = tk.IntVar()\r\n var2 = tk.IntVar()\r\n self.dec = int()\r\n self.dec = 0\r\n self.increasing = True\r\n self.budget = randint(70,100)\r\n self.rating = 0\r\n #default sorting options (according game price, in increasing order, using shell sort\r\n self.sortBy = 2\r\n self.sortIn = 1\r\n self.sortA = 1\r\n\r\n itemDict = {1:iGame1, 2:iGame2, 3:iGame3, 4:iGame4,\r\n 5:iGame5, 6:iGame6, 7:iGame7, 8:iGame8,\r\n 9:iGame9, 10:iGame10, 11:iGame11, 12:iGame12,\r\n 13:iGame13, 14:iGame14, 15:iGame15, 16:iGame16, 17:iGame17}\r\n\r\n self.game1 = openImage(imageDict[1])\r\n self.game2 = openImage(imageDict[2])\r\n self.game3 = openImage(imageDict[3])\r\n self.game4 = openImage(imageDict[4])\r\n self.game5 = openImage(imageDict[5])\r\n self.game6 = openImage(imageDict[6])\r\n self.game7 = openImage(imageDict[7])\r\n self.game8 = openImage(imageDict[8])\r\n self.game9 = openImage(imageDict[9])\r\n self.game10 = openImage(imageDict[10])\r\n 
self.game11 = openImage(imageDict[11])\r\n self.game12 = openImage(imageDict[12])\r\n self.game13 = openImage(imageDict[13])\r\n self.game14 = openImage(imageDict[14])\r\n self.game15 = openImage(imageDict[15])\r\n self.game16 = openImage(imageDict[16])\r\n self.game17 = openImage(imageDict[17])\r\n\r\n pictureDict = {1:self.game1, 2:self.game2, 3:self.game3,\r\n 4:self.game4, 5:self.game5, 6:self.game6,\r\n 7:self.game7, 8:self.game8, 9:self.game9,\r\n 10:self.game10, 11:self.game11, 12:self.game12,\r\n 13:self.game13, 14:self.game14, 15:self.game15,\r\n 16:self.game16,17:self.game17}\r\n \r\n canvas4 = tk.Canvas(self, width = 1024, height = 768, bg = \"white\")\r\n canvas4.create_image(1024/2,768/2,image = self.gameBackground)\r\n canvas4.pack()\r\n \r\n startButton = tk.Button(canvas4, text = \"START\", font = (\"fixedsys\",18),state = 'disabled', command = lambda: combine_funcs(setTime(),sound(), self.minutes.destroy(), self.seconds.destroy(),placeGamesToCollect(), showTime(), movePlayer(),startButton.destroy()), cursor = 'hand2',borderwidth=3,foreground= \"white\", bg = \"forest green\", activebackground= \"lime green\", activeforeground= \"white\")\r\n startButton.place(x = 512, y = 694, height = 57, width = 292, anchor = 'n')\r\n \r\n timeLabel = tk.Label(canvas4, font = (\"fixedsys\",48),activebackground='grey10', activeforeground='white',background= 'grey10', foreground= 'white',justify= 'center')\r\n timeLabel.place(x = 550, y = 600)\r\n \r\n selectSorting1 = tk.Radiobutton(canvas4,variable = var, value = 1,selectcolor = 'grey8', text=\"Shell sort\", font = (\"fixedsys\",12), bg = 'grey10', command = lambda: setTypeA(1), fg = 'white', cursor = 'hand2')\r\n selectSorting1.place (x = 700, y = 610)\r\n selectSorting2 = tk.Radiobutton(canvas4,variable = var,value = 2, selectcolor= 'grey8', text=\"Merge sort\", font = (\"fixedsys\",12), bg = 'grey10', command = lambda: setTypeA(2), fg = 'white', cursor = 'hand2')\r\n selectSorting2.place (x = 700, y = 640)\r\n selectSorting4 = tk.Radiobutton(canvas4,variable = var2, value = 1,selectcolor = 'grey8', text=\"Name sort\", font = (\"fixedsys\",12), bg = 'grey10', command = lambda: setTypeBy(1), fg = 'white', cursor = 'hand2')\r\n selectSorting4.place (x = 850, y = 610)\r\n selectSorting5 = tk.Radiobutton(canvas4,variable = var2, value = 2, selectcolor = 'grey8', text=\"Price sort\", font = (\"fixedsys\",12), bg = 'grey10', command = lambda: setTypeBy(2), fg = 'white',cursor = 'hand2')\r\n selectSorting5.place (x = 850, y = 640)\r\n selectSorting6 = tk.Radiobutton(canvas4,variable = var2,value = 3, selectcolor= 'grey8', text=\"Rating sort\", font = (\"fixedsys\",12), bg = 'grey10', command = lambda: setTypeBy(3), fg = 'white', cursor = 'hand2')\r\n selectSorting6.place (x = 850, y = 670)\r\n \r\n selectDec = tk.Checkbutton(canvas4, text = 'Decreasing', variable = self.dec, onvalue = 1, offvalue = 0, command = lambda: setTypeIn(2), font = (\"fixedsys\",12), fg = 'white', bg = 'grey10',cursor = 'hand2',selectcolor= 'grey10')\r\n selectDec.place (x = 765, y = 710)\r\n \r\n self.minutes = tk.Spinbox (canvas4, from_=0, to = 10,cursor = 'hand2', fg = 'grey8', font = (\"fixedsys\",32), width = 2, command = lambda: startButton.config(state='normal'))\r\n self.minutes.place(x = 370, y = 620)\r\n self.seconds = tk.Spinbox (canvas4, from_=0, to = 59, cursor = 'hand2', fg = 'grey8', font = (\"fixedsys\",32), width = 2, command = lambda: startButton.config(state='normal'))\r\n self.seconds.place(x = 525, y = 620)\r\n \r\n self.gamesToBuy = 
selectedGameList\r\n self.collectedGames = []\r\n \r\n \r\n def placeGamesToCollect():\r\n #creates images on canvas that will be collected\r\n self.gamesToBuy = selectedGameList\r\n self.coordList = [(133,224-32),(261,288-32),(421,96-32),(581,288-32),(709,96-32),(901,96-32),(485,192-32),(133,288-32),(325,288-32),(514,96-32),(261,160-32),(453,288-32),(613,128-32),(837,288-32),(709,288-32),(901,192-32),(805,192-32)]\r\n self.coordDict = {}\r\n self.additionalDict = {}\r\n for game in self.gamesToBuy:\r\n self.coordDict[game] = self.coordList[game-1]\r\n for game in self.gamesToBuy:\r\n \r\n if game == 1:\r\n self.inGame1 = tk.Label(canvas4, image = self.game1)\r\n self.inGame1.place(x = iGame1.X, y = iGame1.Y)\r\n self.additionalDict[game] = self.inGame1\r\n if game == 2:\r\n self.inGame2 = tk.Label(canvas4, image = self.game2)\r\n self.inGame2.place(x = iGame2.X, y = iGame2.Y)\r\n self.additionalDict[game] = self.inGame2\r\n if game == 3:\r\n self.inGame3 = tk.Label(canvas4, image = self.game3)\r\n self.inGame3.place(x = iGame3.X, y = iGame3.Y)\r\n self.additionalDict[game] =self.inGame3\r\n if game == 4:\r\n self.inGame4 = tk.Label(canvas4, image = self.game4)\r\n self.inGame4.place(x = iGame4.X, y = iGame4.Y)\r\n self.additionalDict[game] = self.inGame4\r\n if game == 5:\r\n self.inGame5 = tk.Label(canvas4, image = self.game5)\r\n self.inGame5.place(x = iGame5.X, y = iGame5.Y)\r\n self.additionalDict[game] = self.inGame5\r\n if game == 6:\r\n self.inGame6 = tk.Label(canvas4, image = self.game6)\r\n self.inGame6.place(x = iGame6.X, y = iGame6.Y)\r\n self.additionalDict[game] = self.inGame6\r\n if game == 7:\r\n self.inGame7 = tk.Label(canvas4, image = self.game7)\r\n self.inGame7.place(x = iGame7.X, y = iGame7.Y)\r\n self.additionalDict[game] = self.inGame7\r\n if game == 8:\r\n self.inGame8 = tk.Label(canvas4, image = self.game8)\r\n self.inGame8.place(x = iGame8.X, y = iGame8.Y)\r\n self.additionalDict[game] = self.inGame8\r\n if game == 9:\r\n self.inGame9 = tk.Label(canvas4, image = self.game9)\r\n self.inGame9.place(x = iGame9.X, y = iGame9.Y)\r\n self.additionalDict[game] = self.inGame9\r\n if game == 10:\r\n self.inGame10 = tk.Label(canvas4, image = self.game10)\r\n self.inGame10.place(x = iGame10.X, y = iGame10.Y)\r\n self.additionalDict[game] = self.inGame10\r\n if game == 11:\r\n self.inGame11 = tk.Label(canvas4, image = self.game11)\r\n self.inGame11.place(x = iGame11.X, y = iGame11.Y)\r\n self.additionalDict[game] = self.inGame11\r\n if game == 12:\r\n self.inGame12 = tk.Label(canvas4, image = self.game12)\r\n self.inGame12.place(x = iGame12.X, y = iGame12.Y)\r\n self.additionalDict[game] = self.inGame12\r\n if game == 13:\r\n self.inGame13 = tk.Label(canvas4, image = self.game13)\r\n self.inGame13.place(x = iGame13.X, y = iGame13.Y)\r\n self.additionalDict[game] = self.inGame13\r\n if game == 14:\r\n self.inGame14 = tk.Label(canvas4, image = self.game14)\r\n self.inGame14.place(x = iGame14.X, y =iGame14.Y)\r\n self.additionalDict[game] = self.inGame14\r\n if game == 15:\r\n self.inGame15 = tk.Label(canvas4, image = self.game15)\r\n self.inGame15.place(x = iGame15.X, y = iGame15.Y)\r\n self.additionalDict[game] = self.inGame15\r\n if game == 16:\r\n self.inGame16 = tk.Label(canvas4, image = self.game16)\r\n self.inGame16.place(x = iGame16.X, y = iGame16.Y)\r\n self.additionalDict[game] = self.inGame16\r\n if game == 17:\r\n self.inGame17 = tk.Label(canvas4, image = self.game17)\r\n self.inGame17.place(x = iGame17.X, y = iGame17.Y)\r\n self.additionalDict[game] = 
self.inGame17\r\n\r\n            self.player = tk.Label(canvas4, image = self.playerIcon)\r\n\r\n        def shellSort(increasing,name, price):\r\n\r\n            if increasing == True:\r\n                inc = len(price) // 2\r\n                while inc:\r\n                    for i in range(len(price)):\r\n                        j = i\r\n                        temp = price[i]\r\n                        temp2= name[i]\r\n                        #add here if need to be sorted\r\n                        while j >= inc and price[j-inc] > temp:\r\n                            price[j] = price[j - inc]\r\n                            name[j] = name[j- inc]\r\n                            #add here if need to be sorted\r\n                            j -= inc\r\n                        price[j] = temp\r\n                        name[j] = temp2\r\n                        #add here if need to be sorted\r\n                    inc = inc//2 if inc//2 else (0 if inc==1 else 1)\r\n            else:\r\n\r\n                inc = len(price) // 2\r\n                while inc:\r\n                    for i in range(len(price)):\r\n                        j = i\r\n                        temp = price[i]\r\n                        temp2= name[i]\r\n                        #add here if need to be sorted\r\n                        while j >= inc and price[j-inc] < temp:\r\n                            price[j] = price[j - inc]\r\n                            name[j] = name[j- inc]\r\n                            #add here if need to be sorted\r\n                            j -= inc\r\n                        price[j] = temp\r\n                        name[j] = temp2\r\n                        #add here if need to be sorted\r\n                    inc = inc//2 if inc//2 else (0 if inc==1 else 1)\r\n\r\n\r\n        def _ItComparator(x ,y):return x>y\r\n\r\n        def _ItComparatorDec(x ,y):return x<y\r\n\r\n        def mergeSort(increasing,price,name,comparator):\r\n            if len(price)>1:\r\n                mid = int(len(price)//2)\r\n                lefthalf = price[:mid]\r\n                righthalf = price[mid:]\r\n                lefthalf2 = name[:mid]\r\n                righthalf2 = name[mid:]\r\n                #add here\r\n\r\n                mergeSort(increasing,lefthalf,lefthalf2, comparator)#add here\r\n                mergeSort(increasing,righthalf,righthalf2, comparator)#add here\r\n\r\n                i=0\r\n                lefthalfi=0\r\n                righthalfi=0\r\n                lefthalf2i=0\r\n                righthalf2i=0\r\n                while True:\r\n                    if lefthalfi>=len(lefthalf):price[i:]= righthalf[righthalfi:]; break\r\n                    if righthalfi >= len(righthalf):price[i:]= lefthalf[lefthalfi:]; break\r\n                    if comparator(lefthalf[lefthalfi],righthalf[righthalfi]):\r\n                        price[i]=lefthalf[lefthalfi]\r\n                        lefthalfi += 1\r\n                    else:\r\n                        price[i]=righthalf[righthalfi]\r\n                        righthalfi+=1\r\n                    i+=1\r\n            \r\n\r\n        \r\n        def setTypeIn(x): #sorting by : increasing / decreasing order\r\n            self.sortIn = x\r\n        \r\n        def setTypeA(x): #sorting using shell / merge algorithm\r\n            self.sortA = x\r\n\r\n        def setTypeBy(x):# sorting by : name / price / rating \r\n            self.sortBy = x\r\n\r\n        def sortCollectedGames(alist,nameList, priceList, ratingList):\r\n            localList = []\r\n            \r\n            if self.sortBy == 1:#name\r\n                if self.sortIn == 1:#increasing\r\n                    if self.sortA == 1:#shellsort\r\n                        shellSort(True,alist, nameList)\r\n                        localList = alist\r\n                    \r\n                    elif self.sortA == 2:#mergesort\r\n                        mergeSort(True,alist,nameList,comparator = _ItComparator)\r\n                        localList = alist\r\n\r\n                elif self.sortIn ==2:#decreasing\r\n                    if self.sortA == 1:#shellsort\r\n                        shellSort(False,alist, nameList)\r\n                        localList = alist\r\n                    \r\n                    elif self.sortA == 2:#mergesort\r\n                        mergeSort(False,alist,nameList,comparator = _ItComparatorDec)\r\n                        localList = alist\r\n\r\n            \r\n            elif self.sortBy == 2:#price\r\n                if self.sortIn == 1:#increasing\r\n                    if self.sortA == 1:#shellsort\r\n                        shellSort(True,alist, priceList)\r\n                        localList = alist\r\n                    \r\n                    elif self.sortA == 2:#mergesort\r\n                        mergeSort(True,alist,priceList,comparator = _ItComparator)\r\n                        localList = alist\r\n\r\n                elif self.sortIn ==2:#decreasing\r\n                    if self.sortA == 1:#shellsort\r\n                        shellSort(False,alist, priceList)\r\n                        localList = alist\r\n                    \r\n                    elif self.sortA == 2:#mergesort\r\n                        mergeSort(False,alist,priceList,comparator = _ItComparatorDec)\r\n                        localList = alist\r\n\r\n            \r\n            elif self.sortBy == 3:\r\n                if self.sortIn == 1:#increasing\r\n                    if self.sortA == 1:#shellsort\r\n                        shellSort(True,alist, ratingList)\r\n                        localList = alist\r\n                    \r\n                    elif self.sortA == 2:#mergesort\r\n                        
mergeSort(True,alist,ratingList,comparator = _ItComparator)\r\n                        localList = alist\r\n\r\n                elif self.sortIn ==2:#decreasing\r\n                    if self.sortA == 1:#shellsort\r\n                        shellSort(False,alist, ratingList)\r\n                        localList = alist\r\n                    \r\n                    elif self.sortA == 2:#mergesort\r\n                        mergeSort(False,alist,ratingList,comparator = _ItComparatorDec)\r\n                        localList = alist\r\n            \r\n            localx = 150\r\n            localy = 500\r\n\r\n            for i in localList:\r\n                img = pictureDict[i]\r\n                canvas4.create_image(localx,localy, image = img)\r\n                localx += 120\r\n                canvas4.update()\r\n            \r\n            \r\n\r\n        \r\n        def setTime():\r\n            global sec\r\n            m = int(self.minutes.get())\r\n            print(str(m))\r\n            s = int(self.seconds.get())\r\n            print(str(s))\r\n            sec = m*60+s\r\n\r\n        def showTime():\r\n            global sec\r\n            if sec >0 :\r\n                sec-=1\r\n                timeLabel['text'] = sec\r\n            else:\r\n                sec = 1\r\n            \r\n            canvas4.create_text(505,665,text = \"Time Left: s\" ,font = (\"fixedsys\",24),fill = 'white',justify= 'center')\r\n\r\n        def walk(path):\r\n            #draws player on canvas\r\n            i= 0\r\n            while i < len(path) and sec>0 :\r\n                Y,X= path[i]\r\n                self.player.place(x= X*32+101,y = Y*32 +32)\r\n                canvas4.update()\r\n                gamer.updatePos(Y,X)\r\n                sleep(0.5)\r\n                i+=1\r\n                canvas4.after(500, showTime)\r\n\r\n\r\n        def movePlayer():\r\n            global sec\r\n            nameList = []\r\n            priceList = []\r\n            ratingList = []\r\n            self.rating = 0\r\n            length = len(self.gamesToBuy)\r\n            if self.budget >0:\r\n                for i in range(length):\r\n                    if gamer.startPos == (1,0):\r\n                        toFind = min(self.coordDict, key = self.coordDict.get)\r\n                    else:\r\n                        toFind = max(self.coordDict, key = self.coordDict.get)\r\n                    \r\n                    fx, fy = self.coordDict[toFind]\r\n                    self.coordDict.pop(toFind)\r\n                    fx = (fx - 101)//32\r\n                    fy = (fy - 32)//32\r\n                    path = getPath(gamer.xpos,gamer.ypos, fx,fy)\r\n                    walk(path)\r\n                    item = itemDict[toFind]\r\n                    self.budget = self.budget - item.price\r\n                    andrei = randint(1,100)\r\n                    if andrei>=1 and andrei<=40:\r\n                        fiftyfifty = randint(1,2)\r\n                        if fiftyfifty == 1:\r\n                            sec += 10\r\n                            print('Time awarded')\r\n                        elif fiftyfifty == 2:\r\n                            self.budget += 5\r\n                            print('Cash awarded')\r\n                    else:\r\n                        print('Unlucky')\r\n                    \r\n                    if self.budget >=0:\r\n                        self.rating += item.rating\r\n                        nameList.append(item.name)\r\n                        priceList.append(item.price)\r\n                        ratingList.append(item.rating)\r\n                        self.additionalDict[toFind].destroy()\r\n                        self.collectedGames.append(toFind)\r\n                    else:\r\n                        self.budget += item.price\r\n                        continue\r\n\r\n            \r\n            sortCollectedGames(self.collectedGames, nameList, priceList, ratingList)\r\n            collectedStatus['text'] = 'Collected: ' + str(len(self.collectedGames)) + ' / '+ str(len(self.gamesToBuy))\r\n            budgetStatus['text'] = 'Budget: '+ str(self.budget)\r\n            ratingStatus['text'] = 'Avg Rating: '+ str(round(int(self.rating)/len(self.collectedGames),2))\r\n            \r\n            if len(self.collectedGames) != len(self.gamesToBuy):\r\n                canvas4.create_text(500, 380, text = 'GAME OVER', font = ('fixedsys',52), fill = 'red', justify= 'center')\r\n                startButton.destroy()\r\n            else:\r\n                canvas4.create_text(500, 380, text = 'CONGRATULATIONS!', font = ('fixedsys',52), fill = 'gold', justify= 'center')\r\n                startButton.destroy()\r\n            \r\n            resultButton = tk.Button(canvas4, text = \"EXIT GAME\",font = (\"fixedsys\",18), command = lambda: combine_funcs(sound(),app.destroy() ), cursor = 'hand2', borderwidth= 3,foreground= \"white\", bg = \"firebrick4\", activebackground= \"firebrick3\", activeforeground= \"white\")\r\n            resultButton.place(x = 512, y = 694, height = 57, width = 292, anchor = 'n')\r\n\r\n\r\n\r\n        \r\n\r\n        collectedStatus = tk.Label(canvas4, text =( 'Collected: ' + 
str(len(self.collectedGames)) + ' / '+ str(len(self.gamesToBuy))),font = (\"fixedsys\",24),activebackground='grey10', activeforeground='white',background= 'grey10',foreground= 'white')\r\n collectedStatus.place(x =50,y= 610)\r\n budgetStatus = tk.Label(canvas4, text = ('Budget: '+ str(self.budget)),font = (\"fixedsys\",24),activebackground='grey10', activeforeground='white',background= 'grey10',foreground= 'white')\r\n budgetStatus.place(x =50,y= 650)\r\n ratingStatus = tk.Label(canvas4, text = ('Avg Rating: '+ str(0)),font = (\"fixedsys\",24),activebackground='grey10', activeforeground='white',background= 'grey10',foreground= 'white')\r\n ratingStatus.place(x =50,y= 690)\r\n timeLabel = tk.Label(canvas4, font = (\"fixedsys\",48),activebackground='grey10', activeforeground='white',background= 'grey10', foreground= 'white',justify= 'center')\r\n timeLabel.place(x = 545, y = 610)\r\n#############################################################################################################################################################################\r\n\r\nclass Player():\r\n '''keeps player image and position'''\r\n def __init__(self, image, startingPosition = (0,0)):\r\n self.image = image\r\n self.startPos = startingPosition\r\n self.xpos = startingPosition[0]\r\n self.ypos = startingPosition[1]\r\n\r\n def updatePos(self, xnew, ynew):\r\n self.xpos = xnew\r\n self.ypos = ynew\r\n\r\n\r\n \r\n\r\n\r\n##############################################################################################################################################\r\n\r\ngamer = Player('gamer.jpg')\r\niGame1 = Game(1, 'BattleField', 'Action', 23, 5,'game1.gif', 'lbl1.png', 133, 192)\r\niGame2 = Game(2,'GTA 5', 'Action', 40, 5, 'game2.png','lbl2.png',261,256)\r\niGame3 = Game(3,'XCOM2', 'Action', 20, 4, 'game3.png','lbl3.png', 421,64)\r\niGame4 = Game(4,'Call Of Duty','Action', 10, 3,'game4.gif','lbl4.png', 581, 256)\r\niGame5 = Game(5,'Sonic','Adventure', 4, 3,'game5.png','lbl5.png', 709, 64)\r\niGame6 = Game(6,'DayZ', 'Adventure',20, 2, 'game6.png','lbl6.png', 901,64)\r\niGame7 = Game(7,'Witcher 3', 'Adventure',50, 4, 'game7.gif','lbl7.png',485,160)\r\niGame8 = Game(8, 'Fall Out 4','Adventure', 40, 5, 'game8.png','lbl8.png', 133,256)\r\niGame9 = Game(9, 'Limbo' ,'Indie',3,4,'game9.png','lbl9.png',325, 256)\r\niGame10 = Game(10,'Undertale', 'Indie', 5, 4, 'game10.png','lbl10.png', 482,64)\r\niGame11 = Game(11,'To The Moon','Indie', 2, 3, 'game11.png','lbl11.png',261,160)\r\niGame12 = Game(12,'Little Big Planet','Puzzle', 6, 2 , 'game12.png','lbl12.png',453,256)\r\niGame13 = Game(13,'Infra', 'Puzzle',11,2, 'game13.png','lbl13.png',613,96)\r\niGame14 = Game(14,'Puzzle Stages','Puzzle', 1,1,'game14.png','lbl14.png',837,256)\r\niGame15 = Game(15,'The Crew', 'Sports',13, 3, 'game15.png','lbl15.png', 709,256)\r\niGame16 = Game(16,'Fifa 15','Sports', 25, 4, 'game16.gif','lbl16.png',901, 160)\r\niGame17 = Game(17,'Need for Speed','Sports', 5, 4, 'game17.gif','lbl17.png', 805,160)\r\n\r\n\r\napp = GameShop()\r\napp.mainloop()\r\n \r\n","sub_path":"main code INDIVIDUAL-ANDREI.py","file_name":"main code INDIVIDUAL-ANDREI.py","file_ext":"py","file_size_in_byte":59815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"397586548","text":"import collections\n\nSnapshot = collections.namedtuple(\"Snapshot\",\n \"tick, user_messages, game_events, world,\"\n \" modifiers\")\n\nTICKS_PER_SECOND = 30\n\nclass StreamBinding(object):\n \"\"\"\n The StreamBinding class is 
Tarrasque's metaphor for the replay. Every\n    Tarrasque entity class has a reference to an instance of this\n    class, and when the tick of the instance changes, the data returned by\n    those classes changes. This makes it easy to handle complex object graphs\n    without explicitly needing to pass the Skadi demo object around.\n\n    .. note:: Where methods on this class take absolute tick values (i.e. the\n        ``start`` and ``end`` arguments to :meth:`iter_ticks`), special string\n        arguments may be passed. These are:\n\n        * ``\"start\"`` - The start of the replay\n        * ``\"draft\"`` - The start of the draft\n        * ``\"pregame\"`` - The end of the draft phase\n        * ``\"game\"`` - The time when the game clock hits 0\n        * ``\"postgame\"`` - The time the ancient is destroyed\n        * ``\"end\"`` - The last tick in the replay\n\n        These values will not be 100% accurate, but should be good +-50 ticks\n    \"\"\"\n\n    @property\n    def user_messages(self):\n        \"\"\"\n        The user messages for the current tick.\n        \"\"\"\n        return self._user_messages\n\n    @property\n    def game_events(self):\n        \"\"\"\n        The game events in the current tick.\n        \"\"\"\n        from .gameevents import GameEvent\n\n        events = []\n        for data in self._game_events:\n            events.append(GameEvent(stream_binding=self, data=data))\n        return events\n\n    # Just another layer of indirection\n    # These are properties for autodoc reasons mostly\n    @property\n    def world(self):\n        \"\"\"\n        The Skadi world object for the current tick.\n        \"\"\"\n        return self._snapshot.world\n\n    @property\n    def tick(self):\n        \"\"\"\n        The current tick.\n        \"\"\"\n        return self._snapshot.tick\n\n    @property\n    def demo(self):\n        \"\"\"\n        The Skadi demo object that the binding is reading from.\n        \"\"\"\n        return self._demo\n\n    @property\n    def modifiers(self):\n        \"\"\"\n        The Skadi modifiers object for the tick.\n        \"\"\"\n        return self._snapshot.modifiers\n\n    @property\n    def string_tables(self):\n        \"\"\"\n        The string tables provided by Skadi.\n        \"\"\"\n        return self._stream.string_tables\n\n    @property\n    def prologue(self):\n        \"\"\"\n        The prologue of the replay.\n        \"\"\"\n        return self._stream.prologue\n\n    def __init__(self, demo, start_tick=None, start_time=None):\n        self._demo = demo\n        self._user_messages = []\n        self._game_events = []\n\n        # Do this to bootstrap go_to_tick(\"end\")\n        self._state_change_ticks = {\n            \"end\": self.demo.file_info.playback_ticks - 2,\n        }\n        self.go_to_tick(\"end\")\n\n        self._state_change_ticks = {\n            \"start\": 0,\n            \"draft\": self._time_to_tick(self.info.draft_start_time),\n            \"pregame\": self._time_to_tick(self.info.pregame_start_time),\n            \"game\": self._time_to_tick(self.info.game_start_time),\n            \"postgame\": self._time_to_tick(self.info.game_end_time),\n            \"end\": self.demo.file_info.playback_ticks - 2\n        }\n        if start_tick is not None:\n            self.go_to_tick(start_tick)\n        elif start_time is not None:\n            self.go_to_time(start_time)\n        else:\n            self.go_to_tick(\"game\")\n\n    def iter_ticks(self, start=None, end=None, step=1):\n        \"\"\"\n        A generator that iterates through the demo's ticks and updates the\n        :class:`StreamBinding` to that tick. Yields the current tick.\n\n        The start parameter defines the tick to iterate from, and if not set, the\n        current tick will be used instead.\n\n        The end parameter defines the point to stop iterating; if not set,\n        the iteration will continue until the end of the replay.\n\n        The step parameter is the number of ticks to consume before yielding\n        the tick; the default of one means that every tick will be yielded. 
Do\n    not assume that the step is precise; the gap between two ticks will\n    always be larger than the step, but usually not equal to it.\n    \"\"\"\n\n    if start is None:\n      start = self.tick\n    elif start in self._state_change_ticks:\n      start = self._state_change_ticks[start]\n\n    if end in self._state_change_ticks:\n      end = self._state_change_ticks[end]\n\n    if end is not None:\n      assert start < end\n\n    if start > self.demo.file_info.playback_ticks or start < 0:\n      raise IndexError(\"Tick {} out of range\".format(start))\n\n    self._user_messages = []\n    self._game_events = []\n\n    last_tick = start - step - 1\n    self._stream = self.demo.stream(tick=start)\n    for snapshot in self._stream:\n      self._snapshot = Snapshot(*snapshot)\n\n      if end is not None and self.tick >= end:\n        break\n\n      self._user_messages.extend(self._snapshot.user_messages)\n      self._game_events.extend(self._snapshot.game_events)\n\n      if self.tick - last_tick < step:\n        continue\n      else:\n        last_tick = self.tick\n\n      yield self.tick\n\n      self._user_messages = []\n      self._game_events = []\n\n  def go_to_tick(self, tick):\n    \"\"\"\n    Moves to the given tick, or the nearest tick after it. Returns the tick\n    moved to.\n    \"\"\"\n    if tick in self._state_change_ticks:\n      tick = self._state_change_ticks[tick]\n\n    if tick > self.demo.file_info.playback_ticks or tick < 0:\n      raise IndexError(\"Tick {} out of range\".format(tick))\n\n    self._stream = self.demo.stream(tick=tick)\n    self._snapshot = Snapshot(*next(iter(self._stream)))\n    self._user_messages = self._snapshot.user_messages[:]\n    self._game_events = self._snapshot.game_events[:]\n\n    return self.tick\n\n  def _time_to_tick(self, time):\n    \"\"\"\n    Converts a time to a tick.\n    \"\"\"\n    current_time = self.info.game_time\n    return int(self.tick + (time - current_time) * TICKS_PER_SECOND) - 2\n\n  def go_to_time(self, time):\n    \"\"\"\n    Moves to the tick with the given game time. Could potentially overshoot,\n    but not by too much. Will not undershoot.\n\n    Returns the tick it has moved to.\n    \"\"\"\n    target_tick = self._time_to_tick(time)\n    for tick in self.iter_ticks(start=target_tick):\n      if self.info.game_time > time:\n        return tick\n\n  def __iter__(self):\n    return self.iter_ticks()\n\n  @property\n  def players(self):\n    \"\"\"\n    A list of :class:`Player` objects, one for each player in the game.\n    This excludes spectators and other non-hero-controlling players.\n    \"\"\"\n    from .
import Player\n\n    return [p for p in Player.get_all(self) if\n            p.index != None and p.team != \"spectator\"]\n\n  @property\n  def info(self):\n    \"\"\"\n    The :class:`GameInfo` object for the replay.\n    \"\"\"\n    from .gameinfo import GameInfo\n    info = GameInfo.get_all(self)\n    assert len(info) == 1\n    return info[0]\n\n\n  @staticmethod\n  def from_file(filename, *args, **kwargs):\n    \"\"\"\n    Loads the demo from the filename, and then initialises the\n    :class:`StreamBinding` with it, along with any other passed arguments.\n    \"\"\"\n    import skadi.demo\n\n    demo = skadi.demo.construct(filename)\n\n    return StreamBinding(demo, *args, **kwargs)","sub_path":"tarrasque/binding.py","file_name":"binding.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"464165581","text":"##DATA Class\r\n##By: Markus Garbiso\r\n##Date Updated: May 6, 2017 by Peter Consalvi\r\n##Date Updated: May 17, 2017 by Matthew Martin\r\n\r\nfrom functions import addUncert #Used to include uncertainty values\r\n\r\n\r\n    \r\n    \r\n\r\n##This function is used in the main __init__ function to parse through strings in each line of the file in order to find the value of the spin that corresponds to the energy of that line of that file.\r\ndef numberSlashBool(lineString,posTracker,trackChar):\r\n    if(lineString.index(trackChar)-posTracker>-1):##This condition pertains to all characters that are allowed in a\r\n                                                  ##given spin\r\n        if((lineString[lineString.index(trackChar)-posTracker]=='1' or lineString[lineString.index(trackChar)-posTracker]=='2' or lineString[lineString.index(trackChar)-posTracker]=='3' or lineString[lineString.index(trackChar)-posTracker]=='4' or lineString[lineString.index(trackChar)-posTracker]=='5' or lineString[lineString.index(trackChar)-posTracker]=='6' or lineString[lineString.index(trackChar)-posTracker]=='7' or lineString[lineString.index(trackChar)-posTracker]=='8' or lineString[lineString.index(trackChar)-posTracker]=='9' or lineString[lineString.index(trackChar)-posTracker]=='0' or lineString[lineString.index(trackChar)-posTracker]=='/'or lineString[lineString.index(trackChar)-posTracker]=='-'or lineString[lineString.index(trackChar)-posTracker]=='+'or lineString[lineString.index(trackChar)-posTracker]==')')):\r\n            return True\r\n    else:##This condition is reached when the loop using this function reaches a character that could not be in a spin string.\r\n        return False\r\n\r\ndef hasNumbers(inputString):\r\n    return any(char.isdigit() for char in inputString)\r\n\r\nclass data:##This is the main data class\r\n    def __init__(self,ENSDF,ISOvar,option = 'EoL',energyLimit=5000,maxSpin = 9): ##Initiator to take only lines that are determined by the filt (short for filter)\r\n\r\n        ##Initialize Parameters\r\n        filtDataSet = []\r\n        self.data = []\r\n        self.name = ISOvar\r\n        self.op=option\r\n\r\n        ##If there is a data file that matches the user's input then this program will try to extract the data.\r\n        self.f = open(\"Data/\"+str(ENSDF),'rU')\r\n        ##Each line of the file is split into a list so the code can parse through each line easier\r\n\r\n        \r\n        #Initializes a to be apple, a string that should not appear in the data file\r\n        #This is used to stop the code after it goes through the first block\r\n        a = 'apple'\r\n        linecount=0 #linecount can be used to locate problem-causing lines in the ensdf files\r\n        firstline = \"NOTHING\"\r\n        for line in self.f:\r\n            linecount=linecount+1\r\n            line = line.split()\r\n            #print('|',line,'|')\r\n\r\n            ##Break
function used to stop code after the evaluated nuclear data\\\r\n            if len(line) == 0 and \\\r\n               a.lower() == b.lower():\r\n                break\r\n\r\n            ##Names for each entry which are used to filter which lines are used \r\n            if(len(line) >= 3): ##This makes sure not to take any lines that are empty which will cause an error down the line.\r\n                a = line[0] ##The first entry of each line, usually the filename. a will be used to double check to see if that line is valid.\r\n                c = line[1] ##This second entry contains what type of data and information is in the list line. The ENSDF website has a complete list of\r\n                            ##each line type. I used L since L lines have experimental data.\r\n\r\n            ##This resets a to be apple every time there is a blank line\r\n            ##This ensures that the first block wanted will be read rather than the first block in the file\r\n            if(len(line) == 0):\r\n                a = 'apple'\r\n            \r\n            b = str(ISOvar)\r\n            ##This part of the code contains parsing algorithms to find wanted values, depending on what option is used.\r\n            ##Markus found that experimental data are on lines where c equals \"L\"\r\n\r\n\r\n#This massive series of nested loops is what extracts the data of interest from the monster ensdf data files according to the user's inputs. \r\n            if(option == 'EoL' and a.lower() == b.lower() and c == \"L\"): \r\n                a = ''\r\n                b = ''\r\n                #print (linecount) #XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\r\n                #print(\"[2]: \",line[2])\r\n                #print(\"[3]: \",line[3])\r\n                #print('\\n')\r\n\r\n                for i in range(3,len(line)): ##This loop will parse through each entry of line to find the spin corresponding to the energy of that spin.\r\n                    ##Initialize the string used for the spin value and a temp string variable to take a string that has a spin, but may have\r\n                    ##extra characters due to the inconsistent file structure of ENSDF.\r\n                    spinStr=''\r\n                    unfilSpinStr=str(line[i])\r\n\r\n                    \r\n                    ##This if statement stops the code if the first character in the spin state is a J, which does not correspond to actual spin states\r\n                    ##This corrects the 65Zn plotting 2+ states issue\r\n                    if str(line[i][0]) == \"J\":\r\n                        break\r\n                    \r\n                    oddANumberSingleDigitCheck=True\r\n                    if(len(unfilSpinStr)>1 and not('X' in unfilSpinStr)and not('x' in unfilSpinStr) and not('Y' in unfilSpinStr)and not('y' in unfilSpinStr)):##Rarely, an entry will have pesky x and y characters which will break the program, so this if statement will not allow those statements.
Also strings with lenths of 1 will break the program's algorithm.\r\n for z in range(unfilSpinStr.count('+')+unfilSpinStr.count('-')):\r\n if(':' in unfilSpinStr):\r\n try:\r\n if('/' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('/')]\r\n elif('+' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('+')]\r\n elif( '-' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('-')]\r\n ##print float(line[2])<=energyLimit,line[2]\r\n if(float(line[2])<=energyLimit):\r\n ##print unfilSpinStr\r\n upper=int(unfilSpinStr[unfilSpinStr.index(':')+1])\r\n if (unfilSpinStr[unfilSpinStr.index(':')-1]!='-' and unfilSpinStr[unfilSpinStr.index(':')-1]!='+'):\r\n lower=int(unfilSpinStr[unfilSpinStr.index(':')-1])\r\n else:\r\n lower=int(unfilSpinStr[unfilSpinStr.index(':')-2])\r\n\r\n if('-' in unfilSpinStr):\r\n for i in range(lower,upper+1):\r\n ##print [float(line[2]),str(i)+'-']\r\n filtDataSet.append([float(line[2]),\"(\"+str(i)+'-'+\")\"])\r\n #include uncertainty value\r\n addUncert(filtDataSet,line)\r\n elif('+' in unfilSpinStr):\r\n for i in range(lower,upper+1):\r\n ##print [float(line[2]),str(i)+'+']\r\n filtDataSet.append([float(line[2]),\"(\"+str(i)+'+'+\")\"])\r\n #Include uncertainty value\r\n addUncert(filtDataSet,line)\r\n except:\r\n print(\"Error with \" + str(line[2])+ \" \" +unfilSpinStr + \" at loop 0a\") \r\n elif('+' in unfilSpinStr): ##For strings will + only but like the first case above for + and -.\r\n posTracker=0\r\n while(numberSlashBool(unfilSpinStr,posTracker,'+')):\r\n posTracker=posTracker+1\r\n for j in range(unfilSpinStr.index('+')-posTracker+1,unfilSpinStr.index('+')+1):\r\n spinStr=spinStr+unfilSpinStr[j]\r\n ##print(spinStr) \r\n if(('(' in unfilSpinStr or ')' in unfilSpinStr) and len(spinStr.replace(\"(\",\"\").replace(\")\",\"\"))>1):\r\n unfilSpinStr.replace(spinStr,\"\")\r\n spinStr=\"(\"+spinStr.replace(\"(\",\"\").replace(\")\",\"\")+\")\".replace(\"(\",\"\").replace(\")\",\"\")+\")\"\r\n para = True\r\n else:\r\n unfilSpinStr.replace(spinStr,\"\")\r\n para = False\r\n \r\n ##print(spinStr + \"test\")\r\n if(spinStr.replace(\"+\",\"\").replace(\"-\",\"\").replace(\"(\",\"\").replace(\")\",\"\") and not('+' in spinStr and '-' in spinStr)):\r\n saneString=spinStr.replace(\"+\",\"\").replace(\"-\",\"\").replace(\"(\",\"\").replace(\")\",\"\")\r\n if(eval(saneString)>maxSpin):\r\n try:\r\n if('/' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('/')]\r\n elif('+' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('+')]\r\n elif( '-' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('-')]\r\n if(float(line[2])<=energyLimit and '/' in spinStr):\r\n if(eval(spinStr[:spinStr.index('/')].replace(\"(\",\"\"))<10 or oddANumberSingleDigitCheck):\r\n if para:\r\n filtDataSet.append([float(line[2]),\"(\"+spinStr[-5]+spinStr[-4]+spinStr[-3]+spinStr[-2]+\")\"])\r\n else:\r\n filtDataSet.append([float(line[2]),spinStr[-4]+spinStr[-3]+spinStr[-2]+spinStr[-1]])\r\n else:\r\n if para:\r\n filtDataSet.append([float(line[2]),\"(\"+spinStr[-6]+spinStr[-5]+spinStr[-4]+spinStr[-3]+spinStr[-2]+\")\"])\r\n else:\r\n filtDataSet.append([float(line[2]),spinStr[-5]+spinStr[-4]+spinStr[-3]+spinStr[-2]+spinStr[-1]])\r\n elif(float(line[2])<=energyLimit and hasNumbers(spinStr[-2]+spinStr[-1])):\r\n if para:\r\n filtDataSet.append([float(line[2]),\"(\"+spinStr[-2]+spinStr[-1]+\")\"])\r\n else:\r\n 
filtDataSet.append([float(line[2]),spinStr[-2]+spinStr[-1]])\r\n except:\r\n print(\"Error with \" + str(line[2])+ \" \" +spinStr + \" at loop 2a\")\r\n else:\r\n try:\r\n if('/' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('/')]\r\n elif('+' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('+')]\r\n elif( '-' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('-')]\r\n if(float(line[2])<=energyLimit):\r\n filtDataSet.append([float(line[2]),spinStr])\r\n if('/' in spinStr):\r\n if(eval(spinStr[:spinStr.index('/')].replace(\"(\",\"\"))<10):\r\n oddANumberSingleDigitCheck=True\r\n else:\r\n oddANumberSingleDigitCheck=False\r\n except:\r\n print(\"Error with \" + str(line[2])+ \" \" +str(spinStr) + \" at loop 2b\")\r\n\r\n spinStr = ''\r\n #include uncertainty value\r\n addUncert(filtDataSet,line)\r\n elif('-' in unfilSpinStr):##For strings will - only but like the first case above for + and -.\r\n posTracker=0\r\n while(numberSlashBool(unfilSpinStr,posTracker,'-')):\r\n posTracker=posTracker+1\r\n for j in range(unfilSpinStr.index('-')-posTracker+1,unfilSpinStr.index('-')+1):\r\n spinStr=spinStr+unfilSpinStr[j]\r\n \r\n if(('(' in unfilSpinStr or ')' in unfilSpinStr) and len(spinStr.replace(\"(\",\"\").replace(\")\",\"\"))>1):\r\n unfilSpinStr.replace(spinStr,\"\")\r\n spinStr=\"(\"+spinStr.replace(\"(\",\"\").replace(\")\",\"\")+\")\".replace(\"(\",\"\").replace(\")\",\"\")+\")\"\r\n para = True\r\n else:\r\n unfilSpinStr.replace(spinStr,\"\")\r\n para = False\r\n \r\n if(spinStr.replace(\"+\",\"\").replace(\"-\",\"\").replace(\"(\",\"\").replace(\")\",\"\") and not('+' in spinStr and '-' in spinStr)):\r\n saneString=spinStr.replace(\"+\",\"\").replace(\"-\",\"\").replace(\"(\",\"\").replace(\")\",\"\")\r\n if(eval(saneString)>maxSpin):\r\n try:\r\n if('/' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('/')]\r\n elif('+' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('+')]\r\n elif( '-' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('-')]\r\n if(float(line[2])<=energyLimit and '/' in spinStr):\r\n if(eval(spinStr[:spinStr.index('/')].replace(\"(\",\"\"))<10 or oddANumberSingleDigitCheck):\r\n if para:\r\n filtDataSet.append([float(line[2]),\"(\"+spinStr[-5]+spinStr[-4]+spinStr[-3]+spinStr[-2]+\")\"])\r\n else:\r\n filtDataSet.append([float(line[2]),spinStr[-4]+spinStr[-3]+spinStr[-2]+spinStr[-1]])\r\n else:\r\n if para:\r\n filtDataSet.append([float(line[2]),\"(\"+spinStr[-6]+spinStr[-5]+spinStr[-4]+spinStr[-3]+spinStr[-2]+\")\"])\r\n else:\r\n filtDataSet.append([float(line[2]),spinStr[-5]+spinStr[-4]+spinStr[-3]+spinStr[-2]+spinStr[-1]])\r\n elif(float(line[2])<=energyLimit and hasNumbers(spinStr[-2]+spinStr[-1])):\r\n if para:\r\n filtDataSet.append([float(line[2]),\"(\"+spinStr[-2]+spinStr[-1]+\")\"])\r\n else:\r\n filtDataSet.append([float(line[2]),spinStr[-2]+spinStr[-1]])\r\n except:\r\n print(\"Error with \" + str(line[2])+ \" \" +spinStr + \" at loop 3a\")\r\n else:\r\n try:\r\n if('/' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('/')]\r\n elif('+' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('+')]\r\n elif( '-' in str(line[2])):\r\n line[2]=str(line[2])\r\n line[2]=line[2][:line[2].find('-')]\r\n if(float(line[2])<=energyLimit):\r\n filtDataSet.append([float(line[2]),spinStr])\r\n if('/' in spinStr):\r\n 
if(eval(spinStr[:spinStr.index('/')].replace(\"(\",\"\"))<10):\r\n                                                        oddANumberSingleDigitCheck=True\r\n                                                    else:\r\n                                                        oddANumberSingleDigitCheck=False\r\n                                        except:\r\n                                            print(\"Error with \" + str(line[2])+ \" \" +str(spinStr) + \" at loop 3b\")\r\n                                spinStr = ''\r\n                                #include uncertainty value\r\n                                addUncert(filtDataSet,line)\r\n            \r\n        ##This if loop rids the data list of redundant entries and saves data into\r\n        ##the self.data list\r\n        \r\n        \r\n        for i in filtDataSet:\r\n            if i not in self.data:\r\n                self.data.append(i)##Saves data into the self.data list\r\n        self.f.close()##Closes the ENSDF file\r\n        \r\n\r\n\r\n    ##This function exports the data in the class into a data file. The output file\r\n    ##is sent to the Output folder\r\n    def export(self,fExtOption = \".txt\",extraTitleText=\"\"):\r\n        if(self.op == 'EoL'):##for energies and spins\r\n            if(fExtOption==\".dat\"or fExtOption==\"_Fil.dat\"):##To make data files for use in gnuplot and plt file.\r\n                fileName=str(self.name)+extraTitleText+fExtOption##creates filename\r\n                fileName=\"Output/\" + \"gnuPlot/\"+fileName.replace('/','_')\r\n                datFile = open(fileName,'wb')##Creates a file with a valid file name.\r\n                for i in range(len(self.data)):##Write the line for each entry and each entry is delimited by a ,\r\n                    datFile.write(str.encode(str(self.name)+','+str(self.data[i][0])+','+str(self.data[i][1])+','+str(self.data[i][2])+'\\n'))\r\n                    ###.dat is used for preparing data for gnuplot\r\n            else:##This case is like the code above but for every other file type and is delimited by tabs.\r\n                fileName=str(self.name)+extraTitleText+fExtOption\r\n                fileName=\"Output/\" + \"gnuPlot/\"+fileName.replace('/','_')\r\n                datFile = open(fileName,'wb')##Creates a file with a valid file name.\r\n                exportFile = open(\"Output/\"+str(self.name)+extraTitleText+fExtOption,'wb')\r\n                for i in range(len(self.data)):\r\n                    exportFile.write(str.encode(str(self.name)+'\\t'+str(self.data[i][0])+'\\t'+str(self.data[i][1])+'\\n'))\r\n                    ##.txt or any other file extension is used for preparing data for generic text file. \r\n        else:##Writes each entry of our datalist with one entry per line.\r\n            exportFile = open(\"Output/\"+str(self.name)+\".txt\",'wb')\r\n            for i in range(len(self.data)):\r\n                exportFile.write(str.encode(str(self.name)+'\\t'+str(self.data[i])+'\\n'))\r\n                ##Writes Energy Only\r\n    \r\n    ##This function allows the user to choose only wanted spins of a given data set. Note: This is only valid for the EoL option.
The userinput must follow this\r\n ##syntax \"0+,2+,3/2-\" no quotations.\r\n def filterData(self,userInput,UI=False):\r\n if(self.op == 'EoL'):\r\n newData=[]##storage for new data\r\n for wantedString in userInput.split(\",\"):##adds all the strings that are included in the userInput.\r\n for i in range(0,len(self.data)):\r\n if(self.data[i][1]==wantedString or self.data[i][1]==(\"(\"+wantedString+\")\")):\r\n newData.append(self.data[i])\r\n if(newData):\r\n self.data=newData##changes data to the new data.\r\n else:\r\n if(UI):\r\n print(\"Warning:No data filtered/selected for \"+ self.name +\".\")#Prints a statement telling user than no file was found\r\n self.data=[[0.0,\"--\",0.0]]##Enters a dummy entry to file with something.\r\n","sub_path":"Old_Files/dataClass.py","file_name":"dataClass.py","file_ext":"py","file_size_in_byte":23161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"333265667","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 3 17:14:11 2021\n\n@author: jan-g\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 23 16:05:44 2021\n\n@author: jan-g\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 21 13:14:16 2021\n\n@author: jan-g\n\"\"\"\nimport numpy as np\nfrom stable_baselines.ddpg.policies import FeedForwardPolicy #THIS SHOULD NOT BE .common.policies\nfrom stable_baselines import DDPG\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\n\nfrom stable_baselines.ddpg import AdaptiveParamNoiseSpec\nfrom stable_baselines.ddpg import OrnsteinUhlenbeckActionNoise\nfrom stable_baselines.ddpg import NormalActionNoise\n\nfrom stable_baselines.common import set_global_seeds\n\nfrom osim.env import *\nimport gym\n\nfrom DDPG_COL import DDPG_CoL\nfrom stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback\n\n\n\n# Global Callback avariable\nn_steps_eval = 0\nn_steps_save = 0\n\n# Custom MLP policy of two layers of size 128 each\nclass CustomDDPGPolicy(FeedForwardPolicy):\n def __init__(self, *args, **kwargs):\n super(CustomDDPGPolicy, self).__init__(*args, **kwargs,\n layers=[128, 128],\n layer_norm=True,\n feature_extraction=\"mlp\")\n \ndef make_env_opensimrl(env_id, rank, seed=0):\n\n def _init():\n env = gym.make(env_id)\n env.seed(seed + rank)\n return env\n\n set_global_seeds(seed)\n return _init\n\nif __name__ == '__main__':\n num_cpu = 1 # Number of processes to use\n env_id = \"osimrl2D-v0\"\n env = L2RunEnv(visualize=False)\n env = DummyVecEnv([lambda: env])\n env = DummyVecEnv([make_env_opensimrl(env_id, i) for i in range(1)])\n n_actions = env.action_space.shape[-1]\n action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.2) * np.ones(n_actions), theta=0.1)\n\n model_name = \"data/test\"\n data_addr = \"data/test\"\n \n # callback to save models during training\n checkpoint_callback = CheckpointCallback(save_freq=5e4, save_path='./logs/',\n name_prefix='DDPG_CoT_')\n\n model = DDPG_CoL(CustomDDPGPolicy, env, gamma=0.99, memory_policy=None, \n eval_env=None, nb_train_steps=50, nb_rollout_steps=100, \n nb_eval_steps=100, param_noise=None, action_noise=action_noise, \n normalize_observations=False, tau=0.001, batch_size=256, \n param_noise_adaption_interval=50, normalize_returns=False, \n enable_popart=False, observation_range=(-np.inf, np.inf), \n critic_l2_reg=0.0000001,actor_l2_reg=0.0001, return_range=(-np.inf, np.inf), actor_lr=1e-3, \n critic_lr=1e-4, 
clip_norm=None, reward_scale=1.0, render=False, \n render_eval=False, memory_limit=None, buffer_size=1000000, \n random_exploration=0.0, verbose=0, tensorboard_log=\"./l2walk_tensorboard/\", seed = 1,\n _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,\n lambda_ac_di_loss=20.0, lambda_ac_qloss=1.0, lambda_qloss=1.0, lambda_n_step=1.0, act_prob_expert_schedule=None,\n train_steps=0, schedule_steps=0, bc_model_name=None, dynamic_sampling_ratio=False,\n log_addr='data/test', schedule_expert_actions=False, dynamic_loss=False, csv_log_interval=10,\n norm_reward=1., n_expert_trajs=-1)\n \n \n \n model.learn(total_timesteps=1.2e6, callback=checkpoint_callback, seed=None, log_interval=100,\n tb_log_name=\"CoL\", reset_num_timesteps=True, dataset_addr= 'Expert_data_k00.npz',\n pretrain_steps=10000, max_samples_expert=None, pretrain_model_name=data_addr+\"/pre_trained_model\",\n replay_wrapper=None)\n \n ","sub_path":"COL__test.py","file_name":"COL__test.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"611149205","text":"#coding:utf-8\n\nfrom tornado.web import RequestHandler\nimport json\nfrom util.convert import is_mobile\nimport logging\n\nLOG = logging.getLogger(__name__)\n\nclass DefaultHandler(RequestHandler):\n def initialize(self, static_path, templates_path, view_prefix, **kwargs):\n self.static_path = static_path\n self.templates_path = templates_path\n\n if view_prefix[-1] != '/':\n view_prefix += '/'\n self.prefix = view_prefix\n\n def get_template_path(self):\n return self.templates_path\n\n def get(self):\n self.redirect(self.prefix + r'login.html', permanent=True)\n\nclass LoginViewHandler(RequestHandler):\n def initialize(self, static_path, templates_path, **kwargs):\n self.static_path = static_path\n self.templates_path = templates_path\n\n def get_template_path(self):\n return self.templates_path\n\n def get(self):\n LOG.info(\"-----------------login-----------\")\n self.render(\"login.html\")\n\nclass IndextHandler(RequestHandler):\n def initialize(self, static_path, templates_path, view_prefix, **kwargs):\n self.static_path = static_path\n self.templates_path = templates_path\n\n if view_prefix[-1] != '/':\n view_prefix += '/'\n self.prefix = view_prefix\n\n def get_template_path(self):\n return self.templates_path\n\n def get(self):\n LOG.info(\"-----------------index-----------:%s\"%self.get_secure_cookie('user_name'))\n if not self.get_secure_cookie('user_name'):\n #self.redirect(self.prefix + r'login.html', permanent=True)\n self.redirect(self.prefix + r'login.html')\n return\n self.render(\"index.html\")\n\n\nhtml_path = (\n \"grade\",\n \"class\",\n \"teacher\",\n \"student\",\n \"relative\",\n \"studentHistory\",\n \"teacherHistory\",\n \"school\",\n \"studentAttendance\",\n \"teacherAttendance\",\n \"loginPhone\",\n \"commonPerson\",\n \"studentAttendance\",\n \"forgetPassword\")\n\n\nclass ManageViewHandler(RequestHandler):\n def initialize(self, static_path, templates_path, **kwds):\n self.static_path = static_path\n self.templates_path = templates_path\n\n def get_template_path(self):\n return self.templates_path\n\n def get(self, manage_obj):\n #real_ip = self.request.headers.get(\"x-real-ip\", self.request.headers.get(\"x-forwarded-for\", \"\"))\n if manage_obj not in html_path:\n self.redirect(r'login.html', permanent=True)\n return\n\n self.render(manage_obj + r'.html', 
user_name=self.get_secure_cookie('user_name'))","sub_path":"views/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"647481563","text":"import FWCore.ParameterSet.Config as cms\n\nmuonDTStubPSet = cms.PSet(\n dtLocalTrigger = cms.PSet(\n verbose = cms.int32(0),\n inputTag = cms.InputTag(\"simDtTriggerPrimitiveDigis\"),\n run = cms.bool(True),\n minBX = cms.int32(-1),\n maxBX = cms.int32(1),\n )\n)\n","sub_path":"GEMValidation/python/simTrackPSets/muonDTStubPSet.py","file_name":"muonDTStubPSet.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"358406387","text":"from django.http import JsonResponse, HttpResponse\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nimport json\n\n# Create your views here.\nmock_thermostat = {'current_temp': 1}\n\nclass thermostatRestView(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super(thermostatRestView, self).dispatch(request, *args, **kwargs)\n\n def get(self, request):\n global mock_thermostat\n return JsonResponse({'current_temp': mock_thermostat['current_temp']})\n\n def post(self, request):\n global mock_thermostat\n payload = json.loads(request.body)\n mock_thermostat['current_temp'] = int(payload['requested_temp'])\n return HttpResponse(\"OK\")\n","sub_path":"thermostat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"510684788","text":"#!env python3\n# -*- coding: utf-8 -*-\nimport os\nimport tempfile\nimport unittest\n\nclass TestUsingTempFile(unittest.TestCase):\n def test_using_temp_file(self):\n with tempfile.NamedTemporaryFile(mode='w') as f:\n f.write('\\n'.join(['aaa', 'bbb', 'ccc']) + '\\n')\n f.flush()\n with open(f.name, 'r') as fd:\n actual = len(fd.readlines())\n self.assertEqual(actual, 3)\n\n def test_using_temp_dir(self):\n with tempfile.TemporaryDirectory() as dirpath:\n with open(os.path.join(dirpath, 'file1'), mode='w', encoding='utf-8') as f1, \\\n open(os.path.join(dirpath, 'file2'), mode='w', encoding='utf-8') as f2:\n actual = len(os.listdir(dirpath))\n self.assertEqual(actual, 2)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/test/unittest_tempfile.py","file_name":"unittest_tempfile.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"576175395","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport time\nfrom teamcity import is_running_under_teamcity\nfrom teamcity.unittestpy import TeamcityTestRunner\n\n\nimport unittest\n\nclass InvestETH(unittest.TestCase):\n\n def setUp(self):\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('headless')\n chrome_options.add_argument('no-sandbox')\n self.driver = webdriver.Chrome(chrome_options=chrome_options)\n wait = WebDriverWait(self.driver, 40)\n\n def test_InvestETH(self):\n self.driver.get(\"https://ico.beeasy.io\")\n time.sleep(4)\n\n #login\n self.driver.find_element_by_xpath(\"//input[@name='user']\").send_keys('alex')\n 
self.driver.find_element_by_xpath(\"//input[@name='password']\").send_keys('sda')\n self.driver.find_element_by_xpath(\"//button[@class='btn btn-info btn-lg btn-block text-uppercase waves-effect waves-light']\").click()\n wait_i = self.driver.implicitly_wait(30)\n time.sleep(3)\n\n #Buy ETKN by ETH\n self.driver.find_element_by_xpath(\"//a[@routerlink='/transactions/new/eth']\").click()\n time.sleep(3)\n btc=self.driver.find_element_by_xpath(\"//input[@id='input-tokens']\")\n time.sleep(1)\n btc.clear()\n btc.send_keys('100')\n self.driver.find_element_by_xpath(\"//button[@id='button-buy']\").click()\n time.sleep(2)\n self.driver.find_element_by_xpath(\"//button[@class='btn-copy text-white input-group-addon btn-info']\").click()\n time.sleep(2)\n\n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == '__main__':\n if is_running_under_teamcity():\n runner = TeamcityTestRunner()\n else:\n runner = unittest.TextTestRunner()\n unittest.main(testRunner=runner)","sub_path":"scripts/beeasy_ico/InvestETH.py","file_name":"InvestETH.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"458497112","text":"from django.contrib.auth import get_user_model\nfrom django.shortcuts import render\n\nUser = get_user_model()\n\n\ndef index(requrest):\n\n users = User.objects.all()\n\n context = {\n 'users': users,\n }\n\n # username = user.username\n # img_profile = user.img_profile\n # nickname = U\n\n return render(requrest, 'index.html', context)\n","sub_path":"config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"34392244","text":"import unittest\n\nfrom panel.config import config\nfrom panel.io.notifications import NotificationArea\nfrom panel.io.state import set_curdoc, state\nfrom panel.template import VanillaTemplate\n\n\ndef test_notification_instantiate_on_config(document):\n with config.set(notifications=True):\n tmpl = VanillaTemplate()\n\n assert isinstance(tmpl.notifications, NotificationArea)\n\n tmpl.server_doc(document)\n session_context = unittest.mock.Mock()\n document._session_context = lambda: session_context\n\n with set_curdoc(document):\n assert state.notifications is tmpl.notifications\n\n\ndef test_notification_explicit(document):\n tmpl = VanillaTemplate(notifications=NotificationArea())\n\n assert isinstance(tmpl.notifications, NotificationArea)\n\n tmpl.server_doc(document)\n session_context = unittest.mock.Mock()\n document._session_context = lambda: session_context\n\n with set_curdoc(document):\n assert state.notifications is tmpl.notifications\n","sub_path":"panel/tests/template/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"378577047","text":"def is_prime(n):\n if n == 2:\n return True\n elif n == 1 or n % 2 == 0:\n return False\n\n i = 3 \n while i * i <= n:\n if n % i == 0:\n return False\n i += 2 \n return True\n\n\ndef prime_nth(n):\n i = cnt = 0\n while(cnt != n):\n i += 1\n if is_prime(i):\n cnt += 1\n return i \n\n\nimport time\nstart = time.time()\nprint(prime_nth(10001))\nprint(time.time() - start)\n","sub_path":"problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} 
+{"seq_id":"261506857","text":"#!/usr/bin/env python3\n\n\"\"\"\nError handling. Write a function called safe(func, *pargs, **kargs) that runs any\nfunction with any number of positional and/or keyword arguments by using the\n* arbitrary arguments header and call syntax, catches any exception raised while\nthe function runs, and prints the exception using the exc_info call in the sys mod-\nule. Then use your safe function to run your oops function from exercise 1 or 2.\nPut safe in a module file called exctools.py, and pass it the oops function interac-\ntively. What kind of error messages do you get? Finally, expand safe to also print\na Python stack trace when an error occurs by calling the built-in print_exc function\nin the standard traceback module; see earlier in this chapter, and consult the\nPython library reference manual for usage details. We could probably code safe as\na function decorator using Chapter 32 techniques, but we’ll have to move on to the\nnext part of the book to learn fully how (see the solutions for a preview).\n\"\"\"\n\nimport sys\nimport traceback\nimport exercise_7_01\n\ndef safe(func, *pargs, **kargs):\n try:\n func(*pargs, **kargs)\n except Exception:\n print('Got it!')\n print(sys.exc_info())\n traceback.print_exc()\n\nif __name__ == '__main__':\n safe(exercise_7_01.oops)\n safe(lambda x=1, y='two': x+y)\n","sub_path":"python/learn_python_Mark_Lutz/exercise_7_03.py","file_name":"exercise_7_03.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"477788645","text":"# Developed by jrbyte on Github.com. This was used for practice for machine learning using K nearest Algorithm.\n# Some of the code written here was aided with a youtube tutorial.\n\nimport sklearn\nfrom sklearn.utils import shuffle\nfrom sklearn.neighbors import KNeighborsClassifier\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model, preprocessing\nimport matplotlib.pyplot as pyplot\nfrom matplotlib import style\n\n\n# K Nearest Algorithm (Basic summary): Classification algorithm, tries to classify the data points with classes that it already knows.\n# We need to look at the groups of data points which are classified already part of that group. Whatever the data point\n# we don't know the classification of looks for the closest group to be classified as.\n#\n# K: is the number of data points closest to the unknown data point (magnitude). Whatever has a higher amount of a certain class is\n# what the unknown class data point becomes. 
K needs to always be an odd number because if it is an even number then it\n# has the possibility of it becoming a tie.\n# More information: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm\n# https://scikit-learn.org/stable/getting_started.html\nclass example1:\n def main(self):\n data = pd.read_csv(\"car.data\")\n print(\"\\nChecking that data is being parsed correctly.\")\n print(data.head())\n\n # Converting the the identifiers of the Category (car color: red, blue, green ,and etc) to values (car color: 1, 2, 3, 4)\n le = preprocessing.LabelEncoder()\n buying = le.fit_transform(list(data[\"buying\"]))\n maint = le.fit_transform(list(data[\"maint\"]))\n door = le.fit_transform(list(data[\"door\"]))\n persons = le.fit_transform(list(data[\"persons\"]))\n lug_boot = le.fit_transform(list(data[\"lug_boot\"]))\n safety = le.fit_transform(list(data[\"safety\"]))\n listClass = le.fit_transform(list(data[\"class\"]))\n\n predict = \"class\"\n\n # Splitting the data to X and Y and putting chosen\n X = list(zip(buying, maint, door, persons, lug_boot, safety))\n Y = list(listClass)\n\n acc = 0.00\n count = 0\n neighbors = 1\n maxAttempts = 100\n maxNeighbors = 15\n bestAcc = 0.00\n bestNeighbors = 0\n bestModel = None\n bestX_test = None\n bestY_test = None\n\n # This while loop will find the best neighbor for the data set with the best accuracy. It can be specified above.\n while neighbors <= maxNeighbors:\n # Training the algorithm with k nearest neighbor\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.1)\n # Every data set is different, so it may require you to tweak for the best amount of neighbors.\n model = KNeighborsClassifier(n_neighbors=neighbors)\n model.fit(x_train, y_train)\n acc = model.score(x_test, y_test)\n print(\"Attempts: \", count, \", Current Neighbors: \", neighbors, \", and Best Neighbor: \", bestNeighbors)\n print(\"Current Accuracy: \", acc, \" vs BestAccuracy: \", bestAcc, \"\\n\")\n\n if acc > bestAcc:\n bestAcc = acc\n bestNeighbors = neighbors\n bestModel = model\n bestX_test = x_test\n bestY_test = y_test\n\n if count == maxAttempts:\n count = 0\n # All neighbors must be odd.\n neighbors += 2\n count += 1\n\n predicted = bestModel.predict(x_test)\n names = [\"unacc\", \"acc\", \"good\", \"vgood\"]\n\n for x in range(len(x_test)):\n print(\"Predicted: \", names[predicted[x]], \" Data: \", bestX_test[x], \" Actual: \", names[bestY_test[x]])\n # Distance between each data point in neighbors.\n # n = model.kneighbors([x_test[x]], 9, True)\n # print(\"N: \", n)\n\n print(\"\\nBest accuracy: \", bestAcc, \"Best Neighbor: \", bestNeighbors)\n\n\nif __name__ == '__main__':\n example1().main()\n","sub_path":"KNearestAlg.py","file_name":"KNearestAlg.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"378180260","text":"import pygame\nfrom pygame.locals import *\nimport numpy as np\nfrom gl import Renderer, Model\nimport shaders\n\n\nwidth = 860\nheight = 560\n\ndeltaTime = 0.0\n\npygame.init()\nscreen = pygame.display.set_mode((width,height), pygame.DOUBLEBUF | pygame.OPENGL )\nclock = pygame.time.Clock()\n\n\n\nrend = Renderer(screen)\nrend.setShaders(shaders.vertex_shader, shaders.fragment_shader)\n\nface = Model('ufo.obj', 'model.bmp')\nface.position.z = -5\n\nrend.scene.append( face )\n\n\nisRunning = True\nwhile isRunning:\n\n\n keys = pygame.key.get_pressed()\n\n # Traslacion de camara\n if keys[K_d]:\n 
rend.camPosition.x += 1 * deltaTime\n if keys[K_a]:\n rend.camPosition.x -= 1 * deltaTime\n if keys[K_w]:\n rend.camPosition.z += 1 * deltaTime\n if keys[K_s]:\n rend.camPosition.z -= 1 * deltaTime\n if keys[K_q]:\n rend.camPosition.y -= 1 * deltaTime\n if keys[K_e]:\n rend.camPosition.y += 1 * deltaTime\n\n if keys[K_LEFT]:\n if rend.valor > 0:\n rend.valor -= 0.1 * deltaTime\n\n if keys[K_RIGHT]:\n if rend.valor < 0.2:\n rend.valor += 0.1 * deltaTime\n\n # Rotacion de camara\n if keys[K_z]:\n rend.camRotation.y += 15 * deltaTime\n if keys[K_x]:\n rend.camRotation.y -= 15 * deltaTime\n\n\n for ev in pygame.event.get():\n if ev.type == pygame.QUIT:\n isRunning = False\n\n elif ev.type == pygame.KEYDOWN:\n if ev.key == pygame.K_ESCAPE:\n isRunning = False\n\n if ev.key == K_1:\n rend.filledMode()\n if ev.key == K_2:\n rend.wireframeMode()\n if ev.key == K_3:\n rend.setShaders(shaders.vertex_shader, shaders.fragment_shader)\n if ev.key == K_4:\n rend.setShaders(shaders.vertex_shader_toon, shaders.fragment_shader_toon)\n if ev.key == K_5:\n rend.setShaders(shaders.vertex_gold_shader, shaders.fragment_shader)\n\n rend.tiempo += deltaTime\n deltaTime = clock.tick(60) / 1000\n\n rend.render()\n\n pygame.display.flip()\n\npygame.quit()\n","sub_path":"RendererO_GL.py","file_name":"RendererO_GL.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"263221352","text":"import os\nfrom flask import Flask, request, jsonify, render_template, json, abort, current_app as app\n#from flask_filter import query_with_filters\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/product_details')\ndef product_details():\n return render_template('product_details.html')\n\n\ndef get_paginated_list(klass, url, start, limit):\n # check if page exists\n results = klass\n count = len(results)\n if (count < start):\n abort(404)\n # make response\n obj = {}\n obj['start'] = start\n obj['limit'] = limit\n obj['count'] = count\n # make URLs\n # make previous url\n if start == 1:\n obj['previous'] = ''\n else:\n start_copy = max(1, start - limit)\n limit_copy = start - 1\n obj['previous'] = url + '?start=%d&limit=%d' % (start_copy, limit_copy)\n # make next url\n if start + limit > count:\n obj['next'] = ''\n else:\n start_copy = start + limit\n obj['next'] = url + '?start=%d&limit=%d' % (start_copy, limit)\n # finally extract result according to bounds\n obj['results'] = results[(start - 1):(start - 1 + limit)]\n return obj\n\n\ndef read_data(file_name):\n read_file = os.path.join(app.static_folder, 'data', file_name)\n json_data = []\n with open(read_file) as f:\n data_header = [x.lower() for x in f.readline().strip().split(\"~\")]\n print(data_header)\n data_body = f.readlines()[0:]\n \n for x in data_body:\n body_line = x.strip().split(\"~\")\n res = {data_header[idx]: val for idx, val in enumerate(body_line)}\n json_data.append(res)\n return json_data\n\n\n@app.route('/api/products', methods=['GET'])\ndef products_api():\n data = read_data('products.txt')\n query_parameters = request.args\n\n if query_parameters:\n q = query_parameters.get('q').lower()\n else:\n return \"Error: No id field provided. 
Please specify an id.\"\n \n\n results = []\n\n if q:\n for x in data:\n if q in x['ingredient'] or q in x['trade_name'] or x['df;route']:\n results.append(x)\n \"\"\"\n if q:\n filtered_data = list(filter(lambda d: q in d['ingredient'] or q in d['trade_name'] or q in d['df;route'], data))\n return jsonify(filtered_data)\n \"\"\"\n return jsonify(results)\n\n\n@app.route('/api/products/details', methods=['GET'])\ndef products_details_api():\n obj = {}\n patent_data = read_data('patent.txt')\n excl_data = read_data('exclusivity.txt')\n \n query_parameters = request.args\n\n if query_parameters:\n Product_No = query_parameters.get('Product_No')\n Appl_No = query_parameters.get('Appl_No')\n Appl_Type = query_parameters.get('Appl_Type')\n else:\n return \"Error: No id field provided. Please specify an id.\"\n \n if patent_data:\n obj['patent_obj'] = jsonify(list(filter(lambda d: Product_No.lower() in d['Product_No'].lower() and Appl_No.lower() in d['Appl_No'].lower() or Appl_Type.lower() in d['Appl_Type'].lower(), patent_data)))\n elif excl_data:\n obj['excl_obj'] = jsonify(list(filter(lambda d: Product_No.lower() in d['Product_No'].lower() and Appl_No.lower() in d['Appl_No'].lower() or Appl_Type.lower() in d['Appl_Type'].lower(), excl_data)))\n \n #return jsonify({'data':200})\n return obj","sub_path":"orange_book2_working/prj/testing/normal/app_normal.py","file_name":"app_normal.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"481087707","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\nif __name__ == \"__main__\":\n tf.reset_default_graph()\n\n with tf.device(\"/device:GPU:0\"):\n scalar = tf.Variable(initial_value=3, dtype=tf.float32, name=\"scalar\")\n vector = tf.Variable(initial_value=[3], dtype=tf.float32, name=\"vector\")\n matrix_2d = tf.Variable(initial_value=[[3], [2]], dtype=tf.float32, name=\"matrix_2d\")\n matrix_3d = tf.Variable(initial_value=[[[3], [2]], [[10], [11]]], dtype=tf.float32, name=\"matrix_3d\")\n\n scalar_rank, scalar_shape = tf.rank(scalar), tf.shape(scalar)\n vector_rank, vector_shape = tf.rank(vector), tf.shape(vector)\n matrix_2d_rank, matrix_2d_shape = tf.rank(matrix_2d), tf.shape(matrix_2d)\n matrix_3d_rank, matrix_3d_shape = tf.rank(matrix_3d), tf.shape(matrix_3d)\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n print(sess.run([scalar_rank, scalar_shape])) # [0, array([], dtype=int32)]\n print(sess.run([vector_rank, vector_shape])) # [1, array([1], dtype=int32)]\n print(sess.run([matrix_2d_rank, matrix_2d_shape])) # [2, array([2, 1], dtype=int32)]\n print(sess.run([matrix_3d_rank, matrix_3d_shape])) # [3, array([2, 2, 1], dtype=int32)]\n","sub_path":"abep01/ex_tensor.py","file_name":"ex_tensor.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"578556813","text":"from django import forms\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.utils.translation import gettext_lazy as _\nfrom oscar.forms.widgets import DatePickerInput\nfrom oscar.apps.dashboard.offers.forms import (\n RestrictionsForm as BaseRestrictionsForm,\n)\nfrom django.forms import ModelMultipleChoiceField\nfrom oscar.core.loading import get_model\n\nConditionalOffer = get_model(\"offer\", \"ConditionalOffer\")\nCompoundBenefit = get_model(\"offer\", \"CompoundBenefit\")\nBenefit = 
get_model(\"offer\", \"Benefit\")\nCompoundCondition = get_model(\"offer\", \"CompoundCondition\")\nCondition = get_model(\"offer\", \"Condition\")\nRange = get_model(\"offer\", \"Range\")\nOfferGroup = get_model(\"offer\", \"OfferGroup\")\nOrder = get_model(\"order\", \"Order\")\nSourceType = get_model(\"payment\", \"SourceType\")\n\n\nclass BenefitSearchForm(forms.Form):\n compound_benefit_cpath = \"%s.%s\" % (\n CompoundBenefit.__module__,\n CompoundBenefit.__name__,\n )\n _benefit_classes = getattr(settings, \"BLUELIGHT_BENEFIT_CLASSES\", [])\n _benefit_classes.append((compound_benefit_cpath, _(\"Compound Benefit\")))\n range = forms.ModelChoiceField(\n required=False, queryset=Range.objects.order_by(\"name\")\n )\n benefit_type = forms.ChoiceField(\n choices=[\n (\"\", \"---------\"),\n ]\n + _benefit_classes,\n required=False,\n label=_(\"Type\"),\n )\n min_value = forms.DecimalField(required=False, label=_(\"Minimum value\"))\n max_value = forms.DecimalField(required=False, label=_(\"Maximum value\"))\n\n\nclass ConditionSearchForm(forms.Form):\n range = forms.ModelChoiceField(\n required=False, queryset=Range.objects.order_by(\"name\")\n )\n\n\nclass OrderDiscountSearchForm(forms.Form):\n number = forms.CharField(required=False, label=_(\"Order number\"))\n status = forms.ChoiceField(required=False, label=_(\"Order status\"), choices=[])\n date_from = forms.DateField(\n required=False, label=_(\"Date from\"), widget=DatePickerInput\n )\n date_to = forms.DateField(\n required=False, label=_(\"Date to\"), widget=DatePickerInput\n )\n product = forms.CharField(required=False, label=_(\"Product\"))\n payment_method = forms.ChoiceField(\n required=False, label=_(\"Payment method\"), choices=[]\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"status\"].choices = self.get_order_status_choices()\n self.fields[\"payment_method\"].choices = self.get_payment_method_choices()\n\n def get_order_status_choices(self):\n return [(\"\", \"---------\")] + [(v, v) for v in Order.all_statuses()]\n\n def get_payment_method_choices(self):\n return [(\"\", \"---------\")] + [\n (src.code, src.name) for src in SourceType.objects.all()\n ]\n\n def filter_queryset(self, qs):\n if not self.is_valid():\n return qs\n data = self.cleaned_data\n is_filtered = False\n if data.get(\"number\"):\n qs = qs.filter(order__number__icontains=data[\"number\"])\n is_filtered = True\n if data.get(\"status\"):\n qs = qs.filter(order__status=data[\"status\"])\n is_filtered = True\n if data.get(\"date_from\"):\n qs = qs.filter(order__date_placed__gte=data[\"date_from\"])\n is_filtered = True\n if data.get(\"date_to\"):\n qs = qs.filter(order__date_placed__lte=data[\"date_to\"])\n is_filtered = True\n if data.get(\"product\"):\n qs = qs.filter(\n (\n Q(order__lines__title__icontains=data[\"product\"])\n | Q(order__lines__upc__icontains=data[\"product\"]) # NOQA\n | Q(order__lines__partner_sku__icontains=data[\"product\"]) # NOQA\n )\n )\n is_filtered = True\n if data.get(\"payment_method\"):\n qs = qs.filter(order__sources__source_type__code=data[\"payment_method\"])\n is_filtered = True\n return qs, is_filtered\n\n\nclass BenefitForm(forms.ModelForm):\n _benefit_classes = getattr(settings, \"BLUELIGHT_BENEFIT_CLASSES\", [])\n proxy_class = forms.ChoiceField(\n choices=_benefit_classes,\n required=True,\n label=_(\"Type\"),\n help_text=_(\"Select a benefit type\"),\n )\n\n class Meta:\n model = Benefit\n fields = [\"range\", \"proxy_class\", \"value\", \"max_affected_items\", 
\"max_discount\"]\n\n\nclass ConditionForm(forms.ModelForm):\n _condition_classes = getattr(settings, \"BLUELIGHT_CONDITION_CLASSES\", [])\n proxy_class = forms.ChoiceField(\n choices=_condition_classes,\n required=True,\n label=_(\"Type\"),\n help_text=_(\"Select a condition type\"),\n )\n\n class Meta:\n model = Condition\n fields = [\n \"range\",\n \"proxy_class\",\n \"value\",\n ]\n\n\nclass CompoundBenefitForm(forms.ModelForm):\n CPATH = \"%s.%s\" % (CompoundBenefit.__module__, CompoundBenefit.__name__)\n proxy_class = forms.ChoiceField(\n choices=((CPATH, _(\"Compound Benefit\")),),\n initial=CPATH,\n disabled=True,\n label=_(\"Type\"),\n help_text=_(\"Select a benefit type\"),\n )\n\n class Meta:\n model = CompoundBenefit\n fields = [\"proxy_class\", \"subbenefits\", \"max_discount\"]\n\n\nclass CompoundConditionForm(forms.ModelForm):\n CPATH = \"%s.%s\" % (CompoundCondition.__module__, CompoundCondition.__name__)\n proxy_class = forms.ChoiceField(\n choices=((CPATH, _(\"Compound Condition\")),),\n initial=CPATH,\n disabled=True,\n label=_(\"Type\"),\n help_text=_(\"Select a condition type\"),\n )\n\n class Meta:\n model = CompoundCondition\n fields = [\"proxy_class\", \"conjunction\", \"subconditions\"]\n\n\nclass MetaDataForm(forms.ModelForm):\n offer_group = forms.ModelChoiceField(\n label=_(\"Offer Group\"),\n queryset=OfferGroup.objects.get_queryset(),\n help_text=_(\"Offer group to which this offer belongs\"),\n )\n\n class Meta:\n model = ConditionalOffer\n fields = (\n \"name\",\n \"short_name\",\n \"description\",\n # Oscar puts offer_type on the metadata form, but we put it on the restrictions\n # form instead (due to it's ties to the user group limiting functionality).\n # \"offer_type\",\n \"offer_group\",\n \"affects_cosmetic_pricing\",\n \"priority\",\n )\n\n\nclass BenefitSelectionForm(forms.ModelForm):\n class Meta:\n model = ConditionalOffer\n fields = (\"benefit\",)\n\n\nclass ConditionSelectionForm(forms.ModelForm):\n class Meta:\n model = ConditionalOffer\n fields = (\"condition\",)\n\n\nclass RestrictionsForm(BaseRestrictionsForm):\n groups = forms.ModelMultipleChoiceField(\n label=_(\"User Groups\"),\n queryset=Group.objects.get_queryset(),\n help_text=_(\"Which user groups will be able to apply this offer?\"),\n required=False,\n )\n\n class Meta:\n model = ConditionalOffer\n fields = list(BaseRestrictionsForm.Meta.fields) + [\n \"offer_type\",\n \"groups\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"priority\"].widget = forms.HiddenInput()\n self.fields[\"priority\"].disabled = True\n self.fields[\"exclusive\"].widget = forms.HiddenInput()\n self.fields[\"exclusive\"].disabled = True\n self.fields[\"combinations\"].widget = forms.HiddenInput()\n self.fields[\"combinations\"].disabled = True\n\n def clean_offer_type(self):\n data = self.cleaned_data[\"offer_type\"]\n if (\n (self.instance.pk is not None)\n and (self.instance.offer_type == ConditionalOffer.VOUCHER)\n and (\"offer_type\" in self.changed_data)\n and self.instance.vouchers.exists()\n ):\n raise forms.ValidationError(\n _(\"This can only be changed if it has no vouchers attached to it\")\n )\n return data\n\n def clean(self):\n cleaned_data = super().clean()\n # If offer_type is _User_, require at least 1 group to be selected\n if cleaned_data[\"offer_type\"] == ConditionalOffer.USER:\n if len(cleaned_data[\"groups\"]) <= 0:\n raise forms.ValidationError(\n {\n \"groups\": _(\n \"User offers must have at least 1 user group selected.\"\n )\n }\n 
)\n        # If offer_type is anything other than _User_, clear the groups field.\n        else:\n            cleaned_data[\"groups\"] = []\n        return cleaned_data\n\n\nclass OfferGroupForm(forms.ModelForm):\n    offers = ModelMultipleChoiceField(\n        queryset=ConditionalOffer.objects.order_by(\"name\").all(),\n        widget=forms.widgets.SelectMultiple(),\n        required=False,\n    )\n\n    class Meta:\n        model = OfferGroup\n        fields = (\n            \"name\",\n            \"priority\",\n            \"offers\",\n        )\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        if self.instance:\n            self.initial[\"offers\"] = self.instance.offers.all()\n\n    def save(self, *args, **kwargs):\n        offer_group = super().save(*args, **kwargs)\n        offer_group.offers.set(self.cleaned_data[\"offers\"])\n        return offer_group\n","sub_path":"server/src/oscarbluelight/dashboard/offers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"617325853","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/Kamaelia/Util/Stringify.py\n# Compiled at: 2008-10-19 12:19:52\n\"\"\"=======================\nConvert Data to Strings\n=======================\n\nA simple component that takes data items and converts them to strings.\n\nExample Usage\n-------------\n\nA simple pipeline::\n\n    Pipeline( sourceOfNonStrings(),\n              Stringify(),\n              consumerThatWantsStrings(),\n            ).activate()\n    \n\nHow does it work?\n-----------------\n\nSend data items to this component's \"inbox\" inbox. They are converted to\nstrings using the str(...)
function, and sent on out of the \"outbox\" outbox.\n\nAnything sent to this component's \"control\" inbox is ignored.\n\nThis component does not terminate.\n\"\"\"\nfrom Axon.Component import component, scheduler\n\nclass Stringify(component):\n \"\"\" Stringify() -> new Stringify.\n \n A component that converts data items received on its \"inbox\" inbox to\n strings and sends them on out of its \"outbox\" outbox.\n \"\"\"\n Inboxes = {'inbox': 'Data items to convert to string', 'control': 'NOT USED'}\n Outboxes = {'outbox': 'Data items converted to strings', 'signal': 'NOT USED'}\n\n def __init__(self):\n \"\"\"x.__init__(...) initializes x; see x.__class__.__doc__ for signature\"\"\"\n super(Stringify, self).__init__()\n self.activate()\n\n def mainBody(self):\n \"\"\"Main loop body.\"\"\"\n if self.dataReady('inbox'):\n theData = self.recv('inbox')\n self.send(str(theData), 'outbox')\n return 1\n\n\n__kamaelia_components__ = (\n Stringify,)","sub_path":"pycfiles/Kamaelia-0.6.0-py2.5/Stringify.py","file_name":"Stringify.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"71027572","text":"import scrapy\nfrom scrapy.loader import ItemLoader\nfrom davescraper.items import QuillItem\n\n\nclass QuillSpider(scrapy.Spider):\n name = 'quill'\n allowed_domains = [\"quill.com\"]\n start_urls = [\n 'https://www.quill.com/search?keywords=Gel+Pens',\n 'https://www.quill.com/labels/cbl/345.html?filter=Label+Type_Address',\n 'https://www.quill.com/search?keywords=Post+it',\n 'https://www.quill.com/laminating-machine-and-supplies/cbd/501.html',\n 'https://www.quill.com/pens/cbl/598.html?filter=Pen+Type_Ballpoint',\n 'https://www.quill.com/dry-erase-makers/cbk/114047.html',\n 'https://www.quill.com/permanent-markers/cbk/118131.html',\n 'https://www.quill.com/all-purpose-cleaners-degreasers/cbl/4174.html',\n 'https://www.quill.com/search?keywords=Cleaning+Wipes',\n 'https://www.quill.com/packing-tape/cbl/18190.html'\n ]\n\n def __init__(self):\n super(QuillSpider, self).__init__()\n pass\n\n def parse(self, response):\n items = response.css('div#ResultsSection div.BrowseItem')\n for item in items:\n loader = ItemLoader(item=QuillItem(), selector=item)\n loader.add_css('title', 'h3#skuName a::text')\n loader.add_css('price', 'span#SkuPriceUpdate::text')\n loader.add_css('number', 'div#ItemSrchCompare div.iNumber::text')\n\n yield loader.load_item()\n\n next_page_data_url = response.css('div#Pager span.next::attr(data-url)').get()\n next_page_query_string = response.css('div#Pager span.next::attr(data-querystring)').get()\n next_page = None\n if next_page_data_url is not None and next_page_query_string is not None:\n next_page = next_page_data_url + '?' 
+ next_page_query_string\n\n if next_page is not None:\n next_page = response.urljoin(next_page)\n self.logger.info('Next Page : {}'.format(next_page))\n yield scrapy.Request(next_page, callback=self.parse)\n\n\n\n","sub_path":"davescraper/spiders/quill_spider.py","file_name":"quill_spider.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"54810349","text":"# -*- coding: utf-8 -*-\n#\n# 2012 Nico Schottelius (nico-ceof at schottelius.org)\n#\n# This file is part of ceof.\n#\n# ceof is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ceof is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ceof. If not, see .\n#\n# Generic TCP Server to be used for UI Server\n#\n#\n\nimport ceof\nimport logging\nimport socket\n\nlog = logging.getLogger(__name__)\n\nclass TCPServerError(ceof.Error):\n pass\n\nclass TCPServer(object):\n \"\"\"Server to accept connections\"\"\"\n\n def __init__(self, address, port, handler=None):\n self.address = address\n self.port = port\n\n if handler:\n self.handler = handler\n else:\n self.handler = self.conn_handler\n \n\n def run(self):\n \"\"\"Main loop\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n log.debug(\"Binding TCPServer on %s:%s\" % (self.address, self.port))\n s.bind((str(self.address), int(self.port)))\n s.listen(1)\n\n try:\n while 1:\n conn, addr = s.accept()\n self.handler(conn, addr)\n\n except (socket.error, KeyboardInterrupt):\n s.close()\n\n\n # Default if not setup from external\n def conn_handler(self, conn, addr):\n log.info(\"Connected by %s\" % str(addr))\n\n try:\n while 1:\n data = conn.recv(1024)\n if not data:\n break\n print(\"Internal: \" + data.decode('utf-8'))\n\n except (socket.error, KeyboardInterrupt):\n conn.close()\n raise\n\n conn.close()\n","sub_path":"src/lib/ceof/server/tcp.py","file_name":"tcp.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"415748599","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nn=int(input())\na=[int(i) for i in input().split()]\nd={}\nfor i in a:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\nans=chk=0\nfor i in d:\n ans+=[d[i],d[i]-i][d[i]>=i] \nprint(ans)\n","sub_path":"abc/b076_100/b082/c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"366368098","text":"from celery import shared_task, Task\nfrom time import sleep\nfrom celery import chain, group\n\nimport json\nimport shutil\n\nimport ansible.constants as C\nfrom ansible.executor.task_queue_manager import TaskQueueManager\nfrom ansible.module_utils.common.collections import ImmutableDict\nfrom ansible.inventory.manager import InventoryManager\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.playbook.play import Play\nfrom ansible.plugins.callback import CallbackBase\nfrom ansible.vars.manager 
import VariableManager\nfrom ansible import context\nimport yaml\nimport logging\nimport sys\n\nlogger = logging.getLogger(\"test\")\nlogger.setLevel(logging.DEBUG)\n\n# Create a stream handler and set its log level to DEBUG\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.DEBUG)\n\n# Create a formatter and attach it to the handler\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nhandler.setFormatter(formatter)\n\n# Attach the handler created above to the logger\nlogger.addHandler(handler)\n\nclass ResultsCollectorJSONCallback(CallbackBase):\n \"\"\"A sample callback plugin used for performing an action as results come in.\n\n If you want to collect all results into a single object for processing at\n the end of the execution, look into utilizing the ``json`` callback plugin\n or writing your own custom callback plugin.\n \"\"\"\n\n def __init__(self, task_id = None, schema_id=None, *args, **kwargs):\n super(ResultsCollectorJSONCallback, self).__init__(*args, **kwargs)\n self.host_ok = {}\n self.host_unreachable = {}\n self.host_failed = {}\n self.task_id = task_id\n self.schema_id = schema_id\n\n def v2_runner_on_unreachable(self, result):\n host = result._host\n self.host_unreachable[host.get_name()] = result\n logger.error(\"runner unreachable\")\n\n def v2_runner_on_ok(self, result, *args, **kwargs):\n logger.info(\"Task %s success\", result.task_name)\n\n def runner_on_skipped(self, host, item=None):\n logger.info(\"task skipped\")\n print(host, item)\n\n def runner_on_failed(self, host, res, ignore_errors=False):\n print(res)\n logger.info(\"Task failed\")\n\ndef ansible_install_api(task_id, play_book_path, schema):\n context.CLIARGS = ImmutableDict(connection='smart', private_key_file=\"~/.ssh/id_rsa\", forks=10,\n become_method='sudo', become_user='root', check=False, diff=False, verbosity=0)\n host_list = [schema.host_ip]\n sources = ','.join(host_list)\n if len(host_list) == 1:\n sources += ','\n\n loader = DataLoader()\n passwords = dict(vault_pass='')\n\n results_callback = ResultsCollectorJSONCallback(task_id=task_id)\n\n inventory = InventoryManager(loader=loader, sources=sources)\n\n variable_manager = VariableManager(loader=loader, inventory=inventory)\n tqm = TaskQueueManager(\n inventory=inventory,\n variable_manager=variable_manager,\n loader=loader,\n passwords=passwords,\n stdout_callback=results_callback, # Use our custom callback instead of the ``default`` callback plugin, which prints to stdout\n )\n\n play_sources = []\n\n import os\n os.chdir(os.path.dirname(play_book_path))\n with open(play_book_path) as f:\n data = yaml.load(f, yaml.SafeLoader)\n if isinstance(data, list):\n play_sources.extend(data)\n else:\n play_sources.append(data)\n\n logger.info(\"there are %d tasks to run\", len(play_sources))\n for play_book in play_sources:\n play_book['hosts'] = host_list\n play_book['remote_user'] = 'vagrant'\n play_book['vars']['mysql_port'] = schema.port\n play_book['vars']['schema_name'] = schema.schema\n print(play_book)\n play = Play().load(play_book, variable_manager=variable_manager, loader=loader)\n # Actually run it\n try:\n result = tqm.run(play) # most interesting data for a play is actually sent to the callback's methods\n finally:\n # we always need to cleanup child procs and the structures we use to communicate with them\n logger.info(\"tqm has finished\")\n tqm.cleanup()\n if loader:\n loader.cleanup_all_tmp_files()\n\n # Create play object, playbook objects use .load instead of init or new methods,\n # this will also automatically create the task 
objects from the info provided in play_source\n\n # Remove ansible tmpdir\n shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)\n\nif __name__ == '__main__':\n from os.path import join, dirname, abspath\n from collections import namedtuple\n Schema = namedtuple('Schema', ['host_ip', 'port', 'schema'])\n schema = Schema('10.37.129.3', 4000, 'testss')\n base_dir = dirname(dirname(abspath(__file__)))\n print(base_dir)\n\n ansible_install_api(1, join(base_dir, \"ansible-playbook/mysql.yml\"), schema)","sub_path":"schema_info/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"560743648","text":"import boto3,argparse,json\nfrom aws_provision import AwsClient\n\ndef delete(func_prefix,range_val):\n for i in range(1,range_val):\n func_name = func_prefix + str(i)\n response = lambda_client.delete_function(\n FunctionName=func_name\n )\n print(response)\n\nif __name__ == \"__main__\":\n\n defaults = {\"key\" : None, \"secret\" : None}\n parser = argparse.ArgumentParser()\n parser.add_argument('-k',\"--key\")\n parser.add_argument('-s',\"--secret\")\n parser.add_argument('-d',\"--delete\")\n parser.add_argument('-r',\"--range\")\n args = parser.parse_args()\n \n command_line_args = {key:value for key,value in vars(args).items() if value}\n key = command_line_args[\"key\"]\n secret = command_line_args[\"secret\"]\n func_name = command_line_args[\"delete\"]\n range_val = command_line_args[\"range\"]\n\n lambda_client = AwsClient('lambda',key,secret).client\n delete(func_name,int(range_val))\n\n\n\n\n\n\n","sub_path":"manage_functions.py","file_name":"manage_functions.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"186466954","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport math\nimport hours as hrs\nimport datetime\nimport os\nimport csv\n\nfrom toy import Toy\n\nclass Elf:\n \"\"\" Each Elf starts with a rating of 1.0 and is available at 09:00 on Jan 1. 
\"\"\"\n def __init__(self, elfid, start_working_time = datetime.datetime(2014,1,1,9,0,0)):\n self.id = elfid\n self.rating = 1.0\n self.next_available_working_time = start_working_time\n self.next_available_time = 540\n self.rating_increase = 1.02\n self.rating_decrease = 0.90\n \n self.hrs = hrs.Hours()\n\n self.__time_base = datetime.datetime(2014,1,1,0,0,0)\n\n def __str__(self):\n return \"Elf %s : Productivity %f, Next Available : %s\" % (self.id, self.rating, self.get_next_available_working_time())\n\n\n def tick_to_next_minute(self):\n \"\"\"Avance à la prochaine minute disponible de l'elfe\"\"\"\n available_time = self.get_next_available_working_time()\n\n if available_time.hour == 18 and available_time.minute == 59:\n self.next_available_working_time = available_time + datetime.timedelta(minutes=9*60+5*60+1)\n else:\n self.next_available_working_time = available_time + datetime.timedelta(minutes=1)\n\n def make_toy(self, toy, wcsv):\n \"\"\"Fait un jouet\"\"\"\n\n # Mise à jour next available time\n start_available_working_time = self.get_next_available_working_time()\n start_minute = int(((start_available_working_time-self.__time_base).total_seconds())/60)\n toy_duration = toy.get_duration()\n toy_required_minutes = int(math.ceil(toy_duration / self.rating))\n\n sanctioned, unsanctioned = self.hrs.get_sanctioned_breakdown(start_minute, toy_required_minutes)\n\n # enforce resting time based on the end_minute and the unsanctioned minutes that\n # need to be accounted for.\n end_minute = start_minute + toy_required_minutes\n # print(start_minute, end_minute)\n if unsanctioned == 0:\n if self.hrs.is_sanctioned_time(end_minute):\n #self.next_available_time = end_minute\n self.set_next_available_working_time(start_available_working_time+datetime.timedelta(minutes=end_minute-start_minute))\n else:\n #self.next_available_time = self.hrs.next_sanctioned_minute(end_minute)\n self.set_next_available_working_time(start_available_working_time+datetime.timedelta(minutes=self.hrs.next_sanctioned_minute(end_minute)-start_minute))\n else:\n #self.next_available_time = self.hrs.apply_resting_period(end_minute, unsanctioned)\n self.set_next_available_working_time(start_available_working_time+datetime.timedelta(minutes=self.hrs.apply_resting_period(end_minute, unsanctioned)-start_minute))\n\n # Mise à jour productivité\n self.rating = max(0.25,\n min(4.0, self.rating * (self.rating_increase ** (sanctioned/60.0)) *\n (self.rating_decrease ** (unsanctioned/60.0))))\n\n # Ecriture du jouet\n # print(toy)\n tt = start_available_working_time\n # print \"tt : %s\" % tt\n time_string = \" \".join([str(tt.year), str(tt.month), str(tt.day), str(tt.hour), str(tt.minute)])\n wcsv.writerow([toy.id, self.id, time_string, toy_required_minutes])\n\n # print(self)\n # print(self.get_next_available_working_time())\n\n def set_rating(self, rating):\n \"\"\"Met à jour manuellement le rating de l'elfe\"\"\"\n self.rating = rating\n\n def apply_strategy_for(self, thetoypool, theelfpool, wcsv):\n \"\"\"Procedure la plus complexe, applique la stratégie de l'elfe selectionné pour un toypool et un elfpool donné\"\"\"\n # Si rien on sort\n if len(thetoypool) == 0:\n return\n\n # print(self)\n # Recupération d'un jouet au hasard dans le toy pool que l'elfe pourrai faire\n toy = thetoypool.get_random_toy_for_elf(self)\n # print(len(thetoypool), len(theelfpool))\n # print(toy)\n\n # Cas 1 : L'elfe dispose d'assez de temps pour réaliser le jouet dans la journée\n if(self.will_finish_toy_in_sanctionned_hours(toy)):\n 
self.make_toy(toy, wcsv)\n else:\n # Cas 2 : L'elfe ne dispose d'assez de temps pour réaliser le jouet dans la journée\n while True:\n if not thetoypool.toy_left_for_elf(self):\n self.make_toy(toy, wcsv)\n break\n\n short_toy = thetoypool.get_next_short_toy_for(self)\n\n if self.will_finish_toy_in_sanctionned_hours(short_toy):\n self.make_toy(short_toy, wcsv)\n else:\n self.make_toy(short_toy, wcsv)\n self.make_toy(toy, wcsv)\n break\n\n def set_next_available_working_time(self, thetimestamp):\n \"\"\"Mets à jour manuellement le working time\"\"\"\n self.next_available_working_time = thetimestamp\n self.next_available_time = int(((thetimestamp-self.__time_base).total_seconds())/60)\n\n def get_next_available_working_time(self):\n \"\"\"Recupere le prochain timestamp de disponibilite de l'elfe\"\"\"\n return self.next_available_working_time\n\n def will_finish_toy_in_sanctionned_hours(self, toy):\n \"\"\"Le jouet va-t-il être fini dans les heures ouvrées\"\"\"\n elf_working_timestamp = self.get_next_available_working_time()\n \n toy_duration = toy.get_duration()\n\n toy_required_minutes = int(math.ceil(toy_duration / self.rating))\n\n if toy_required_minutes > 600:\n return False\n else:\n next_elf_working_timestamp = elf_working_timestamp + datetime.timedelta(minutes=toy_required_minutes)\n if next_elf_working_timestamp.date() > elf_working_timestamp.date():\n return False\n else:\n if next_elf_working_timestamp.hour == 19 and next_elf_working_timestamp.minute == 0:\n return True\n elif next_elf_working_timestamp.hour < 19:\n return True\n else:\n return False\n\n def update_elf(self, hrs, toy, start_minute, duration):\n \"\"\" Updates the elf's productivity rating and next available time based on last toy completed.\n :param hrs: Hours object for bookkeeping\n :param toy: Toy object for the toy the elf just finished\n :param start_minute: minute work started\n :param duration: duration of work, in minutes\n :return: void\n \"\"\"\n self.update_next_available_minute(hrs, start_minute, duration)\n self.update_productivity(hrs, start_minute, int(math.ceil(toy.duration / self.rating)))\n\n def update_next_available_minute(self, hrs, start_minute, duration):\n \"\"\" Apply the resting time constraint and determine the next minute when the elf can work next.\n Here, elf can only start work during sanctioned times\n :param start_minute: time work started on last toy\n :param duration: duration of work on last toy\n :return: void\n \"\"\"\n sanctioned, unsanctioned = hrs.get_sanctioned_breakdown(start_minute, duration)\n\n # enforce resting time based on the end_minute and the unsanctioned minutes that\n # need to be accounted for.\n end_minute = start_minute + duration\n if unsanctioned == 0:\n if hrs.is_sanctioned_time(end_minute):\n self.next_available_time = end_minute\n else:\n self.next_available_time = hrs.next_sanctioned_minute(end_minute)\n else:\n self.next_available_time = hrs.apply_resting_period(end_minute, unsanctioned)\n\n def update_productivity(self, hrs, start_minute, toy_required_minutes):\n \"\"\" Update the elf's productivity rating based on the number of minutes the toy required that were\n worked during sanctioned and unsanctioned times.\n max(0.5,\n min(2.0, previous_rating * (self.rating_increase ** sanctioned_hours) *\n (self.rating_decrease ** unsanctioned_hours)))\n :param hrs: hours object\n :param start_minute: minute work started\n :param toy_required_minutes: minutes required to build the toy (may be different from minutes elf worked)\n :return: void\n \"\"\"\n # 
number of required minutes to build toy worked by elf, broken up by sanctioned and unsanctioned minutes\n sanctioned, unsanctioned = hrs.get_sanctioned_breakdown(start_minute, toy_required_minutes)\n self.rating = max(0.25,\n min(4.0, self.rating * (self.rating_increase ** (sanctioned/60.0)) *\n (self.rating_decrease ** (unsanctioned/60.0))))\n\n\nclass ElfTest(unittest.TestCase):\n\n def setUp(self):\n self.elf_productivity_1 = Elf(1)\n self.elf_productivity_2 = Elf(2)\n self.elf_productivity_3 = Elf(3)\n self.elf_productivity_2.set_rating(2)\n\n soln_file = os.path.join(os.getcwd(), 'test.csv')\n self.wcsv = csv.writer(open(soln_file, \"wb\"))\n\n\n def test_get_available_time(self):\n elf = Elf(1)\n self.assertEqual(elf.get_next_available_working_time(), datetime.datetime(2014,1,1,9,0,0))\n elf.set_next_available_working_time(datetime.datetime(2014, 1, 1, 11, 40))\n self.assertEqual(elf.get_next_available_working_time(), datetime.datetime(2014,1,1,11,40,0))\n\n\n def test_make_toy(self):\n elf1 = Elf(1, datetime.datetime(2014, 1, 1, 9, 0, 0))\n elf2 = Elf(2, datetime.datetime(2014, 1, 1, 9, 0, 0))\n elf3 = Elf(3, datetime.datetime(2014, 1, 1, 18, 59, 0))\n\n elf2.set_rating(2)\n\n toy1 = Toy(1, \"2014 1 1 0 0\", 600)\n toy2 = Toy(2, \"2014 1 1 0 0\", 600)\n toy3 = Toy(3, \"2014 1 1 0 0\", 1)\n toy4 = Toy(4, \"2014 1 1 0 0\", 2)\n\n elf1.make_toy(toy1, self.wcsv)\n self.assertEquals(elf1.get_next_available_working_time(), datetime.datetime(2014, 1, 2, 9, 0, 0))\n\n elf1.make_toy(toy3, self.wcsv)\n self.assertEquals(elf1.get_next_available_working_time(), datetime.datetime(2014, 1, 2, 9, 1, 0))\n\n elf2.make_toy(toy2, self.wcsv)\n self.assertEquals(elf2.get_next_available_working_time(), datetime.datetime(2014, 1, 1, 14, 0, 0))\n\n elf3.make_toy(toy4, self.wcsv)\n self.assertEquals(elf3.get_next_available_working_time(), datetime.datetime(2014, 1, 2, 9, 1, 0))\n\n def test_will_finish_toy_in_sanctionned_hours(self):\n toy1 = Toy(1, \"2014 1 1 0 0\", 600)\n toy2 = Toy(1, \"2014 1 1 0 0\", 601)\n\n self.assertTrue(self.elf_productivity_1.will_finish_toy_in_sanctionned_hours(toy1))\n self.assertFalse(self.elf_productivity_1.will_finish_toy_in_sanctionned_hours(toy2))\n\n self.assertTrue(self.elf_productivity_2.will_finish_toy_in_sanctionned_hours(toy1))\n self.assertTrue(self.elf_productivity_2.will_finish_toy_in_sanctionned_hours(toy2))\n\n def test_tick_to_next_minute(self):\n\n elf = Elf(1, datetime.datetime(2014, 1, 1, 18, 58, 0))\n\n self.assertEquals(elf.get_next_available_working_time(), datetime.datetime(2014, 1, 1, 18, 58))\n\n elf.tick_to_next_minute()\n self.assertEquals(elf.get_next_available_working_time(), datetime.datetime(2014, 1, 1, 18, 59))\n\n elf.tick_to_next_minute()\n self.assertEquals(elf.get_next_available_working_time(), datetime.datetime(2014, 1, 2, 9, 0))\n\n elf.tick_to_next_minute()\n self.assertEquals(elf.get_next_available_working_time(), datetime.datetime(2014, 1, 2, 9, 1))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"my_solution_first/elf.py","file_name":"elf.py","file_ext":"py","file_size_in_byte":11696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"375013718","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n# Create your models here.\n# How the data gets displayed\n# User already has a lot built in, so this app takes a different approach from the other apps.\n\n# It is worth digging through the source to see what AbstractUser actually provides.\n\n\nclass User(AbstractUser): # AbstractUser already holds the basic info such as name, email, etc.\n \"\"\" Custom user model \"\"\"\n # Add the attributes we want, e.g. bio, gender\n # Whatever we declare here, Django will turn into a form on its own and, together with a migration,\n # ask the database for the information the form needs.\n\n GENDER_MALE = \"male\"\n GENDER_FEMALE = \"female\"\n GENDER_OTHER = \"other\"\n# customizing the CharField\n GENDER_CHOICES = (\n (GENDER_MALE, \"Male\"),\n (GENDER_FEMALE, \"Female\"),\n (GENDER_OTHER, \"Other\"),\n ) # This is only a form concern, so it does not change the database\n # -> no migration needed.\n\n LANGUAGE_ENGLISH = \"en\"\n LANGUAGE_KOREAN = \"ko\"\n\n LANGUAGE_CHOICES = (\n (LANGUAGE_ENGLISH, \"English\"), # (value stored in the database, value shown in the form)\n (LANGUAGE_KOREAN, \"Korean\"),\n )\n\n CURRENCY_USD = \"usd\"\n CURRENCY_KRW = \"krw\"\n\n CURRENCY_CHOICES = (\n (CURRENCY_USD, \"USD\"),\n (CURRENCY_KRW, \"KRW\"),\n )\n\n # Add the fields; see the Django documentation for the available field types\n bio = models.TextField(default=\"\")\n # The database does not know about bio yet.\n # -> a migration is needed: create the migration, then migrate\n # -> the bio field then shows up on the add-user page!\n\n # default is needed because already-existing users must also get some value\n # requires Pillow to be installed.\n avatar = models.ImageField(blank=True, upload_to=\"avatars\") # upload_to stores the photos under the given folder, which keeps things easy to organize\n gender = models.CharField(\n choices=GENDER_CHOICES,\n max_length=10, \n blank=True,\n ) # It is fine for this to be empty.\n \n # There are DateTime and Date fields.\n birthdate = models.DateField(blank=True, null=True)\n language = models.CharField(\n blank=True,\n choices = LANGUAGE_CHOICES,\n max_length=2,\n )\n currency = models.CharField(\n blank=True,\n choices = CURRENCY_CHOICES,\n max_length=3,\n )\n superhost = models.BooleanField(blank=True, null=True)\n\n def __str__(self):\n return self.username # originally self.username\n\n # This model still has to be hooked up to the admin.. why?\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"183648162","text":"from collections import ChainMap\nfrom train import *\n#Importers parameters\ncoins = sorted(['BTC', 'USDT'], key=str.lower)\nmark_met_struct = {'name': 'sin',\n 'coins': coins,\n 'in_metrics': ['last'],\n 'out_metrics': ['last']} #Remember out is averaged as we want one thing to plot on pred graph atm! 
Can be changed\nbiz_csv_struct = {'name': 'biz', 'time': 'timestamp', 'score': 'replies', 'text': 'comment'}\nhomo_tens_struct = {'n_tpi': 3, 'n_wpt': 8, 'n_words': 128}\nred_csv_struct = {'name': 'rd', 'time': 'timestamp', 'score': 'score', 'text': 'title'}\nyt_csv_struct = {'name': 'yt', 'time': 'timestamp', 'score': 'viewCount', 'text': 'title'}\n\n#Data parameters\n\ntime_ctrl_params = {\n 'start_dt': '18/03/21 22:00',\n 'end_dt': '18/04/05 22:00',\n 'resolution': 60*2*1000 #Note 4 instead of 2 due to missing data\n }\n\ntime_ctrl_params = {\n 'start_dt': '18/01/01 00:00',\n 'end_dt': '18/01/08 00:00',\n 'resolution': 60*2*1000\n }\n\ntext_import_params = {\n # 'biz': (biz_csv_struct, homo_tens_struct),\n # 'red': (red_csv_struct, homo_tens_struct),\n #'yt': (yt_csv_struct, homo_tens_struct)\n }\ndata_params = {\n 'sample_len': 256,\n 'pred_len': 1\n}\n\nimporter_ens_paras = {'time_ctrl_params': time_ctrl_params,\n 'market_met_struct': mark_met_struct,\n 'text_import_params': text_import_params}\n\ndataset_ctrl_params = dict(ChainMap(data_params, importer_ens_paras))\n\n# Create dataset_controller\n\n#Graph parameters\nbasic_sent_params = {'input_type': 'TextImporter',\n 'tens_struct': homo_tens_struct,\n 'n_sentiments': 1,\n 'embedding_out_dim': 16,\n 'conv_shape': [32, 32],\n 'dense_shape': [64]\n }\n\nytonly_params = {\n 'ana_layer': {\n 'main_shape': [2, 2],\n 'dense_shape': [64]\n },\n 'input_layers':{\n 'MarketIn': {'input_type': 'MarketImporter'},\n # 'Biz': basic_sent_params,\n #'red': basic_sent_params,\n #'yt': basic_sent_params\n },\n 'output_layer': 'prediction_out',\n 'trainable_layers': ['yt', 'prediction_out', 'analysis'],\n 'saved_layers':['yt'],\n 'name': 'graph conv residual graph dropout yt only',\n 'n_checkpoints': 5,\n 'n_epochs': 3,\n 'batch_size': 64,\n 'learning_rate': 0.00001\n}\n\nredonly_params = {\n 'ana_layer': {\n 'main_shape': [2, 2],\n 'dense_shape': [64]\n },\n 'input_layers': {\n # 'MarketIn': {'input_type': 'MarketImporter'},\n # 'Biz': basic_sent_params,\n 'red': basic_sent_params,\n # 'Yt': basic_sent_params\n },\n 'output_layer': 'prediction_out',\n 'trainable_layers':['red', 'prediction_out', 'analysis'],\n 'saved_layers':['red', 'prediction_out'],\n 'name': 'graph conv residual graph dropout red only',\n 'n_checkpoints': 5,\n 'n_epochs': 3,\n 'batch_size': 64,\n 'learning_rate': 0.0001\n}\n\nfinetune_params = {\n 'ana_layer': {\n 'main_shape': [32, 64, 128],\n 'dense_shape': [64, 64]\n },\n 'input_layers':{\n 'MarketIn': {'input_type': 'MarketImporter'},\n # 'Biz': basic_sent_params,\n # 'red': basic_sent_params,\n # 'yt': basic_sent_params\n },\n 'trainable_layers':['analysis', 'prediction_out'],\n 'saved_layers': ['analysis'], #['analysis', 'red', 'yt'],\n 'output_layer': 'prediction_out',\n\n 'name': 'proper finetune',\n 'n_checkpoints': 5,\n 'n_epochs': 20,\n 'batch_size': 1,\n 'learning_rate': 0.01\n}\n\ntrading_params_no_fee = {\n 'ana_layer': {\n 'main_shape': [16, 32, 64]\n },\n 'input_dif': 'log_dif',\n 'input_layers':{\n 'MarketIn': {'input_type': 'MarketImporter'},\n # 'Biz': basic_sent_params,\n # 'red': basic_sent_params,\n #'yt': basic_sent_params\n },\n 'trainable_layers': ['analysis', 'trading_out'],\n 'saved_layers': [],\n 'output_layer': 'trading_out',\n 'tx_fee': 0.,\n\n 'name': 'no fees',\n 'n_checkpoints': 1,\n 'n_epochs': 10,\n 'batch_size': 1,\n 'learning_rate': 0.001\n}\n\ntrading_params_small_fee = {\n 'ana_layer': {\n 'main_shape': [16, 32, 64]\n },\n 'input_dif': 'log_dif',\n 'input_layers':{\n 'MarketIn': 
{'input_type': 'MarketImporter'},\n # 'Biz': basic_sent_params,\n # 'red': basic_sent_params,\n #'yt': basic_sent_params\n },\n 'trainable_layers': ['analysis', 'trading_out'],\n 'saved_layers': [],\n 'output_layer': 'trading_out',\n 'tx_fee': 0.0002,\n\n 'name': '0.0002 fees',\n 'n_checkpoints': 1,\n 'n_epochs': 10,\n 'batch_size': 1,\n 'learning_rate': 0.001\n}\n\ntrading_params_medium_fee = {\n 'ana_layer': {\n 'main_shape': [16, 32],\n 'skip_size': 32\n },\n 'input_dif': 'log_dif',\n 'input_layers': {\n 'MarketIn': {'input_type': 'MarketImporter'},\n # 'Biz': basic_sent_params,\n # 'red': basic_sent_params,\n #'yt': basic_sent_params\n },\n 'trainable_layers': ['analysis', 'trading_out'],\n 'saved_layers': [],\n 'output_layer': 'trading_out',\n\n 'tx_fee': 0.0025,\n 'entropy': 0.1,\n 'discount_factor': 0.99,\n 'n_prev_actions': 8,\n\n 'name': 'rev 0 txfee 0 ent 0.3 df 128 bs 0.0001 lr',\n 'n_checkpoints': 1,\n 'n_epochs': 2000,\n 'batch_size': 512,\n 'learning_rate': 0.001\n}\ntrading_medium_fee = LayerConfigTree()\ntrading_medium_fee.update(trading_params_medium_fee)\n\n# trading_small_fee = LayerConfigTree(parent=trading_medium_fee)\n# trading_small_fee.update(trading_params_small_fee)\n#\n# trading_no_fee = LayerConfigTree(parent=trading_small_fee)\n# trading_no_fee.update(trading_params_no_fee)\n\nsess = TrainingSession(dataset_ctrl_params, trading_medium_fee)\nsess.run(download=True)","sub_path":"training_test.py","file_name":"training_test.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"651803989","text":"import win32com\r\nimport win32com.client\r\nfrom constants import WdColorIndex\r\n\r\nword = win32com.client.gencache.EnsureDispatch('Word.Application')\r\ndoc = word.Documents.Open(r'D:\\pycharmspace\\py4office\\c.docx')\r\nrange1 = doc.Range(doc.Paragraphs(2).Range.Start, doc.Paragraphs(3).Range.End)\r\n# range1.HighlightColorIndex = win32com.client.constants.wdYellow # 替换背景颜色为绿色\r\nrange1.HighlightColorIndex = WdColorIndex.wdYellow.value\r\nrange1.Select()\r\nword.Selection.Font.Bold = True\r\n\r\ndoc.Close()\r\nword.Quit()\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"281803326","text":"# Test Complex situation: three clients\n# (1) Target: packets of two go through proxy, one to server directly\n# (2) Contrast: packets of three clients go through proxy\n# Draw the packets received by server\n\nimport collections\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\n\n\ndef load_file(file):\n with open(file, \"r\") as f:\n lines = f.read()\n f.close()\n lines = json.loads(lines)\n return lines\n\n\n# Proxy Efficiency\ndata = load_file(\"./EfficiencyTest1-1.txt\")\ndata = collections.OrderedDict(data)\nlabels = data[\"labels\"]\nserver = data[\"without proxy\"]\nproxy = data[\"with proxy\"]\n\nx = np.arange(len(labels)) # the label locations\nwidth = 0.35 # the width of the bars\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(x - width/2, server, width, label='Without Proxy')\nrects2 = ax.bar(x + width/2, proxy, width, label='With Proxy')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_xlabel('Number of Packets of Each Client')\nax.set_ylabel('Number of Packets Received By Server')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\nax.grid(True)\n\nax.bar_label(rects1, 
padding=3)\nax.bar_label(rects2, padding=3)\n\nfig.tight_layout()\n\nplt.show()\n","sub_path":"Test/EfficiencyTest1-1.py","file_name":"EfficiencyTest1-1.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"557857644","text":"from database.db_models import *\nfrom database.db_conn import connect\nfrom datetime import datetime\n\n\ndef set_content():\n specialization_a = Specialization(name='product')\n session.add(specialization_a)\n session.commit()\n\n specialization_b = Specialization(name='service')\n session.add(specialization_b)\n session.commit()\n\n for num in range(1, 15):\n company = Company(name='adidas' + str(num),\n id_specialization=1)\n session.add(company)\n session.commit()\n\n # for num in range(1, 3):\n # case_study = CaseStudy(name='bezecke tenisky' + str(num),\n # id_company=1,\n # description='svetoznama firma v oblasti sportu',\n # motivation='bezci nemaju vhodnu obuv, ktora ....',\n # unique_value='je vynimocny...',\n # revenue=100 * num,\n # employees_num=1 * num\n # )\n # session.add(case_study)\n # session.commit()\n\n for num in range(1, 20):\n user = User(email='user@user.com', name='admin', id_google='123456', created_at=datetime.now())\n session.add(user)\n session.commit()\n\n for num in range(1, 20):\n profile = UserProfile(id_user=num, name='Jozef' + str(num))\n session.add(profile)\n session.commit()\n\n for num in range(1, 20):\n for sub_num in range(1, 3):\n project = UserProject(id_user=num,\n name='project' + str(sub_num),\n description='description project' + str(sub_num),\n specialization='service')\n session.add(project)\n session.commit()\n\n # stages = [\"Partners\", \"Activities\", \"Resources\", \"Customer relationships\",\n # \"Channels\", \"Value Proposition\", \"Customer segments\", \"Cost structure\", \"Revenue streams\"]\n\n stages = ['Zákazníci',\n 'Problém',\n 'Unikátnosť produktu',\n 'Riešenie',\n 'Marketing',\n 'Náklady',\n 'Kľúčové metriky',\n 'Zdroj príjmov',\n 'Neférová výhoda'\n ]\n\n for num in range(len(stages)):\n question = BusinessModelStage(name=stages[num])\n session.add(question)\n session.commit()\n\n for num in range(1, 9):\n for sub_num in range(1, 4):\n question = ProjectQuestion(id_stage=num,\n question_text='stage' + str(num) + ' otazka' + str(sub_num),\n order=sub_num,\n help='v napovede ....')\n session.add(question)\n session.commit()\n\n questions = session.query(ProjectQuestion)\n id_project = 1\n\n for question in questions:\n new_project = ProjectAnswer(id_project=id_project,\n id_question=question.id,\n answer_text='odpoved pre projekt ' + str(id_project) +\n ' stage ' + str(question.id_stage) +\n ' otazka ' + str(question.id))\n session.add(new_project)\n session.commit()\n\n\nif __name__ == '__main__':\n db_conn = connect()\n\n Session = sessionmaker(db_conn)\n session = Session()\n set_content()\n\n # Delete\n # users = session.query(User)\n # for user in users:\n # session.delete(user)\n # session.commit()\n #\n # profiles = session.query(UserProfile)\n # for profile in profiles:\n # session.delete(profile)\n # session.commit()\n\n # print('-----------users-----------------')\n # # users = session.query(User).order_by(desc(User.id))\n # users = session.query(User).order_by(desc(User.id))\n #\n # for user in users.filter(User.id == 5):\n # print(user)\n # print('\\t' + user.profile.name)\n # print('---his projects')\n # for project in user.projects:\n # print(project)\n #\n # 
print('-----------user_profiles-----------------')\n # profiles = session.query(UserProfile)\n # for profile in profiles:\n # print(profile)\n # print('\\t' + profile.user.email)\n\n # Update\n # user_a.email = \"admin@admin.sk\"\n # session.commit()\n","sub_path":"source/flask_backend/database/db_content.py","file_name":"db_content.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"413904977","text":"\nfrom array import array\n\n# New int array.\na = array(\"i\")\n\n# Append three integers.\na.append(100)\na.append(200)\na.append(300)\n\n# Insert an integer at index 1.\na.insert(1, 900)\n\n# Remove this element.\na.remove(200)\n\n# Count elements with this value.\ni = a.count(900)\n\nprint(i)\n\n# Print.\nprint(a)\n","sub_path":"python-programming-workshop/array/2.array_insert.py","file_name":"2.array_insert.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"497145632","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# TODO:\n# \n# \n# R1\n# -\tget the Nyquist plot axis dimensions issue when $k=1$ fixed\n# -\tfigure out the failing of .pz with active elements\n# \n# \n# R2\n# -\tmake the frequency analysis stuff happen\n# \n\n# In[1]:\n\n\nfrom skidl.pyspice import *\n#can you say cheeky \nimport PySpice as pspice\n#because it's written by a kiwi you know\nimport lcapy as kiwi\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sympy as sym\n\nfrom scipy.signal import zpk2tf as scipy_zpk2tf\n\n\nfrom IPython.display import YouTubeVideo, display\n\nimport traceback\nimport warnings\n\n\n# In[2]:\n\n\n#import dc code from parallel folder\nimport sys\nsys.path.insert(1, '../DC_1/')\nfrom DC_1_Codes import get_skidl_spice_ref, easy_tf\n\nfrom AC_2_Codes import *\n\nsym.init_printing()\n\n#notebook specific loading control statements \nget_ipython().run_line_magic('matplotlib', 'inline')\n#tool to log notebook internals\n#https://github.com/jrjohansson/version_information\nget_ipython().run_line_magic('load_ext', 'version_information')\nget_ipython().run_line_magic('version_information', 'skidl, PySpice,lcapy, sympy, numpy, matplotlib, pandas, scipy')\n\n\n# # What is PZ analysis\n# \n# Pole-Zero analysis (.pz) does exactly what it says it does. It analyzes the circuit between two ports and returns all the poles and/or zeros between the ports, and that's it. That means with the resulting poles and zeros we can reconstruct the transfer function between the ports up to the gain term, that is $$H(s)_{true}\\propto \\dfrac{\\prod_n (s-a_n)}{\\prod_m (s-b_m)}$$ where $a_n$ are the zeros and $b_n$ are the poles. Again, this can't be stressed enough: .pz does not recover the \"gain\" term $K$ that would turn the proportionality into an equality.\n# \n# So what use is it? Well, that depends on what stage of the design cycle you're in and what is being analyzed. For RLC elements, it's just going to tell us the poles and zeros, where we know that the poles and zeros do not move. But for active devices such as BJTs, FETs, etc. we could cycle the .pz analysis and see how the .pz locations move with the bias. And while .pz is limited from a verification standpoint, seeing as it will just confirm the pole-zero locations that should have been set in the design stage, it can be of use when performing reverse engineering on an unknown circuit. Further, during the design stage or when analyzing an unknown circuit, the lack of $K$ is not a total handicap. Since even without $K$ we can perform root-locus analysis, or we can sweep $K$ while comparing the resulting transfer function response to an .ac simulation to then determine $K$ when reverse engineering an unknown design.\n# \n# So then let's go ahead and start looking at .pz and what we can do with that data.\n# \n
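\n# As a quick aside to drive that proportionality home (an illustrative sketch, not part of this chapter's workflow; the pole, zero, and gain values below are made up): two systems that share the same poles and zeros but have different $K$ values produce the identical pole-zero \"fingerprint\" that .pz reports, which is exactly why .pz alone cannot tell them apart\n\n# In[ ]:\n\n\n#hypothetical pole/zero set; any K gives the same pole-zero listing\nfor K_demo in [1, 42]:\n    H_demo=kiwi.zp2tf([-2], [-1, -3], K=K_demo)\n    print(f'K={K_demo}: zeros {H_demo.zeros()} poles {H_demo.poles()}')\n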
\n# # PZ analysis of an RC lowpass filter\n# \n# For this first use case, we will use the RC lowpass filter that we developed in the last section based on the work of ALL ABOUT ELECTRONICS\n\n# In[3]:\n\n\n#instantiate the rc_lowpass filter to \nlowpassF=rc_lowpass(C_value=.1@u_uF, R_value=1@u_kOhm)\nlowpassF.lcapy_self()\n\n\n# Let's then get the voltage transfer function for this filter topology\n\n# In[4]:\n\n\nH_rcl_gen=lowpassF.get_tf(with_values=False); H_rcl_gen\n\n\n# The voltage transfer function for this topology shows that it has a single pole and the following gain term $K$\n\n# In[5]:\n\n\nH_rcl_gen.K\n\n\n# Real quick, how lcapy gets the full voltage transfer function is based on:\n# \n# 1.\tGenerating the symbolic Modified Nodal Analysis in the Laplace domain\n# \n# 2.\textracting the so-called two-port admittance parameters $Y$ from the Modified Nodal Analysis matrix\n# \n# 3.\tfinding the port 1 to port 2 voltage transfer function via $$H_v(s)=\\dfrac{Y_{21}}{Y_{22}}$$ which is just one way of doing it. More on two-port network theory and SPICE acquisition will be shown in the remaining sections of this chapter.\n# \n# Let's now get the transfer function for this instance of the rc lowpass topology and isolate its $K$ term\n# \n\n# In[6]:\n\n\nH_rcl=lowpassF.get_tf(with_values=True, ZPK=True); H_rcl\n\n\n# In[7]:\n\n\n#K should always be real or we're in trouble\nK_rcl=np.real(H_rcl.K.cval); K_rcl\n\n\n# As with any SPICE simulation we have to instantiate our DUT in a circuit. However, unlike DC's .tf, we do not actually have to have any supplies; but since we are also going to be comparing the .ac simulation to what .pz yields, we need a full circuit with a source to perform the .ac simulation\n\n# In[8]:\n\n\nreset()\n#create the nets\nnet_in=Net('In'); net_out=Net('Out'); \n\n#create a 1V AC test source and attach to nets\nvs=SINEV(ac_magnitude=1@u_V, dc_offset=5@u_V); vs['p', 'n']+=net_in, gnd\n\n#attach term_0 to net_in and term_2 to net_out per scikit-rf convention; all \n#other terminals are grounded\nlowpassF.SKiDl(net_in, gnd, net_out, gnd)\n\ncirc=generate_netlist()\nprint(circ)\n\n\n# In[9]:\n\n\nfilter_responce=qfilter_explorer(circ, 'RC Low Pass Filter Response');\n\n\n# In[10]:\n\n\n#this is the full filter tf response in comparison to the .ac sim\nfilter_responce.symbolic_tf(lowpassF)\n\n\n# ## .tf does not get $K$\n# \n# Let's be clear about this: .tf will not yield $K$ in the generic case. It might get lucky, but recalling that in DC simulations capacitors are treated as nonexistent elements, there is no way that .tf will recover the $K$ for this topology where $K=\\dfrac{1}{RC}$\n# \n\n# In[11]:\n\n\ntf=easy_tf(circ)\n\n\n# In[12]:\n\n\ntf.dc_voltage_gain(vs, node(net_out), node(gnd))\ntf.vg_results\n\n
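\n# As a quick numeric sanity check on that claim (plain arithmetic, not a simulation): for the component values used in this instance of the topology, $K=1/(RC)$, which is also the magnitude of the pole location $-1/(RC)$\n\n# In[ ]:\n\n\n#numeric check of K=1/(RC) for the R=1kOhm, C=0.1uF values used above\nR_demo=1e3; C_demo=0.1e-6\nprint('K =', 1/(R_demo*C_demo), '; expected pole at', -1/(R_demo*C_demo), 'rad/s')\n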
\n# # PZ ease\n# \n# The following class, like the rest in this book, makes using the SPICE analysis easier and enhances it with the power of Python. But real quick let's look at the ngspice call for .pz (typically found in chapter 15 section 3)\n# \n# ```\n# .pz node1 node2 node3 node4 \n# ```\n# \n# This differs from .tf in DC analysis where we had to specify a source for the input; here instead the input port terminals are specified by `node1` & `node2`, which are the positive and negative terminals respectively. And similarly, the output port terminals are specified by `node3` & `node4`. Since .pz only requires the specification of the terminals to define the two-port network, we can take advantage of this to look at just the feedback (aka $\\beta$) network in circuits containing feedback structures.\n# \n# Following the node arguments is the transfer type argument, where if `vol` is used we are acquiring the poles and/or zeros of the voltage transfer function \n# \n# $$H_v(s)=\\dfrac{V_o}{V_i}$$\n# \n# else, if `cur` is used we are acquiring the transimpedance (aka transfer impedance) \n# $$H_F(s)=\\dfrac{V_o}{I_i}$$\n# , where again when using .pz we are only acquiring the poles and/or zeros that make up the respective transfer function, not the transfer function as a whole.\n# \n# Finally, the last argument `analysis_type` controls what we are acquiring from the .pz analysis. While typically we leave it as `pz` to get both the poles and zeros, there are times it might not be possible to get both, or the poles and zeros have to be acquired separately. In that case we can use `pol` to get just the poles and `zer` to get just the zeros\n# \n# Below, the class `pz_ease` is designed to perform the .pz analysis with additional methods to analyze the results. And in both its instantiation and in several of its methods, the value of $K$ can be fed into it if known.\n# \n
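\n# For reference, a stand-alone hand-written ngspice deck exercising the bare .pz card for the RC filter above might look like the following (an illustrative sketch; the node names mirror the SKiDL circuit, but this deck is not generated by, or used in, this notebook)\n\n# In[ ]:\n\n\n#hypothetical raw SPICE deck showing the .pz card syntax discussed above\nrc_pz_deck=\"\"\"* RC lowpass .pz demo\nV1 In 0 DC 5 AC 1\nR1 In Out 1k\nC1 Out 0 0.1u\n.pz In 0 Out 0 vol pz\n.end\n\"\"\"\nprint(rc_pz_deck)\n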
\n# In[13]:\n\n\n#%%writefile -a AC_2_Codes.py\n#chapter 2 section 4 pz_ease class\n#class to perform .pz simulations with a bit more grace \n#with some additional built-in analysis tools\n\nclass pz_ease(ac_representation_tool, eecomplex_plot_templets):\n def __init__(self, circ_netlist_obj, K=1.0):\n \"\"\"\n Class to perform Pole Zero (.pz) SPICE simulation with grace\n \n Args:\n circ_netlist_obj (pyspice.Spice.Netlist.Circuit): the Netlist circuit produced \n from SKiDl's `generate_netlist()`\n \n K (float/int; 1): the gain; must be manually put in or found from .tf analysis\n \n Returns: \n \n \"\"\"\n self.circ_netlist_obj=circ_netlist_obj\n \n assert (type(K)==float) or (type(K)==int), 'K must be a float or int'\n self.K=K\n \n \n #dict of allowed pz control statements\n self.allowed_control_statments={'voltage':'vol', 'current':'cur',\n 'pole-zero':'pz', 'zeros':'zer', 'poles':'pol'}\n\n \n def pz_def_ports(self, port_0_pos_term, port_0_neg_term, port_1_pos_term, port_1_neg_term, display_table=False):\n \"\"\"\n Method to set the Port terminals for the two-port section of the circuit under test\n where all inputs must be nodes in the circuit under test\n \n Terminals:\n port_0_pos_term, port_0_neg_term, port_1_pos_term, port_1_neg_term\n \n Port & Terminals are defined via:\n ```\n Left_Port - Two-Port Section under Test - Right_Port\n +-------------+ \n Positive Port0 port_0_pos_term-| DUT Section |-port_1_pos_term Positive Port1\n Negative Port0 port_0_neg_term-| |-port_1_neg_term Negative Port1\n +-------------+\n ```\n Args:\n display_table (bool; False): when true will display the generated `self.control_df` below\n this method call in a jupyter notebook like environment\n \n \n Returns:\n Settings are recorded in `self.control_df` rows: `'port_0_terms+-'` & `'port_1_terms+-'`\n \"\"\"\n \n assert port_0_pos_term in self.circ_netlist_obj.node_names, f'`{port_0_pos_term}` is not a node in the circuit under test'\n self.port_0_pos_term=port_0_pos_term\n \n assert port_0_neg_term in self.circ_netlist_obj.node_names, f'`{port_0_neg_term}` is not a node in the circuit under test'\n self.port_0_neg_term=port_0_neg_term\n \n assert port_1_pos_term in self.circ_netlist_obj.node_names, f'`{port_1_pos_term}` is not a node in the circuit under test'\n self.port_1_pos_term=port_1_pos_term\n \n assert port_1_neg_term in self.circ_netlist_obj.node_names, f'`{port_1_neg_term}` is not a node in the circuit under test'\n self.port_1_neg_term=port_1_neg_term\n \n #record the results in table\n self._build_control_table(display_table)\n \n \n def pz_mode_set(self, tf_type='voltage', pz_acu='pole-zero', display_table=False):\n \"\"\"\n Method to set the pole-zero analysis controls\n \n Args:\n tf_type (str; 'voltage'): the tf to which the poles and zeros are fit;\n if `voltage` the tf is of the form V_o/V_i, else if `current` it is of the form\n V_o/I_i\n \n pz_acu (str; 'pole-zero'): if `pole-zero` will attempt to get all the poles and zeros for the\n specified transfer function; else if `zeros` or `poles` will get just the respective zeros\n or poles \n \n display_table (bool; False): when true will display the generated `self.control_df` below\n this method call in a jupyter notebook like environment\n \n Returns:\n Settings are recorded in `self.control_df` rows: `'tf_type'` & `'acqui_mode'`\n \n \n \"\"\"\n assert tf_type in self.allowed_control_statments.keys(), f'`{tf_type}` is not `voltage` or `current`'\n self.tf_type=tf_type\n \n assert pz_acu in self.allowed_control_statments.keys(), f'`{pz_acu}` is not `pole-zero` or `poles` or `zeros`'\n self.pz_acu=pz_acu\n \n #record the results in table\n self._build_control_table(display_table)\n \n def _build_control_table(self, display_table=True):\n \"\"\"\n Internal method to build a pz control table to display pz simulation settings\n \n Args:\n display_table (bool; True): when true will display the generated `self.control_df` below\n this method call in a jupyter notebook like environment\n \n Returns:\n creates dataframe table `self.control_df` that records pz simulation controls;\n if `display_table` is true will force showing under jupyter notebook cell\n \n \"\"\"\n \n self.control_df=pd.DataFrame(columns=['value'], \n index=['tf_type', \n 'acqui_mode',\n 'port_0_terms+-',\n 'port_1_terms+-'\n ])\n if hasattr(self, 'tf_type'):\n self.control_df.at['tf_type']=self.tf_type\n \n if hasattr(self, 'pz_acu'):\n self.control_df.at['acqui_mode']=self.pz_acu\n \n if hasattr(self, 'port_0_pos_term') and hasattr(self, 'port_0_neg_term') :\n self.control_df.at['port_0_terms+-', 'value']=[self.port_0_pos_term, self.port_0_neg_term]\n \n if hasattr(self, 'port_1_pos_term') and hasattr(self, 'port_1_neg_term') :\n self.control_df.at['port_1_terms+-', 'value']=[self.port_1_pos_term, self.port_1_neg_term]\n \n self.control_df.index.name='pz_sim_control'\n \n if display_table:\n display(self.control_df)\n \n \n def do_pz_sim(self, display_table=False):\n \"\"\"\n Method to perform the pole-zero simulation based on values stored in self.control_df.\n If the simulation does not converge, will give a warning with a basic debug action\n but will set `self.pz_values` to an empty dict.\n \n TODO:\n - add simulation kwargs\n - flesh out exception handling\n \"\"\"\n \n attributes_to_check=['port_0_pos_term', 'port_0_neg_term', 
'port_1_pos_term', 'port_1_neg_term', \n 'tf_type', 'pz_acu']\n \n for i in attributes_to_check:\n if hasattr(self, i):\n pz_is_go=True\n else:\n pz_is_go=False\n warnings.warn(f'{i} has not been set; pole-zero simulation will not proceed until set')\n \n if pz_is_go:\n self.sim=self.circ_netlist_obj.simulator()\n #I can't catch the warning when it hangs so going to have to do this\n self.pz_values={}\n\n try:\n self.pz_values=self.sim.polezero(\n node1=self.port_0_pos_term, \n node2=self.port_0_neg_term, \n node3=self.port_1_pos_term, \n node4=self.port_1_neg_term, \n tf_type=self.allowed_control_statments[self.tf_type], \n pz_type=self.allowed_control_statments[self.pz_acu]\n )\n \n self._record_pz_results(display_table)\n \n except pspice.Spice.NgSpice.Shared.NgSpiceCommandError:\n self.pz_values={}\n warnings.warn(\"\"\"PZ analysis did not converge with the current setting:\n start by changing the tf type (self.tf_type) and pz acquisition type (self.pz_acu) \"\"\")\n \n \n def _record_pz_results(self, display_table=True):\n \"\"\"\n Internal method to record the PZ results to a dataframe\n \n Args:\n display_table (bool; True): when true will display the generated `self.control_df` below\n this method call in a jupyter notebook like environment\n \n Returns:\n creates dataframe table `self.pz_results_DF` that records pz simulation results;\n if `display_table` is true will force showing under jupyter notebook cell\n \n \"\"\"\n self.pz_results_DF=pd.DataFrame(columns=['Type', 'Values'])\n \n if hasattr(self.pz_values, 'nodes'):\n for k, v in self.pz_values.nodes.items():\n self.pz_results_DF.at[len(self.pz_results_DF)]=k, v.as_ndarray()[0]\n \n if display_table:\n display(self.pz_results_DF)\n \n \n def get_pz_sym_tf(self, dec_round=None, overload_K=None):\n \"\"\"\n Method to get the symbolic transfer function via lcapy\n \n Args:\n dec_round (int; None): control for `np.around`'s `decimals` argument;\n if left `None` np.around will not be used\n \n overload_K (float/int; None): if not `None` will overload the DC\n gain constant stored in `self.K`\n \n Returns:\n if `self.pz_results_DF` exists, returns the symbolic transfer function in the s\n domain in `self.sym_tf`\n \"\"\"\n if overload_K!=None:\n assert (type(overload_K)==float) or (type(overload_K)==int), 'K must be a float or int'\n self.K=overload_K\n \n if hasattr(self, 'pz_results_DF')!=True:\n warnings.warn('no poles/zeros recorded; run `self.do_pz_sim`')\n else:\n zeros_B=np.empty(0)\n poles_A=np.empty(0)\n for index, row in self.pz_results_DF.iterrows():\n if 'zero' in row['Type']:\n zeros_B=np.hstack((zeros_B, row['Values']))\n elif 'pole' in row['Type']:\n poles_A=np.hstack((poles_A, row['Values']))\n \n if dec_round!=None:\n zeros_B=np.around(zeros_B, dec_round)\n poles_A=np.around(poles_A, dec_round)\n \n self.zeros_B=zeros_B; self.poles_A=poles_A\n #wish I didn't have to do this\n zeros_B=zeros_B.tolist(); poles_A=poles_A.tolist() \n \n #use lcapy to get the symbolic tf \n self.sym_tf=kiwi.zp2tf(zeros_B, poles_A, K=self.K)\n #use simplify because if in pzk it does weird things with j that\n #lambdify has issues with\n self.sym_tf=self.sym_tf.simplify()\n \n def plot_pz_loc(self, ax=None, title='', unitcircle=False):\n \"\"\"\n uses lcapy's `plot_pole_zero` in https://github.com/mph-/lcapy/blob/6e42983d6b77954e694057d61045bd73d17b4616/lcapy/plot.py#L12\n to plot the poles and zero locations on a Nyquist chart\n \n Args:\n axs (list of matplotlib axis; None): If left None will create a new plot, else must \n be a list of 
matplotlib subplots axis to be added to, where the first entry\n will be the magnitude axis, and the second will be the phase axis\n \n title (str; ''): Subplot title string\n \n unitcircle (bool; False): when True will plot the unit circle on the resulting plot\n \n Returns:\n Returns a real-imag plot with the pole and zero map, and if an axis was passed to `ax` it will be modified\n with the pole-zero map\n \n \n \"\"\"\n axs=ax or plt.gca()\n if hasattr(self, 'sym_tf')==False:\n warnings.warn(\"\"\"Trying to get symbolic transfer function from `self.get_pz_sym_tf`\n thus you will get what you get\"\"\")\n self.get_pz_sym_tf()\n \n self.sym_tf.plot(axes=axs, unitcircle=unitcircle, \n #wish there were a better way to do this\n label=\"'X'=pole; 'O'=zero\")\n \n axs.axhline(0, linestyle='--', linewidth=2.0, color='black')\n axs.axvline(0, linestyle='--', linewidth=2.0, color='black')\n axs.set_xlabel('Real'); axs.set_ylabel('Imag')\n \n axs.legend()\n \n if title!='':\n title=' of '+title\n axs.set_title(f'Pole-Zero locations plot{title}');\n\n \n \n def plot_3d_laplce(self, title=''):\n \"\"\"\n Creates 3d plots of the Laplace space of the transfer function, one for the mag,\n the other for the phase in degrees unwrapped\n \n Args:\n title (str; ''): Subplot title string\n \n Returns:\n returns a 3d plot with the left subplot being the mag and the right being the phase\n \n TODO:\n - get the freaking color bar into a clean location when working with 3d plots\n - merge phase as color into mag; see the physics video by Eugene on Laplace \n \"\"\"\n if hasattr(self, 'sym_tf')==False:\n warnings.warn(\"\"\"Trying to get symbolic transfer function from `self.get_pz_sym_tf`\n thus you will get what you get\"\"\")\n self.get_pz_sym_tf()\n \n #import the additional matplotlib features for 3d\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib import cm\n \n #stole this off lcapy's plot_pole_zero\n #https://github.com/mph-/lcapy/blob/7c4225f2159aa33398dac481041ed538169b7058/lcapy/plot.py\n \n \n #check self.sym_tf is good to be used\n sys_tf_syms=self.sym_tf.symbols\n assert len(sys_tf_syms)==1 and ('s' in sys_tf_syms.keys()), 'transfer function must be in Laplace form and only have `s` as a free symbol'\n\n #lambdify the tf\n sys_tf_lam=sym.lambdify(kiwi.s, self.sym_tf.canonical(), 'numpy', dummify=False)\n\n #get the plot bounds\n #stole this off lcapy's plot_pole_zero\n #https://github.com/mph-/lcapy/blob/7c4225f2159aa33398dac481041ed538169b7058/lcapy/plot.py\n\n poles = self.sym_tf.poles()\n zeros = self.sym_tf.zeros()\n try:\n p = np.array([p.cval for p in poles.keys()])\n z = np.array([z.cval for z in zeros.keys()])\n except ValueError:\n raise TypeError('Cannot get poles and zeros of `self.sym_tf`')\n a = np.hstack((p, z))\n x_min = a.real.min()\n x_max = a.real.max()\n y_min = a.imag.min()\n y_max = a.imag.max()\n\n x_extra, y_extra = 3.0, 3.0\n\n # This needs tweaking for better bounds.\n if len(a) >= 2:\n x_extra, y_extra = 0.1 * (x_max - x_min), 0.1 * (y_max - y_min)\n if x_extra == 0:\n x_extra += 1.0\n if y_extra == 0:\n y_extra += 1.0\n\n x_min -= 0.5 * x_extra\n x_max += 0.5 * x_extra\n y_min -= 0.5 * y_extra\n y_max += 0.5 * y_extra\n\n #the input domain\n RealRange=np.linspace(x_min, x_max, 100); ImagRange=np.linspace(y_min, y_max, 100)\n sr, si=np.meshgrid(RealRange, ImagRange)\n s_num=sr+1j*si\n\n #plot this\n fig = plt.figure()\n \n #mag 3d plot\n ax3d_mag = fig.add_subplot(121, projection='3d')\n\n XmagPlot=ax3d_mag.plot_surface(sr, si, np.abs(sys_tf_lam(s_num)), alpha=0.5,\n cmap=cm.coolwarm, 
antialiased=False)\n ax3d_mag.set_xlabel(r'$\\sigma$'); ax3d_mag.set_ylabel(r'$j\\omega$'), ax3d_mag.set_zlabel(r'$|X|$')\n fig.colorbar(XmagPlot, shrink=0.5, aspect=5)\n\n #phase 3d plot\n ax3d_phase = fig.add_subplot(122, projection='3d')\n\n XphasePlot=ax3d_phase.plot_surface(sr, si, angle_phase_unwrap(sys_tf_lam(s_num)), alpha=0.5,\n cmap=cm.coolwarm, antialiased=False)\n ax3d_phase.set_xlabel(r'$\\sigma$'); ax3d_phase.set_ylabel(r'$j\\omega$'), ax3d_phase.set_zlabel(r'$ang(X)$')\n fig.colorbar(XphasePlot, shrink=0.5, aspect=5)\n\n plt.tight_layout()\n \n if title!='':\n title=' of '+title\n ax3d_mag.set_title(f'3D Mag Laplace plot{title}');\n ax3d_phase.set_title(f'3D Phase_deg Laplace plot{title}');\n\n \n \n def scipy_pzk(self, dec_round=None, overload_K=None):\n \"\"\"\n Method to generate the numerator (b) and denominator (a)\n arrays to use within the scipy signal framework, see \n https://docs.scipy.org/doc/scipy/reference/signal.html\n &\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.zpk2tf.html#scipy.signal.zpk2tf\n \n Args:\n dec_round (int; None): control for `np.around`'s `decimals` argument;\n if left `None` np.around will not be used\n \n overload_K (float/int; None): if not `None` will overload the DC\n gain constant stored in `self.K`\n \n Returns:\n if `self.pz_results_DF` exists, tries to return the coefficients for an lti\n filter for use in scipy's signal library in a dictionary in `self.scipy_tfcoef`\n \"\"\"\n if overload_K!=None:\n assert (type(overload_K)==float) or (type(overload_K)==int), 'K must be a float or int'\n self.K=overload_K\n \n if hasattr(self, 'pz_results_DF')!=True:\n warnings.warn('no poles/zeros recorded; run `self.do_pz_sim`')\n else:\n zeros_B=np.empty(0)\n poles_A=np.empty(0)\n for index, row in self.pz_results_DF.iterrows():\n if 'zero' in row['Type']:\n zeros_B=np.hstack((zeros_B, row['Values']))\n elif 'pole' in row['Type']:\n poles_A=np.hstack((poles_A, row['Values']))\n \n if dec_round!=None:\n zeros_B=np.around(zeros_B, dec_round)\n poles_A=np.around(poles_A, dec_round)\n \n b, a=scipy_zpk2tf(zeros_B, poles_A, self.K)\n self.scipy_tfcoef={'b':b, 'a':a}\n \n def get_sym_freq_resp(self, freq_vec=None):\n \"\"\"\n method to get the symbolic transfer function response \n \n Args:\n freq_vec (numpy array or pandas series; None): frequencies\n to generate results from the generated symbolic transfer function;\n if `None` will come up with a frequency vector inside via `np.logspace(-1, 12, 12*10*10)`\n \n Returns:\n will store the frequency vector as the index in `self.pz_symresults_DF`,\n where the response of the symbolic transfer function will be stored\n in `self.pz_symresults_DF['pzk_symres_[V]']`; from there will\n use the inheritance from `ac_representation_tool` to then \n generate `self.ac_sim_real_DF['pzk_symres_[V]'], self.ac_sim_imag_DF['pzk_symres_[V]']`, etc\n \n \"\"\"\n if hasattr(self, 'sym_tf')==False:\n warnings.warn(\"\"\"Trying to get symbolic transfer function from `self.get_pz_sym_tf`\n thus you will get what you get\"\"\")\n self.get_pz_sym_tf()\n \n #`is None` (not ==None) so that passing a numpy array works; also store a user-supplied freq_vec\n if freq_vec is None:\n self.freq_vec=np.logspace(-1, 12, 12*10*10)\n else:\n self.freq_vec=freq_vec\n \n self.pz_symresults_DF=pd.DataFrame(index=self.freq_vec)\n self.pz_symresults_DF.index.name='freq[Hz]'\n \n self.pz_symresults_DF['pzk_symres_[V]']=self.sym_tf.frequency_response(self.pz_symresults_DF.index.values).astype('complex')\n \n ac_representation_tool.__init__(self, self.pz_symresults_DF)\n \n #just make the reps\n self.make_real_imag()\n self.make_mag_phase()\n \n def 
plot_nyquist_with_pz(self, ax=None, title='', unitcircle=False):\n \"\"\"\n plotting utility to plot the pole-zero locations on top of a Nyquist\n plot of the response of the symbolic transfer function\n \n Args:\n ax (matplotlib axis; None): If left None will create a new plot, else must\n be a matplotlib subplot axis to be added to\n \n title (str; ''): Subplot title string\n \n unitcircle (bool; False): when True will plot the unit circle on the resulting plot\n \n Returns:\n Returns a Nyquist plot with the pole-zero locations superimposed on top, \n and if an axis was passed to `ax` it will be modified\n \n \n \n \"\"\"\n axs=ax or plt.gca()\n if hasattr(self, 'ac_sim_real_DF')==False:\n warnings.warn(\"\"\"Trying to get the real imag data from `self.get_sym_freq_resp`, \n you get what you get\"\"\")\n self.get_sym_freq_resp()\n \n \n self.nyquist_plot_templet(self.ac_sim_real_DF['pzk_symres_[V]'], self.ac_sim_imag_DF['pzk_symres_[V]'], \n ax=axs)\n self.plot_pz_loc(ax=axs, unitcircle=unitcircle)\n\n \n if title!='':\n title=' of '+title\n axs.set_title(f'Nyquist with Pole-Zero locations plot{title}');\n \n def plot_bode_from_pz(self, ax=None, title=''):\n \"\"\"\n plotting utility to plot the mag and phase on a single bode plot \n for the response of the symbolic transfer function\n \n Args:\n ax (matplotlib axis; None): If left None will create a new plot, else must\n be a matplotlib subplot axis to be added to\n \n title (str; ''): Subplot title string\n \n Returns:\n Returns a bode plot from the symbolic transfer function, \n and if an axis was passed to `ax` it will be modified\n \"\"\"\n axs=ax or plt.gca()\n if hasattr(self, 'ac_sim_real_DF')==False:\n warnings.warn(\"\"\"Trying to get the mag phase data from `self.get_sym_freq_resp`, \n you get what you get\"\"\")\n self.get_sym_freq_resp()\n \n \n \n self.bode_plot_one_templet(self.ac_sim_mag_DF.index, self.ac_sim_mag_DF['pzk_symres_[V][dB]'], self.ac_sim_phase_DF['pzk_symres_[V][deg]'], \n ax=axs)\n \n if title!='':\n title=' of '+title\n axs.set_title(f'Bode plot from Pole-Zero analysis plot{title}');\n \n def plot_nichols_from_pz(self, ax=None, title=''):\n \"\"\"\n plotting utility to plot the Nichols chart\n for the response of the symbolic transfer function\n \n Args:\n ax (matplotlib axis; None): If left None will create a new plot, else must\n be a matplotlib subplot axis to be added to\n \n title (str; ''): Subplot title string\n \n Returns:\n Returns a Nichols chart plot from the symbolic transfer function, \n and if an axis was passed to `ax` it will be modified\n \"\"\"\n axs=ax or plt.gca()\n \n if hasattr(self, 'ac_sim_real_DF')==False:\n warnings.warn(\"\"\"Trying to get the mag phase data from `self.get_sym_freq_resp`, \n you get what you get\"\"\")\n self.get_sym_freq_resp()\n \n \n \n self.nichols_plot_templet(self.ac_sim_mag_DF['pzk_symres_[V][dB]'], self.ac_sim_phase_DF['pzk_symres_[V][deg]'], \n ax=axs)\n \n if title!='':\n title=' of '+title\n axs.set_title(f'Nichols plot from Pole-Zero analysis plot{title}');\n \n\n\n# To use `pz_ease` we instantiate it by passing in the circuit under test at the time of creation. 
The instantiation has another argument to pass in $K$ if known *a priori*; for just the moment we will leave $K=1$ to demonstrate what .pz and `pz_ease` can do when $K$ is not known\n\n# In[14]:\n\n\npz=pz_ease(circ)\n\n\n# Next, we define the port terminals and set the control for the voltage transfer function and to get both poles and zeros, which is the default case.\n\n# In[15]:\n\n\npz.pz_def_ports('In', '0', 'Out', '0')\npz.pz_mode_set( display_table=True)\n\n\n# And so now we run the .pz simulation\n\n# In[16]:\n\n\npz.do_pz_sim(display_table=True)\n\n\n# ## Results when $K=1$\n\n# And we see that we get the pole that we know exists with the value we expect. So now we can feed the results into Lcapy's zp2tf to generate the symbolic transfer function. Where we will keep $K=1$ for the moment\n\n# In[17]:\n\n\npz.get_pz_sym_tf()\nH_pzk1=pz.sym_tf; H_pzk1\n\n\n# We can now plot the pole-zero locations\n\n# In[18]:\n\n\npz.plot_pz_loc()\n\n\n# Generate the 3d plot of the Laplace domain response of this filter. Where the Laplace variable $s$ is equal to \n# $$s=\\sigma +j\\omega$$\n# where $j\\omega$ is the angular frequency response and when $\\sigma$ is zero $s$ yields the Fourier transform response of the system. Otherwise $\\sigma=1/\\tau$ gives the decay response of the system. For more info review the section [\"Transfer Function Analysis\"](https://control.com/textbook/ac-electricity/transfer-function-analysis/) from the controls book at https://control.com/\n\n# In[19]:\n\n\npz.plot_3d_laplce()\n\n\n# We then can get the symbolic transfer function’s frequency response and view the Nyquist interpretation over the poles and zeros. Though when $K=1$ this can have issues in the plotting.\n\n# In[20]:\n\n\n#pz.plot_nyquist_with_pz()\n#don't get why this is not working with K=1\n\n\n# As well as plotting the symbolic transfer function’s Bode and Nichols plot interpretation of its response.\n\n# In[21]:\n\n\npz.plot_bode_from_pz()\n\n\n# In[22]:\n\n\npz.plot_nichols_from_pz()\n\n\n# And finally, besides generating the symbolic transfer function, pz_ease can also use scipy's signal module to generate a dictionary containing the transfer function's numerator and denominator terms from the pole zeros and $K$, for use within the scipy signal module or in the python controls library.\n\n# In[23]:\n\n\npz.scipy_pzk(); pz.scipy_tfcoef\n\n
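\n# As a quick aside, the `{'b': ..., 'a': ...}` dictionary above is exactly the coefficient form scipy's analog-filter helpers take; a minimal sketch of the idea (left commented out; the frequency grid is an arbitrary choice for illustration, not something taken from the text above):\n\n# In[ ]:\n\n\n#sketch only: drive scipy.signal with the .pz-derived coefficients\n#from scipy import signal\n#w, h=signal.freqs(pz.scipy_tfcoef['b'], pz.scipy_tfcoef['a'], worN=np.logspace(1, 7, 500))\n#plt.semilogx(w/(2*np.pi), 20*np.log10(np.abs(h))) #mag in dB vs freq in Hz\n\n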
\n# ## Results with Proper $K$\n# \n# Now, for giggles and grins, let's see what our lowpass filter's full transfer function response looks like when we know $K$, using the SPICE-extracted poles and zeros from the .pz analysis. After overriding the $K$ in `pz_ease` we need to rerun `pz_ease.get_sym_freq_resp()` to regenerate the symbolic transfer function's response for the new symbolic transfer function found after $K$ was overridden\n\n# In[24]:\n\n\npz.get_pz_sym_tf(overload_K=K_rcl)\npz.get_sym_freq_resp()\nH_pzk1=pz.sym_tf; H_pzk1\n\n\n# In[25]:\n\n\npz.plot_3d_laplce()\n\n\n# In[26]:\n\n\n# figure out this strange matplotlib value error\n#pz.plot_nyquist_with_pz()\n\n\n# In[27]:\n\n\npz.plot_bode_from_pz()\n\n\n# In[28]:\n\n\npz.plot_nichols_from_pz()\n\n\n# The results show that with an appropriate value of $K$ set we get the value that matches the .ac simulation results.\n\n# ## Asking too much from .pz\n# \n# Let's now see what happens when .pz does not work\n\n# In[29]:\n\n\npz.pz_mode_set(pz_acu='zeros', display_table=True)\npz.do_pz_sim(display_table=True)\n\n\n# In this first error case, the pyspice ngspice wrapper catches the ngspice error that, in this case, we know is due to trying to calculate zeros from a circuit we know does not have any zeros. We can also run into cases where instead of throwing a blatant error ngspice will throw a warning when it can't converge to a solution. In that case, at the moment, `pz_ease` won't display the warning but will set `pz_ease.pz_values` to an empty dictionary.\n
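\n# Since that failure mode is silent, a small guard before using the results downstream is worthwhile; a minimal sketch of such a check (a suggestion only, not something `pz_ease` does itself):\n\n# In[ ]:\n\n\n#sketch only: an empty dict here means .pz silently failed to converge\n#if not pz.pz_values:\n#    print('.pz returned no poles/zeros; check the ngspice output for convergence warnings')\n\n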
\n# # Pole-Zero analysis of a Series Band Reject filter \n# \n# Here we get the pole-zero analysis results of one of the filter examples from the previous section, where we know that the transfer function has a zero coefficient term in its numerator, to demonstrate the utility of `pz_ease` to formulate the transfer function from the .pz results.\n# \n\n# In[30]:\n\n\n#instantiate the rlc_bandstop filter\nbandstopRLC_s=rlc_series_bandstop(L_value=8.33e-5@u_H, C_value=2.7e-7@u_F, R_value=50@u_Ohm)\nbandstopRLC_s.lcapy_self()\n\n\n# In[31]:\n\n\nH_rlcs_bs=bandstopRLC_s.get_tf(with_values=True, ZPK=False).ratfloat(); H_rlcs_bs\n\n\n# In[32]:\n\n\nK_rlcs_bs=np.real(H_rlcs_bs.K.cval); K_rlcs_bs\n\n\n# In[33]:\n\n\nreset()\n#create the nets\nnet_in=Net('In'); net_out=Net('Out'); \n\n#create a 1V AC test source and attach to nets\nvs=SINEV(ac_magnitude=1@u_V); vs['p', 'n']+=net_in, gnd\n\n#attach term_0 to net_in and term_2 to net_out per scikit-rf convention all \n#other terminals are grounded\nbandstopRLC_s.SKiDl(net_in, gnd, net_out, gnd)\n\ncirc=generate_netlist()\nprint(circ)\n\n\n# In[34]:\n\n\nfilter_responce=qfilter_explorer(circ, 'RLC Series Bandstop Filter Response');\n\n\n# In[35]:\n\n\nfilter_responce.symbolic_tf(bandstopRLC_s)\n\n\n# Where again we will invoke the .tf of this function to show that .tf will not give us $K$ reliably; it works only in this case serendipitously. So don't rely on it for finding $K$.\n\n# In[36]:\n\n\ntf=easy_tf(circ)\n\n\n# In[37]:\n\n\ntf.dc_voltage_gain(vs, node(net_out), node(gnd))\ntf.vg_results\n\n\n# Then getting the .pz results\n\n# In[38]:\n\n\npz=pz_ease(circ, K=K_rlcs_bs)\n\n\n# In[39]:\n\n\npz.pz_def_ports('In', '0', 'Out', '0')\npz.pz_mode_set( display_table=True)\n\npz.do_pz_sim(display_table=True)\n\n\n# Where the resulting transfer function from the .pz analysis is\n\n# In[40]:\n\n\npz.get_pz_sym_tf()\npz.sym_tf.canonical()\n\n\n# and recalling the analytical transfer function as the reference\n\n# In[41]:\n\n\nH_rlcs_bs.ratfloat()\n\n\n# We can then look at the error, which, given the size of the coefficients, is small\n\n# In[42]:\n\n\n#numerator error\nN_error=H_rlcs_bs.N-pz.sym_tf.canonical().N\nD_error=H_rlcs_bs.D-pz.sym_tf.canonical().D\nprint(f'Numerator Delta: {N_error}')\nprint(f'Denominator Delta: {D_error}')\n\n\n# So the results for scipy signal from the .pz-acquired pole-zeros yield\n\n# In[43]:\n\n\npz.scipy_pzk(); pz.scipy_tfcoef\n\n\n# In[44]:\n\n\npz.plot_3d_laplce()\n\n\n# In[45]:\n\n\npz.plot_nyquist_with_pz()\n\n\n# Note that because of the larger spread of the poles and zeros for this system under test, the frequency response is concentrated around (0,0) and shows up as a blip.\n\n# In[46]:\n\n\npz.plot_bode_from_pz()\n\n\n# In[47]:\n\n\npz.plot_nichols_from_pz()\n\n\n# # Pole-Zero analysis of a lattice All-Pass filter\n# \n# We also know from the previous section that the lcapy-generated transfer function did not match the .ac simulation results (probably due to this author's lcapy usage error). Thus let's take a look at what .pz gives us\n# \n\n# In[48]:\n\n\n#instantiate the all-pass lattice filter\nallpasslat_lf=lc_balanced_allpass_lowfreq_lattice_filt()\nallpasslat_lf.lcapy_self()\n\n\n# In[49]:\n\n\n#get this filter's abstract transfer function\nallpasslat_lf.get_tf(with_values=True, ZPK=False)\n\n\n# In[50]:\n\n\nreset()\n#create the nets; the last one is needed to deal with singularity issues when dealing with lattice circuits and ground\nnet_in=Net('In'); net_out=Net('Out'); net_outlower=Net('Out2')\n#create a 1V AC test source and attach to nets\nvs=SINEV(ac_magnitude=1@u_V); vs['p', 'n']+=net_in, gnd\n#net_in+=dummy_1[2]\n\n#attach term_0 to net_in and term_2 to net_out per scikit-rf convention all \n#other terminals are grounded\n#but need to add dummy resistors to deal with singular issues and get a solvable matrix\ndummy_botin=R(value=0, ref='dummy')\ndummy_botin[1]+=gnd\n\ndummy_botout=R(value=0, ref='dummy')\ndummy_botout[2]+=gnd\nallpasslat_lf.SKiDl(net_in, dummy_botin[2], net_out, dummy_botout[1])\n\ncirc=generate_netlist()\nprint(circ)\n\n\n# In[51]:\n\n\nfilter_responce=qfilter_explorer(circ, 'LC All-Pass Low Frequency Lattice Filter');\n\n\n# In[52]:\n\n\nfilter_responce.symbolic_tf(allpasslat_lf)\n\n\n# In[53]:\n\n\npz=pz_ease(circ, K=1)\n\n\n# In[54]:\n\n\npz.pz_def_ports('In', '0', 'Out', '0')\npz.pz_mode_set( display_table=True)\n\npz.do_pz_sim(display_table=True)\n\n\n# In[55]:\n\n\npz.scipy_pzk(); pz.scipy_tfcoef\n\n\n# In[56]:\n\n\npz.get_pz_sym_tf()\nH=pz.sym_tf; H.canonical()\n\n\n# In[57]:\n\n\npz.plot_3d_laplce()\n\n\n# In[58]:\n\n\npz.plot_nyquist_with_pz()\n\n\n# Where now, looking at the Bode and Nichols charts generated from the .pz analysis, we see they are almost identical to what was gotten from the .ac analysis.\n\n# In[59]:\n\n\npz.plot_bode_from_pz()\n\n\n# In[60]:\n\n\npz.plot_nichols_from_pz()\n\n\n# # Add the Razavi T-coil after the previous 
magnetic section is done¶\n\n# # Citations\n\n# [1] T. Kuphardt, Lessons In Industrial Instrumentation :, Version 2.30. [online], 2018, Chapter 5 Section \"Transfer Function Analysis\". Available: https://control.com/textbook/ac-electricity/transfer-function-analysis/. [Accessed: 10- Jan- 2021].\n","sub_path":"_build/jupyter_execute/AC_2/AC_4_PZ.py","file_name":"AC_4_PZ.py","file_ext":"py","file_size_in_byte":39644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"470956488","text":"import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\ndescription = \"Python's missing debug print command and other development tools.\"\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = (THIS_DIR / 'HISTORY.md').read_text()\n history = re.sub(r'#(\\d+)', r'[#\\1](https://github.com/samuelcolvin/python-devtools/issues/\\1)', history)\n history = re.sub(r'( +)@([\\w\\-]+)', r'\\1[@\\2](https://github.com/\\2)', history, flags=re.I)\n history = re.sub('@@', '@', history)\n\n long_description = (THIS_DIR / 'README.md').read_text() + '\\n\\n' + history\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://python-devtools.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'devtools/version.py').load_module()\n\nsetup(\n name='devtools',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n author='Samuel Colvin',\n author_email='s@muelcolvin.com',\n url='https://github.com/samuelcolvin/python-devtools',\n license='MIT',\n packages=['devtools'],\n python_requires='>=3.6',\n install_requires=[\n 'executing>=0.8.0,<1.0.0',\n 'asttokens>=2.0.0,<3.0.0',\n ],\n extras_require={\n 'pygments': ['Pygments>=2.2.0'],\n },\n zip_safe=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"568643465","text":"import functools\nimport importlib\nimport logging\nfrom typing import Dict, Optional\n\nfrom gaphas.guide import GuidePainter\nfrom gaphas.painter import FreeHandPainter, HandlePainter, PainterChain\nfrom gaphas.segment import LineSegmentPainter\nfrom gaphas.tool.rubberband import RubberbandPainter, RubberbandState\nfrom gaphas.view import GtkView\nfrom gi.repository import Gdk, GdkPixbuf, Gtk\n\nfrom gaphor import UML\nfrom gaphor.core import event_handler, gettext\nfrom gaphor.core.modeling import StyleSheet\nfrom gaphor.core.modeling.diagram import StyledDiagram\nfrom 
gaphor.core.modeling.event import AttributeUpdated, ElementDeleted\nfrom gaphor.diagram.diagramtoolbox import tooliter\nfrom gaphor.diagram.diagramtools import apply_default_tool_set, apply_placement_tool_set\nfrom gaphor.diagram.diagramtools.placement import create_item\nfrom gaphor.diagram.painter import ItemPainter\nfrom gaphor.diagram.selection import Selection\nfrom gaphor.diagram.support import get_diagram_item\nfrom gaphor.transaction import Transaction\nfrom gaphor.ui.event import DiagramSelectionChanged, Notification, ToolSelected\n\nlog = logging.getLogger(__name__)\n\n\n@functools.lru_cache(maxsize=1)\ndef placement_icon_base():\n with importlib.resources.path(\"gaphor.ui\", \"placement-icon-base.png\") as f:\n return GdkPixbuf.Pixbuf.new_from_file_at_scale(str(f), 64, 64, True)\n\n\nGtkView.set_css_name(\"diagramview\")\n\n\nif Gtk.get_major_version() == 3:\n _placement_pixbuf_map: Dict[str, GdkPixbuf.Pixbuf] = {}\n\n def get_placement_cursor(display, icon_name):\n if icon_name in _placement_pixbuf_map:\n pixbuf = _placement_pixbuf_map[icon_name]\n else:\n pixbuf = placement_icon_base().copy()\n icon = Gtk.IconTheme.get_default().load_icon(icon_name, 24, 0)\n icon.copy_area(\n 0,\n 0,\n icon.get_width(),\n icon.get_height(),\n pixbuf,\n 9,\n 15,\n )\n _placement_pixbuf_map[icon_name] = pixbuf\n return Gdk.Cursor.new_from_pixbuf(display, pixbuf, 1, 1)\n\n\nelse:\n _placement_texture_map: Dict[str, Gdk.Texture] = {}\n\n def get_placement_cursor(display, icon_name):\n if display is None:\n display = Gdk.Display.get_default()\n if icon_name in _placement_texture_map:\n texture = _placement_texture_map[icon_name]\n else:\n pixbuf = placement_icon_base().copy()\n theme_icon = Gtk.IconTheme.get_for_display(display).lookup_icon(\n icon_name,\n None,\n 24,\n 1,\n Gtk.TextDirection.NONE,\n Gtk.IconLookupFlags.FORCE_SYMBOLIC,\n )\n icon = GdkPixbuf.Pixbuf.new_from_file_at_scale(\n theme_icon.get_file().get_path(), 32, 32, True\n )\n icon.copy_area(\n 0,\n 0,\n icon.get_width(),\n icon.get_height(),\n pixbuf,\n 9,\n 15,\n )\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n _placement_texture_map[icon_name] = texture\n return Gdk.Cursor.new_from_texture(texture, 1, 1)\n\n\nclass DiagramPage:\n\n if Gtk.get_major_version() == 3:\n VIEW_TARGET_STRING = 0\n VIEW_TARGET_ELEMENT_ID = 1\n VIEW_TARGET_TOOLBOX_ACTION = 2\n VIEW_DND_TARGETS = [\n Gtk.TargetEntry.new(\"gaphor/element-id\", 0, VIEW_TARGET_ELEMENT_ID),\n Gtk.TargetEntry.new(\"gaphor/toolbox-action\", 0, VIEW_TARGET_TOOLBOX_ACTION),\n ]\n\n def __init__(\n self, diagram, event_manager, element_factory, properties, modeling_language\n ):\n self.event_manager = event_manager\n self.element_factory = element_factory\n self.properties = properties\n self.diagram = diagram\n self.modeling_language = modeling_language\n\n self.view: Optional[GtkView] = None\n self.widget: Optional[Gtk.Widget] = None\n self.diagram_css: Optional[Gtk.CssProvider] = None\n\n self.rubberband_state = RubberbandState()\n\n self.event_manager.subscribe(self._on_element_delete)\n self.event_manager.subscribe(self._on_style_sheet_updated)\n self.event_manager.subscribe(self._on_tool_selected)\n\n title = property(lambda s: s.diagram and s.diagram.name or gettext(\"\"))\n\n def get_diagram(self):\n return self.diagram\n\n def get_view(self):\n return self.view\n\n def construct(self):\n \"\"\"Create the widget.\n\n Returns: the newly created widget.\n \"\"\"\n assert self.diagram\n\n view = GtkView(selection=Selection())\n if Gtk.get_major_version() == 3:\n 
view.drag_dest_set(\n Gtk.DestDefaults.ALL,\n DiagramPage.VIEW_DND_TARGETS,\n Gdk.DragAction.MOVE | Gdk.DragAction.COPY | Gdk.DragAction.LINK,\n )\n else:\n # TODO: Gtk4 - use controllers DragSource and DropTarget\n pass\n\n self.diagram_css = Gtk.CssProvider.new()\n view.get_style_context().add_provider(\n self.diagram_css, Gtk.STYLE_PROVIDER_PRIORITY_USER\n )\n\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n if Gtk.get_major_version() == 3:\n scrolled_window.add(view)\n scrolled_window.show_all()\n view.connect(\"drag-data-received\", self._on_drag_data_received)\n else:\n scrolled_window.set_child(view)\n\n view.selection.add_handler(self._on_view_selection_changed)\n\n self.view = view\n self.widget = scrolled_window\n\n self.select_tool(\"toolbox-pointer\")\n\n self.set_drawing_style()\n\n # Set model only after the painters are set\n view.model = self.diagram\n\n return self.widget\n\n def get_tool_def(self, tool_name):\n return next(\n t\n for t in tooliter(self.modeling_language.toolbox_definition)\n if t.id == tool_name\n )\n\n def apply_tool_set(self, tool_name):\n \"\"\"Return a tool associated with an id (action name).\"\"\"\n if tool_name == \"toolbox-pointer\":\n return apply_default_tool_set(\n self.view,\n self.modeling_language,\n self.event_manager,\n self.rubberband_state,\n )\n\n tool_def = self.get_tool_def(tool_name)\n item_factory = tool_def.item_factory\n handle_index = tool_def.handle_index\n return apply_placement_tool_set(\n self.view,\n item_factory=item_factory,\n modeling_language=self.modeling_language,\n event_manager=self.event_manager,\n handle_index=handle_index,\n )\n\n def get_tool_icon_name(self, tool_name):\n if tool_name == \"toolbox-pointer\":\n return None\n return next(\n t\n for t in tooliter(self.modeling_language.toolbox_definition)\n if t.id == tool_name\n ).icon_name\n\n @event_handler(ToolSelected)\n def _on_tool_selected(self, event: ToolSelected):\n self.select_tool(event.tool_name)\n\n @event_handler(ElementDeleted)\n def _on_element_delete(self, event: ElementDeleted):\n if event.element is self.diagram:\n self.close()\n\n @event_handler(AttributeUpdated)\n def _on_style_sheet_updated(self, event: AttributeUpdated):\n if event.property is StyleSheet.styleSheet:\n self.set_drawing_style()\n\n diagram = self.diagram\n for item in diagram.get_all_items():\n diagram.request_update(item)\n\n def close(self):\n \"\"\"Tab is destroyed.\n\n Do the same thing that would be done if Close was pressed.\n \"\"\"\n assert self.widget\n if Gtk.get_major_version() == 3:\n self.widget.destroy()\n else:\n parent = self.widget.get_parent()\n if parent:\n parent.remove(self.widget)\n\n self.event_manager.unsubscribe(self._on_element_delete)\n self.event_manager.unsubscribe(self._on_style_sheet_updated)\n self.event_manager.unsubscribe(self._on_tool_selected)\n self.view = None\n\n def select_tool(self, tool_name: str):\n if self.view:\n self.apply_tool_set(tool_name)\n icon_name = self.get_tool_icon_name(tool_name)\n if Gtk.get_major_version() == 3:\n window = self.view.get_window()\n if icon_name and window:\n window.set_cursor(\n get_placement_cursor(window.get_display(), icon_name)\n )\n elif window:\n window.set_cursor(None)\n else:\n if icon_name:\n self.view.set_cursor(get_placement_cursor(None, icon_name))\n else:\n self.view.set_cursor(None)\n\n def set_drawing_style(self):\n \"\"\"Set the drawing style for the diagram based on the active style\n sheet.\"\"\"\n assert 
self.view\n assert self.diagram_css\n\n style = self.diagram.style(StyledDiagram(self.diagram, self.view))\n\n bg = style.get(\"background-color\")\n self.diagram_css.load_from_data(\n f\"diagramview {{ background-color: rgba({int(255*bg[0])}, {int(255*bg[1])}, {int(255*bg[2])}, {bg[3]}); }}\".encode()\n if bg\n else b\"\"\n )\n\n view = self.view\n\n item_painter = ItemPainter(view.selection)\n\n sloppiness = style.get(\"line-style\", 0.0)\n if sloppiness:\n item_painter = FreeHandPainter(item_painter, sloppiness=sloppiness)\n\n view.bounding_box_painter = item_painter\n view.painter = (\n PainterChain()\n .append(item_painter)\n .append(HandlePainter(view))\n .append(LineSegmentPainter(view.selection))\n .append(GuidePainter(view))\n .append(RubberbandPainter(self.rubberband_state))\n )\n\n view.request_update(self.diagram.get_all_items())\n\n def _on_view_selection_changed(self, item):\n view = self.view\n assert view\n selection = view.selection\n self.event_manager.handle(\n DiagramSelectionChanged(\n view, selection.focused_item, selection.selected_items\n )\n )\n\n def _on_drag_data_received(self, view, context, x, y, data, info, time):\n \"\"\"Handle data dropped on the diagram.\"\"\"\n if (\n data\n and data.get_format() == 8\n and info == DiagramPage.VIEW_TARGET_TOOLBOX_ACTION\n ):\n tool_def = self.get_tool_def(data.get_data().decode())\n with Transaction(self.event_manager):\n create_item(view, tool_def.item_factory, x, y)\n context.finish(True, False, time)\n elif (\n data\n and data.get_format() == 8\n and info == DiagramPage.VIEW_TARGET_ELEMENT_ID\n ):\n element_id = data.get_data().decode()\n element = self.element_factory.lookup(element_id)\n assert element\n\n if not isinstance(\n element, (UML.Classifier, UML.Package, UML.Property)\n ) or isinstance(element, UML.Association):\n self.event_manager.handle(\n Notification(\n gettext(\n \"Drag to diagram is (temporarily) limited to Classifiers, Packages, and Properties, not {type}.\"\n ).format(type=type(element).__name__)\n )\n )\n context.finish(True, False, time)\n return\n\n item_class = get_diagram_item(type(element))\n if item_class:\n with Transaction(self.event_manager):\n item = self.diagram.create(item_class)\n assert item\n\n x, y = view.get_matrix_v2i(item).transform_point(x, y)\n item.matrix.translate(x, y)\n item.subject = element\n\n view.selection.unselect_all()\n view.selection.focused_item = item\n\n else:\n log.warning(\n f\"No graphical representation for element {type(element).__name__}\"\n )\n context.finish(True, False, time)\n else:\n context.finish(False, False, time)\n","sub_path":"gaphor/ui/diagrampage.py","file_name":"diagrampage.py","file_ext":"py","file_size_in_byte":12621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"256433310","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm.auto import tqdm\nimport torch\nimport torch.nn as nn\nfrom model import resnet18\nfrom experiment_utils.train_models import get_dataloaders_incr\nfrom experiment_utils.utils.helpers import find_network_modules_by_name, set_torchvision_network_module\nfrom args import *\nfrom network_consolidation import ExperimentArgs\n\n\nclass OODArgs(ExperimentArgs):\n ARGS = {\n\n }\n\n\ndef shuffle_pixels(x, scale=1):\n \"\"\"\n x: [batch X in channels X height X width]\n \"\"\"\n _, _, height, width = x.shape\n og_x_idx = np.arange(height).repeat(width)\n og_y_idx = np.arange(width)[None].repeat(height, 0).reshape(-1)\n x_idx = np.random.choice(height 
// scale, height // scale, replace=False).repeat(scale)\n y_idx = np.random.choice(width // scale, width // scale, replace=False).repeat(scale)\n x_idx = ((np.arange(height) - x_idx) % height).repeat(width)\n y_idx = ((np.arange(width) - y_idx) % width)[None].repeat(height, 0).reshape(-1)\n x = x.transpose(0, 2).transpose(1, 3)\n x[og_x_idx, og_y_idx] = x[x_idx, y_idx]\n return x.transpose(0, 2).transpose(1, 3)\n\n\ndef main():\n data_args, experiment_args, model_args = parse_args(IncrDataArgs, ExperimentArgs, AllModelArgs)\n main_ood_detection(data_args, experiment_args, model_args)\n\n\ndef main_ood_detection(data_args, experiment_args, model_args):\n assert model_args.load_state_path, 'please specify a path to a pretrained model'\n state = torch.load(model_args.load_state_path)\n net = resnet18(num_classes=data_args.num_classes, seed=data_args.seed, disable_bn_stats=model_args.disable_bn_stats)\n if data_args.num_classes != state['fc.weight'].shape[0]:\n net.fc = nn.Linear(net.fc.in_features, state['fc.bias'].shape[0], bias=True)\n net.load_state_dict(state)\n net.cuda()\n\n train_loaders, val_loaders, test_loaders = get_dataloaders_incr(data_args, load_test=True)\n\n reinit_layers = find_network_modules_by_name(net, experiment_args.layer)\n layer_path = 'models/consolidation_experiments/incr_task/%d-layer' % len(reinit_layers)\n\n \"\"\"net.bn1 = nn.BatchNorm2d(net.bn1.num_features, affine=False).cuda()\n net.layer1[0].bn1 = nn.BatchNorm2d(net.layer1[0].bn1.num_features, affine=False).cuda()\n net.layer1[0].bn2 = nn.BatchNorm2d(net.layer1[0].bn2.num_features, affine=False).cuda()\n net.layer1[1].bn1 = nn.BatchNorm2d(net.layer1[1].bn1.num_features, affine=False).cuda()\n net.layer1[1].bn2 = nn.BatchNorm2d(net.layer1[1].bn2.num_features, affine=False).cuda()\n net.layer2[0].bn1 = nn.BatchNorm2d(net.layer2[0].bn1.num_features, affine=False).cuda()\n net.layer2[0].bn2 = nn.BatchNorm2d(net.layer2[0].bn2.num_features, affine=False).cuda()\n net.layer2[0].downsample[1] = nn.BatchNorm2d(net.layer2[0].downsample[1].num_features, affine=False).cuda()\n net.layer2[1].bn1 = nn.BatchNorm2d(net.layer2[1].bn1.num_features, affine=False).cuda()\n net.layer2[1].bn2 = nn.BatchNorm2d(net.layer2[1].bn2.num_features, affine=False).cuda()\n net.layer3[0].bn1 = nn.BatchNorm2d(net.layer3[0].bn1.num_features, affine=False).cuda()\n net.layer3[0].bn2 = nn.BatchNorm2d(net.layer3[0].bn2.num_features, affine=False).cuda()\n net.layer3[0].downsample[1] = nn.BatchNorm2d(net.layer3[0].downsample[1].num_features, affine=False).cuda()\n net.layer3[1].bn1 = nn.BatchNorm2d(net.layer3[1].bn1.num_features, affine=False).cuda()\n net.layer3[1].bn2 = nn.BatchNorm2d(net.layer3[1].bn2.num_features, affine=False).cuda()\n net.layer4[0].bn1 = nn.BatchNorm2d(net.layer4[0].bn1.num_features, affine=False).cuda()\n net.layer4[0].bn2 = nn.BatchNorm2d(net.layer4[0].bn2.num_features, affine=False).cuda()\n net.layer4[0].downsample[1] = nn.BatchNorm2d(net.layer4[0].downsample[1].num_features, affine=False).cuda()\n net.layer4[1].bn1 = nn.BatchNorm2d(net.layer4[1].bn1.num_features, affine=False).cuda()\"\"\"\n net.layer4[1].bn2 = nn.BatchNorm2d(net.layer4[1].bn2.num_features, affine=False).cuda()\n\n def build_ood_conv(conv):\n in_ch = conv.in_channels // experiment_args.redundant_groups * experiment_args.redundant_groups\n out_ch = conv.out_channels // experiment_args.redundant_groups * experiment_args.redundant_groups\n return OODConv(in_ch, out_ch, conv.kernel_size, bias=conv.bias is not None,\n stride=conv.stride, 
padding=conv.padding, dilation=conv.dilation,\n groups=conv.groups)\n\n def load_weight(conv, file_path):\n state_dict = torch.load(file_path)\n conv.weight.data[:] = state_dict['weight']\n if 'bias' in state_dict.keys():\n conv.bias.data[:] = state_dict['bias']\n\n discriminator_params = []\n phi_params = []\n for i, layer_name in enumerate(experiment_args.layer):\n old_conv = reinit_layers[i]\n sup_conv = build_ood_conv(old_conv)\n set_torchvision_network_module(net, layer_name, sup_conv)\n load_weight(sup_conv, '%s/%s-task_0.pth' % (layer_path, layer_name))\n sup_conv.cuda()\n reinit_layers[i] = sup_conv\n discriminator_params += list(sup_conv.discriminator.parameters())\n phi_params += list(sup_conv.phi.parameters())\n\n bce = nn.BCEWithLogitsLoss()\n optim = torch.optim.SGD(discriminator_params + phi_params,\n lr=experiment_args.lr,\n nesterov=experiment_args.nesterov,\n momentum=experiment_args.momentum,\n weight_decay=experiment_args.weight_decay)\n discriminator_optim = torch.optim.SGD(discriminator_params,\n lr=experiment_args.lr,\n nesterov=experiment_args.nesterov,\n momentum=experiment_args.momentum,\n weight_decay=experiment_args.weight_decay)\n\n phi_optim = torch.optim.SGD(phi_params,\n lr=experiment_args.lr,\n nesterov=experiment_args.nesterov,\n momentum=experiment_args.momentum,\n weight_decay=experiment_args.weight_decay)\n\n def zero_grad():\n discriminator_optim.zero_grad()\n phi_optim.zero_grad()\n\n def discriminator_loss(m, length_pos):\n # get discriminator outputs for un-perturbed pos and neg inputs\n pos_logits = m.log_real[:length_pos]\n neg_logits = m.log_real[length_pos:]\n\n # get discriminator outputs for perturbed (pseudo pos) inputs\n pseudo_pos_logits = m.log_fake[length_pos:]\n\n total_pos = pos_logits.numel()\n total_neg = neg_logits.numel() + pseudo_pos_logits.numel()\n\n loss_pos = bce(pos_logits, torch.ones_like(pos_logits))\n loss_neg = bce(neg_logits, torch.zeros_like(neg_logits))\n loss_pseudo_pos = bce(pseudo_pos_logits, torch.zeros_like(pseudo_pos_logits))\n #loss_all_neg = loss_neg + loss_pseudo_pos\n\n # weight positive and negative samples evenly\n #loss = (loss_pos * total_neg + loss_all_neg * total_pos) / 2 / total_neg / total_pos\n loss = (loss_pos + loss_neg + loss_pseudo_pos) / 3\n\n pred_real = torch.sigmoid(m.log_real).round()\n acc_real = pred_real[:length_pos].sum().item() - (pred_real[length_pos:] - 1).sum().item()\n acc_real /= pred_real.numel()\n\n pred_fake = torch.sigmoid(pseudo_pos_logits).round()\n acc_fake = -(pred_fake - 1).sum().item() / pred_fake.numel()\n\n return loss, acc_real, acc_fake\n\n def phi_loss(m, length_pos, l2=0.1):\n # get discriminator outputs for perturbed (pseudo pos) inputs\n phi_pos_logits = m.log_fake[:length_pos]\n phi_neg_logits = m.log_fake[length_pos:]\n\n loss = bce(phi_neg_logits, torch.ones_like(phi_neg_logits))\n l2_loss = l2 * m.l2[:length_pos].mean()\n\n pred = torch.sigmoid(phi_neg_logits).round()\n acc = -(pred - 1).sum().item() / pred.numel()\n\n return loss, l2_loss, acc\n\n discriminator_losses_by_layer = {n: [] for n in experiment_args.layer}\n phi_losses_by_layer = {n: [] for n in experiment_args.layer}\n phi_l2_by_layer = {n: [] for n in experiment_args.layer}\n real_accs_by_layer = {n: [] for n in experiment_args.layer}\n fake_accs_by_layer = {n: [] for n in experiment_args.layer}\n optimize = 'discriminator'\n l2_weight = 10\n for epoch in range(10):\n pbar = tqdm(total=min(map(lambda x: len(x), train_loaders[:2])))\n for (_, x0, y0), (_, x1, y1) in zip(*train_loaders[:2]):\n x0, x1 
= x0.to(0), x1.to(0)\n #x_shuffle = shuffle_pixels(x0, scale=7*1) # we blow up images by 7 to begin with\n length_pos = len(y0)\n y = torch.zeros(len(y0) + len(y1)).to(0)\n y[:length_pos] = 1\n x = torch.cat([x0, x1], dim=0)\n net(x)\n\n # discriminator update\n if optimize == 'discriminator':\n for n, m in net.named_modules():\n if type(m) == OODConv:\n \"\"\"#loss = bce(m.log_real, y[:,None,None,None].repeat(1, *m.log_real.shape[1:]))\n loss = bce(m.log_real[:length_pos], torch.ones_like(m.log_real[:length_pos]))\n loss += bce(m.log_real[length_pos:], torch.zeros_like(m.log_real[length_pos:]))\n loss /= 2\n discriminator_losses_by_layer[n] += [loss.item()]\n loss.backward()\"\"\"\n\n loss, acc_real, acc_fake = discriminator_loss(m, length_pos)\n\n discriminator_losses_by_layer[n] += [loss.item()]\n\n real_accs_by_layer[n] += [acc_real]\n fake_accs_by_layer[n] += [acc_fake]\n\n loss.backward()\n discriminator_optim.step()\n # phi update\n elif optimize == 'phi':\n for n, m in net.named_modules():\n if type(m) == OODConv:\n loss, l2_loss, acc_fake = phi_loss(m, length_pos, l2=l2_weight)\n\n phi_losses_by_layer[n] += [loss.item()]\n phi_l2_by_layer[n] += [l2_loss.item()]\n\n real_accs_by_layer[n] += [real_accs_by_layer[n][-1]]\n fake_accs_by_layer[n] += [acc_fake]\n\n (loss + l2_loss).backward()\n phi_optim.step()\n\n zero_grad()\n pbar.update(1)\n pbar.close()\n pass\n\n\n\n\nclass OODConv(nn.Conv2d):\n def __init__(self, *args, hidden_dim=512, **kwargs):\n super(OODConv, self).__init__(*args, **kwargs)\n self.log_real = None # logits for real samples\n self.log_fake = None # logits for fake samples\n self.phi_out = None # faked in-domain data\n self.discriminator = MLP(self.in_channels, hidden_dim, 1) #nn.Conv2d(self.in_channels, 1, (1, 1), bias=True)\n self.phi = MLP(self.in_channels, hidden_dim, self.in_channels)#nn.Conv2d(self.in_channels, self.in_channels, (1, 1), bias=True)\n self.l2 = None\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n phi_out = self.phi(input.detach())\n self.l2 = (phi_out - input)**2\n self.log_real = self.discriminator(input.detach())\n self.log_fake = self.discriminator(phi_out)\n\n return super(OODConv, self).forward(input)\n\n\nclass MLP(nn.Module):\n def __init__(self, in_channels, hidden_dim, out_dim):\n super(MLP, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, hidden_dim, 1, bias=True)\n self.sigmoid = nn.Sigmoid()\n self.conv2 = nn.Conv2d(hidden_dim, out_dim, 1, bias=True)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.sigmoid(out)\n return self.conv2(out)\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"ood_detection.py","file_name":"ood_detection.py","file_ext":"py","file_size_in_byte":11969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"537802714","text":"'''\r\nDemonstrates:\r\n split\r\n join\r\n set\r\n'''\r\n\r\ndef split_join():\r\n str1 = 'These are the days when birds come back'\r\n lst = str1.split() #create a list with words of str1 as elements\r\n for item in lst:\r\n print (item)\r\n\r\n str2 = ''.join(lst) #construct a string from the elements of lst\r\n #notice that spaces are removed relative to the original string\r\n print (str2)\r\n\r\ndef do_set():\r\n people1 = set(['Zack', 'John', 'Mary', 'Katie']) #create set\r\n people2 = set(['Katie', 'Michael']) #create set\r\n\r\n print ('Jeremy' in people1) #check for set membership\r\n print ('Katie' in people2) #check for set membership\r\n\r\n people3 = people1 & people2 #set 
intersection\r\n print(people3)\r\n\r\n people4 = people1 | people2 #set union\r\n print (people4)\r\n\r\n print(people2 <= people1) #is people2 a subset of people1\r\n print(people2 >= people1) #is people2 a superset of people1\r\n \r\n\r\ndef main():\r\n split_join()\r\n do_set()\r\n \r\n \r\nmain()\r\n \r\n","sub_path":"A-Python-Examples/H-python-split-join-set.py","file_name":"H-python-split-join-set.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"157900137","text":"import os,sys,glob,math\nfrom PIL import Image\nimport numpy as np\nimport re\nfrom ast import literal_eval\nimport argparse\nfrom comp_utils import *\nimport pandas as pd\nimport random\ndef read_qtable(qname,i=-1):\n #------old read from file--------\n #e.g [[1,2],[2,3]]\n #f = open(qname,\"r\")\n #a=re.sub(\"\\s+\",'',f.read())\n #qtable=np.array(literal_eval(f.read()))\n #--------new form-----------\n #same as libjpeg requirements, done by space\n qtable = np.loadtxt(qname,dtype=np.int)\n if i!=-1:\n return qtable.reshape((-1,8,8))[i]\n return qtable.reshape((-1,8,8))\n\ndef write_qtable(qtable,qname='qtable.txt'):\n f = open(qname,'w')\n str_qtable = str(np.abs(qtable.astype(int)) ).replace('[ ','').replace('[','')\n str_qtable = ' '.join(str_qtable.split())\n str_qtable = str_qtable.replace(']','\\n')\n f.write(' '+str_qtable)\n f.close()\n return qtable\ndef bound_qtable_generate(qtable_name):\n df = pd.read_csv(\"csv/sorted.csv\")\n scores = np.array((df['rate'],df['acc1']))\n scores = np.swapaxes(scores,0,1)\n indexs = np.load('pareto1000.npy')\n qts = []\n for ind in indexs:\n qname=(\"/data/zhijing/flickrImageNetV2/sorted_cache/qtables/qtable\"+str(ind)+\".txt\")\n if df['rate'][ind] > 21 and df['rate'][ind] < 23:\n qt = read_qtable(qname,0)\n qts.append(qt.reshape((-1)))\n qts.append(np.transpose(qt).reshape((-1)))\n qtstd = np.array(qts).std(axis=0)\n qtmax = np.rint(np.clip(np.array(qts).max(axis=0)+qtstd*0.5, 1, 255))\n qtmin = np.rint(np.clip(np.array(qts).min(axis=0)-qtstd*0.5, 1, 255))\n bound_qt = np.array([np.random.randint(qtmin[i], qtmax[i]) for i in range(64)]).reshape(8,8)\n return write_qtable(bound_qt, qtable_name)\n\n\ndef perturbed_qtable_generate(input_name, qtable_name, step_range = 3):\n qtable = read_qtable(input_name) \n qtable += np.random.randint(-1*step_range,step_range+1,size=qtable.shape)\n qtable = np.clip(qtable,1,255)\n write_qtable(qtable,qtable_name)\n \n\ndef sorted_qtable_generate(qtable_name,depth=3):\n qtable = np.abs(np.random.standard_normal(( depth,64) ))\n qtable = [qtable[i]/np.max(qtable[i]) for i in range(depth)]\n qtable = np.sort(qtable,axis=1)\n qtable = np.array([zigzag_reverse(qtable[i],8) for i in range(3)])\n ran = np.random.randint(0,35)\n ranmax = np.random.randint(50,150)\n qtable = np.clip(np.round(qtable*(ranmax-ran) + ran ),ran,ranmax).astype(int)\n scale = np.random.uniform(0.5,1.5)\n qtable = np.clip(np.round(qtable*scale),1,255).astype(int)\n if depth == 3:\n write_qtable(qtable,qtable_name)\n return qtable\n else:\n write_qtable(qtable[0],qtable_name)\n return qtable[0]\n\n\ndef random_qtable_generate(qtable_name):\n qtable = np.abs(np.random.standard_normal((3,8,8)))\n qtable = qtable/np.max(qtable) #range in [0, 1]\n qtable = np.clip(np.round(qtable*255),1,255).astype(int)\n write_qtable(qtable,qtable_name) \n return qtable\n\ndef size_jpeg(path):\n comp = glob.glob(path)\n x = []\n for name in comp:\n x.append(os.path.getsize(name))\n return 
np.array(x).mean()\n\n\n","sub_path":"jpeg_eval/ratio.py","file_name":"ratio.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"171286933","text":"import socket\nimport datetime\nimport threading\nimport logging\n\nPORT = 5050\nCLOCK_SERVER = socket.gethostbyname(socket.gethostname())\nADDRESS = (CLOCK_SERVER, PORT)\nFORMAT = 'utf-8'\nCLOCK_REQUEST = \"SYNCHRONIZE\"\nlogging.basicConfig(filename='clock_server.log', level=logging.DEBUG, filemode='w')\n\n\ndef sendTime(client_connection, client_address):\n connected = True\n while connected:\n # Client sends CLOCK_REQUEST whenever it needs to know the time from the server\n msg = client_connection.recv(1024).decode(FORMAT)\n if msg == CLOCK_REQUEST:\n client_connection.send(str(datetime.datetime.now()).encode(FORMAT))\n client_connection.close()\n\n\nif __name__ == '__main__':\n # Creating the clock server socket\n clock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n logging.debug(\"[CLOCK SERVER] Successfully created\")\n\n # Binding the clock server socket to the address - defined globally\n clock.bind(ADDRESS)\n\n # The clock server keeps listening for different clients\n clock.listen()\n logging.debug(\"[CLOCK SERVER] Clock server is listening\")\n\n while True:\n # Clock server accepts new clients and creates a new thread for each client\n connection, address = clock.accept()\n logging.debug(\"[CLIENT CONNECTED] {}\".format(str(connection)))\n thread = threading.Thread(target=sendTime, args=(connection, address))\n thread.start()\n\n clock.close()\n","sub_path":"clock_server.py","file_name":"clock_server.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"81334853","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass BMSContainerQueryObject(Model):\n \"\"\"The query filters that can be used with the list containers API.\n\n :param backup_management_type: Backup management type for this container.\n Possible values include: 'Invalid', 'AzureIaasVM', 'MAB', 'DPM',\n 'AzureBackupServer', 'AzureSql'\n :type backup_management_type: str or :class:`BackupManagementType\n `\n :param container_type: Type of container for filter. 
Possible values\n include: 'Invalid', 'Unknown', 'IaasVMContainer',\n 'IaasVMServiceContainer', 'DPMContainer', 'AzureBackupServerContainer',\n 'MABContainer', 'Cluster', 'AzureSqlContainer', 'Windows', 'VCenter'\n :type container_type: str or :class:`ContainerType\n `\n :param backup_engine_name: Backup engine name\n :type backup_engine_name: str\n :param status: Status of registration of this container with the Recovery\n Services Vault.\n :type status: str\n :param friendly_name: Friendly name of this container.\n :type friendly_name: str\n \"\"\"\n\n _validation = {\n 'backup_management_type': {'required': True},\n }\n\n _attribute_map = {\n 'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},\n 'container_type': {'key': 'containerType', 'type': 'str'},\n 'backup_engine_name': {'key': 'backupEngineName', 'type': 'str'},\n 'status': {'key': 'status', 'type': 'str'},\n 'friendly_name': {'key': 'friendlyName', 'type': 'str'},\n }\n\n def __init__(self, backup_management_type, container_type=None, backup_engine_name=None, status=None, friendly_name=None):\n self.backup_management_type = backup_management_type\n self.container_type = container_type\n self.backup_engine_name = backup_engine_name\n self.status = status\n self.friendly_name = friendly_name\n","sub_path":"azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py","file_name":"bms_container_query_object.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"382746761","text":"# -*- coding: utf-8 -*-\n\nimport warnings\n\nfrom click import shell_completion # type: ignore\n\n\n__version__ = \"1.5.1\"\n__author__ = \"Sam Schott\"\n__url__ = \"https://maestral.app\"\n\n\n# suppress Python 3.9 warning from rubicon-objc\nwarnings.filterwarnings(\"ignore\", module=\"rubicon\", category=UserWarning)\n\n\n# patch click shell completion argument detection\n# see https://github.com/pallets/click/issues/1929\n\n\ndef _start_of_option(value: str) -> bool:\n \"\"\"Check if the value looks like the start of an option.\"\"\"\n return value[0] == \"-\" if value else False\n\n\nshell_completion._start_of_option = _start_of_option\n","sub_path":"src/maestral/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"6705821","text":"import shapedist\nimport numpy as np\nfrom numba import jit, float64\nfrom math import pi\nfrom inspect import signature\nfrom scipy.integrate import trapz\nfrom scipy.linalg import svd\nimport scipy.interpolate\nimport sys\nfrom tqdm import tqdm, trange\nimport matplotlib.pyplot as plt\n\n@jit(nopython=True)\ndef arclen_fct_values(b):\n \"\"\"\n Returns an arclength parametrization of a curve in 2D\n Parameters\n ----------\n b : array of floats\n The curve\n Returns\n -------\n array of float\n The arclength parametrization\n \"\"\"\n N = b.shape[0]\n d = np.zeros(N)\n d[1:N] = np.sum((b[1:N, :] - b[0:N-1, :])**2, 1)**0.5\n\n cumsum_d = np.cumsum(d)\n return cumsum_d / cumsum_d[N-1]\n\ndef normalize(p):\n \"\"\"\n Normalizes a 2D curve by center of mass, and also makes its arclength 1\n Parameters\n ----------\n p : array of floats\n The curve to be noramlized\n\n Returns\n -------\n array of floats\n The normalized curve\n \"\"\"\n N = p.shape[0]\n p = (p - shapedist.shape_representations.calculate_com(p))\n arclen_1 = np.sum((p[1:N, 
:] - p[0:N - 1, :]) ** 2, 1) ** 0.5\n arclen_1 = np.sum(arclen_1)\n p = p / arclen_1\n return p\n\n\ndef find_shapedist(p, q, dr='m', neigh = 5, shape_rep=shapedist.coords, distfunc=None, t1=None, t2=None,\n tol=2e-3, energy_dot=False, strip_height=8):\n \"\"\"\n\n Parameters\n ----------\n p\n q\n dr\n neigh\n shape_rep\n distfunc\n t1\n t2\n tol\n energy_dot\n strip_height\n\n Returns\n -------\n\n \"\"\"\n uniform = False\n if 'u' in dr.lower():\n uniform = True\n coarsen = False\n if 'c' in dr.lower():\n coarsen = True\n if 't' in dr.lower():\n p = p.T\n q = q.T\n numparams = 1\n if len(p.shape) == 2 and p.shape[1] == 2:\n # p = shapedist.normalize(p)\n # q = shapedist.normalize(q)\n\n if t1 is None and t2 is None:\n t1 = arclen_fct_values(p)\n t2 = arclen_fct_values(q)\n uniform = False\n numparams = len(signature(shape_rep).parameters)\n\n if len(p.shape) == 1 and len(q.shape) == 1:\n p = np.reshape(p, (-1, 1))\n q = np.reshape(q, (-1, 1))\n c = False\n if shape_rep is shapedist.curvature:\n c = True\n [t, p, q], mask = shapedist.build_hierarchy.hierarchical_curve_discretization(p, q,\n t1, t2,\n coarsen, tol=tol, curvature=c)\n if numparams == 2:\n p, s1 = shape_rep(p, t)\n q, s2 = shape_rep(q, t)\n else:\n p, s1 = shape_rep(p)\n q, s2 = shape_rep(q)\n\n if shape_rep is shapedist.srvf:\n energy_dot = True\n distfunc = shapedist.calculate_shape_distance_SRVF\n if shape_rep is shapedist.tangent:\n distfunc = shapedist.calculate_shape_distance_tangent\n # Find gamma in N dimensions\n if len(p.shape) == 1 or p.shape[1] == 1:\n p = np.reshape(p, (-1))\n q = np.reshape(q, (-1))\n\n if len(p.shape) == 2:\n dim = p.shape[1]\n else:\n dim = 1\n if \"2\" in dr.lower():\n\n tg, gammay, sdist = shapedist.elastic_n_2.find_gamma(t, p, q, neigh, neigh, energy_dot, uniform, dim)\n else:\n tg, gammay, sdist = shapedist.elastic_linear_multilevel.find_gamma(t, p, q, mask, energy_dot, uniform, dim, neigh, strip_height)\n if distfunc is not None:\n sdist = distfunc(p, q, tg, gammay)\n\n if 'd' in dr.lower():\n if 't' in dr.lower():\n p = p.T\n q = q.T\n return sdist, p[mask[-1]], q[mask[-1]], tg, gammay\n else:\n return sdist\n\n\n\n@jit(float64(float64[:], float64[:], float64[:]), cache=True, nopython=True)\ndef find_error(tg, gammar, gammat):\n \"\"\"\n Calculates the difference between two gamma curves.\n Parameters\n ----------\n tg\n gammar\n gammat\n\n Returns\n -------\n\n \"\"\"\n n = tg.size\n error = 1 / 2 * (tg[1] - tg[0]) * (gammar[1] - gammat[1]) ** 2 + 1 / 2 * (tg[n - 1] - tg[n - 2]) * (\n gammar[n - 1] - gammat[n - 1]) ** 2\n k = 2\n if n != gammar.size or n != gammat.size:\n raise IndexError\n while k < n - 1:\n error = error + 1 / 2 * (gammar[k] - gammat[k]) ** 2 * (tg[k] - tg[k - 1]) ** 2\n k = k + 1\n error = error ** (1 / 2)\n return error\n\n\n@jit(float64(float64[:], float64[:], float64[:]), cache=True, nopython=True)\ndef inner_product(t, p, q):\n \"\"\"\n Finds the inner product for SRVF and Tangent Shape Distance\n\n Parameters\n ----------\n t\n p\n q\n\n Returns\n -------\n\n \"\"\"\n i = 0\n result = 0\n while i < p.size - 1:\n result = result + (p[i] * q[i] + p[i + 1] * q[i + 1]) / 2 * (t[i + 1] - t[i])\n i = i + 1\n return result\n\n\n@jit(float64(float64[:], float64[:, :], float64[:, :]), cache=True, nopython=True)\ndef inner_product_2D(t, p, q):\n \"\"\"\n Finds the inner product for SRVF and Tangent Shape Distance\n\n Parameters\n ----------\n t\n p\n q\n\n Returns\n -------\n\n \"\"\"\n i = 0\n result = 0\n\n while i < p.shape[0] - 1:\n val1 = p[i][0] * q[i][0] + 
p[i][1] * q[i][1]\n        val2 = p[i + 1][0] * q[i + 1][0] + p[i + 1][1] * q[i + 1][1]\n        result = result + (val1 + val2) / 2 * (t[i + 1] - t[i])\n        i = i + 1\n    return result\n\ndef calculate_shape_distance_tangent(p, q, t, gamma):\n    q_reparam = np.zeros(q.shape)\n    for i in range(q.shape[1]):\n        func = scipy.interpolate.CubicSpline(t, q[:, i])\n        q_reparam[:, i] = func(gamma)\n    q = q_reparam\n    p_q = inner_product_2D(t, p, q)\n    p_p = inner_product_2D(t, p, p)\n    q_q = inner_product_2D(t, q, q)\n    temp = p_q / (p_p ** 0.5 * q_q ** 0.5)\n    if temp > 1:\n        temp = 1\n    return np.arccos(temp) / np.pi\n\n\ndef calculate_shape_distance_SRVF(p, q, t, gamma):\n    gammad = np.sqrt(np.gradient(gamma, t))\n    q_reparam = np.zeros(q.shape)\n    for i in range(q.shape[1]):\n        func = scipy.interpolate.CubicSpline(t, q[:, i])\n        q_reparam[:, i] = func(gamma)\n        q_reparam[:, i] = np.multiply(q_reparam[:, i], gammad)\n    q = q_reparam\n    p_q = inner_product_2D(t, p, q)\n    p_p = inner_product_2D(t, p, p)\n    q_q = inner_product_2D(t, q, q)\n    temp = p_q / (p_p ** 0.5 * q_q ** 0.5)\n    if temp > 1:\n        temp = 1\n    return np.arccos(temp) / pi\n\n","sub_path":"shapedist/shapedist.py","file_name":"shapedist.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"539585646","text":"from django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n    # Posts\n    path('posts/', PostListAPIView.as_view()),\n    path('posts/create/', PostCreateAPIView.as_view()),\n    # Rates\n    path('rates/', Rating.as_view()),\n    # Comments\n    path('comments/', CommentListAPIView.as_view()),\n    path('comments/send/', CommentCreateAPIView.as_view()),\n    # Dialogs\n    path('dialogs/', DialogListAPIView.as_view()),\n    path('dialogs/create/', DialogCreateAPIView.as_view()),\n    # Messages\n    path('messages/', MessageListAPIView.as_view()),\n    path('messages/send/', MessageCreateAPIView.as_view()),\n    # Profiles\n    path('profile/', GetProfile.as_view()),\n    # Communities\n    path('community/', GetCommunity.as_view()),\n    path('communities/', CommunityListAPIView.as_view()),\n    path('communities/posts/', CommunityPostListAPIView.as_view()),\n    path('communities/create/', CommunityCreateAPIView.as_view()),\n    path('communities/follow/', FollowCommunity.as_view()),\n    # Users\n    path('users/friends/', FriendListAPIView.as_view()),\n    path('users/followers/', FollowersListAPIView.as_view()),\n    path('users/follow/', FollowUser.as_view()),\n    path('users/response/', UserResponseRequest.as_view()),\n\n\n    path('test/', Test.as_view())\n]\n","sub_path":"socnet_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"500474619","text":"from django.shortcuts import render, redirect\nfrom .models import Sightings\nfrom .forms import SightingsForm\nfrom django.http import JsonResponse\nfrom django import forms\n\ndef index(request):\n    sightings=Sightings.objects.all()\n    context={\n        'sightings':sightings,\n    }\n    return render(request, 'sightings/index.html', context)\n\n\ndef AddSquirrel(request):\n    if request.method == 'POST':\n        form = SightingsForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('/sightings/') #check later\n    \n    else:\n        form = SightingsForm() #unbound form for GET requests\n    context = {'form':form,}\n    return render(request, '', context) #make html\n\n\ndef UpdateSquirrel(request, squirrel_pk):\r\n    squirrel = Sightings.objects.get(unique_squirrel_id = 
squirrel_pk)\r\n    form = SightingsForm(request.POST or None, instance = squirrel)\r\n    context = {'form':form}\r\n    \r\n    if form.is_valid():\r\n        form.save() #persist the validated changes; save(commit=False) alone never wrote anything\r\n        context = {'form':form}\r\n        return render(request, 'sightings/update_sightings.html', context) \r\n\r\n    else:\r\n        return render(request, 'sightings/update_sightings.html', context)\r\n\r\n\r\ndef Stats(request):\r\n    pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"sightings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"490643254","text":"from kzpy3.utils3 import *\r\n\r\n\r\nDefault_Arguments = { # top key is selected by required_arguments[0]\r\n\r\n\t'--default--':{\r\n\t    'resume':1,\r\n\t    'GPU':999,\r\n\t    'momentum':0.001,\r\n\t    'LR':0.01,\r\n\t    'batch_size':64,\r\n\t    'backwards':True,\r\n\t    'losses_to_average':25,\r\n\t    'save_timer_time':5*minutes,\r\n\t    'runs':'train',\r\n\t    'clip':1,\r\n\t    'noise':0,\r\n\t    'input':False,\r\n\t    'target':False,\r\n\t    'display.output':[0,3],\r\n\t    'display.input':[0,3],\r\n\t    'display.target':[0,3],\r\n\t    'pts2_h5py_type':None,\r\n\t    'reset_loss':False,\r\n\t},\r\n\r\n    'XOR':{\r\n\t    'GPU':-1,\r\n\t    'batch_size':8,\r\n\t    'losses_to_average':5,\r\n\t    'save_timer_time':30,\r\n\t},\r\n\r\n\t'ConDecon_test2':{\r\n\t    'batch_size':512,\r\n\t},\r\n\r\n\t'ConDecon_Fire':{\r\n\r\n\t},\r\n\t\r\n\t'ConDecon_Fire_FS':{\r\n\t\t'batch_size':1,\r\n\t\t'losses_to_average':64,\r\n\t\t'runs':'train',\r\n\t\t'input_offset':0,\r\n\t\t'target_offset':0,\r\n\t\t'inputs':['Fire3'],\r\n\t\t'targets':['Fire3'],\r\n\t\t'Data_read_path':False,\r\n\t\t'Data_write_path':False,\r\n\t},\r\n\r\n\t'Runs_Values':{\r\n\t}\r\n}\r\n\r\nfor k in Default_Arguments:\r\n\tif k != '--default--':\r\n\t\tfor l in Default_Arguments['--default--']:\r\n\t\t\tif l not in Default_Arguments[k].keys():\r\n\t\t\t\tDefault_Arguments[k][l] = Default_Arguments['--default--'][l]\r\n\r\n#kprint(Default_Arguments)\r\n\r\n#EOF\r\n","sub_path":"Learn/default_args.py","file_name":"default_args.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"222999293","text":"from django import forms\r\nfrom django.forms import ModelForm\r\nfrom .models import inventory,bulk_import\r\n\r\nclass prod_search(forms.Form):\r\n    \r\n    id=forms.CharField(widget=forms.TextInput(attrs={'placeholder':\"Product_id\"}),label=\"UID\")\r\n    \r\n    \r\nclass salesform(ModelForm):\r\n    sold_price=forms.IntegerField(required=True)\r\n    class Meta:\r\n        model=inventory\r\n        fields=['product_id','category','sold_price','cust_name','cust_phone','cust_Email']\r\n        widgets={\r\n            'product_id':forms.TextInput(attrs={\"Readonly\":True}),\r\n            'category':forms.TextInput(attrs={\"Readonly\":True}),\r\n            \r\n        }\r\n        labels={\r\n            \r\n            'cust_name':\"Customer Name\",\r\n            'cust_phone':\"Mobile\",\r\n            'cust_Email':\"Email\",\r\n            'sold_price': \"Sales Price\",\r\n            \r\n        }\r\n    def __init__(self,*args,**kwargs):\r\n        super(salesform,self).__init__(*args,**kwargs)\r\n        self.fields['sold_price'].required=True\r\n        self.fields['cust_name'].required=True\r\n        self.fields['cust_phone'].required=True\r\n        self.fields['cust_Email'].required=True\r\n\r\n\r\n\r\n\r\nclass importform(ModelForm):\r\n    class Meta:\r\n        model=bulk_import\r\n        fields=['file_name']\r\n        labels={\r\n            'file_name':''\r\n        }","sub_path":"Billit/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"82567139","text":"import re\nimport pickle\n\ndrct = \"./out/UK.txt\"\ndout = \"./out/basic_info.dump\"\n\nf = open(drct, 
\"r\")\nfout = open(dout, \"wb\")\n\ntext = f.read()\nlines = re.split(\"\\n(\\||\\})\", text)\n\ndic = {}\n\nfor line in lines:\n expr = re.search(\"(.*?)\\s=\\s(.*?)$\", line, re.S)\n if expr is not None:\n dic[expr.group(1)] = expr.group(2)\n\npickle.dump(dic, fout)\n\nf.close()\nfout.close()\n","sub_path":"100knock/chapter3/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"518962160","text":"\"\"\"\nGeneral purpose utilities for PyBERT.\n\nOriginal author: David Banas \n\nOriginal date: September 27, 2014 (Copied from pybert_cntrl.py.)\n\nCopyright (c) 2014 David Banas; all rights reserved World wide.\n\"\"\"\nimport os.path\nimport re\nfrom functools import reduce\nimport pkgutil\nimport importlib\n\nimport numpy as np\nfrom numpy import (\n array,\n concatenate,\n convolve,\n cumsum,\n diff,\n float,\n histogram,\n insert,\n log10,\n mean,\n ones,\n pi,\n power,\n real,\n reshape,\n resize,\n sign,\n sort,\n sqrt,\n where,\n zeros,\n)\nfrom numpy.fft import fft, ifft\nfrom scipy.signal import freqs, get_window, invres\nfrom scipy.stats import norm\nimport skrf as rf\n\ndebug = False\ngDebugOptimize = False\ngMaxCTLEPeak = 20 # max. allowed CTLE peaking (dB) (when optimizing, only)\n\n\ndef moving_average(a, n=3):\n \"\"\"\n Calculates a sliding average over the input vector.\n\n Args:\n a([float]): Input vector to be averaged.\n n(int): Width of averaging window, in vector samples. (Optional;\n default = 3.)\n\n Returns:\n [float]: the moving average of the input vector, leaving the input\n vector unchanged.\n \"\"\"\n\n ret = cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return insert(ret[n - 1 :], 0, ret[n - 1] * ones(n - 1)) / n\n\n\ndef find_crossing_times(\n t, x, min_delay: float = 0.0, rising_first: bool = True, min_init_dev: float = 0.1, thresh: float = 0.0,\n):\n \"\"\"\n Finds the threshold crossing times of the input signal.\n\n Args:\n t([float]): Vector of sample times. Intervals do NOT need to be\n uniform.\n x([float]): Sampled input vector.\n min_delay(float): Minimum delay required, before allowing\n crossings. (Helps avoid false crossings at beginning of\n signal.) (Optional; default = 0.)\n rising_first(bool): When True, start with the first rising edge\n found. (Optional; default = True.) 
When this option is True,\n the first rising edge crossing is the first crossing returned.\n This is the desired behavior for PyBERT, because we always\n initialize the bit stream with [0, 0, 1, 1], in order to\n provide a known synchronization point for jitter analysis.\n min_init_dev(float): The minimum initial deviation from zero,\n which must be detected, before searching for crossings.\n Normalized to maximum input signal magnitude.\n (Optional; default = 0.1.)\n thresh(float): Vertical crossing threshold.\n\n Returns:\n [float]: Array of signal threshold crossing times.\n \"\"\"\n\n if len(t) != len(x):\n raise ValueError(\"len(t) (%d) and len(x) (%d) need to be the same.\" % (len(t), len(x)))\n\n t = array(t)\n x = array(x)\n\n try:\n max_mag_x = max(abs(x))\n except:\n print(\"len(x):\", len(x))\n raise\n min_mag_x = min_init_dev * max_mag_x\n i = 0\n while abs(x[i]) < min_mag_x:\n i += 1\n assert i < len(x), \"Input signal minimum deviation not detected!\"\n x = x[i:] - thresh\n t = t[i:]\n\n sign_x = sign(x)\n sign_x = where(sign_x, sign_x, ones(len(sign_x))) # \"0\"s can produce duplicate xings.\n diff_sign_x = diff(sign_x)\n xing_ix = where(diff_sign_x)[0]\n xings = [t[i] + (t[i + 1] - t[i]) * x[i] / (x[i] - x[i + 1]) for i in xing_ix]\n\n if not xings:\n return array([])\n\n i = 0\n if min_delay:\n assert min_delay < xings[-1], \"min_delay ({}) must be less than last crossing time ({}).\".format(\n min_delay, xings[-1]\n )\n while xings[i] < min_delay:\n i += 1\n\n if debug:\n print(\"min_delay: {}\".format(min_delay))\n print(\"rising_first: {}\".format(rising_first))\n print(\"i: {}\".format(i))\n print(\"max_mag_x: {}\".format(max_mag_x))\n print(\"min_mag_x: {}\".format(min_mag_x))\n print(\"xings[0]: {}\".format(xings[0]))\n print(\"xings[i]: {}\".format(xings[i]))\n\n try:\n if rising_first and diff_sign_x[xing_ix[i]] < 0.0:\n i += 1\n except:\n print(\"len(diff_sign_x):\", len(diff_sign_x))\n print(\"len(xing_ix):\", len(xing_ix))\n print(\"i:\", i)\n raise\n\n return array(xings[i:])\n\n\ndef find_crossings(\n t, x, amplitude, min_delay: float = 0.0, rising_first: bool = True, min_init_dev=0.1, mod_type=0,\n):\n \"\"\"\n Finds the crossing times in a signal, according to the modulation type.\n\n Args:\n t([float]): The times associated with each signal sample.\n x([float]): The signal samples.\n amplitude(float): The nominal signal amplitude. (Used for\n determining thresholds, in the case of some modulation\n types.)\n min_delay(float): The earliest possible sample time we want\n returned. (Optional; default = 0.)\n rising_first(bool): When True, start with the first rising edge\n found. When this option is True, the first rising edge\n crossing is the first crossing returned. This is the desired\n behavior for PyBERT, because we always initialize the bit\n stream with [0, 1, 1], in order to provide a known\n synchronization point for jitter analysis.\n (Optional; default = True.)\n min_init_dev(float): The minimum initial deviation from zero,\n which must be detected, before searching for crossings.\n Normalized to maximum input signal magnitude.\n (Optional; default = 0.1.)\n mod_type(int): The modulation type. 
Allowed values are:\n {0: NRZ, 1: Duo-binary, 2: PAM-4}\n (Optional; default = 0.)\n\n Returns:\n [float]: The signal threshold crossing times.\n \"\"\"\n\n assert mod_type >= 0 and mod_type <= 2, \"ERROR: pybert_util.find_crossings(): Unknown modulation type: {}\".format(\n mod_type\n )\n\n xings = []\n if mod_type == 0: # NRZ\n xings.append(\n find_crossing_times(t, x, min_delay=min_delay, rising_first=rising_first, min_init_dev=min_init_dev)\n )\n elif mod_type == 1: # Duo-binary\n xings.append(\n find_crossing_times(\n t,\n x,\n min_delay=min_delay,\n rising_first=rising_first,\n min_init_dev=min_init_dev,\n thresh=(-0.5 * amplitude),\n )\n )\n xings.append(\n find_crossing_times(\n t,\n x,\n min_delay=min_delay,\n rising_first=rising_first,\n min_init_dev=min_init_dev,\n thresh=(0.5 * amplitude),\n )\n )\n elif mod_type == 2: # PAM-4 (Enabling the +/-0.67 cases yields multiple ideal crossings at the same edge.)\n xings.append(\n find_crossing_times(\n t,\n x,\n min_delay=min_delay,\n rising_first=rising_first,\n min_init_dev=min_init_dev,\n thresh=(0.0 * amplitude),\n )\n )\n else:\n raise ValueError(f\"Unknown modulation type: {mod_type}\")\n\n return sort(concatenate(xings))\n\n\ndef calc_jitter(ui, nui, pattern_len, ideal_xings, actual_xings, rel_thresh=6, num_bins=99, zero_mean=True):\n \"\"\"\n Calculate the jitter in a set of actual zero crossings, given the ideal crossings and unit interval.\n\n Inputs:\n\n - ui : The nominal unit interval.\n - nui : The number of unit intervals spanned by the input signal.\n - pattern_len : The number of unit intervals, before input symbol stream repeats.\n - ideal_xings : The ideal zero crossing locations of the edges.\n - actual_xings : The actual zero crossing locations of the edges.\n - rel_thresh : (optional) The threshold for determining periodic jitter spectral components (sigma).\n - num_bins : (optional) The number of bins to use, when forming histograms.\n - zero_mean : (optional) Force the mean jitter to zero, when True.\n\n Outputs:\n\n - jitter : The total jitter.\n - t_jitter : The times (taken from 'ideal_xings') corresponding to the returned jitter values.\n - isi : The peak to peak jitter due to intersymbol interference.\n - dcd : The peak to peak jitter due to duty cycle distortion.\n - pj : The peak to peak jitter due to uncorrelated periodic sources.\n - rj : The standard deviation of the jitter due to uncorrelated unbounded random sources.\n - tie_ind : The data independent jitter.\n - thresh : Threshold for determining periodic components.\n - jitter_spectrum : The spectral magnitude of the total jitter.\n - tie_ind_spectrum : The spectral magnitude of the data independent jitter.\n - spectrum_freqs : The frequencies corresponding to the spectrum components.\n - hist : The histogram of the actual jitter.\n - hist_synth : The histogram of the extrapolated jitter.\n - bin_centers : The bin center values for both histograms.\n\n \"\"\"\n\n def my_hist(x):\n \"\"\"\n Calculates the probability mass function (PMF) of the input vector,\n enforcing an output range of [-UI/2, +UI/2], sweeping everything in [-UI, -UI/2] into the first bin,\n and everything in [UI/2, UI] into the last bin.\n \"\"\"\n hist, bin_edges = histogram(\n x, [-ui] + [-ui / 2.0 + i * ui / (num_bins - 2) for i in range(num_bins - 1)] + [ui]\n )\n bin_centers = (\n [-ui / 2.0] + [mean([bin_edges[i + 1], bin_edges[i + 2]]) for i in range(len(bin_edges) - 3)] + [ui / 2.0]\n )\n\n return (array(list(map(float, hist))) / sum(hist), bin_centers)\n\n # Check 
inputs.\n    if len(ideal_xings) == 0:\n        raise ValueError(\"calc_jitter(): zero length ideal crossings vector received!\")\n    if len(actual_xings) == 0:\n        raise ValueError(\"calc_jitter(): zero length actual crossings vector received!\")\n\n    # Line up first ideal/actual crossings, and count/validate crossings per pattern.\n    ideal_xings = array(ideal_xings) - (ideal_xings[0] - ui / 2.0)\n    actual_xings = array(actual_xings) - (actual_xings[0] - ui / 2.0)\n    xings_per_pattern = where(ideal_xings > (pattern_len * ui))[0][0]\n    if xings_per_pattern % 2 or not xings_per_pattern:\n        print(\"xings_per_pattern:\", xings_per_pattern)\n        print(\"len(ideal_xings):\", len(ideal_xings))\n        print(\"min(ideal_xings):\", min(ideal_xings))\n        print(\"max(ideal_xings):\", max(ideal_xings))\n        raise AssertionError(\"pybert_util.calc_jitter(): Odd number of (or, no) crossings per pattern detected!\")\n    num_patterns = nui // pattern_len\n\n    # Assemble the TIE track.\n    i = 0\n    jitter = []\n    t_jitter = []\n    skip_next_ideal_xing = False\n    for ideal_xing in ideal_xings:\n        if skip_next_ideal_xing:\n            t_jitter.append(ideal_xing)\n            skip_next_ideal_xing = False\n            continue\n        # Confine our attention to those actual crossings occurring\n        # within the interval [-UI/2, +UI/2] centered around the\n        # ideal crossing.\n        min_t = ideal_xing - ui / 2.0\n        max_t = ideal_xing + ui / 2.0\n        while i < len(actual_xings) and actual_xings[i] < min_t:\n            i += 1\n        if i == len(actual_xings):  # We've exhausted the list of actual crossings; we're done.\n            break\n        if actual_xings[i] > max_t:  # Means the xing we're looking for didn't occur, in the actual signal.\n            jitter.append(3.0 * ui / 4.0)  # Pad the jitter w/ alternating +/- 3UI/4.\n            jitter.append(-3.0 * ui / 4.0)  # (Will get pulled into [-UI/2, UI/2], later.)\n            skip_next_ideal_xing = True  # If we missed one, we missed two.\n        else:  # Noise may produce several crossings. 
We find all those\n xings = [] # within the interval [-UI/2, +UI/2] centered\n j = i # around the ideal crossing, and take the average.\n while j < len(actual_xings) and actual_xings[j] <= max_t:\n xings.append(actual_xings[j])\n j += 1\n tie = mean(xings) - ideal_xing\n jitter.append(tie)\n t_jitter.append(ideal_xing)\n jitter = array(jitter)\n\n if debug:\n print(\"mean(jitter):\", mean(jitter))\n print(\"len(jitter):\", len(jitter))\n\n if zero_mean:\n jitter -= mean(jitter)\n\n # Do the jitter decomposition.\n # - Separate the rising and falling edges, shaped appropriately for averaging over the pattern period.\n tie_risings = jitter.take(list(range(0, len(jitter), 2)))\n tie_fallings = jitter.take(list(range(1, len(jitter), 2)))\n tie_risings.resize(num_patterns * xings_per_pattern // 2)\n tie_fallings.resize(num_patterns * xings_per_pattern // 2)\n tie_risings = reshape(tie_risings, (num_patterns, xings_per_pattern // 2))\n tie_fallings = reshape(tie_fallings, (num_patterns, xings_per_pattern // 2))\n\n # - Use averaging to remove the uncorrelated components, before calculating data dependent components.\n try:\n tie_risings_ave = tie_risings.mean(axis=0)\n tie_fallings_ave = tie_fallings.mean(axis=0)\n isi = max(tie_risings_ave.ptp(), tie_fallings_ave.ptp())\n except:\n print(\"xings_per_pattern:\", xings_per_pattern)\n print(\"len(ideal_xings):\", len(ideal_xings))\n raise\n isi = min(isi, ui) # Cap the ISI at the unit interval.\n dcd = abs(mean(tie_risings_ave) - mean(tie_fallings_ave))\n\n # - Subtract the data dependent jitter from the original TIE track, in order to yield the data independent jitter.\n tie_ave = sum(list(zip(tie_risings_ave, tie_fallings_ave)), ())\n tie_ave = resize(tie_ave, len(jitter))\n tie_ind = jitter - tie_ave\n\n # - Use spectral analysis to help isolate the periodic components of the data independent jitter.\n # -- Calculate the total jitter spectrum, for display purposes only.\n # --- Make vector uniformly sampled in time, via zero padding where necessary.\n # --- (It's necessary to keep track of those elements in the resultant vector, which aren't paddings; hence, 'valid_ix'.)\n x, valid_ix = make_uniform(t_jitter, jitter, ui, nui)\n y = fft(x)\n jitter_spectrum = abs(y[: len(y) // 2]) / sqrt(len(jitter)) # Normalized, in order to make power correct.\n f0 = 1.0 / (ui * nui)\n spectrum_freqs = [i * f0 for i in range(len(y) // 2)]\n\n # -- Use the data independent jitter spectrum for our calculations.\n tie_ind_uniform, valid_ix = make_uniform(t_jitter, tie_ind, ui, nui)\n\n # --- Normalized, in order to make power correct, since we grab Rj from the freq. 
domain.\n # --- (I'm using the length of the vector before zero padding, because zero padding doesn't add energy.)\n # --- (This has the effect of making our final Rj estimate more conservative.)\n y = fft(tie_ind_uniform) / sqrt(len(tie_ind))\n y_mag = abs(y)\n y_mean = moving_average(y_mag, n=len(y_mag) // 10)\n y_var = moving_average((y_mag - y_mean) ** 2, n=len(y_mag) // 10)\n y_sigma = sqrt(y_var)\n thresh = y_mean + rel_thresh * y_sigma\n y_per = where(y_mag > thresh, y, zeros(len(y))) # Periodic components are those lying above the threshold.\n y_rnd = where(y_mag > thresh, zeros(len(y)), y) # Random components are those lying below.\n y_rnd = abs(y_rnd)\n rj = sqrt(mean((y_rnd - mean(y_rnd)) ** 2))\n tie_per = real(ifft(y_per)).take(valid_ix) * sqrt(len(tie_ind)) # Restoring shape of vector to its original,\n pj = tie_per.ptp() # non-uniformly sampled state.\n\n # --- Save the spectrum, for display purposes.\n tie_ind_spectrum = y_mag[: len(y_mag) // 2]\n\n # - Reassemble the jitter, excluding the Rj.\n # -- Here, we see why it was necessary to keep track of the non-padded elements with 'valid_ix':\n # -- It was so that we could add the average and periodic components back together,\n # -- maintaining correct alignment between them.\n if len(tie_per) > len(tie_ave):\n tie_per = tie_per[: len(tie_ave)]\n if len(tie_per) < len(tie_ave):\n tie_ave = tie_ave[: len(tie_per)]\n jitter_synth = tie_ave + tie_per\n\n # - Calculate the histogram of original, for comparison.\n hist, bin_centers = my_hist(jitter)\n\n # - Calculate the histogram of everything, except Rj.\n hist_synth, bin_centers = my_hist(jitter_synth)\n\n # - Extrapolate the tails by convolving w/ complete Gaussian.\n rv = norm(loc=0.0, scale=rj)\n rj_pdf = rv.pdf(bin_centers)\n rj_pmf = rj_pdf / sum(rj_pdf)\n hist_synth = convolve(hist_synth, rj_pmf)\n tail_len = (len(bin_centers) - 1) // 2\n hist_synth = (\n [sum(hist_synth[: tail_len + 1])]\n + list(hist_synth[tail_len + 1 : len(hist_synth) - tail_len - 1])\n + [sum(hist_synth[len(hist_synth) - tail_len - 1 :])]\n )\n\n return (\n jitter,\n t_jitter,\n isi,\n dcd,\n pj,\n rj,\n tie_ind,\n thresh[: len(thresh) // 2],\n jitter_spectrum,\n tie_ind_spectrum,\n spectrum_freqs,\n hist,\n hist_synth,\n bin_centers,\n )\n\n\ndef make_uniform(t, jitter, ui, nbits):\n \"\"\"\n Make the jitter vector uniformly sampled in time, by zero-filling where necessary.\n\n The trick, here, is creating a uniformly sampled input vector for the FFT operation,\n since the jitter samples are almost certainly not uniformly sampled.\n We do this by simply zero padding the missing samples.\n\n Inputs:\n\n - t : The sample times for the 'jitter' vector.\n\n - jitter : The input jitter samples.\n\n - ui : The nominal unit interval.\n\n - nbits : The desired number of unit intervals, in the time domain.\n\n Output:\n\n - y : The uniformly sampled, zero padded jitter vector.\n\n - y_ix : The indices where y is valid (i.e. 
- not zero padded).\n\n    \"\"\"\n\n    if len(t) < len(jitter):\n        jitter = jitter[: len(t)]\n\n    run_lengths = list(map(int, diff(t) / ui + 0.5))\n    valid_ix = [0] + list(cumsum(run_lengths))\n    valid_ix = [x for x in valid_ix if x < nbits]\n    missing = where(array(run_lengths) > 1)[0]\n    num_insertions = 0\n    jitter = list(jitter)  # Because we use 'insert'.\n\n    for i in missing:\n        for _ in range(run_lengths[i] - 1):\n            jitter.insert(i + 1 + num_insertions, 0.0)\n            num_insertions += 1\n\n    if len(jitter) < nbits:\n        jitter.extend([0.0] * (nbits - len(jitter)))\n    if len(jitter) > nbits:\n        jitter = jitter[:nbits]\n\n    return jitter, valid_ix\n\n\ndef calc_gamma(R0, w0, Rdc, Z0, v0, Theta0, ws):\n    \"\"\"\n    Calculates propagation constant from cross-sectional parameters.\n\n    The formulas applied are taken from Howard Johnson's \"Metallic Transmission Model\"\n    (See \"High Speed Signal Propagation\", Sec. 3.1.)\n\n    Inputs:\n    - R0      skin effect resistance (Ohms/m)\n    - w0      cross-over freq.\n    - Rdc     d.c. resistance (Ohms/m)\n    - Z0      characteristic impedance in LC region (Ohms)\n    - v0      propagation velocity (m/s)\n    - Theta0  loss tangent\n    - ws      frequency sample points vector\n\n    Outputs:\n    - gamma   frequency dependent propagation constant\n    - Zc      frequency dependent characteristic impedance\n    \"\"\"\n\n    w = array(ws).copy()\n\n    # Guard against /0.\n    if w[0] == 0:\n        w[0] = 1.0e-12\n\n    Rac = R0 * sqrt(2 * 1j * w / w0)  # AC resistance vector\n    R = sqrt(power(Rdc, 2) + power(Rac, 2))  # total resistance vector\n    L0 = Z0 / v0  # \"external\" inductance per unit length (H/m)\n    C0 = 1.0 / (Z0 * v0)  # nominal capacitance per unit length (F/m)\n    C = C0 * power((1j * w / w0), (-2.0 * Theta0 / pi))  # complex capacitance per unit length (F/m)\n    gamma = sqrt((1j * w * L0 + R) * (1j * w * C))  # propagation constant (nepers/m)\n    Zc = sqrt((1j * w * L0 + R) / (1j * w * C))  # characteristic impedance (Ohms)\n\n    return (gamma, Zc)\n\n\ndef calc_gamma_RLGC(R, L, G, C, ws):\n    \"\"\"\n    Calculates propagation constant from R, L, G, and C.\n\n    Inputs:\n    - R     resistance per unit length (Ohms/m)\n    - L     inductance per unit length (Henrys/m)\n    - G     conductance per unit length (Siemens/m)\n    - C     capacitance per unit length (Farads/m)\n    - ws    frequency sample points vector\n\n    Outputs:\n    - gamma frequency dependent propagation constant\n    - Zc    frequency dependent characteristic impedance\n    \"\"\"\n\n    w = array(ws).copy()\n\n    # Guard against /0.\n    if w[0] == 0:\n        w[0] = 1.0e-12\n\n    gamma = sqrt((1j * w * L + R) * (1j * w * C + G))  # propagation constant (nepers/m)\n    Zc = sqrt((1j * w * L + R) / (1j * w * C + G))  # characteristic impedance (Ohms)\n\n    return (gamma, Zc)\n\n
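# --- Editor's note: a minimal usage sketch for calc_gamma_RLGC(), not part of\n# the original PyBERT API; the per-unit-length RLGC values below are purely\n# hypothetical, for illustration only. ---\ndef _demo_calc_gamma_RLGC():\n    ws = 2 * pi * np.linspace(1e6, 20e9, 1000)  # angular frequency samples (rads./s)\n    gamma, Zc = calc_gamma_RLGC(5.0, 4.4e-7, 1.0e-12, 4.4e-11, ws)\n    return real(gamma), np.imag(gamma)  # attenuation (Np/m) and phase (rad/m)\n\n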
of Rterm & parasitic cap.\n # (The two parasitic capacitances are in series.)\n ZL = 2.0 * 1.0 / (1j * w * CL) + RL / (1.0 + 1j * w * RL * Cp / 2)\n # Admittance into the interconnect is (Cs || Zc) / (Rs + (Cs || Zc)).\n Cs_par_Zc = Zc / (1.0 + 1j * w * Zc * Cs)\n A = Cs_par_Zc / (Rs + Cs_par_Zc)\n # Reflection coefficient at Rx:\n R1 = (ZL - Zc) / (ZL + Zc)\n # Reflection coefficient at Tx:\n R2 = (Zs - Zc) / (Zs + Zc)\n # Fully loaded channel transfer function:\n G = A * H * (1 + R1) / (1 - R1 * R2 * H ** 2)\n G = G * (((RL / (1j * w * Cp / 2)) / (RL + 1 / (1j * w * Cp / 2))) / ZL) # Corrected for divider action.\n # (i.e. - We're interested in what appears across RL.)\n return G\n\n\ndef calc_eye(ui, samps_per_ui, height, ys, y_max, clock_times=None):\n \"\"\"\n Calculates the \"eye\" diagram of the input signal vector.\n\n Args:\n ui(float): unit interval (s)\n samps_per_ui(int): # of samples per unit interval\n height(int): height of output image data array\n ys([float]): signal vector of interest\n y_max(float): max. +/- vertical extremity of plot\n\n Keyword Args:\n clock_times([float]): (optional) vector of clock times to use\n for eye centers. If not provided, just use mean\n zero-crossing and assume constant UI and no phase jumps.\n (This allows the same function to be used for eye diagram\n creation, for both pre and post-CDR signals.)\n\n Returns:\n 2D *NumPy* array: The \"heat map\" representing the eye diagram. Each grid\n location contains a value indicating the number of times the\n signal passed through that location.\n \"\"\"\n\n # List/array necessities.\n ys = array(ys)\n\n # Intermediate variable calculation.\n tsamp = ui / samps_per_ui\n\n # Adjust the scaling.\n width = 2 * samps_per_ui\n y_scale = height // (2 * y_max) # (pixels/V)\n y_offset = height // 2 # (pixels)\n\n # Generate the \"heat\" picture array.\n img_array = zeros([height, width])\n if clock_times:\n for clock_time in clock_times:\n start_time = clock_time - ui\n start_ix = int(start_time / tsamp)\n if start_ix + 2 * samps_per_ui > len(ys):\n break\n interp_fac = (start_time - start_ix * tsamp) // tsamp\n i = 0\n for (samp1, samp2) in zip(\n ys[start_ix : start_ix + 2 * samps_per_ui], ys[start_ix + 1 : start_ix + 1 + 2 * samps_per_ui],\n ):\n y = samp1 + (samp2 - samp1) * interp_fac\n img_array[int(y * y_scale + 0.5) + y_offset, i] += 1\n i += 1\n else:\n start_ix = where(diff(sign(ys)))[0][0] + samps_per_ui // 2\n last_start_ix = len(ys) - 2 * samps_per_ui\n while start_ix < last_start_ix:\n i = 0\n for y in ys[start_ix : start_ix + 2 * samps_per_ui]:\n img_array[int(y * y_scale + 0.5) + y_offset, i] += 1\n i += 1\n start_ix += samps_per_ui\n\n return img_array\n\ndef make_ctle(rx_bw, peak_freq, peak_mag, w, mode=\"Passive\", dc_offset=0):\n \"\"\"\n Generate the frequency response of a continuous time linear\n equalizer (CTLE), given the:\n\n - signal path bandwidth,\n - peaking specification\n - list of frequencies of interest, and\n - operational mode/offset.\n\n We use the 'invres()' function from scipy.signal, as it suggests\n itself as a natural approach, given our chosen use model of having\n the user provide the peaking frequency and degree of peaking.\n\n That is, we define our desired frequency response using one zero\n and two poles, where:\n\n - The pole locations are equal to:\n - the signal path natural bandwidth, and\n - the user specified peaking frequency.\n\n - The zero location is chosen, so as to provide the desired degree\n of peaking.\n\n Inputs:\n\n - rx_bw The natural (or, 
unequalized) signal path bandwidth (Hz).\n\n - peak_freq The location of the desired peak in the frequency\n response (Hz).\n\n - peak_mag The desired relative magnitude of the peak (dB). (mag(H(0)) = 1)\n\n - w The list of frequencies of interest (rads./s).\n\n - mode The operational mode; must be one of:\n - 'Off' : CTLE is disengaged.\n - 'Passive': Maximum frequency response has magnitude one.\n - 'AGC' : Automatic gain control. (Handled by calling routine.)\n - 'Manual' : D.C. offset is set manually.\n\n - dc_offset The d.c. offset of the CTLE gain curve (dB).\n (Only valid, when 'mode' = 'Manual'.)\n\n Outputs:\n\n - w, H The resultant complex frequency response, at the\n given frequencies.\n\n \"\"\"\n\n if mode == \"Off\":\n return (w, ones(len(w)))\n\n p2 = -2.0 * pi * rx_bw\n p1 = -2.0 * pi * peak_freq\n z = p1 / pow(10.0, peak_mag / 20.0)\n if p2 != p1:\n r1 = (z - p1) / (p2 - p1)\n r2 = 1 - r1\n else:\n r1 = -1.0\n r2 = z - p1\n b, a = invres([r1, r2], [p1, p2], [])\n w, H = freqs(b, a, w)\n\n if mode == \"Passive\":\n H /= max(abs(H))\n elif mode in (\"Manual\", \"AGC\"):\n H *= pow(10.0, dc_offset / 20.0) / abs(H[0]) # Enforce d.c. offset.\n else:\n raise RuntimeError(\"pybert_util.make_ctle(): Unrecognized value for 'mode' parameter: {}.\".format(mode))\n\n return (w, H)\n\n\ndef trim_impulse(g, min_len=0, max_len=1000000):\n \"\"\"\n Trim impulse response, for more useful display, by:\n - clipping off the tail, after 99.8% of the total power has been\n captured (Using 99.9% was causing problems; I don't know why.), and\n - setting the \"front porch\" length equal to 20% of the total length.\n\n Inputs:\n\n - g impulse response\n\n - min_len (optional) minimum length of returned vector\n\n - max_len (optional) maximum length of returned vector\n\n Outputs:\n\n - g_trim trimmed impulse response\n\n - start_ix index of first returned sample\n\n \"\"\"\n\n # Trim off potential FFT artifacts from the end and capture peak location.\n g = array(g[: int(0.9 * len(g))])\n max_ix = np.argmax(g)\n\n # Capture 99.8% of the total energy.\n Pt = 0.998 * sum(g ** 2)\n i = 0\n P = 0\n while P < Pt:\n P += g[i] ** 2\n i += 1\n stop_ix = min(max_ix + max_len, max(i, max_ix + min_len))\n\n # Set \"front porch\" to 20%, guarding against negative start index.\n start_ix = max(0, max_ix - (stop_ix - max_ix) // 4)\n\n return (g[start_ix:stop_ix], start_ix)\n\n\ndef import_channel(filename, sample_per, padded=False, windowed=False):\n \"\"\"\n Read in a channel file.\n\n Args:\n filename(str): Name of file from which to import channel description.\n sample_per(float): Sample period of signal vector (s).\n padded(Bool): (Optional) Zero pad s4p data, such that fmax >= 1/(2*sample_per)? (Default = False)\n windowed(Bool): (Optional) Window s4p data, before converting to time domain? 
(Default = False)\n\n Returns:\n [float]: Imported channel impulse, or step, response.\n \"\"\"\n\n extension = os.path.splitext(filename)[1][1:]\n # if extension in (\"s1p\", \"S1P\", \"s2p\", \"S2P\", \"s4p\", \"S4P\"):\n if re.search(\"^s\\d+p$\", extension, re.ASCII | re.IGNORECASE):\n return import_freq(filename, sample_per, padded=padded, windowed=windowed)\n return import_time(filename, sample_per)\n\n\ndef interp_time(ts, xs, sample_per):\n \"\"\"\n Resample time domain data, using linear interpolation.\n\n Args:\n ts([float]): Original time values.\n xs([float]): Original signal values.\n sample_per(float): System sample period.\n\n Returns:\n [float]: Resampled waveform.\n \"\"\"\n tmax = ts[-1]\n res = []\n t = 0.0\n i = 0\n while t < tmax:\n while ts[i] <= t:\n i = i + 1\n res.append(xs[i - 1] + (xs[i] - xs[i - 1]) * (t - ts[i - 1]) / (ts[i] - ts[i - 1]))\n t += sample_per\n\n return array(res)\n\n\ndef import_time(filename, sample_per):\n \"\"\"\n Read in a time domain waveform file, resampling as\n appropriate, via linear interpolation.\n\n Args:\n filename(str): Name of waveform file to read in.\n sample_per(float): New sample interval\n\n Returns:\n [float]: Resampled waveform.\n \"\"\"\n ts = []\n xs = []\n with open(filename, mode=\"rU\") as file:\n for line in file:\n try:\n tmp = list(map(float, [_f for _f in re.split(\"[, ;:]+\", line) if _f][0:2]))\n except:\n continue\n ts.append(tmp[0])\n xs.append(tmp[1])\n\n return interp_time(ts, xs, sample_per)\n\n\ndef sdd_21(ntwk):\n \"\"\"\n Given a 4-port single-ended network, return its differential throughput.\n\n Args:\n ntwk(skrf.Network): 4-port single ended network.\n\n Returns:\n [float]: Sdd[2,1].\n \"\"\"\n if real(ntwk.s21.s[0, 0, 0]) < 0.5: # 1 ==> 3 port numbering?\n ntwk.renumber((1, 2), (2, 1))\n\n return 0.5 * (ntwk.s21 - ntwk.s23 + ntwk.s43 - ntwk.s41)\n\n\ndef se2mm(ntwk):\n \"\"\"\n Given a 4-port single-ended network, return its mixed mode equivalent.\n\n Args:\n ntwk(skrf.Network): 4-port single ended network.\n\n Returns:\n skrf.Network: Mixed mode equivalent network, in the following format:\n Sdd11 Sdd12 Sdc11 Sdc12\n Sdd21 Sdd22 Sdc21 Sdc22\n Scd11 Scd12 Scc11 Scc12\n Scd21 Scd22 Scc21 Scc22\n \"\"\"\n if real(ntwk.s21.s[0, 0, 0]) < 0.5: # 1 ==> 3 port numbering?\n ntwk.renumber((2, 3), (3, 2))\n f = ntwk.f\n s = np.zeros(ntwk.s.shape, dtype=complex)\n s[:,0,0] = 0.5 * (ntwk.s11 - ntwk.s13 - ntwk.s31 + ntwk.s33).s.flatten()\n s[:,0,1] = 0.5 * (ntwk.s12 - ntwk.s14 - ntwk.s32 + ntwk.s34).s.flatten()\n s[:,0,2] = 0.5 * (ntwk.s11 + ntwk.s13 - ntwk.s31 - ntwk.s33).s.flatten()\n s[:,0,3] = 0.5 * (ntwk.s12 + ntwk.s14 - ntwk.s32 - ntwk.s34).s.flatten()\n s[:,1,0] = 0.5 * (ntwk.s21 - ntwk.s23 - ntwk.s41 + ntwk.s43).s.flatten()\n s[:,1,1] = 0.5 * (ntwk.s22 - ntwk.s24 - ntwk.s42 + ntwk.s44).s.flatten()\n s[:,1,2] = 0.5 * (ntwk.s21 + ntwk.s23 - ntwk.s41 - ntwk.s43).s.flatten()\n s[:,1,3] = 0.5 * (ntwk.s22 + ntwk.s24 - ntwk.s42 - ntwk.s44).s.flatten()\n s[:,2,0] = 0.5 * (ntwk.s11 - ntwk.s13 + ntwk.s31 - ntwk.s33).s.flatten()\n s[:,2,1] = 0.5 * (ntwk.s12 - ntwk.s14 + ntwk.s32 - ntwk.s34).s.flatten()\n s[:,2,2] = 0.5 * (ntwk.s11 + ntwk.s13 + ntwk.s31 + ntwk.s33).s.flatten()\n s[:,2,3] = 0.5 * (ntwk.s12 + ntwk.s14 + ntwk.s32 + ntwk.s34).s.flatten()\n s[:,3,0] = 0.5 * (ntwk.s21 - ntwk.s23 + ntwk.s41 - ntwk.s43).s.flatten()\n s[:,3,1] = 0.5 * (ntwk.s22 - ntwk.s24 + ntwk.s42 - ntwk.s44).s.flatten()\n s[:,3,2] = 0.5 * (ntwk.s21 + ntwk.s23 + ntwk.s41 + ntwk.s43).s.flatten()\n s[:,3,3] = 0.5 * (ntwk.s22 + ntwk.s24 + 
ntwk.s42 + ntwk.s44).s.flatten()\n return rf.Network(frequency=f, s=s)\n\n\ndef import_freq(filename, sample_per, padded=False, windowed=False, f_step=10e6):\n \"\"\"\n Read in a single ended 1, 2, or 4-port Touchstone file, and extract the\n differential throughput step response, resampling as\n appropriate, via linear interpolation.\n\n Args:\n filename(str): Name of Touchstone file to read in.\n sample_per(float): New sample interval\n padded(Bool): (Optional) Zero pad data, such that fmax >= 1/(2*sample_per)? (Default = False)\n windowed(Bool): (Optional) Window data, before converting to time domain? (Default = False)\n\n Returns:\n [float]: Resampled step response waveform.\n\n Raises:\n ValueError: If Touchstone file is not 1, 2, or 4-port.\n \"\"\"\n ntwk = rf.Network(filename)\n (fs, rs, cs) = ntwk.s.shape\n assert (rs == cs), \"Non-square Touchstone file S-matrix!\"\n assert (rs in (1, 2, 4)), \"Touchstone file must have 1, 2, or 4 ports!\"\n\n # Form frequency vector.\n f = ntwk.f\n fmin = f_step\n if f[0] > 0:\n fmin = max(fmin, f[0])\n fmax = f[-1]\n f = np.arange(fmin, fmax + fmin, fmin)\n F = rf.Frequency.from_f(f / 1e9) # skrf.Frequency.from_f() expects its argument to be in units of GHz.\n\n # Form impulse response from frequency response.\n if rs == 4:\n ntwk2 = sdd_21(ntwk)\n elif rs == 2:\n ntwk2 = ntwk\n else: # rs == 1\n ntwk2 = rf.one_port_2_two_port(ntwk)\n H = ntwk2.interpolate_from_f(F).s[:, 0, 0]\n H = np.pad(H, (1, 0), \"constant\", constant_values=1.0) # Presume d.c. value = 1.\n if windowed:\n window = get_window(6.0, 2 * len(H))[len(H) :]\n H *= window\n if padded:\n h = np.fft.irfft(H, int(1.0 / (fmin * sample_per)) + 1)\n fmax = 1.0 / (2.0 * sample_per)\n else:\n h = np.fft.irfft(H)\n h /= np.abs(h.sum()) # Equivalent to assuming that step response settles at 1.\n\n # Form step response from impulse response.\n s = np.cumsum(h)\n\n # Form time vector.\n t0 = 1.0 / (2.0 * fmax) # Sampling interval = 1 / (2 fNyquist).\n t = np.array([n * t0 for n in range(len(h))])\n\n return interp_time(t, s, sample_per)\n\n\ndef lfsr_bits(taps, seed):\n \"\"\"\n Given a set of tap indices and a seed, generate a PRBS.\n\n Args:\n taps([int]): The set of fed back taps.\n (Largest determines order of generator.)\n seed(int): The initial value of the shift register.\n\n Returns:\n generator: A PRBS generator object with a next() method, for retrieving\n the next bit in the sequence.\n \"\"\"\n val = int(seed)\n num_taps = max(taps)\n mask = (1 << num_taps) - 1\n\n while True:\n xor_res = reduce(lambda x, b: x ^ b, [bool(val & (1 << (tap - 1))) for tap in taps])\n val = (val << 1) & mask # Just to keep 'val' from growing without bound.\n if xor_res:\n val += 1\n yield val & 1\n\n\ndef safe_log10(x):\n \"\"\"Guards against pesky 'Divide by 0' error messages.\"\"\"\n\n if hasattr(x, \"__len__\"):\n x = where(x == 0, 1.0e-20 * ones(len(x)), x)\n else:\n if x == 0:\n x = 1.0e-20\n\n return log10(x)\n\n\ndef pulse_center(p, nspui):\n \"\"\"\n Determines the center of the pulse response, using the \"Hula Hoop\"\n algorithm (See SiSoft/Tellian's DesignCon 2016 paper.)\n\n Args:\n p([Float]): The single bit pulse response.\n nspui(Int): The number of vector elements per unit interval.\n\n Returns:\n (Int, float): The estimated index at which the clock will\n sample the main lobe, and the vertical threshold at which\n the main lobe is UI wide.\n \"\"\"\n div = 2.0\n p_max = p.max()\n thresh = p_max / div\n main_lobe_ixs = where(p > thresh)[0]\n if not main_lobe_ixs.size: # Sometimes, the 
optimizer really whacks out.\n return (-1, 0) # Flag this, by returning an impossible index.\n\n err = main_lobe_ixs[-1] - main_lobe_ixs[0] - nspui\n while err and div < 5000:\n div *= 2.0\n if err > 0:\n thresh += p_max / div\n else:\n thresh -= p_max / div\n main_lobe_ixs = where(p > thresh)[0]\n err = main_lobe_ixs[-1] - main_lobe_ixs[0] - nspui\n\n clock_pos = int(mean([main_lobe_ixs[0], main_lobe_ixs[-1]]))\n return (clock_pos, thresh)\n\n\ndef submodules(package):\n \"\"\"Find all sub-modules of a package.\"\"\"\n rst = {}\n\n for imp, name, _ in pkgutil.iter_modules(package.__path__):\n fullModuleName = \"{0}.{1}\".format(package.__name__, name)\n mod = importlib.import_module(fullModuleName, package=package.__path__)\n rst[name] = mod\n\n return rst\n","sub_path":"pybert/pybert_util.py","file_name":"pybert_util.py","file_ext":"py","file_size_in_byte":37536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"71341154","text":"import os\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.sampler import Sampler\nimport random\nimport numpy as np\nimport time\nimport logging\nimport threading\n\n\nclass SubGroupsRandomSampler(Sampler):\n\n def __init__(self, data_source):\n self.data_source = data_source\n self.prefetch_thread = None\n self.last_file_visited = None\n\n def __iter__(self):\n file_order = np.random.permutation(self.data_source.data_files)\n\n if len(file_order) > 1 and self.last_file_visited is not None: # Start from currently cached batch\n last_visited_idx = np.where(file_order==self.last_file_visited)\n file_order[0], file_order[last_visited_idx] = file_order[last_visited_idx], file_order[0]\n\n for file_idx, group_file in enumerate(file_order):\n group_size = int(group_file[1]) - int(group_file[0])\n self.last_file_visited = group_file\n for count, idx in enumerate(np.random.permutation(group_size)):\n yield int(group_file[0]) + idx\n\n def __len__(self):\n return len(self.data_source)\n\n\nclass UMDDataset(Dataset):\n\n def __init__(self, path, ypr_quant, ypr_regress, deg_dim, h_flip_augment, use_cuda):\n\n assert not ypr_quant or (180 % deg_dim == 0), \\\n \"Invalid deg_dim parameter defined for trainer params: %r\" % deg_dim\n\n self.ypr_quant = ypr_quant\n self.ypr_regress = ypr_regress\n self.deg_dim = 2\n self.h_flip_augment = h_flip_augment\n self.use_cuda = use_cuda\n self.data_files = []\n self.labels = None\n self.dataset_lock = threading.Lock()\n batches = []\n\n for filename in os.listdir(path):\n assert (filename.startswith('umd_') or filename.startswith('celebA_')) and filename.endswith('.pth'), \\\n \"Invalid file %r, does not belong to UMDFaces dataset\" % filename\n\n filepath = os.path.join(path, filename)\n\n if '_labels' in filename:\n self.labels = torch.load(filepath)\n else:\n last_underscore_idx = filename.rfind('_')\n batch_end = int(filename[last_underscore_idx+1:-len('.pth')])\n second_last_underscore_idx = filename[:last_underscore_idx].rfind('_')\n batch_start = int(filename[second_last_underscore_idx+1:last_underscore_idx])\n self.data_files.append((batch_start, batch_end, filepath))\n\n # Verify all files are present\n assert self.data_files, \"No data files found for UMDFaces dataset in path %r\" % path\n assert self.labels is not None, \"No labels file found for UMDFaces dataset in path %r\" % path\n\n self.data_files.sort(key=lambda tup: tup[0])\n assert self.data_files[0][0] == 0, \"First data file for UMDFaces dataset is missing in path %r\" % path\n for i, 
_ in enumerate(self.data_files[:-1]):\n            assert self.data_files[i][1] == self.data_files[i+1][0],\\\n                \"Missing data file for batch starting from %r in path %r\" % (self.data_files[i+1][0], path)\n\n        self.current_batch_range = (-1, -1)\n        self.current_batch = None\n        self.prev_batch_range = (-1, -1)\n        self.prev_batch = None\n\n    @staticmethod\n    def normalize_img(x):\n        return x.float().div(255).mul_(2).sub_(1)  # To range -1 to 1\n\n    @staticmethod\n    def flip_horizontally(x, y):\n        x = x.index_select(2, torch.arange(x.size(2) - 1, -1, -1).long())\n        return x, y\n\n    def prefetch(self, idx, true_prefetch):\n        with self.dataset_lock:\n            if not self.current_batch_range[0] <= idx < self.current_batch_range[1]:\n                if self.prev_batch_range[0] <= idx < self.prev_batch_range[1]:\n                    return self.fetch_from_batch(self.prev_batch, self.prev_batch_range, idx)\n                next_data_entry = next(entry for entry in self.data_files if entry[0] <= idx < entry[1])\n\n                self.prev_batch = self.current_batch\n                self.prev_batch_range = self.current_batch_range\n                self.current_batch_range = (next_data_entry[0], next_data_entry[1])\n\n                if true_prefetch:\n                    logging.info('Prefetch optimization executed..')\n                else:\n                    logging.info('Hot cache miss for data-batch file..')\n                logging.info('Swapping data-batch file to: ' + next_data_entry[2])\n                start = time.time()\n\n                self.current_batch = torch.load(next_data_entry[2])\n                end = time.time()\n                logging.info('Loading completed after ' + '{0:.2f}'.format(end - start) + ' seconds')\n\n    def fetch_from_batch(self, batch_file, batch_range, idx):\n        x = batch_file[idx - batch_range[0]]\n        x = self.normalize_img(x)\n        y = self.labels[idx].float()\n\n        if self.h_flip_augment and random.random() >= 0.5:\n            x, y = self.flip_horizontally(x, y)\n\n        x = x.index_select(0, torch.arange(x.size(0) - 1, -1, -1).long())\n\n        return {'data': x, 'label': y}\n\n    def __getitem__(self, idx):\n        # Load next batch if needed,\n        # Maintain a LRU history of size 1\n        if not self.current_batch_range[0] <= idx < self.current_batch_range[1]:\n            if self.prev_batch_range[0] <= idx < self.prev_batch_range[1]:\n                return self.fetch_from_batch(self.prev_batch, self.prev_batch_range, idx)\n            else:\n                self.prefetch(idx, False)\n\n        return self.fetch_from_batch(self.current_batch, self.current_batch_range, idx)\n\n    def __len__(self):\n        return self.data_files[-1][1]","sub_path":"sanity/gender_data_loader.py","file_name":"gender_data_loader.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
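A brief editor's sketch (not part of the dataset record) of how the sampler and dataset above plug into a standard PyTorch `DataLoader`; the path and batch size are hypothetical, and `num_workers` is left at its default of 0 so the single in-object batch-file cache is actually reused:

from torch.utils.data import DataLoader

dataset = UMDDataset(path="data/umd", ypr_quant=False, ypr_regress=True,
                     deg_dim=2, h_flip_augment=True, use_cuda=False)
loader = DataLoader(dataset, batch_size=64,
                    sampler=SubGroupsRandomSampler(dataset))  # shuffles within one cached file at a time

The sampler exhausts each `.pth` batch file before moving to the next, which is what makes the size-1 LRU cache in `__getitem__` effective.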
NAME = \"shapes\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 4\n # Number of classes (including background)\n NUM_CLASSES = 1 + 4 # background + 3 shapes\n DETECTION_MIN_CONFIDENCE = 0.3\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 640\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 400\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 32\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n\nclass InferenceConfig(CellsConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\ndef get_ax(rows=1, cols=1, size=8):\n \"\"\"Return a Matplotlib Axes array to be used in\n all visualizations in the notebook. Provide a\n central point to control graph sizes.\n\n Change the default size attribute to control the size\n of rendered images\n \"\"\"\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax\n\n\nclass CellsDataset(utils.Dataset):\n \"\"\"Generates the shapes synthetic dataset. The dataset consists of simple\n shapes (triangles, squares, circles) placed randomly on a blank surface.\n The images are generated on the fly. No file access required.\n \"\"\"\n\n def load_shapes(self, count, height, width):\n \"\"\"Generate the requested number of synthetic images.\n count: number of images to generate.\n height, width: the size of the generated images.\n \"\"\"\n # Add classes\n self.add_class(\"cells\", 1, \"gbm\")\n self.add_class(\"cells\", 2, \"hnsc\")\n self.add_class(\"cells\", 3, \"lgg\")\n self.add_class(\"cells\", 4, \"lung\")\n\n # Add images\n # Generate random specifications of images (i.e. img_path and\n # list of shapes sizes and mask_path). 
Images are generated in load_image().\n path_prefix = '/home/neuron/Desktop/Donghao/cellsegmentation/main_data_folder/maskrcnn'\n cancer_types = ['gbm', 'hnsc', 'lgg', 'lung']\n for i in range(count):\n i_reminder = i % 8\n i_division = i // 8\n img_path = path_prefix + '/resize_train/' + 'image0' + str(i_reminder+1) + '_' \\\n + cancer_types[i_division] + '_resized.png'\n mask_path_save = path_prefix + '/resize_train_mask/' + 'image0' + str(i_reminder+1) + '_' \\\n + cancer_types[i_division] + '_resized.png'\n im_cv2 = cv2.imread(img_path)\n im_cv2 = np.flip(im_cv2, 2)\n im_width = im_cv2.shape[0]\n im_height = im_cv2.shape[1]\n self.add_image(\"cells\", image_id=i, path=img_path,\n width=im_width, height=im_height,\n mask_path = mask_path_save,\n img_type=cancer_types[i_division],\n celltype_ids=i_division)\n\n def load_image(self, image_id):\n \"\"\"Generate an image from the specs of the given image ID.\n This function loads the image from a file.\n \"\"\"\n info = self.image_info[image_id]\n img_path = info['path']\n image = cv2.imread(img_path)\n image = np.flip(image, 2)\n return image\n\n def image_reference(self, image_id):\n \"\"\"Return the shapes data of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"shapes\":\n return info[\"shapes\"]\n else:\n super(self.__class__).image_reference(self, image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for shapes of the given image ID.\n \"\"\"\n info = self.image_info[image_id]\n mask_path = info['mask_path']\n mask_label = cv2.imread(mask_path)\n gray_mask_label = mask_label[:,:,2]\n cell_count = gray_mask_label.max()\n mask = np.zeros([info['height'], info['width'], cell_count], dtype=np.uint8)\n for i in range(cell_count):\n imr_copy = np.zeros(gray_mask_label.shape)\n edge_index = np.where(gray_mask_label == i)\n imr_copy[edge_index] = 1\n mask[:, :, i] = imr_copy\n cur_class_ids = info['celltype_ids']\n # Map class names to class IDs.\n class_ids = np.ones((cell_count)) * (cur_class_ids + 1)\n return mask, class_ids.astype(np.int32)\n\n def draw_shape(self, image, shape, dims, color):\n \"\"\"Draws a shape from the given specs.\"\"\"\n # Get the center x, y and the size s\n x, y, s = dims\n if shape == 'square':\n image = cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n elif shape == \"circle\":\n image = cv2.circle(image, (x, y), s, color, -1)\n elif shape == \"triangle\":\n points = np.array([[(x, y-s),\n (x-s/math.sin(math.radians(60)), y+s),\n (x+s/math.sin(math.radians(60)), y+s),\n ]], dtype=np.int32)\n image = cv2.fillPoly(image, points, color)\n return image\n\n def random_shape(self, height, width):\n \"\"\"Generates specifications of a random shape that lies within\n the given height and width boundaries.\n Returns a tuple of three valus:\n * The shape name (square, circle, ...)\n * Shape color: a tuple of 3 values, RGB.\n * Shape dimensions: A tuple of values that define the shape size\n and location. 
Differs per shape type.\n \"\"\"\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height//4)\n return shape, color, (x, y, s)\n\n def random_image(self, height, width):\n \"\"\"Creates random specifications of an image with multiple shapes.\n Returns the background color of the image and a list of shape\n specifications that can be used to draw the image.\n \"\"\"\n # Pick random background color\n bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n # Generate a few random shapes and record their\n # bounding boxes\n shapes = []\n boxes = []\n N = random.randint(1, 4)\n for _ in range(N):\n shape, color, dims = self.random_shape(height, width)\n # print('shape color', color)\n # print('shape ', shape)\n shapes.append((shape, color, dims))\n x, y, s = dims\n boxes.append([y-s, x-s, y+s, x+s])\n\n # Apply non-max suppression wit 0.3 threshold to avoid\n # shapes covering each other\n keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n # print('keep_ixs', keep_ixs)\n shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n # for i, s in enumerate(shapes):\n # print(i, s)\n return bg_color, shapes\n\n# Root directory of the project\nROOT_DIR = os.getcwd()\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Configuration for the cell dataset\nconfig = InferenceConfig()\n\n# Training dataset\ndataset = CellsDataset()\ndataset.load_shapes(32, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\ndataset.prepare()\n\n# Create model in inference mode\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR,\n config=config)\n\n# Get path to saved weights\n# Either set a specific path or find last trained weights\nmodel_path = os.path.join(ROOT_DIR, \"logs/shapes20171211T2313/mask_rcnn_shapes_0030.h5\")\n# Load trained weights (fill in path to trained weights here)\nassert model_path != \"\", \"Provide path to trained weights\"\n# print(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, by_name=True)\n\n# Random a image id\nimage_id = random.choice(dataset.image_ids)\n# Extract the image based on the image id\nimage, image_meta, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)\n\n# Run object detection\nresults = model.detect([image], verbose=1)\nr = results[0]\n\n# Run RPN sub-graph\npillar = model.keras_model.get_layer(\"ROI\").output # node to start searching from\n\n# TF 1.4 introduces a new version of NMS. 
Search for both names to support TF 1.3 and 1.4\nnms_node = (model.ancestor(pillar, \"ROI/rpn_non_max_suppression:0\")\n or model.ancestor(pillar, \"ROI/rpn_non_max_suppression/NonMaxSuppressionV2:0\"))\n\nrpn = model.run_graph([image], [\n (\"rpn_class\", model.keras_model.get_layer(\"rpn_class\").output),\n (\"pre_nms_anchors\", model.ancestor(pillar, \"ROI/pre_nms_anchors:0\")),\n (\"refined_anchors\", model.ancestor(pillar, \"ROI/refined_anchors:0\")),\n (\"refined_anchors_clipped\", model.ancestor(pillar, \"ROI/refined_anchors_clipped:0\")),\n (\"post_nms_anchor_ix\", nms_node),\n (\"proposals\", model.keras_model.get_layer(\"ROI\").output),\n])\n\n# Get input and output to classifier and mask heads.\nmrcnn = model.run_graph([image], [\n (\"proposals\", model.keras_model.get_layer(\"ROI\").output),\n (\"probs\", model.keras_model.get_layer(\"mrcnn_class\").output),\n (\"deltas\", model.keras_model.get_layer(\"mrcnn_bbox\").output),\n (\"masks\", model.keras_model.get_layer(\"mrcnn_mask\").output),\n (\"detections\", model.keras_model.get_layer(\"mrcnn_detection\").output),\n])\n\n# Get detection class IDs. Trim zero padding.\ndet_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)\ndet_count = len(det_class_ids)-1\ndet_class_ids = det_class_ids[:det_count]\ndetections = mrcnn['detections'][0, :det_count]\n\n# Proposals are in normalized coordinates. Scale them\n# to image coordinates.\nh, w = config.IMAGE_SHAPE[:2]\nproposals = np.around(mrcnn[\"proposals\"][0] * np.array([h, w, h, w])).astype(np.int32)\n\n# Class ID, score, and mask per proposal\nroi_class_ids = np.argmax(mrcnn[\"probs\"][0], axis=1)\nroi_scores = mrcnn[\"probs\"][0, np.arange(roi_class_ids.shape[0]), roi_class_ids]\nroi_class_names = np.array(dataset.class_names)[roi_class_ids]\nroi_positive_ixs = np.where(roi_class_ids > 0)[0]\n\n# How many ROIs vs empty rows?\nprint(\"{} Valid proposals out of {}\".format(np.sum(np.any(proposals, axis=1)), proposals.shape[0]))\nprint(\"{} Positive ROIs\".format(len(roi_positive_ixs)))\n\n# Class counts\nprint(list(zip(*np.unique(roi_class_names, return_counts=True))))\n\n# Class-specific bounding box shifts.\nroi_bbox_specific = mrcnn[\"deltas\"][0, np.arange(proposals.shape[0]), roi_class_ids]\nlog(\"roi_bbox_specific\", roi_bbox_specific)\n\n# Apply bounding box transformations\n# Shape: [N, (y1, x1, y2, x2)]\nrefined_proposals = utils.apply_box_deltas(\n proposals, roi_bbox_specific * config.BBOX_STD_DEV).astype(np.int32)\nlog(\"refined_proposals\", refined_proposals)\n\n# Remove boxes classified as background\nkeep = np.where(roi_class_ids > 0)[0]\n# print(\"Keep {} detections:\\n{}\".format(keep.shape[0], keep))\n\n# Remove low confidence detections\nkeep = np.intersect1d(keep, np.where(roi_scores >= config.DETECTION_MIN_CONFIDENCE)[0])\n# print(\"Remove boxes below {} confidence. 
Keep {}:\\n{}\".format(\n# config.DETECTION_MIN_CONFIDENCE, keep.shape[0], keep))\n\n# Apply per-class non-max suppression\npre_nms_boxes = refined_proposals[keep]\npre_nms_scores = roi_scores[keep]\npre_nms_class_ids = roi_class_ids[keep]\n\nnms_keep = []\nfor class_id in np.unique(pre_nms_class_ids):\n # Pick detections of this class\n ixs = np.where(pre_nms_class_ids == class_id)[0]\n # Apply NMS\n class_keep = utils.non_max_suppression(pre_nms_boxes[ixs],\n pre_nms_scores[ixs],\n config.DETECTION_NMS_THRESHOLD)\n # Map indicies\n class_keep = keep[ixs[class_keep]]\n nms_keep = np.union1d(nms_keep, class_keep)\n print(\"{:22}: {} -> {}\".format(dataset.class_names[class_id][:20],\n keep[ixs], class_keep))\n\nkeep = np.intersect1d(keep, nms_keep).astype(np.int32)\nprint(\"\\nKept after per-class NMS: {}\\n{}\".format(keep.shape[0], keep))\n\n# Get predictions of mask head\nmrcnn = model.run_graph([image], [\n (\"detections\", model.keras_model.get_layer(\"mrcnn_detection\").output),\n (\"masks\", model.keras_model.get_layer(\"mrcnn_mask\").output),\n])\n\n# Get detection class IDs. Trim zero padding.\ndet_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)\ndet_count = len(det_class_ids) - 1\ndet_class_ids = det_class_ids[:det_count]\n\nprint(\"{} detections: {}\".format(\n det_count, np.array(dataset.class_names)[det_class_ids]))\n\n# Masks\ndet_boxes = mrcnn[\"detections\"][0, :, :4].astype(np.int32)\ndet_mask_specific = np.array([mrcnn[\"masks\"][0, i, :, :, c]\n for i, c in enumerate(det_class_ids)])\ndet_masks = np.array([utils.unmold_mask(m, det_boxes[i], image.shape)\n for i, m in enumerate(det_mask_specific)])\nlog(\"det_mask_specific\", det_mask_specific)\nlog(\"det_masks\", det_masks)\n","sub_path":"lib/Mask_RCNN-master/maskrcnn_cell_detect_step_v3.py","file_name":"maskrcnn_cell_detect_step_v3.py","file_ext":"py","file_size_in_byte":14024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"535799322","text":"# scrapes Craigslist for locks in NoVa within specified budget\r\n\r\nimport requests\r\nimport sys\r\nimport bs4\r\n\r\nbudget = sys.argv[1]\r\nr = requests.get(\"https://washingtondc.craigslist.org/search/nva/sss?query=lock&excats=69-27-11-11-2-2-12-7-1-11-7-3-2-2-4-3-8-5-8-1-3-1-3-1&sort=rel&max_price=\" + budget)\r\nhtml = bs4.BeautifulSoup(r.text, \"html.parser\") # create BS object to parse website's html\r\n\r\nlocks = html.select(\".result-info\")\r\nnumLocks = 0\r\n\r\nfor lock in locks:\r\n title = lock.findAll(attrs={'class': 'result-title'})[0].text.lower()\r\n if (\"lock\" not in title): #skip if 'lock' isn't explicitly in title\r\n continue\r\n price = lock.findAll(attrs={'class': 'result-price'})[0].text\r\n if (len(lock.findAll(attrs={'class': 'result-hood'})) == 0): #skip on location error\r\n continue\r\n numLocks += 1\r\n location = lock.findAll(attrs={'class': 'result-hood'})[0].text\r\n link = \"https://washingtondc.craigslist.org\" + lock.find('a', attrs={'class': 'hdrlnk'})['href']\r\n print(\"-------------------------------------------------\\n\" + title, price, location + \"\\n\\n\" + link)\r\n\r\nprint(\"\\nfound \" + str(numLocks) + \" locks for $\" + budget + \" or less!\")\r\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"14659449","text":"import math\nimport cv2 as cv\nimport numpy as np\n\nhaar_cascasde_eye = 
cv.CascadeClassifier(\"classifier/haarcascade_eye.xml\")\nhaar_cascasde_nose = cv.CascadeClassifier(\"classifier/haarcascade_nose.xml\")\n# haar_cascasde_face = cv.CascadeClassifier(\"classifier/haarcascade_frontalface_default.xml\")\nhaar_cascasde_face = cv.CascadeClassifier(\"classifier/lbpcascaade_frontalface_improved.xml\")\n\n\n# Haarcascade basic detection\ndef haarcascade_FacialDetection(image, scaleFactor, minNeighbors, minSize = None, maxSize = None):\n \"\"\"\n This is to be used by videoManager only because there seems to be some error when used in imageManager (caused by multithreading probably)\n Just run the haarcascade facial detection on the image without modifying the input image\n :return the detectMultiScale output\n \"\"\"\n if minSize == None:\n if maxSize == None:\n return haar_cascasde_face.detectMultiScale(image, scaleFactor, minNeighbors)\n return haar_cascasde_face.detectMultiScale(image, scaleFactor, minNeighbors, maxSize = maxSize)\n if maxSize == None:\n return haar_cascasde_face.detectMultiScale(image, scaleFactor, minNeighbors, minSize = minSize)\n return haar_cascasde_face.detectMultiScale(image, scaleFactor, minNeighbors, minSize = minSize, maxSize = maxSize)\n\n\ndef haarcascade_EyeDetection(image, scaleFactor, minNeighbors, minSize = None, maxSize = None):\n \"\"\"\n Just run the haarcascade eye detection on the image without modifying the input image\n :return the detectMultiScale output\n \"\"\"\n if minSize == None:\n if maxSize == None:\n return haar_cascasde_eye.detectMultiScale(image, scaleFactor, minNeighbors)\n return haar_cascasde_eye.detectMultiScale(image, scaleFactor, minNeighbors, maxSize = maxSize)\n if maxSize == None:\n return haar_cascasde_eye.detectMultiScale(image, scaleFactor, minNeighbors, minSize = minSize)\n return haar_cascasde_eye.detectMultiScale(image, scaleFactor, minNeighbors, minSize = minSize, maxSize = maxSize)\n \n\n\n# Rotation\ndef rotateClockwise(img, angle):\n \"\"\"\n This functions return a clockwise rotated image without cropping any part of the original image and keeping the center of the original \n image at the center of the new rotated image. The returned image may have larger dimensions than the original but as small as possible.\n :param img: the original image\n :param angle: the rotating angle clockwise\n :return: the rotated-around-the-center image without cropping\n \"\"\"\n angle = -angle\n return rotateCounterClockwise(img, angle)\n\n\ndef rotateCounterClockwise(img, angle):\n \"\"\"\n This functions return a counter-clockwise rotated image without cropping any part of the original image and keeping the center of the original \n image at the center of the new rotated image. The returned image may have larger dimensions than the original but as small as possible.\n :param img: the original image\n :param angle: the rotating angle counter-clockwise\n :return: the rotated-around-the-center image without cropping\n \"\"\"\n (height, width) = img.shape[:2]\n\n # Calculating the dimension of the new canvas to store the entire rotated image without cropping, diagAngleA is used to calculate fittingWidth, diagAngleB is for fittingHeight\n diagLen = math.sqrt(width**2 + height**2)\n diagAngleA = math.atan(width/height)\n diagAngleB = math.pi/2 - diagAngleA\n\n # TempAngle is used to keep the angle value in math.cos stays between -90 and 90 degree => consistence cos value \n # cos value is the cosine of the offset of frame/img diagonal line from the x/y axis (diagAngle + tempAngle*math.pi/180 - math.pi/2). 
\n # And using the value of angle, we will know which cos value is used to determine width and height.\n # angle < 360 to avoid edge cases\n angle = angle % 360\n tempAngle = angle%90\n if angle == 0 or angle == 180:\n fittingWidth = width\n fittingHeight = height\n elif angle == 90 or angle == 270:\n fittingWidth = height\n fittingHeight = width\n elif (angle > 0 and angle < 90) or (angle > 180 and angle < 270):\n \n fittingWidth = math.floor(math.cos(diagAngleA + tempAngle*math.pi/180 - math.pi/2) * diagLen)\n fittingHeight = math.floor(math.cos(diagAngleB + tempAngle*math.pi/180 - math.pi/2) * diagLen)\n else :\n fittingWidth = math.floor(math.cos(diagAngleB + tempAngle*math.pi/180 - math.pi/2) * diagLen)\n fittingHeight = math.floor(math.cos(diagAngleA + tempAngle*math.pi/180 - math.pi/2) * diagLen)\n\n # Drawing the pre-rotated image on top of a bigger canvas\n # rotatedCanvas initially takes the largest horizontal/vertical value among fittingWidth, fittingHeight, img.shape[1], img.shape[0] to prevent the image from cropping and index-out-of-bounds errors\n # Once the image is rotated on rotatedCanvas, we will crop out the excess part\n try:\n rotatedCanvas = np.zeros((max(fittingHeight, img.shape[0]), max(fittingWidth, img.shape[1]), 3), dtype='uint8')\n rotatedCanvas[math.floor(rotatedCanvas.shape[0]/2 - img.shape[0]/2): math.floor(rotatedCanvas.shape[0]/2 + img.shape[0]/2), math.floor(rotatedCanvas.shape[1]/2 - img.shape[1]/2): math.floor(rotatedCanvas.shape[1]/2 + img.shape[1]/2)] = img\n except:\n rotatedCanvas = np.zeros((max(fittingHeight, img.shape[0]), max(fittingWidth, img.shape[1])), dtype='uint8')\n rotatedCanvas[math.floor(rotatedCanvas.shape[0]/2 - img.shape[0]/2): math.floor(rotatedCanvas.shape[0]/2 + img.shape[0]/2), math.floor(rotatedCanvas.shape[1]/2 - img.shape[1]/2): math.floor(rotatedCanvas.shape[1]/2 + img.shape[1]/2)] = img\n # rotatedCanvas[100:200,400:500] = 0,255,0\n # [upper height bound: lower height bound, left width bound: right width bound]\n # cv.imshow(\"Test draw on canvas before rotate\", rescaleFrame(rotatedCanvas, 500))\n\n #rotPoint (width, height)\n rotPoint = (rotatedCanvas.shape[1]//2, rotatedCanvas.shape[0]//2)\n \n rotMat = cv.getRotationMatrix2D(rotPoint, angle, 1.0)\n excessDimensions = (rotatedCanvas.shape[1], rotatedCanvas.shape[0])\n rotatedCanvas = cv.warpAffine(rotatedCanvas, rotMat, excessDimensions)\n # rotatedCanvas = rotatedCanvas[100:1200, 100:600]\n # Cropping excess borders\n rotatedCanvas = rotatedCanvas[math.floor((rotatedCanvas.shape[0] - fittingHeight)/2) : math.floor((rotatedCanvas.shape[0] + fittingHeight)/2), math.floor((rotatedCanvas.shape[1] - fittingWidth)/2) : math.floor((rotatedCanvas.shape[1] + fittingWidth)/2)]\n return rotatedCanvas\n\n\n\n\n\n# Resize\ndef resizeMinTo500(img):\n \"\"\"\n Create a resized clone of the original image such that the smaller dimension of the image = 500px, and the other dimension is kept to scale\n :param img: the image that needs to be resized\n :return a resized copy of the image\n \"\"\"\n \n (height, width) = img.shape[:2]\n\n resizedImage = img.copy()\n\n if height < width:\n width = int(width * 500/ height)\n height = 500\n \n else:\n height = int(height * 500/ width)\n width = 500\n \n dimensions = (width, height)\n return cv.resize(resizedImage, dimensions, interpolation= cv.INTER_LINEAR)\n\n# Resize\ndef resizeMinTo(img, size):\n \"\"\"\n Create a resized clone of the original image such that the smaller dimension of the image = size px, and the other dimension is kept to scale\n :param img: the image 
that needs to be resized\n :return a resized copy of the image\n \"\"\"\n size = int(size)\n (height, width) = img.shape[:2]\n\n resizedImage = img.copy()\n\n if height < width:\n width = int(width * size/ height)\n height = size\n \n else:\n height = int(height * size/ width)\n width = size\n \n dimensions = (width, height)\n return cv.resize(resizedImage, dimensions, interpolation=cv.INTER_LINEAR)\n\n\n# Resize TEST INTERPOLATION SPEED\ndef TEST_INTERPOLATION_TYPE_resizeMinTo(img, size, interpolationType):\n \"\"\"\n Create a resized clone of the original image such that the smaller dimension of the image = size px, and the other dimension is kept to scale\n :param img: the image that needs to be resized\n :param interpolationType: the interpolation type with\n 0: INTER_AREA\n 1: INTER_CUBIC\n 2: INTER_LANCZOS4\n 3: INTER_NEAREST\n 4: INTER_LINEAR\n :return a resized copy of the image\n \"\"\"\n size = int(size)\n (height, width) = img.shape[:2]\n\n resizedImage = img.copy()\n\n if height < width:\n width = int(width * size/ height)\n height = size\n \n else:\n height = int(height * size/ width)\n width = size\n \n dimensions = (width, height)\n if interpolationType == 0:\n return cv.resize(resizedImage, dimensions, interpolation=cv.INTER_AREA)\n elif interpolationType == 1:\n return cv.resize(resizedImage, dimensions, interpolation=cv.INTER_CUBIC)\n elif interpolationType == 2:\n return cv.resize(resizedImage, dimensions, interpolation=cv.INTER_LANCZOS4)\n elif interpolationType == 3:\n return cv.resize(resizedImage, dimensions, interpolation=cv.INTER_NEAREST)\n elif interpolationType == 4:\n return cv.resize(resizedImage, dimensions, interpolation=cv.INTER_LINEAR)\n\n","sub_path":"FacialDetection/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"315658867","text":"import os\nfrom webob.dec import wsgify\nfrom webob.static import DirectoryApp\nfrom webob.exc import HTTPFound\nfrom webdispatch import URLDispatcher,MethodDispatcher\nfrom wsgiref.simple_server import make_server\n\nfrom datetime import datetime\nimport sqlalchemy as sa\nimport sqlalchemy.orm as orm\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.exc import IntegrityError\nfrom docutils.core import publish_parts\nfrom jinja2 import Environment\nfrom jinja2.loaders import PackageLoader\n\nhere = os.path.dirname(__file__)\nenv = Environment(loader=PackageLoader(__name__, 'templates'))\n\nDBSession = orm.scoped_session(orm.sessionmaker())\n\nBase = declarative_base()\n\ndef init_db(engine):\n DBSession.configure(bind=engine)\n Base.metadata.create_all(bind=DBSession.bind)\n try:\n front_page = Page(page_name='FrontPage', contents=\"\"\"\\\nFrontPage\n====================\"\"\")\n DBSession.add(front_page)\n DBSession.commit()\n except IntegrityError:\n DBSession.remove()\n\n\n\nclass Page(Base):\n __tablename__ = 'pages'\n id = sa.Column(sa.Integer, primary_key=True)\n page_name = sa.Column(sa.Unicode(255), unique=True)\n contents = sa.Column(sa.UnicodeText)\n created = sa.Column(sa.DateTime, default=datetime.now)\n edited = sa.Column(sa.DateTime, onupdate=datetime.now)\n\n @property\n def html_contents(self):\n parts = publish_parts(source=self.contents, writer_name=\"html\")\n return parts['html_body']\n\n@wsgify.middleware\ndef sqla_transaction(req, app):\n try:\n res = req.get_response(app)\n DBSession.commit()\n 
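# commit only after the wrapped app succeeded; the finally-clause below always frees the scoped session\n 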
return res\n finally:\n DBSession.remove()\n\n@wsgify\ndef page_view(request):\n page_name = request.urlvars['page_name']\n edit_url = request.environ['webdispatch.urlgenerator'].generate('page_edit', page_name=page_name)\n try:\n page = DBSession.query(Page).filter(Page.page_name==page_name).one()\n tmpl = env.get_template('page.html')\n return tmpl.render(page=page, edit_url=edit_url)\n except NoResultFound:\n return HTTPFound(location=edit_url)\n\n@wsgify\ndef page_edit_form(request):\n page_name = request.urlvars['page_name']\n try:\n page = DBSession.query(Page).filter(Page.page_name==page_name).one()\n except NoResultFound:\n page = Page(page_name=page_name, contents=\"\")\n\n tmpl = env.get_template('page_edit.html')\n return tmpl.render(page=page)\n\n@wsgify\ndef page_update(request):\n page_name = request.urlvars['page_name']\n try:\n page = DBSession.query(Page).filter(Page.page_name==page_name).one()\n except NoResultFound:\n page = Page(page_name=page_name, contents=\"\")\n DBSession.add(page)\n\n page.contents = request.params['contents']\n location = request.environ['webdispatch.urlgenerator'].generate('page', page_name=page_name)\n return HTTPFound(location=location)\n\npage_edit = MethodDispatcher()\npage_edit.register_app('get', page_edit_form)\npage_edit.register_app('post', page_update)\n\ndef make_app():\n application = URLDispatcher()\n js_app = DirectoryApp(os.path.join(here, 'static/js'))\n css_app = DirectoryApp(os.path.join(here, 'static/css'))\n img_app = DirectoryApp(os.path.join(here, 'static/img'))\n\n application.add_url('js', '/js/*', js_app)\n application.add_url('css', '/css/*', css_app)\n application.add_url('img', '/img/*', img_app)\n application.add_url('page', '/{page_name}', page_view)\n application.add_url('page_edit', '/{page_name}/edit', page_edit)\n application.add_url('top', '/', HTTPFound(location='FrontPage'))\n return application\n\ndef main():\n engine = sa.create_engine('sqlite:///{dir}/wiki.db'.format(dir=os.getcwd()))\n engine.echo = True\n init_db(engine)\n application = make_app()\n\n application = sqla_transaction(application)\n httpd = make_server('', 8000, application)\n httpd.serve_forever()\n","sub_path":"perfect_python/Part3/14/wiki/py3wiki/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"504121359","text":"''' Write your own \"Address Book\" program that works\n from the command line and lets you view, add, edit,\n delete or search the contact details of people\n you know. 
It should also\n persist this information to disk for later access.\n'''\n\n## The idea was to do this with a Contact class and class methods, but it turned out the way it did: the methods became functions\n## that have to be called with None, which is why version 2 was created\n\n__version__ = 1.0\nfrom ast import literal_eval\n\nclass Contact:\n\n data_file = 'Adress_book.data'\n dict_data = dict()\n \n def __init__(self):\n ''' When an object of the class - an address book contact - is created,\n assigns it a first name, a last name and a phone number'''\n self.name = str(input('First name: '))\n self.last_name = str(input('Last name: '))\n self.phone_number = str(input('Phone: '))\n print('Init Test success:\\nName = {}\\nLast_name = {}\\nNumber = {}\\n'.format(self.name, self.last_name, self.phone_number))\n\n\n def load_ab(self):\n ''' Loads the address book from data_file '''\n with open (Contact.data_file, 'r') as op_file:\n try:\n Contact.dict_data = literal_eval(op_file.readline()) ## readline -> string, so str -> dict with ast.py\n print('Contacts loaded')\n except:\n print(\"Contacts not found\")\n\n def save_ab(self):\n ''' Saves the address book to data_file '''\n with open (Contact.data_file, 'w') as op_file:\n try:\n op_file.write(str(Contact.dict_data)+'\\n')\n print('Save_ab Test:\\nSuccess!\\n', )\n except:\n print(\"Save_ab Test failed\\n\")\n \n def edit_contact (self, edit_name): #Not quite what I wanted: the problem is in the concatenation of the first and last name\n ''' Edits a contact'''\n new_name = str(input('New first name: '))\n new_last_name = str(input('New last name: '))\n new_phone_number = str(input('Enter a new phone number or press enter to continue\\n: '))\n if not new_phone_number: #input() returns '' when the user just presses enter, so keep the old number then\n new_phone_number = Contact.dict_data[edit_name]\n name = '{}'.format(new_name + ' ' + new_last_name)\n Contact.dict_data.pop(edit_name)\n Contact.dict_data[name] = '{}'.format(new_phone_number)\n \n \n\n\n def save_contact (self):\n ''' Puts objects of the created class into a dictionary (dict) '''\n print('Trying to save_contact\\n')\n name = '{}'.format(self.name + ' ' + self.last_name)\n Contact.dict_data[name] = '{}'.format(self.phone_number)\n print('Save_contact Test:\\nSuccess\\n')\n \ndef menu():\n ''' The user interaction interface'''\n print('Address book {} is active'.format(__version__))\n Contact.load_ab(None)\n while True:\n print('Choose an action:',\\\n '1. View contacts',\\\n '2. New contact',\\\n '3. Edit contact',\\\n '4. Delete contact',\\\n '5. Exit', sep = '\\n')\n try:\n chose = int(input(': '))\n print('')\n assert chose in [1,2,3,4,5]\n except AssertionError:\n print('No such menu item', end='\\n\\n')\n except ValueError:\n print('Unknown operation', end='\\n\\n')\n continue\n if chose == 1:\n for name, phone_number in Contact.dict_data.items():\n print(name, '-', phone_number, end = '\\n\\n')\n elif chose == 2:\n a = Contact()\n while True:\n try:\n chose = input('Save contact? 
Y/N\\n: ').upper()\n assert chose in ['Y', 'N']\n if chose == 'Y':\n a.save_contact()\n a.save_ab()\n break\n elif chose == 'N':\n break\n except AssertionError:\n print('Unknown operation')\n elif chose == 3:\n edit_name = input('Enter the name of the contact to edit\\n: ')\n if not edit_name in Contact.dict_data.keys():\n print('The name was entered incorrectly or there is no such contact\\n')\n continue\n else:\n Contact.edit_contact (None, edit_name)\n Contact.save_ab(None)\n print('Contact updated')\n elif chose == 4:\n try:\n Contact.dict_data.pop('{}'.format(input('Enter the name of the contact to delete.\\n: ')))\n Contact.save_ab(None)\n except KeyError:\n print('The name was entered incorrectly or there is no such contact\\n')\n \n elif chose == 5:\n break\n\n\n\nmenu()\n","sub_path":"Small projects/Adress_book/old/Adress book.py","file_name":"Adress book.py","file_ext":"py","file_size_in_byte":5980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"99877771","text":"from django import template\nfrom LearnToEarn.models import *\n\nregister = template.Library()\n\n\n@register.filter(name='hasAttemptedExam')\ndef hasAttemptedExam(request, id):\n exam_id = ExamModel.objects.get(id=id)\n exam_qsn = exam_id.examquestion\n try:\n attempt = ExamAnswer.objects.get(examquestion=exam_qsn, user=request)\n except:\n return False\n return attempt.attempted\n","sub_path":"LearnToEarn/Learn/LearnToEarn/templatetags/hasAttemptedExam.py","file_name":"hasAttemptedExam.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"376286439","text":"'''\r\nCreated on Nov 3, 2010\r\n\r\n@author: Chris Greenough - Chris.Greenough@nau.edu\r\n'''\r\nimport unittest\r\nimport logging\r\nfrom BbPy.ContextWS import ContextWS\r\nfrom BbPy.VO.CourseWS import CategoryMembershipVO, CategoryVO\r\nimport string\r\nimport random\r\nimport datetime\r\nimport time\r\nclass Test(unittest.TestCase):\r\n\r\n\r\n def setUp(self):\r\n logging.basicConfig()\r\n log=logging.getLogger()\r\n log.setLevel(logging.WARN)\r\n context = ContextWS(\"https://dev.bblearn.nau.edu\")\r\n success = context.loginTool(\"NAU\", \"PythonFeed\", \"*****\")\r\n if not success:\r\n log.error(\"Login was not successful!\")\r\n self.fail(\"Login was not successful!\")\r\n\r\n self.coursews = context.getCourseWS()\r\n self.random = string.lower(''.join(random.choice(string.letters) for i in xrange(5)))\r\n \r\n ## Create course to play with\r\n self.begin = datetime.date.today()\r\n self.end = datetime.date.today()+datetime.timedelta(days=1)\r\n self.courseid = self.coursews.createSimpleCourse(True,\r\n \"batch\"+self.random, # course batch id\r\n \"course\"+self.random, # course id\r\n \"Testing \" + self.random + \" description\", # description\r\n \"name\"+self.random, # course name\r\n time.mktime(self.begin.timetuple()),\r\n time.mktime(self.end.timetuple()))\r\n self.assertTrue(self.courseid)\r\n \r\n ## Create Category to play with\r\n self.categoryId = self.coursews.createSimpleCourseCategory(\r\n \"category\"+self.random, #name\r\n \"catBatch\"+self.random, #id\r\n \"Testing category\" + self.random + \" description\", #Description\r\n True)\r\n self.assertTrue(self.categoryId)\r\n \r\n ## Create Child Category to play with\r\n categoryVO=CategoryVO()\r\n categoryVO.available=True\r\n categoryVO.batchUid=\"TestChild\"+self.random\r\n categoryVO.description=\"TestChild\"+self.random\r\n categoryVO.frontPage=False\r\n 
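## the child category below is linked to the parent created above via parentId\r\n 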
categoryVO.title=\"TestChild\"+self.random\r\n categoryVO.parentId=self.categoryId\r\n self.childCategoryId = self.coursews.saveCourseCategory(categoryVO)\r\n \r\n self.assertTrue(self.childCategoryId)\r\n \r\n ## Assign category to course\r\n self.courseCategoryMembershipId = self.coursews.setCoursesMembership(self.courseid, self.categoryId, True)\r\n \r\n self.assertTrue(self.courseCategoryMembershipId)\r\n\r\n\r\n def tearDown(self):\r\n self.assertTrue(self.coursews.deleteCourse(self.courseid))\r\n self.assertTrue(self.coursews.deleteCourseCategory(self.childCategoryId))\r\n self.assertTrue(self.coursews.deleteCourseCategory(self.categoryId))\r\n \r\n \r\n def testCourseChild(self):\r\n categories = self.coursews.getAllCourseCategories()\r\n found = False\r\n \r\n for category in categories:\r\n if category.id == self.childCategoryId:\r\n found = True\r\n self.assertEqual(category.parentId,self.categoryId,\"Category parent id does not match %s != %s\"%(category.parentId,self.categoryId))\r\n \r\n self.assertTrue(found,\"Could not find category with id = %s\"%self.childCategoryId)\r\n \r\n\r\n def testGetCourseByBatch(self):\r\n course = self.coursews.getCourseByBatchId(\"batch\"+self.random)\r\n self.assertEqual(course.courseId,\"course\"+self.random)\r\n self.assertEqual(course.name,\"name\"+self.random)\r\n self.assertEqual(course.batchUid,\"batch\"+self.random)\r\n self.assertEqual(course.description,\"Testing \" + self.random + \" description\")\r\n self.assertEqual(datetime.date.fromtimestamp(course.startDate),self.begin)\r\n self.assertEqual(datetime.date.fromtimestamp(course.endDate),self.end)\r\n\r\n def testGetCourseById(self):\r\n course = self.coursews.getCourseById(\"course\" + self.random)\r\n self.assertEqual(course.courseId,\"course\"+self.random)\r\n self.assertEqual(course.name,\"name\"+self.random)\r\n self.assertEqual(course.batchUid,\"batch\"+self.random)\r\n self.assertEqual(course.description,\"Testing \" + self.random + \" description\")\r\n self.assertEqual(datetime.date.fromtimestamp(course.startDate),self.begin)\r\n self.assertEqual(datetime.date.fromtimestamp(course.endDate),self.end)\r\n \r\n def testGetAllCourseCategories(self):\r\n categories = self.coursews.getAllCourseCategories()\r\n found = False\r\n for category in categories:\r\n if category.title == \"category\"+self.random:\r\n found=True\r\n self.assertEqual(found,True)\r\n \r\n def testHasCategory(self):\r\n courseMemberships = self.coursews.getCoursesCategories(self.courseid)\r\n courseMembership = CategoryMembershipVO()\r\n found=False\r\n for courseMembership in courseMemberships:\r\n if courseMembership.categoryId == self.categoryId:\r\n found=True\r\n self.assertEqual(found,True)\r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n #import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()","sub_path":"src/TestBbPyCourse.py","file_name":"TestBbPyCourse.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"248196822","text":"from django.contrib import admin\n\nfrom . 
import models\n\n\nclass AnswearInLine(admin.TabularInline):\n model = models.Answear\n fields = [\n \"answear\",\n \"is_correct\",\n ]\n\n\n@admin.register(models.Question)\nclass QuestionAdmin(admin.ModelAdmin):\n fields = [\n \"title\",\n \"points\",\n \"difficulty\",\n ]\n list_display = [\n \"title\",\n \"updated_at\",\n ]\n inlines = [\n AnswearInLine,\n ]\n\n\n@admin.register(models.Answear)\nclass AnswearAdmin(admin.ModelAdmin):\n list_display = [\n \"answear\",\n \"is_correct\",\n \"question\",\n ]\n","sub_path":"quizbot/quiz/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"285755292","text":"######################################### \n# Helper functions for processing\n# Wikipedia text\n#\n# author: Jorge Hermosillo\n# Date: 22-Sep-2019\n# course: Escuela de Ciencia de Datos 2019\n##########################################\nimport sys\nimport string\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#####################################\n# plotting words per document #\n# Build the vectors used in the #\n# bar charts: #\n# * x: holds the enumeration #\n# of the documents #\n# * y: holds their respective #\n# word totals #\n#####################################\ndef grafica_palabras_porDoc(datos,nombre='barras',ancho=0.8):\n #sort the data in descending order and compute the mean.\n promedio = datos['Total'].mean()\n print('Average number of words per document in the corpus: {}'.format(promedio))\n \n #get the x and y values\n x=np.arange(len(datos.index.values))\n etiquetas=[]\n \n for e in datos.index.values:\n etiquetas.append(str(e))\n \n y=datos['Total'].values\n print(y[:10])\n \n #define the drawing area\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.set_facecolor('white')\n plt.grid(False)\n ax.tick_params(axis='x', labelsize=6)\n\n #plotting\n #ancho = 0.8 #width of the bars\n ax.bar(x - ancho/2, y, ancho, label='Totals')\n ax.axhline(y=promedio, \\\n color='r', \\\n linestyle='--', \\\n label='Average')\n\n # Labels, titles, etc.\n ax.set_ylabel('Words')\n ax.set_title('Number of words per document')\n ax.set_xticks(x)\n plt.xticks(rotation=90)\n ax.set_xticklabels(etiquetas)\n ax.legend()\n #plt.savefig('img/'+nombre+'.pdf')\n plt.show()\n return\n\ndef grafica_docs(df,titulo='Documents'):\n #\"\"\" Get the values\"\"\"\n docs_0=df[df.clase==0]\n docs_1=df[df.clase==1]\n\n #\"\"\"Plotting areas and visualization of the data\"\"\"\n fig,ax = plt.subplots(figsize=(5,5))\n\n #\"\"\"Documents in class 0\"\"\"\n ax.scatter(docs_0.c0, docs_0.c1,\n facecolor='royalblue', \n marker='o', \n edgecolor='blue',\n s=20,\n alpha=0.5,\n label='Docs_0')\n\n #\"\"\"Documents in class 1\"\"\"\n ax.scatter(docs_1.c0, docs_1.c1,\n facecolor='orangered', \n marker='o', \n edgecolor='red',\n s=20,\n alpha=0.5,\n label='Docs_1')\n plt.title(titulo)\n plt.xlabel('c0')\n plt.ylabel('c1')\n ax.legend()\n return ax\n\n\ndef palabras_comunes(df):\n Palabras=df['Palabras'].values.tolist()\n docs = df.doc_id.values\n lista=[]\n for i in range(len(Palabras)):\n lista.append((docs[i],Palabras[i]))\n palco=[]\n nopalco=[]\n for i in range(len(lista)):\n for j in range(i+1,len(lista)):\n palco.append(((lista[i][0],lista[j][0]),\\\n lista[i][1] & lista[j][1]))\n # symmetric difference: words in either document minus the words they share;\n # parentheses matter because binary '-' binds tighter than '&' and '|'\n nopalco.append(((lista[i][0],lista[j][0]),\\\n (lista[i][1] | lista[j][1]) - \\\n (lista[i][1] & 
lista[j][1])))\n\n palco=sorted(palco,key=lambda x: len(x[1]),reverse=True)\n nopalco=sorted(nopalco,key=lambda x: len(x[1]),reverse=True)\n npd = pd.DataFrame(nopalco).drop(columns=[0])\n paldoc=pd.DataFrame(palco)\n paldoc=pd.concat([paldoc,npd],ignore_index=True, sort=False,axis=1)\n paldoc.columns=['_ids','PalCom','PalNoCom']\n return paldoc\n","sub_path":"04 Clasificacion_Agrupamiento/3 Agrupamiento/Notebooks/wiki_graf.py","file_name":"wiki_graf.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"394641661","text":"import pynput\r\nimport sys\r\nimport PySimpleGUI as sg\r\nfrom pynput.keyboard import Key, Listener\r\n\r\n#LoL Key counter GUI\r\n#Author: Briggs Clarke \r\n\r\nsg.theme('DarkAmber') # Add a touch of color\r\n# All the stuff inside your window.\r\nlayout = [ [sg.Text('LoL Key Counter V1.0')],\r\n [sg.Text('Enter Key to be counted'), sg.InputText()],\r\n [sg.Button('Count'), sg.Button('Cancel')] ]\r\n\r\n# Create the Window\r\nwindow = sg.Window('Window Title', layout)\r\n# Event Loop to process \"events\" and get the \"values\" of the inputs\r\nwhile True:\r\n event, values = window.read()\r\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\r\n break\r\n print('You entered ', values[0])\r\n\r\nwindow.close()","sub_path":"LoL Key Counter App.py","file_name":"LoL Key Counter App.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"97437212","text":"# -*- coding: utf-8 -*-\nfrom Boost import *\nfrom flask.ext.login import LoginManager, login_user,logout_user, login_required, current_user\nfrom flask import Flask, render_template, request, redirect, url_for, abort, session,Markup,jsonify, flash, g\nfrom Boost.models import Site, Profile, Session\nimport json\nfrom Boost.functions.authentication import *\n\ndef dir_visitors():\n return render_template(\"visitors.html\")\n\n@app.route('/_get_visitors_profile')\n@login_required\ndef get_visitors_profile():\n skip = request.args.get('skip', 0, type=int)\n site = json.loads(request.args.get('site', 0, type=str))\n site = Site.objects(pk=site).first()\n if site_ownership(site)==False: return None\n\n number_profiles = 10\n profiles=Profile.objects(site=site).skip(skip).limit(number_profiles)\n profile_render = []\n for profile in profiles:\n profile_render.append({\"fingerprint\":profile.fingerprint,\"id\":str(profile.id)})\n\n return jsonify(result=profile_render,number_profiles=number_profiles)\n\ndef dir_show_profile(*args,**kw):\n profile_id = kw[\"star\"][0]\n sessions = Session.objects(profile=profile_id)\n session_render = []\n for session in sessions:\n session_render.append({\"id\":str(session.id)})\n return session_render\n\ndef dir_visitors_profile(**kw):\n site = kw[\"site\"]\n return render_template(\"visitors_profile.html\")","sub_path":"www/Boost/functions/directoryMap.py","file_name":"directoryMap.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"347265491","text":"import tcod as libtcod\nfrom render_functions import RenderOrder\nfrom game_messages import Message\nfrom game_states import GameStates\nfrom constants import *\n\n\ndef kill_player(player, game_map):\n player.char = '%'\n player.color = libtcod.dark_red\n\n return Message('You died on dungeon level {0}!'.format(game_map.dungeon_level), 
libtcod.red), GameStates.PLAYER_DEAD\n\n\ndef kill_monster(monster, player):\n # default message so death_message is always bound, even for monsters without a coin pouch\n death_message = Message('{0} is dead!'.format(monster.name.capitalize()), libtcod.orange)\n if monster.coin_pouch is not None:\n death_message = Message('{0} is dead! Gained {1} coins'.format(monster.name.capitalize(), monster.coin_pouch.get_amount()), libtcod.orange)\n if monster.char == BALROG_CHAR:\n game_ended = True\n death_message = Message('Congratulations! The dreaded Balrog has been defeated. Arise Sir Player, Master of the Mines of Moria.')\n elif monster.char == '@':\n game_ended = True\n else:\n game_ended = False\n\n monster.char = '%'\n monster.color = libtcod.dark_red\n monster.blocks = False\n if monster.coin_pouch is not None:\n player.coin_pouch.add(monster.coin_pouch.get_amount())\n monster.ai = None\n monster.name = 'remains of ' + monster.name\n monster.render_order = RenderOrder.CORPSE\n\n return death_message, game_ended","sub_path":"death_functions.py","file_name":"death_functions.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"598076713","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport os\nfrom torch.utils import data\nfrom os import makedirs\nimport torchvision\nfrom PIL import Image\nimport sys\nimport copy\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n return x\n\ndef numpy_loader(input):\n item = np.load(input)/255.0\n return Image.fromarray(item)\n\ndef evaluate_model_for_accuracy(model, device, data_loader):\n model.eval()\n\n correct = 0\n with torch.no_grad():\n for data, target in data_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n print('\\n Accuracy: {}/{} ({:.0f}%)\\n'.format(\n correct, len(data_loader.dataset),\n 100. 
* correct / len(data_loader.dataset)))\n\n\ndef evaluate_adv_images(model, device, kwargs, mean, std, data_loader):\n batch_size = 100\n model.eval()\n\n adv_data_loader = torch.utils.data.DataLoader(\n torchvision.datasets.DatasetFolder('adv_images', #Change this to your adv_images folder\n loader=numpy_loader,\n extensions='.npy',\n transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean, std)])),\n batch_size=batch_size, **kwargs)\n\n evaluate_model_for_accuracy(model, device, adv_data_loader)\n\n given_dataset = []\n adv_images = []\n labels = []\n with torch.no_grad():\n for data, target in data_loader:\n data, target = data.to(device), target.to(device)\n if len(given_dataset) ==0:\n given_dataset = data.squeeze().detach().cpu().numpy()\n else:\n given_dataset = np.concatenate([given_dataset, data.squeeze().detach().cpu().numpy()],\n axis=0)\n\n for data, target in adv_data_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n label = target.squeeze().detach().cpu().numpy()\n softmax_values = torch.nn.Softmax()(output).cpu().numpy()[np.arange(batch_size), label]\n adv_images = data\n labels = target\n\n #Checking the range of generated images\n adv_images_copy = copy.deepcopy(adv_images)\n for k in range(adv_images_copy.shape[0]):\n image_ = adv_images_copy[k, :, :]\n\n for t, m, s in zip(image_, mean, std):\n t.mul_(s).add_(m)\n\n image = image_.squeeze().detach().cpu().numpy()\n image = 255.0 * image\n\n if np.min(image) < 0 or np.max(image) > 255:\n print('Generated adversarial image is out of range.')\n sys.exit()\n\n adv_images = adv_images.squeeze().detach().cpu().numpy()\n labels = labels.squeeze().detach().cpu().numpy()\n\n\n #Checking for equation 2 and equation 3\n if all([x > 0.8 for x in softmax_values.tolist()]):\n print('Softmax values for all of your adv images are greater than 0.8')\n S = 0\n for i in range(10):\n label_indices = np.where(labels==i)[0]\n a_i = adv_images[label_indices, :, :]\n for k in range(10):\n image = a_i[k, :, :]\n S = S + np.min(\n np.sqrt(\n np.sum(\n np.square(\n np.subtract(given_dataset, np.tile(np.expand_dims(image, axis=0), [1000,1,1]))\n ),axis=(1,2))))\n\n print('Value of S : {:.4f}'.format(S / 100))\n\n else:\n print('Softmax values for some of your adv images are less than 0.8')\n\n\n\ndef generate_adv_images():\n adv_images = []\n targeted_class_labels = []\n image_names = []\n #your code to generate adv_images goes here\n return adv_images,image_names,targeted_class_labels\n\ndef main():\n # Settings\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--no-cuda', action='store_true', default=False,help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\n parser.add_argument('--model_path', type=str, default='model/mnist_cnn.pt')\n parser.add_argument('--data_folder', type=str, default='data')\n\n\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n mean = (0.1307,)\n std = (0.3081,)\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n data_loader = torch.utils.data.DataLoader(\n torchvision.datasets.DatasetFolder('data',\n loader= numpy_loader,\n extensions= '.npy',\n transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean, std)])),\n batch_size=args.batch_size, 
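# kwargs supplies num_workers/pin_memory (defined above) when CUDA is available\n 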
**kwargs)\n\n model = Net().to(device)\n\n model.load_state_dict(torch.load(args.model_path, map_location=torch.device('cpu')))\n\n evaluate_model_for_accuracy(model, device, data_loader)\n\n adv_images,image_names,class_labels = generate_adv_images()\n #Implement this method to generate adv images\n #satisfying constraints mentioned in the assignment description\n\n save_folder = 'adv_images'\n\n for image,image_name,class_label in zip(adv_images,image_names,class_labels):\n for t, m, s in zip(image, mean, std):\n t.mul_(s).add_(m)\n\n image_to_save = image.squeeze().detach().cpu().numpy()\n image_to_save = 255.0 * image_to_save\n\n if np.min(image_to_save) < 0 or np.max(image_to_save) > 255:\n print('Generated adversarial image is out of range.')\n sys.exit()\n\n if not os.path.exists(os.path.join(save_folder,str(class_label))):\n makedirs(os.path.join(save_folder,str(class_label)))\n\n np.save(os.path.join(save_folder,str(class_label),image_name), image_to_save)\n\n evaluate_adv_images(model,device,kwargs,mean,std,data_loader)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"deepfool/mnist_code_Original.py","file_name":"mnist_code_Original.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"578690710","text":"\"\"\"\n SlipStream Client\n =====\n Copyright (C) 2013 SixSq Sarl (sixsq.com)\n =====\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom slipstream.cloudconnectors.CloudClientCommand import CloudClientCommand\nfrom slipstream.cloudconnectors.openstack.OpenStackClientCloud import OpenStackClientCloud\n\n\nclass OpenStackCommand(CloudClientCommand):\n\n def __init__(self):\n self.PROVIDER_NAME = OpenStackClientCloud.cloudName\n super(OpenStackCommand, self).__init__()\n\n def _setCommonOptions(self):\n self.parser.add_option('--username', dest='key',\n help='Key',\n default='', metavar='KEY')\n\n self.parser.add_option('--password', dest='secret',\n help='Secret',\n default='', metavar='SECRET')\n\n self.parser.add_option('--endpoint', dest='endpoint',\n help='Identity service (Keystone)',\n default='', metavar='ENDPOINT')\n\n self.parser.add_option('--region', dest='region',\n help='Region (default: regionOne)',\n default='regionOne', metavar='REGION')\n \n self.parser.add_option('--service-type', dest='service_type',\n help='Type-name of the service which provides the instances functionality (default: compute)',\n default='compute', metavar='TYPE')\n \n self.parser.add_option('--service-name', dest='service_name',\n help='Name of the service which provides the instances functionality (default: nova)',\n default='nova', metavar='NAME')\n \n self.parser.add_option('--project', dest='project',\n help='Project (Tenant)',\n default='', metavar='PROJECT')\n \n def _checkOptions(self):\n if not all((self.options.key, self.options.secret,\n self.options.endpoint, self.options.region)):\n self.parser.error('Some mandatory options were not given values.')\n self.checkOptions()\n\n def 
_setUserInfo(self):\n self.userInfo[self.PROVIDER_NAME + '.username'] = self.options.key\n self.userInfo[self.PROVIDER_NAME + '.password'] = self.options.secret\n self.userInfo[self.PROVIDER_NAME + '.endpoint'] = self.options.endpoint\n self.userInfo[self.PROVIDER_NAME + '.service.type'] = self.options.service_type\n self.userInfo[self.PROVIDER_NAME + '.service.name'] = self.options.service_name\n self.userInfo[self.PROVIDER_NAME + '.service.region'] = self.options.region\n self.userInfo[self.PROVIDER_NAME + '.tenant.name'] = self.options.project\n\n\n","sub_path":"client/src/main/python/slipstream/cloudconnectors/openstack/OpenStackCommand.py","file_name":"OpenStackCommand.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} {"seq_id":"221473289","text":"\nclass Shape(object):\n\tdef __init__(self, args):\n\t\tself._value = args\n\t\tself._key_update()\n\n\tdef __getitem__(self, k):\n\t\tif type(k) != tuple:\n\t\t\tk = (k, self._keydict[k])\n\t\treturn self._value[k]\n\n\tdef __setitem__(self, k, v):\n\t\tif type(k) != tuple:\n\t\t\tk = (k, self._keydict[k])\n\t\tself._value[k] = v\n\t\tself._key_update()\n\n\tdef _key_update(self):\n\t\tself._keys = sorted(self._value, key=lambda x: x[1])\n\t\tself._keydict = dict(self._keys)\n\n\tdef __iter__(self):\n\t\tfor k in self._keys:\n\t\t\tfor v in self._value[k]:\n\t\t\t\tyield v\n\n\tdef draw(self):\n\t\tfor v in self:\n\t\t\tv.draw()\n\nclass Polygon(object):\n\t\"\"\"\n\tobject > Polygon\n\t\"\"\"\n\tdef __init__(self, *args, **kwargs):\n\t\tself.vertices = args\n\t\tself.color = kwargs[\"color\"] if \"color\" in kwargs else \"#FFFFFF\"\n\t\t# default to a stroked outline when \"stroke\" is missing or explicitly None\n\t\tstroke = kwargs.get(\"stroke\", True)\n\t\tself.stroke = True if stroke is None else stroke\n\n\tdef __contains__(self, p):\n\t\t\"\"\"\n\t\tverts --- all the vertices that define the polygon\n\t\tp --- the point which may lie in the polygon\n\t\t\"\"\"\n\t\tverts = self.vertices\n\t\tx, y = p\n\t\tnum = len(verts)\n\t\tj = num - 1\n\t\tonShape = False\n\t\tfor i in range(num):\n\t\t\txi, yi = verts[i]\n\t\t\txj, yj = verts[j]\n\t\t\tif ((yi > y) != (yj > y)) and (x < xi + (xj - xi) *\n\t\t\t (y - yi) / (yj - yi)):\n\t\t\t\tonShape = not onShape\n\t\t\tj = i\n\t\treturn onShape\n\n\tdef draw(self):\n\t\tfill(self.color)\n\t\tstroke(0)\n\t\tif not self.stroke:\n\t\t\tnoStroke()\n\n\t\tbeginShape()\n\t\tfor v in self.vertices:\n\t\t\tvertex(*v)\n\t\tendShape(CLOSE)\n\n\t\tif not self.stroke:\n\t\t\tstroke(0)\n\nclass Rect(Polygon):\n\t\"\"\"\n\tobject > Polygon > Rect\n\t\"\"\"\n\tdef __init__(self, x, y, w, h, color=\"#FFFFFF\", stroke=True):\n\t\tself.dim = (w, h)\n\t\tself.pos = (x, y)\n\t\tself.color = color\n\t\tself.stroke = stroke\n\n\t@property\n\tdef pos(self):\n\t\treturn self._pos\n\n\t@pos.setter\n\tdef pos(self, p):\n\t\tself._pos = p\n\t\tself._vertices = self._rect2poly()\n\n\t@property\n\tdef dim(self):\n\t\treturn self._dim\n\n\t@dim.setter\n\tdef dim(self, v):\n\t\tself._dim = v\n\t\t# recompute the cached vertices when the dimensions change; skip during\n\t\t# __init__, where dim is assigned before pos exists\n\t\tif hasattr(self, \"_pos\"):\n\t\t\tself._vertices = self._rect2poly()\n\n\t@property\n\tdef vertices(self):\n\t\treturn self._vertices\n\n\tdef _rect2poly(self):\n\t\tx, y = self.pos\n\t\tw, h = self.dim\n\t\tx0, y0 = x - w / 2, y - h / 2\n\t\tx1, y1 = x0 + w, y0 + h\n\t\treturn [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]\n","sub_path":"Shape.py","file_name":"Shape.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} {"seq_id":"418440331","text":"import json\nimport os\n\nimport requests\nfrom 
django.contrib.auth.models import Permission, User\nfrom django.http import Http404\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom geo_api.helpers import getIP\nfrom geo_api.models import IPGeoData\nfrom geo_api.serializers import IPGeoDataSerializer\n\n\nclass IpStackAPI():\n def __init__(self) -> None:\n self.api_key = os.environ[\"IPSTACK_API_KEY\"]\n\n def get_geo_data(self, ip):\n url = f'http://api.ipstack.com/{ip}?access_key={self.api_key}&fields=main'\n response = requests.get(url)\n response.raise_for_status()\n response_json = json.loads(response.text)\n\n return response_json\n\n\nclass Register(APIView):\n def get_all_geo_api_permissions(self):\n permissions = [Permission.objects.get(name=permission) for permission in [\n 'Can add ip geo data',\n 'Can change ip geo data',\n 'Can delete ip geo data',\n 'Can view ip geo data'\n ]]\n\n return permissions\n\n def post(self, request):\n try:\n username = request.data['username']\n password = request.data['password']\n except KeyError:\n error_message = json.dumps({'details': 'key error'})\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n user_already_exists = User.objects.filter(\n username=username).exists()\n\n if user_already_exists:\n error_message = json.dumps({'details': 'user already exists'})\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n user = User.objects.create_user(username=username,\n password=password)\n\n permissions = self.get_all_geo_api_permissions()\n\n user.user_permissions.set(permissions)\n\n return_message = json.dumps({'details': 'created'})\n return Response(return_message, status=status.HTTP_201_CREATED)\n\n\nclass GeoDataDetail(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get_ip_geo_data(self, address):\n try:\n return IPGeoData.objects.get(ip=getIP(address))\n except IPGeoData.DoesNotExist:\n raise Http404\n\n def get(self, request, address):\n ip = self.get_ip_geo_data(address)\n serializer = IPGeoDataSerializer(ip)\n return Response(serializer.data, status=200)\n\n def delete(self, request, address):\n ip = self.get_ip_geo_data(address)\n serializer = IPGeoDataSerializer(ip)\n ip.delete()\n\n return Response(serializer.data, status=200)\n\n\nclass GeoData(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request):\n addresses = IPGeoData.objects.all()\n ser = IPGeoDataSerializer(addresses, many=True)\n\n response = ser.data\n return Response(response, status=status.HTTP_200_OK)\n\n def post(self, request):\n try:\n address = request.data['address']\n except KeyError:\n error_message = json.dumps({'details': 'key error'})\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n ip = getIP(address)\n\n if ip is False:\n error_message = json.dumps({'details': 'incorrect address'})\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n if IPGeoData.objects.filter(ip=ip).exists():\n error_message = json.dumps({'details': 'ip already exists'})\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n api = IpStackAPI()\n try:\n response_json = api.get_geo_data(ip)\n except requests.exceptions.RequestException:\n error_message = json.dumps({'details': 'internal server error'})\n return Response(error_message, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n serializer = IPGeoDataSerializer(data=response_json)\n if 
serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=201)\n return Response(serializer.errors, status=400)\n","sub_path":"geo_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} {"seq_id":"100998623","text":"# MIT License\n#\n# Copyright (c) 2021 Soohwan Kim\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport torch.nn as nn\nfrom typing import Tuple\nfrom torch import Tensor\n\n\nclass JointCTCCrossEntropyLoss(nn.Module):\n \"\"\"\n Provides Joint CTC-CrossEntropy Loss function\n\n Args:\n num_classes (int): the number of classification\n ignore_index (int): indexes that are ignored when calculating loss\n dim (int): dimension of calculation loss\n reduction (str): reduction method [sum, mean] (default: mean)\n ctc_weight (float): weight of ctc loss\n cross_entropy_weight (float): weight of cross entropy loss\n blank_id (int): identification of blank for ctc\n \"\"\"\n def __init__(\n self,\n num_classes: int,\n ignore_index: int,\n dim: int = -1,\n reduction='mean',\n ctc_weight: float = 0.3,\n cross_entropy_weight: float = 0.7,\n blank_id: int = None,\n ) -> None:\n super(JointCTCCrossEntropyLoss, self).__init__()\n self.num_classes = num_classes\n self.dim = dim\n self.ignore_index = ignore_index\n self.reduction = reduction.lower()\n self.ctc_weight = ctc_weight\n self.cross_entropy_weight = cross_entropy_weight\n self.ctc_loss = nn.CTCLoss(blank=blank_id, reduction=self.reduction, zero_infinity=True)\n self.cross_entropy_loss = nn.CrossEntropyLoss(reduction=self.reduction, ignore_index=self.ignore_index)\n\n def forward(\n self,\n encoder_log_probs: Tensor,\n decoder_log_probs: Tensor,\n output_lengths: Tensor,\n targets: Tensor,\n target_lengths: Tensor,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n ctc_loss = self.ctc_loss(encoder_log_probs, targets, output_lengths, target_lengths)\n cross_entropy_loss = self.cross_entropy_loss(decoder_log_probs, targets.contiguous().view(-1))\n loss = cross_entropy_loss * self.cross_entropy_weight + ctc_loss * self.ctc_weight\n return loss, ctc_loss, cross_entropy_loss\n","sub_path":"lightning_asr/criterion/joint_ctc_cross_entropy.py","file_name":"joint_ctc_cross_entropy.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} {"seq_id":"526849335","text":"from flask import Flask, render_template, Response, 
request\nimport json\nimport Pyro4\ntry:\n from flask_cors import CORS # The typical way to import flask-cors\nexcept ImportError:\n # Path hack allows examples to be run without installation.\n import os\n parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.sys.path.insert(0, parentdir)\n\n from flask.ext.cors import CORS\n\nimport config\n\napp = Flask(__name__)\napp.config.from_object('config')\ncors = CORS(app)\n\n@app.route('/')\ndef index():\n return render_template('index.html', server=config.SERVER_URL)\n\n@app.route('/vehicles_positions.php')\ndef vehicles_positions():\n server = Pyro4.Proxy('PYRO:virtual_oulu@' + config.SERVER_HOST + ':' + str(config.SERVER_PORT))\n vehicles_positions = server.get_vehicles_positions()\n return Response(json.dumps(vehicles_positions), 200, mimetype='application/json')\n\n@app.route('/delete_congestion/<congestion_id>')\ndef delete_congestion(congestion_id):\n server = Pyro4.Proxy('PYRO:virtual_oulu@' + config.SERVER_HOST + ':' + str(config.SERVER_PORT))\n server.delete_congestion(congestion_id)\n\n return Response(json.dumps({'success': True, 'congestion_id': congestion_id}), 202, mimetype='application/json')\n\n@app.route('/congest_edge/<lat>/<lon>')\ndef congest_edge(lat, lon):\n server = Pyro4.Proxy('PYRO:virtual_oulu@' + config.SERVER_HOST + ':' + str(config.SERVER_PORT))\n congestion_id = server.add_congestion(lat, lon)\n\n return Response(json.dumps({'success': True, 'id': congestion_id}), 200, mimetype='application/json')\n\n@app.route('/update_congest/<congestion_id>/<lat>/<lng>')\ndef update_congest(congestion_id, lat, lng):\n server = Pyro4.Proxy('PYRO:virtual_oulu@' + config.SERVER_HOST + ':' + str(config.SERVER_PORT))\n congestion_id = server.update_congestion(congestion_id, lat, lng)\n\n return Response(json.dumps({'success': True, 'id': congestion_id}), 200, mimetype='application/json')\n\n@app.route('/delete_vehicle/<vehicle_id>')\ndef delete_vehicle(vehicle_id):\n server = Pyro4.Proxy('PYRO:virtual_oulu@' + config.SERVER_HOST + ':' + str(config.SERVER_PORT))\n server.delete_vehicle(vehicle_id)\n\n return Response(json.dumps({'success': True, 'vehicle_id': vehicle_id}), 202, mimetype='application/json')\n\n@app.route('/get_traffic_lights')\ndef get_traffic_lights():\n import xml.etree.ElementTree as ET\n import sqlite3\n import config\n\n tree = ET.parse('data/plain_network/oulu.tll.xml')\n root = tree.getroot()\n results = []\n conn = sqlite3.connect(config.DATABASE_FILE)\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n for connection in root.iter('connection'):\n from_edge = connection.get('from')\n to_edge = connection.get('to')\n traffic_light_id = connection.get('tl')\n\n def get_nodes(edge_id, database_cursor):\n sql = 'select * from oulu_edges where id=?'\n database_cursor.execute(sql, (edge_id,))\n row = database_cursor.fetchone()\n if row is not None:\n return [row['from'], row['to']]\n return None\n def get_node_info (node_id, database_cursor):\n sql = 'select * from oulu_nodes where id=?'\n database_cursor.execute(sql, (node_id,))\n row = database_cursor.fetchone()\n if row is not None:\n return {'id': row['id'], 'x': row['x'], 'y': row['y'], 'lat': row['lat'], 'lon': row['lon']}\n return None\n\n from_nodes = get_nodes(from_edge, cur)\n to_nodes = get_nodes(to_edge, cur)\n\n node = None\n if from_nodes[0] == to_nodes[0] or from_nodes[0] == to_nodes[1]:\n node = from_nodes[0]\n elif from_nodes[1] == to_nodes[0] or from_nodes[1] == to_nodes[1]:\n node = from_nodes[1]\n if node is not None:\n node_info = get_node_info(node, cur)\n if node_info is not 
None:\n results.append({'node_id': node_info['id'],\n 'lat': node_info['lat'],\n 'lon': node_info['lon'],\n 'tl': traffic_light_id})\n\n return Response(json.dumps(results), 200, mimetype='application/json')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5001, debug=True)\n\n","sub_path":"web_interface.py","file_name":"web_interface.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} {"seq_id":"649502461","text":"import os\nimport sys\nimport glob\nimport gzip\nimport queue\nimport logging\nimport memcache\nimport collections\nfrom time import time\nimport appsinstalled_pb2\nimport multiprocessing as mp\nfrom optparse import OptionParser\nfrom functools import partial\n\nNORMAL_ERR_RATE = 0.01\nAppsInstalled = collections.namedtuple(\"AppsInstalled\", [\"dev_type\", \"dev_id\", \"lat\", \"lon\", \"apps\"])\n\n\ndef dot_rename(path):\n head, fn = os.path.split(path)\n # atomic in most cases\n os.rename(path, os.path.join(head, \".\" + fn))\n\n\ndef insert_appsinstalled(memc_addr, appsinstalled, dry_run=False):\n ua = appsinstalled_pb2.UserApps()\n ua.lat = appsinstalled.lat\n ua.lon = appsinstalled.lon\n key = \"%s:%s\" % (appsinstalled.dev_type, appsinstalled.dev_id)\n ua.apps.extend(appsinstalled.apps)\n packed = ua.SerializeToString()\n # @TODO persistent connection\n # @TODO retry and timeouts!\n try:\n if dry_run:\n logging.debug(\"%s - %s -> %s\" % (memc_addr, key, str(ua).replace(\"\\n\", \" \")))\n else:\n memc = memcache.Client([memc_addr], socket_timeout=1)\n memc.set(key, packed)\n except Exception as e:\n logging.exception(\"Cannot write to memc %s: %s\" % (memc_addr, e))\n return False\n return True\n\n\ndef parse_appsinstalled(line):\n line_parts = line.strip().split(\"\\t\")\n if len(line_parts) < 5:\n return\n dev_type, dev_id, lat, lon, raw_apps = line_parts\n if not dev_type or not dev_id:\n return\n try:\n apps = [int(a.strip()) for a in raw_apps.split(\",\")]\n except ValueError:\n apps = [int(a.strip()) for a in raw_apps.split(\",\") if a.isdigit()]\n logging.info(\"Not all user apps are digits: `%s`\" % line)\n try:\n lat, lon = float(lat), float(lon)\n except ValueError:\n logging.info(\"Invalid geo coords: `%s`\" % line)\n return AppsInstalled(dev_type, dev_id, lat, lon, apps)\n\n\ndef insert_manager(in_queue):\n # processed = errors = 0\n while not in_queue.empty():\n try:\n task = in_queue.get(block=True)\n print('current process: ', mp.current_process().name)\n except queue.Empty:\n print(\"Empty queue exception\")\n break\n insert_appsinstalled(*task)\n # if ok:\n # processed += 1\n # else:\n # errors += 1\n # return processed, errors\n\n\ndef read_file(file, options):\n device_memc = {\n \"idfa\": options.idfa,\n \"gaid\": options.gaid,\n \"adid\": options.adid,\n \"dvid\": options.dvid,\n }\n processed = 1\n errors = 0\n logging.info('Processing %s' % file)\n in_queue = mp.Manager().Queue()\n\n processes = []\n\n for i in range(1):\n proc = mp.Process(target=insert_manager, args=(in_queue,), name=f\"proc{i + 1}\")\n processes.append(proc)\n \n for proc in processes:\n proc.start()\n \n with gzip.open(file, 'rt') as text:\n for line in text:\n line = line.strip()\n if not line:\n continue\n appsinstalled = parse_appsinstalled(line)\n if not appsinstalled:\n errors += 1\n continue\n memc_addr = device_memc.get(appsinstalled.dev_type)\n if not memc_addr:\n errors += 1\n logging.error(\"Unknown device type: %s\" % appsinstalled.dev_type)\n continue\n 
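# hand the parsed line to the worker processes through the shared queue\n 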
in_queue.put((memc_addr, appsinstalled, options.dry))\n\n # in_queue.join()\n for proc in processes:\n proc.join()\n\n err_rate = float(errors) / processed\n if err_rate < NORMAL_ERR_RATE:\n print(file, \"Acceptable error rate (%s). Successfull load\" % err_rate)\n else:\n print(file, \"High error rate (%s > %s). Failed load\" % (err_rate, NORMAL_ERR_RATE))\n # dot_rename(fn)\n\n\ndef main(options):\n # pool = mp.Pool(1)\n # pool.map(partial(read_file, options=options), glob.iglob(options.pattern))\n # pool.close()\n # pool.join()\n for file in glob.iglob(options.pattern):\n read_file(file, options)\n\n\ndef prototest():\n sample = \"idfa\\t1rfw452y52g2gq4g\\t55.55\\t42.42\\t1423,43,567,3,7,23\\ngaid\\t7rfw452y52g2gq4g\\t55.55\\t42.42\\t7423,424\"\n for line in sample.splitlines():\n dev_type, dev_id, lat, lon, raw_apps = line.strip().split(\"\\t\")\n apps = [int(a) for a in raw_apps.split(\",\") if a.isdigit()]\n lat, lon = float(lat), float(lon)\n ua = appsinstalled_pb2.UserApps()\n ua.lat = lat\n ua.lon = lon\n ua.apps.extend(apps)\n packed = ua.SerializeToString()\n unpacked = appsinstalled_pb2.UserApps()\n unpacked.ParseFromString(packed)\n assert ua == unpacked\n\n\nif __name__ == '__main__':\n op = OptionParser()\n op.add_option(\"-t\", \"--test\", action=\"store_true\", default=False)\n op.add_option(\"-l\", \"--log\", action=\"store\", default=None)\n op.add_option(\"--dry\", action=\"store_true\", default=False)\n op.add_option(\"--pattern\", action=\"store\", default=\"/data/appsinstalled/*.tsv.gz\")\n op.add_option(\"--idfa\", action=\"store\", default=\"127.0.0.1:33013\")\n op.add_option(\"--gaid\", action=\"store\", default=\"127.0.0.1:33014\")\n op.add_option(\"--adid\", action=\"store\", default=\"127.0.0.1:33015\")\n op.add_option(\"--dvid\", action=\"store\", default=\"127.0.0.1:33016\")\n (opts, args) = op.parse_args()\n logging.basicConfig(filename=opts.log, level=logging.INFO if not opts.dry else logging.DEBUG,\n format='[%(asctime)s] %(levelname).1s %(message)s', datefmt='%Y.%m.%d %H:%M:%S')\n if opts.test:\n prototest()\n sys.exit(0)\n\n logging.info(\"Memc loader started with options: %s\" % opts)\n try:\n t1 = time()\n main(opts)\n print('executing time: ', time() - t1)\n except Exception as e:\n logging.exception(\"Unexpected error: %s\" % e)\n sys.exit(1)","sub_path":"memc_hload2.py","file_name":"memc_hload2.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"112944551","text":"import math\nimport numpy as np\nimport scipy\nimport networkx as nx\nfrom scipy.stats import multivariate_normal as mn\nimport matplotlib.pyplot as plt\nimport scipy.cluster.hierarchy as hcluster\nimport nodes as nd\nfrom nodes import *\nfrom data import *\nfrom sklearn import mixture\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.vq import vq, kmeans, whiten\nfrom time import time\n\ndef returnarr(arr,scope):\n\tq = []\n\tte = list(scope)\n\tte = sorted(te)\n\tfor i in arr:\n\t\tq.append(te[i])\n\treturn set(q)\n\nLeafcount = 0\n\nglobalcnt = 0\nnodes = []\n\ndef induce(tempdat,maxsize,scope,indsize,flag):\n\tfull = len(tempdat)\n\tif (flag==0):\n\t\tif (full>=30*len(scope)):\n\t\t\ttempdat2 = split(tempdat,8)\n\t\t\ts = sumNode()\n\t\t\t#global nodes\n\t\t\t#nodes.append(globalcnt,s.kind)\n\t\t\t#globalcnt = globalcnt + 1\n\t\t\tarr = []\n\t\t\tcnt = 0\n\t\t\tfor i in 
range(0,len(tempdat2)):\n\t\t\t\tif(len(tempdat2[i])>=(2*len(scope))):\n\t\t\t\t\tarr.append(len(tempdat2[i]))\n\t\t\t\t\ts.children.append(induce(np.asarray(tempdat2[i]),maxsize,scope,indsize,1))\n\t\t\t\t\tcnt = cnt + 1\n\t\t\t\n\t\t\tfor i in range(0,cnt):\n\t\t\t\tchosen = s.children[i]\n\t\t\t\tw = 0\n\t\t\t\tfor j in chosen.children:\n\t\t\t\t\ts.children.append(j)\n\t\t\t\t\tarr.append(chosen.wts[w]*arr[i])\n\t\t\t\t\tw = w+1\n\t\t\tarr = arr[cnt:]\n\t\t\ts.children = s.children[cnt:]\t\t\t\n\t\t\ts.setwts(arr)\n\t\t\tprint(\"wts are\",arr)\n\t\t\treturn s\n\teffdat = np.zeros(len(tempdat)*len(scope))\n\teffdat = np.reshape(effdat,(len(tempdat),len(scope)))\n\tfor i in range(0,len(tempdat)):\n\t\ttemp = submean(tempdat[i],scope)\n\t\tfor j in range(0,len(scope)):\n\t\t\teffdat[i][j] = temp[j]\n\teffcorr = np.corrcoef(np.transpose(effdat))\n\teffcov = np.cov(np.transpose(effdat))\n\tprint(np.shape(effcorr))\n\tprint(np.shape(effcov))\n\tempmean = np.mean(effdat,axis=0)\n\tprint(np.shape(empmean))\n\n\tG = nx.from_numpy_matrix(-abs(effcorr))\n\tG = G.to_undirected()\n\n\tDec = []\n\n\tT=nx.minimum_spanning_tree(G)\n\tOrder = np.asarray(T.edges(data='weight'))\n\tk = len(Order)\n\twts = np.zeros(k)\n\tOrder = Order[Order[:,2].argsort()]\n\tDec = []\n\tGc = max(nx.connected_component_subgraphs(T), key=len)\n\tn = Gc.number_of_nodes()\n\tif(n<=maxsize):\n\t\tDec.append(list(nx.connected_components(T)))\n\t\n\t#count = 0\n\n\tfor i in range(0,k):\n\t\tsum = 0\n\t\tfor j in range(0,len(Order)-i):\n\t\t\tsum = sum - Order[j,2]\n\t\twts[i] = sum - np.log(k-i) + np.log(k)\n\t\tidx = int(Order[len(Order)-i-1,0])\n\t\tidx2 = int(Order[len(Order)-i-1,1])\n\t\tT.remove_edge(idx,idx2)\n\t\tGc = max(nx.connected_component_subgraphs(T), key=len)\n\t\tn = Gc.number_of_nodes()\n\t\tif(n<=maxsize):\n\t\t\tDec.append(list(nx.connected_components(T)))\n\t\t\t#count = count+1\n\n\t#wts[k-1]=0.1\n\teffwts = np.zeros(len(Dec))\n\tfor i in range(0,len(Dec)):\n\t\teffwts[i] = wts[i+k-len(Dec)]\n\n\ts = sumNode()\n\ts.setwts(effwts)\n\tprint(effwts)\n\n\tprint(Dec)\n\n\tfor i in range(0,len(Dec)):\n\t\tp = prodNode()\n\t\ts.children.append(p)\n\t\tfor j in (Dec[i]):\n\t\t\tprint(j)\n\t\t\tsub = returnarr(j,scope)\n\t\t\tprint(sub)\n\t\t\tif (len(j)<=indsize):\n\t\t\t\tl = leafNode()\n\t\t\t\tglobal Leafcount\n\t\t\t\tLeafcount = Leafcount+1\n\t\t\t\ttempmean = submean(empmean,j)\n\t\t\t\ttempcov = submat(effcov,j)\n\t\t\t\tl.scope = sub\n\t\t\t\tl.create(tempmean,tempcov)\n\t\t\t\tp.children.append(l)\n\t\t\telse:\n\t\t\t\tp.children.append(induce(tempdat,maxsize-2,sub,indsize,0))\n\t\t\n\n\treturn s\n\n#test\n'''\ns = set(xrange(784))\n\nab=np.loadtxt(open(\"../train.csv\", \"rb\"), delimiter=\",\", skiprows=1)\n\nblank = []\nfor i in range(0,42000):\n\tif(ab[i][0]==2):\n\t\tblank.append(ab[i,1:])\n\nblank2 = []\n\nfor i in range(0,42000):\n\tif(ab[i][0]==7):\n\t\tblank2.append(ab[i,1:])\n\n\nprint(np.shape(np.asarray(blank)))\nab = np.asarray(blank)\n\nfor i in range(0,len(ab)):\n\tfor j in range(0,784):\n\t\tdraw = np.random.uniform(0.2,0.6)\n\t\tab[i][j] = ab[i][j] + draw\n\nTst = induce(ab[:,:],400,s,10,0,20)\n\n\nfor i in range(0,8000):\n\tt = time()\n\tidx = np.random.randint(0,len(ab))\n\tnd.globalarr = ab[idx]\n\tTst.passon()\n\tplaceholder = Tst.retval()\n\tTst.update()\n\tprint(time()-t)\n\nsum = 0\n\nplot1 = np.zeros(800)\n\nfor i in range(0,800):\n\tnd.globalarr = ab[i]\n\tTst.passon()\n\tsum = sum + Tst.retval()\n\tplot1[i] = 
Tst.retval()\n\n\nprint(sum/800)\nprint(np.amax(plot1),np.amin(plot1))\n\nab = np.asarray(blank2)\n\nsum = 0\n\nplot1 = np.zeros(800)\n\nfor i in range(0,800):\n\tnd.globalarr = ab[i]\n\tTst.passon()\n\tsum = sum + Tst.retval()\n\tplot1[i] = Tst.retval()\n\nprint(sum/800)\nprint(np.amax(plot1),np.amin(plot1))\n\n\nvalues = []\n\ns = set(xrange(8))\n\nab = np.genfromtxt('../AB.dat',delimiter=\",\")\nab = np.asarray(ab[:,1:])\nab = whiten(ab)\nprint(len(ab))\n\n\n\n\n\n#for i in range(0,len(ab)):\n#\tfor j in range(0,22):\n#\t\tab[i][j] = ab[i][j] + 1e-6\n\n\nfor w in range(0,2):\n\n\tab = np.random.permutation(ab)\n\n\tTst = induce(ab[:3600,:],8,s,2,0)\n\n\n\tfor i in range(0,7200):\n\t\tt = time()\n\t\tidx = np.random.randint(0,3600)\n\t\tnd.globalarr = ab[idx]\n\t\tTst.passon()\n\t\tplaceholder = Tst.retval()\n\t\tTst.update()\n\t\tprint(time()-t)\n\n\tsum = 0\t\n\n\tplot1 = np.zeros(400)\n\n\tfor i in range(3600,4000):\n\t\tnd.globalarr = ab[i]\n\t\tTst.passon()\n\t\tsum = sum + Tst.retval()\n\t\tplot1[i-3600] = Tst.retval()\n\n\tvalues.append((sum/400))\n\tprint(values)\n\nprint(values)\n'''\n","sub_path":"modul.py","file_name":"modul.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"12161174","text":"# a series of numbers in which each number ( Fibonacci number ) is the sum of the two preceding numbers.\n# The simplest is the series 1, 1, 2, 3, 5, 8, etc.\n# n = int(input(\"enter the number : \"))\n# a = 0\n# b = 1\n# if n < 0:\n# print(\"it is a negative number \")\n# elif n == 1:\n# print(a)\n# else:\n# print(a)\n# print(b)\n#\n# for i in range(2, n):\n# c = a + b\n# a = b\n# b = c\n# print(c)\n\n\n# def feb(n):\n# a = 0\n# b = 1\n# if n < 0:\n# print(\"it is a negative number\")\n# elif n == 1:\n# print(a)\n# elif n == 2:\n# print(a)\n# print(b)\n# else:\n# print(a)\n# print(b)\n#\n# for i in range(2, n):\n# c = a + b\n# a = b\n# b = c\n# print(c)\n#\n#\n# feb(10)\n\n\ndef fib(n):\n p, q = 0, 1\n while p < n:\n yield p\n p, q = q, p + q\n\n\n# create generator object\n\nfor i in fib(10):\n print(i)\n","sub_path":"BasicPrograms/Fibinocci.py","file_name":"Fibinocci.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"92764312","text":"import random\r\n\r\nnum = random.randint(1,10)\r\n\r\nguess = None\r\n\r\nwhile num != guess:\r\n guess = input(\"\\nGuess a number from 1 to 10: \")\r\n guess = int(guess)\r\n if num < guess:\r\n print(\"\\n Too high,try again\") \r\n elif num > guess:\r\n print(\"\\nToo low,try again\")\r\n else:\r\n print(f\"\\nYou guess of {num} is correct! You won\")\r\n play_again = input(\"\\nDo you want to play again? (y/n) \")\r\n if play_again == \"y\":\r\n num = random.randint(1,10)\r\n else:\r\n print(\"\\nThank you for playing\")\r\n break\r\n \r\n","sub_path":"Guessing_game.py","file_name":"Guessing_game.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"233433679","text":"import warnings\n\nfrom . 
import (\n Database,\n Method,\n Normalization,\n Weighting,\n methods,\n normalizations,\n projects,\n weightings,\n)\nfrom .backends.schema import ActivityDataset as AD\nfrom .backends.schema import get_id\nfrom .errors import Brightway2Project\n\n\nclass Mapping:\n \"\"\"A dictionary that maps object ids, like ``(\"Ecoinvent 2.2\", 42)``, to integers.\n\n Used only for backwards compatibility; preferred method is now to look up the ids of activities directly in the SQlite database.\"\"\"\n\n def add(self, keys):\n raise DeprecationWarning(\n \"This method is no longer necessary, and does nothing.\"\n )\n return\n\n def __getitem__(self, key):\n return get_id(key)\n\n def delete(self, keys):\n raise DeprecationWarning(\n \"This method is no longer necessary, and does nothing.\"\n )\n return\n\n def __str__(self):\n return \"Obsolete mapping dictionary.\"\n\n def __len__(self):\n return AD.select().count()\n\n\nclass _Databases:\n def clean(self):\n warnings.warn(\n \"Use `Database.clean_all()` instead of `databases.clean()`\",\n DeprecationWarning,\n )\n Database.clean_all()\n\n def set_dirty(self, name):\n warnings.warn(\n \"Use `Database.set_dirty(name)` instead of `databases.set_dirty(name)`\",\n DeprecationWarning,\n )\n Database.set_dirty(name)\n\n def __getitem__(self, name):\n warnings.warn(\n \"Use `Database` attributes directly instead of `databases[name]`\",\n DeprecationWarning,\n )\n return Database.get(Database.name == name).metadata\n\n def __contains__(self, name):\n warnings.warn(\n \"Use `Database.exists(name)` instead of `name in databases`\",\n DeprecationWarning,\n )\n return Database.exists(name)\n\n def __len__(self):\n warnings.warn(\n \"Use `Database.select().count()` instead of `len(databases)`\",\n DeprecationWarning,\n )\n return len(Database)\n\n def __delitem__(self, name):\n warnings.warn(\n \"Use `Database(name).delete()` instead of `del databases[name]`\",\n DeprecationWarning,\n )\n Database.get(Database.name == name).delete_instance()\n\n def __iter__(self):\n return (obj.name for obj in Database.select())\n\n def flush(self):\n warnings.warn(\n \"`databases.flush()` doesn't do anything. Modify `Database` attributes and call `.save()` instead.\",\n DeprecationWarning,\n )\n\n def __setitem__(self, *args, **kwargs):\n warnings.warn(\n \"`databases[foo] = something` doesn't do anything. 
Modify `Database` attributes and call `.save()` instead.\",\n DeprecationWarning,\n )\n\n\ndatabases = _Databases()\n\n\ndef unpack(dct):\n for obj in dct:\n if hasattr(obj, \"key\"):\n yield obj.key\n else:\n yield obj\n\n\ndef translate_key(key):\n if isinstance(key, int):\n return key\n else:\n return AD.get(AD.database == key[0], AD.code == key[1]).id\n\n\ndef prepare_lca_inputs(\n demand=None,\n method=None,\n weighting=None,\n normalization=None,\n demands=None,\n remapping=True,\n demand_database_last=True,\n):\n \"\"\"Prepare LCA input arguments in Brightway 2.5 style.\"\"\"\n if not projects.dataset.data.get(\"25\"):\n raise Brightway2Project(\n \"Please use `projects.migrate_project_25` before calculating using Brightway 2.5\"\n )\n\n databases.clean()\n data_objs = []\n remapping_dicts = None\n\n if demands:\n demand_database_names = [\n db_label for dct in demands for db_label, _ in unpack(dct)\n ]\n elif demand:\n demand_database_names = [db_label for db_label, _ in unpack(demand)]\n else:\n demand_database_names = []\n\n if demand_database_names:\n database_names = set.union(\n *[\n Database(db_label).find_graph_dependents()\n for db_label in demand_database_names\n ]\n )\n\n if demand_database_last:\n database_names = [\n x for x in database_names if x not in demand_database_names\n ] + demand_database_names\n\n data_objs.extend([Database(obj).datapackage() for obj in database_names])\n\n if remapping:\n # This is technically wrong - we could have more complicated queries\n # to determine what is truly a product, activity, etc.\n # However, for the default database schema, we know that each node\n # has a unique ID, so this won't produce incorrect responses,\n # just too many values. As the dictionary only exists once, this is\n # not really a problem.\n reversed_mapping = {\n i: (d, c)\n for d, c, i in AD.select(AD.database, AD.code, AD.id)\n .where(AD.database << database_names)\n .tuples()\n }\n remapping_dicts = {\n \"activity\": reversed_mapping,\n \"product\": reversed_mapping,\n \"biosphere\": reversed_mapping,\n }\n\n if method:\n assert method in methods\n data_objs.append(Method(method).datapackage())\n if weighting:\n assert weighting in weightings\n data_objs.append(Weighting(weighting).datapackage())\n if normalization:\n assert normalization in normalizations\n data_objs.append(Normalization(normalization).datapackage())\n\n if demands:\n indexed_demand = [{get_id(k): v for k, v in dct.items()} for dct in demands]\n elif demand:\n indexed_demand = {get_id(k): v for k, v in demand.items()}\n else:\n indexed_demand = None\n\n return indexed_demand, data_objs, remapping_dicts\n\n\ndef get_database_filepath(functional_unit):\n \"\"\"Get filepaths for all databases in supply chain of `functional_unit`\"\"\"\n dbs = set.union(\n *[Database(key[0]).find_graph_dependents() for key in functional_unit]\n )\n return [Database(obj).filepath_processed() for obj in dbs]\n","sub_path":"bw2data/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"486664180","text":"\n\n\nclass HashTable:\n # Initialize the constructor with the size of 10\n def __init__(self, size=10):\n self.hashtable = []\n for i in range(size):\n self.hashtable.append([])\n\n # Basic hash key that utilize the modulo of package ID as key\n # O(1)\n # Private\n def _hashkey_generator_(self, key):\n hashkey = key % len(self.hashtable)\n return hashkey\n\n # Insert a new item into the 
hash table - O(1)\n    def add(self, key, item): # does both insert and update\n\n        bucket_list = self.hashtable[self._hashkey_generator_(key)]\n        for kv in bucket_list:\n            # print (key_value)\n            if kv[0] == key:\n                kv[1] = item\n                return True\n        key_value = [key, item]\n        bucket_list.append(key_value)\n        return True\n\n    # Search for an item by key, and return the item - O(1) on average\n    def get(self, key):\n\n        bucket_list = self.hashtable[self._hashkey_generator_(key)]\n        # search for the key in the bucket list\n        for kv in bucket_list:\n            #print (key_value)\n            if kv[0] == key:\n                return kv[1] # value\n        return None\n\n","sub_path":"HashTable.py","file_name":"HashTable.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"310787836","text":"\"\"\"\nFunctions for performing interpolation\n\nFunctions\n---------\nbuild_remap_weights - constructs a mapping file containing the indices and\n    weights needed to perform horizontal interpolation\n\nremap - perform horizontal interpolation on a data set, given a mapping file\n\nAuthor\n------\nXylar Asay-Davis\n\nLast Modified\n-------------\n03/14/2017\n\"\"\"\n\nimport subprocess\nimport tempfile\nimport os\nfrom distutils.spawn import find_executable\n\nfrom .scrip import mpas_file_to_scrip, lat_lon_file_to_scrip, \\\n    lat_lon_array_to_scrip\n\n\ndef build_remap_weights(sourceFileName, outWeightFileName,\n                        destintionFileName=None, sourceFileType='mpas',\n                        sourceLatVarName='lat', sourceLonVarName='lon',\n                        destintionLatVarName='lat', destintionLonVarName='lon',\n                        destinationLat=None, destinationLon=None,\n                        desitnationUnits='degrees',\n                        method='bilinear', overwrite=False): # {{{\n    \"\"\"\n    Given a source file defining either an MPAS mesh or a lat-lon grid and\n    a destination file or set of arrays defining a lat-lon grid, constructs\n    a mapping file used for interpolation between the source and destination\n    grids.\n\n    Parameters\n    ----------\n    sourceFileName : str\n        The path of the file containing either the source MPAS mesh or\n        the source lat-lon grid\n\n    outWeightFileName : str\n        The path to which the mapping file containing interpolation weights\n        and indices should be written\n\n    destintionFileName : str, optional\n        The path of the file containing the destination lat-lon grid. Should\n        be None if `destinationLat` and `destinationLon` are supplied instead.\n\n    sourceFileType : {'mpas', 'latlon'}\n        Whether the source file contains an MPAS mesh or a lat-lon grid\n\n    sourceLatVarName, sourceLonVarName : str, optional\n        If `sourceFileType == 'latlon'`, the name of the latitude and longitude\n        variables in the source grid file\n\n    destintionLatVarName, destintionLonVarName : str, optional\n        If `destintionFileName` is not `None`, the name of the latitude and\n        longitude variables in the source grid file\n\n    destinationLat, destinationLon : 1D numpy.arrays, optional\n        One dimensional arrays defining the latitude and longitude coordinates\n        of grid corners on the destination grid. 
`destintionFileName` should be\n set to `None` if these are supplied\n\n desitnationUnits : {'degrees', 'radians'}, optional\n The units of `destinationLat` and `destinationLon` (if they are\n supplied)\n\n method : {'bilinear', 'neareststod', 'conserve'}\n The method of interpolation used, see documentation for\n `ESMF_RegridWeightGen` for details.\n\n overwrite : bool, optional\n Whether the mapping file should be overwritten if it already exists.\n If `False`, and the mapping file is already present, the function\n does nothing and returns immediately, potentially saving a costly\n re-computaiton of the mapping file.\n\n Raises\n ------\n OSError\n If `ESMF_RegridWeightGen` is not in the system path.\n\n Author\n ------\n Xylar Asay-Davis\n\n Last Modified\n -------------\n 03/14/2017\n \"\"\"\n\n if not overwrite and os.path.exists(outWeightFileName):\n # a valid weight file already exists, so nothing to do\n return\n\n if find_executable('ESMF_RegridWeightGen') is None:\n raise OSError('ESMF_RegridWeightGen not found. Make sure esmf package '\n 'is installed via\\nlatest nco: \\n'\n 'conda install nco\\n'\n 'Note: this presumes use of the conda-forge channel.')\n\n # two temporary SCRIP files, one for the MPAS mesh and one for the dest\n # grid\n sourceScripFileName = _get_temp_path()\n destintionScripFileName = _get_temp_path()\n\n args = ['ESMF_RegridWeightGen', '--source', sourceScripFileName,\n '--destination', destintionScripFileName,\n '--weight', outWeightFileName,\n '--method', method]\n\n if sourceFileType == 'mpas':\n mpas_file_to_scrip(mpasFileName=sourceFileName,\n scripFileName=sourceScripFileName)\n args.extend(['--src_regional', '--ignore_unmapped'])\n elif sourceFileType == 'latlon':\n lat_lon_file_to_scrip(inFileName=sourceFileName,\n scripFileName=sourceScripFileName,\n latVarName=sourceLatVarName,\n lonVarName=sourceLonVarName)\n else:\n raise ValueError(\"sourceFileType is neither 'mpas' or 'latlon'.\")\n\n if destintionFileName is not None:\n lat_lon_file_to_scrip(inFileName=destintionFileName,\n scripFileName=destintionScripFileName,\n latVarName=destintionLatVarName,\n lonVarName=destintionLonVarName)\n elif destinationLat is not None and destinationLon is not None:\n lat_lon_array_to_scrip(latCorner=destinationLat,\n lonCorner=destinationLon,\n units=desitnationUnits,\n scripFileName=destintionScripFileName)\n else:\n raise ValueError('Either destintionFileName or both config and '\n 'sectionName must be supplied.')\n\n subprocess.check_call(args)\n\n # remove the temporary SCRIP files\n os.remove(sourceScripFileName)\n os.remove(destintionScripFileName) # }}}\n\n\ndef remap(inFileName, outFileName, inWeightFileName, sourceFileType='mpas',\n sourceLatVarName='lat', sourceLonVarName='lon',\n variableList=None, overwrite=False): # {{{\n \"\"\"\n Given a source file defining either an MPAS mesh or a lat-lon grid and\n a destination file or set of arrays defining a lat-lon grid, constructs\n a mapping file used for interpolation between the source and destination\n grids.\n\n Parameters\n ----------\n inFileName : str\n The path to the file containing a data set on the source grid\n\n outFileName : str\n The path where the data on the destination grid should be written\n\n inWeightFileName : str\n The path to the mapping file containing interpolation weights\n and indices between the source and destination grids\n\n sourceFileType : {'mpas', 'latlon'}\n Whether the source file contains an MPAS mesh or a lat-lon grid\n\n sourceLatVarName, sourceLonVarName : str, 
optional\n If `sourceFileType == 'latlon'`, the name of the latitude and longitude\n variables in the source grid file\n\n variableList : list of str, optional\n A list of variables to be mapped. By default, all variables are mapped\n\n overwrite : bool, optional\n Whether the destination file should be overwritten if it already\n exists. If `False`, and the destination file is already present, the\n function does nothing and returns immediately\n\n Raises\n ------\n OSError\n If `ncremap` is not in the system path.\n\n Author\n ------\n Xylar Asay-Davis\n\n Last Modified\n -------------\n 03/14/2017\n \"\"\"\n\n if not overwrite and os.path.exists(outFileName):\n # a valid weight file already exists, so nothing to do\n return\n\n if find_executable('ncremap') is None:\n raise OSError('ncremap not found. Make sure the latest nco package '\n 'is installed: \\n conda install nco')\n\n args = ['ncremap',\n '-R', '--rgr lat_nm={} --rgr lon_nm={}'.format(sourceLatVarName,\n sourceLonVarName),\n '-i', inFileName,\n '-m', inWeightFileName,\n '-o', outFileName]\n\n if sourceFileType == 'mpas':\n # Note: using the -C (climatology) flag for now because otherwise\n # ncremap tries to add a _FillValue attribute that might already\n # be present and quits with an error\n args.extend(['-P', 'mpas', '-C'])\n if variableList is not None:\n args.extend(['-v', ','.join(variableList)])\n\n subprocess.check_call(args) # }}}\n\n\ndef _get_temp_path(): # {{{\n '''Returns the name of a temporary NetCDF file'''\n return '{}/{}.nc'.format(tempfile._get_default_tempdir(),\n next(tempfile._get_candidate_names())) # }}}\n\n# vim: ai ts=4 sts=4 et sw=4 ft=python\n","sub_path":"mpas_analysis/shared/interpolation/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"151560328","text":"from utils import read_input, write_output, check_result\r\nimport re\r\nfrom collections import Counter\r\n\r\n\r\ndef has_six_digits(x):\r\n return len(x) == 6\r\n\r\n\r\ndef has_two_adj_digits_not_part_of_larger_group(x):\r\n return 2 in Counter(x).values()\r\n\r\n\r\ndef digits_never_decrease(x):\r\n pointer = 1\r\n while pointer < len(x):\r\n if x[pointer - 1] > x[pointer]:\r\n return False\r\n pointer += 1\r\n return True\r\n\r\n\r\ndef calc(lines):\r\n result = 0\r\n parser = re.compile(\"\\d+\")\r\n values = [int(x) for line in lines for x in parser.findall(line.strip())]\r\n\r\n for x in range(values[0], values[1] + 1):\r\n x = str(x)\r\n if has_six_digits(x) and has_two_adj_digits_not_part_of_larger_group(x) and digits_never_decrease(x):\r\n result += 1\r\n return result\r\n\r\n\r\nif __name__ == '__main__':\r\n lines = read_input()\r\n result = str(calc(lines))\r\n write_output(result)\r\n check_result(result)\r\n","sub_path":"machalvan-python/day4/part2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"286134688","text":"import sys\nimport tracing\n\ndef get_pos(_raw):\n raw = _raw.strip()\n\n assert len(raw) == 10\n\n row = 0\n for x in raw[:7]:\n row = (1 if x == \"B\" else 0) + (row << 1)\n tracing.info(\"row: {}\", row)\n\n col = 0\n for x in raw[7:]:\n col = (1 if x == \"R\" else 0) + (col << 1)\n\n return row * 8 + col\n\n\ndef main():\n responses = []\n response = set()\n\n for _raw in sys.stdin:\n raw = _raw.strip()\n\n if raw == \"\":\n tracing.info(\"new group: 
{}\", response)\n responses.append(response)\n response = set()\n continue\n\n response |= set(raw)\n\n responses.append(response)\n\n result = sum(len(s) for s in responses)\n tracing.info(\"the result is: {}\", result)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"aoc_061/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"464365201","text":"import logging\nfrom powermon.formats.abstractformat import AbstractFormat\n\nfrom powermon.outputs.abstractoutput import AbstractOutput\n\nlog = logging.getLogger(\"screen\")\n\n\nclass Screen(AbstractOutput):\n def __init__(self, formatter):\n self.name = \"Screen\"\n self.set_formatter(formatter)\n \n\n def set_formatter(self, formatter: AbstractFormat):\n self.formatter = formatter\n\n def process(self, result):\n log.info(\"Using output sender: screen\")\n log.debug(\"formatter: %s\" % self.formatter)\n\n formatted_data = self.formatter.format(result)\n if formatted_data is None:\n print(\"Nothing returned from data formatting\")\n return\n\n for line in formatted_data:\n print(line)\n","sub_path":"powermon/outputs/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"66778879","text":"import sys\n\n\ndef solution():\n s, e = map(int, sys.stdin.readline().split())\n arr = [0]\n for i in range(1, e + 1):\n for j in range(i):\n arr.append(i)\n ans = 0\n for i in range(s, e + 1):\n ans += arr[i]\n print(ans)\n\n\nsolution()\n","sub_path":"python/implementation/1292_쉽게푸는문제.py","file_name":"1292_쉽게푸는문제.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"87110783","text":"import os \n\ndef makejson(detgen,N,Ndet,gsw,basename):\n for state in ['gs0','gs1','gs2','gs3','gs4','gs5','gs2_lo','gs3_lo','gs5_lo']:\n for j in range(1,N+1):\n f=basename+'/'+state+'_'+detgen+'_Ndet'+str(Ndet)+'_gsw'+str(gsw)+'_'+str(j)+'.vmc'\n os.system('../../../mainline/bin/gosling '+f+'.log -json &> '+f+'.gosling.json')\n return 1\n\nif __name__=='__main__':\n detgen='s'\n N=100\n Ndet=10\n gsw=0.7\n basename='run2s'\n makejson(detgen,N,Ndet,gsw,basename)\n","sub_path":"qwalk/old/vmc_eq/makejson.py","file_name":"makejson.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"110210660","text":"# -*- coding: utf-8 -*-\n'''\nSupport for haproxy\n\n.. 
versionadded:: 2014.7.0\n'''\n\nfrom __future__ import generators\nfrom __future__ import absolute_import\n\n# Import python libs\nimport stat\nimport os\nimport logging\n\ntry:\n import haproxy.cmds\n import haproxy.conn\n HAS_HAPROXY = True\nexcept ImportError:\n HAS_HAPROXY = False\n\nlog = logging.getLogger(__name__)\n\n__virtualname__ = 'haproxy'\n\n\ndef __virtual__():\n '''\n Only load the module if haproxyctl is installed\n '''\n if HAS_HAPROXY:\n return __virtualname__\n return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available')\n\n\ndef _get_conn(socket='/var/run/haproxy.sock'):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n\n\ndef list_servers(backend, socket='/var/run/haproxy.sock', objectify=False):\n '''\n List servers in haproxy backend.\n\n backend\n haproxy backend\n\n socket\n haproxy stats socket\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' haproxy.list_servers mysql\n '''\n ha_conn = _get_conn(socket)\n ha_cmd = haproxy.cmds.listServers(backend=backend)\n return ha_conn.sendCmd(ha_cmd, objectify=objectify)\n\n\ndef enable_server(name, backend, socket='/var/run/haproxy.sock'):\n '''\n Enable Server in haproxy\n\n name\n Server to enable\n\n backend\n haproxy backend, or all backends if \"*\" is supplied\n\n socket\n haproxy stats socket\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' haproxy.enable_server web1.example.com www\n '''\n\n if backend == '*':\n backends = show_backends(socket=socket).split('\\n')\n else:\n backends = [backend]\n\n results = {}\n for backend in backends:\n ha_conn = _get_conn(socket)\n ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend)\n ha_conn.sendCmd(ha_cmd)\n results[backend] = list_servers(backend, socket=socket)\n\n return results\n\n\ndef disable_server(name, backend, socket='/var/run/haproxy.sock'):\n '''\n Disable server in haproxy.\n\n name\n Server to disable\n\n backend\n haproxy backend, or all backends if \"*\" is supplied\n\n socket\n haproxy stats socket\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' haproxy.disable_server db1.example.com mysql\n '''\n\n if backend == '*':\n backends = show_backends(socket=socket).split('\\n')\n else:\n backends = [backend]\n\n results = {}\n for backend in backends:\n ha_conn = _get_conn(socket)\n ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend)\n ha_conn.sendCmd(ha_cmd)\n results[backend] = list_servers(backend, socket=socket)\n\n return results\n\n\ndef get_weight(name, backend, socket='/var/run/haproxy.sock'):\n '''\n Get server weight\n\n name\n Server name\n\n backend\n haproxy backend\n\n socket\n haproxy stats socket\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' haproxy.get_weight web1.example.com www\n '''\n ha_conn = _get_conn(socket)\n ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend)\n return ha_conn.sendCmd(ha_cmd)\n\n\ndef set_weight(name, backend, weight=0, socket='/var/run/haproxy.sock'):\n '''\n Set server weight\n\n name\n Server name\n\n backend\n haproxy backend\n\n weight\n Server Weight\n\n socket\n haproxy stats socket\n\n CLI Example:\n\n .. 
code-block:: bash\n\n        salt '*' haproxy.set_weight web1.example.com www 13\n    '''\n    ha_conn = _get_conn(socket)\n    ha_cmd = haproxy.cmds.setWeight(server=name, backend=backend, weight=weight)\n    ha_conn.sendCmd(ha_cmd)\n    return get_weight(name, backend, socket=socket)\n\n\ndef set_state(name, backend, state, socket='/var/run/haproxy.sock'):\n    '''\n    Force a server's administrative state to a new state. This can be useful to\n    disable load balancing and/or any traffic to a server. Setting the state to\n    \"ready\" puts the server in normal mode, and the command is the equivalent of\n    the \"enable server\" command. Setting the state to \"maint\" disables any traffic\n    to the server as well as any health checks. This is the equivalent of the\n    \"disable server\" command. Setting the mode to \"drain\" only removes the server\n    from load balancing but still allows it to be checked and to accept new\n    persistent connections. Changes are propagated to tracking servers if any.\n\n    name\n        Server name\n\n    backend\n        haproxy backend\n\n    state\n        A string of the state to set. Must be 'ready', 'drain', or 'maint'\n\n    '''\n    # Pulling this in from the latest 0.5 release which is not yet in PyPi.\n    # https://github.com/neurogeek/haproxyctl\n    class setServerState(haproxy.cmds.Cmd):\n        \"\"\"Set server state command.\"\"\"\n        cmdTxt = \"set server %(backend)s/%(server)s state %(value)s\\r\\n\"\n        p_args = ['backend', 'server', 'value']\n        helpTxt = \"Force a server's administrative state to a new state.\"\n\n    ha_conn = _get_conn(socket)\n    ha_cmd = setServerState(server=name, backend=backend, value=state)\n    return ha_conn.sendCmd(ha_cmd)\n\n\ndef show_frontends(socket='/var/run/haproxy.sock'):\n    '''\n    Show HaProxy frontends\n\n    socket\n        haproxy stats socket\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' haproxy.show_frontends\n    '''\n    ha_conn = _get_conn(socket)\n    ha_cmd = haproxy.cmds.showFrontends()\n    return ha_conn.sendCmd(ha_cmd)\n\n\ndef show_backends(socket='/var/run/haproxy.sock'):\n    '''\n    Show HaProxy Backends\n\n    socket\n        haproxy stats socket\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        salt '*' haproxy.show_backends\n    '''\n    ha_conn = _get_conn(socket)\n    ha_cmd = haproxy.cmds.showBackends()\n    return ha_conn.sendCmd(ha_cmd)\n\n\ndef get_sessions(name, backend, socket='/var/run/haproxy.sock'):\n    '''\n    .. versionadded:: Carbon\n\n    Get number of current sessions on server in backend (scur)\n\n    name\n        Server name\n\n    backend\n        haproxy backend\n\n    socket\n        haproxy stats socket\n\n    CLI Example:\n\n    .. 
code-block:: bash\n\n salt '*' haproxy.get_sessions web1.example.com www\n '''\n class getStats(haproxy.cmds.Cmd):\n p_args = [\"backend\", \"server\"]\n cmdTxt = \"show stat\\r\\n\"\n helpText = \"Fetch all statistics\"\n\n ha_conn = _get_conn(socket)\n ha_cmd = getStats(server=name, backend=backend)\n result = ha_conn.sendCmd(ha_cmd)\n for line in result.split('\\n'):\n if line.startswith(backend):\n outCols = line.split(',')\n if outCols[1] == name:\n return outCols[4]\n","sub_path":"salt/modules/haproxyconn.py","file_name":"haproxyconn.py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"637609851","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/vortex/repos/FENIX-MAPS/geobricks/geobricks_mapclassify/geobricks_mapclassify/core/mapclassify.py\n# Compiled at: 2015-04-01 04:09:55\nimport uuid, os\nfrom geobricks_common.core.log import logger\nfrom geobricks_common.core.utils import dict_merge\nfrom geobricks_mapclassify.core.sld import create_sld_xml\nfrom geobricks_mapclassify.core.colors import get_colors\nfrom geobricks_mapclassify.core.classify import get_ranges\nlog = logger(__file__)\ndefault_obj = {'intervals': 5, \n 'colorramp': 'Reds', \n 'colortype': None, \n 'colors': None, \n 'reverse': False, \n 'ranges': None, \n 'labels': None, \n 'nodata': {'codes': None, \n 'label': 'No Data Value', \n 'position': 'on top'}, \n 'classificationtype': 'jenks_caspall_forced', \n 'joincolumn': None, \n 'joindata': None, \n 'doublecounting': False, \n 'decimalvalues': 2, \n 'jointype': 'shaded'}\n\nclass MapClassify:\n config = None\n\n def __init__(self, config):\n self.config = config\n\n def classify(self, data, distribution_url=None, distribution_folder=None):\n data = dict_merge(default_obj, data)\n ranges = get_ranges(data)\n log.info('Ranges: ' + str(ranges))\n data['intervals'] = len(ranges)\n log.info('Intervals: ' + str(data['intervals']))\n colors = get_colors(data, data['intervals'])\n log.info('Colors: ' + str(colors))\n if data['jointype'] == 'shaded':\n return self.classify_sld(data, ranges, colors, distribution_url, distribution_folder)\n if data['jointype'] == 'point':\n sld, legend = create_sld_xml(data, ranges, colors)\n return {'legend': legend}\n raise Exception('Classification \"type\":\"' + data['jointype'] + '\" not supported.')\n\n def classify_sld(self, data, ranges, colors, distribution_url=None, distribution_folder=None):\n distribution_folder = get_distribution_folder(self.config, distribution_folder)\n sld, legend = create_sld_xml(data, ranges, colors)\n path, filename = _create_sld(distribution_folder, sld)\n if distribution_url is None:\n return path\n else:\n url = distribution_url + filename\n return {'url': url, 'legend': legend}\n\n\ndef _create_sld(distribution_folder, sld, extension='.sld'):\n filename = 'sld_' + str(uuid.uuid4()) + extension\n path = os.path.join(distribution_folder, filename)\n with open(path, 'w') as (f):\n f.write(sld)\n return (\n path, filename)\n\n\ndef get_distribution_folder(config, distribution_folder=None):\n try:\n if distribution_folder is None:\n if not os.path.isabs(config['settings']['folders']['distribution_sld']):\n config['settings']['folders']['distribution_sld'] = os.path.abspath(config['settings']['folders']['distribution_sld'])\n distribution_folder = 
config['settings']['folders']['distribution_sld']\n if not os.path.isdir(distribution_folder):\n os.makedirs(distribution_folder)\n except Exception as e:\n log.error(e)\n raise Exception(e)\n\n return distribution_folder","sub_path":"pycfiles/GeobricksMapClassify-0.0.9.tar/mapclassify.py","file_name":"mapclassify.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"503973629","text":"# -*- coding: utf-8 -*-\nimport os\nimport datetime\nimport daiquiri\nimport daiquiri.formatter\nfrom typing import Optional\nfrom enum import Enum, unique\nfrom mio.util.Helper import get_root_path\nfrom mio.util.LogConfigs import *\n\n\n@unique\nclass LoggerType(Enum):\n CONSOLE = 1\n FILE = 2\n DATADOG = 3\n SYSLOG = 4\n CONSOLE_FILE = 12\n CONSOLE_DATADOG = 13\n CONSOLE_SYSLOG = 14\n\n\nnameToLevel = {\n 'CONSOLE': LoggerType.CONSOLE,\n 'FILE': LoggerType.FILE,\n 'DATADOG': LoggerType.DATADOG,\n 'SYSLOG': LoggerType.SYSLOG,\n 'CONSOLE_FILE': LoggerType.CONSOLE_FILE,\n 'CONSOLE_DATADOG': LoggerType.CONSOLE_DATADOG,\n 'CONSOLE_SYSLOG': LoggerType.CONSOLE_SYSLOG,\n}\n\n\nclass LogHandler(object):\n logger: daiquiri.KeywordArgumentAdapter\n\n def __init__(self, logger_name: str,\n fmt: Optional[str] = '%(asctime)s [PID %(process)d] [%(levelname)s] %(name)s -> %(message)s',\n datefmt: Optional[str] = None, logger_type: LoggerType = None, log_level: int = logging.DEBUG,\n datadog_config: Optional[DataDog] = None, syslog_config: Optional[SysLog] = None):\n formatter: daiquiri.formatter.ColorFormatter = daiquiri.formatter.ColorFormatter(\n fmt=fmt,\n datefmt=datefmt\n )\n console_only = False\n if logger_type == LoggerType.FILE or logger_type == LoggerType.CONSOLE_FILE:\n logger_dir = os.path.join(get_root_path(), 'logs')\n errors_file = os.path.join(logger_dir, 'errors.log')\n everything_file = os.path.join(logger_dir, 'everything.log')\n if not os.path.isdir(logger_dir):\n os.makedirs(logger_dir)\n if logger_type == LoggerType.CONSOLE_FILE:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.Stream(formatter=formatter),\n daiquiri.output.File(errors_file, level=logging.ERROR),\n daiquiri.output.TimedRotatingFile(\n everything_file,\n level=log_level,\n interval=datetime.timedelta(days=1)),\n ))\n else:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.File(errors_file, level=logging.ERROR),\n daiquiri.output.TimedRotatingFile(\n everything_file,\n level=log_level,\n interval=datetime.timedelta(days=1)),\n ))\n elif logger_type == LoggerType.DATADOG or logger_type == LoggerType.CONSOLE_DATADOG:\n if datadog_config is None:\n console_only = True\n else:\n if logger_type == LoggerType.CONSOLE_DATADOG:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.Stream(formatter=formatter),\n daiquiri.output.Datadog(hostname=datadog_config.hostname, port=datadog_config.port,\n formatter=datadog_config.formatter, level=datadog_config.level),\n ))\n else:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.Datadog(hostname=datadog_config.hostname, port=datadog_config.port,\n formatter=datadog_config.formatter, level=datadog_config.level),\n ))\n elif logger_type == LoggerType.SYSLOG or logger_type == LoggerType.CONSOLE_SYSLOG:\n if syslog_config is None:\n console_only = True\n else:\n if logger_type == LoggerType.CONSOLE_SYSLOG:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.Stream(formatter=formatter),\n daiquiri.output.Syslog(program_name=syslog_config.program_name, 
facility=syslog_config.facility,\n formatter=syslog_config.formatter, level=syslog_config.level),\n ))\n else:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.Syslog(program_name=syslog_config.program_name, facility=syslog_config.facility,\n formatter=syslog_config.formatter, level=syslog_config.level),\n ))\n else:\n console_only = True\n if console_only:\n daiquiri.setup(level=log_level, outputs=(\n daiquiri.output.Stream(formatter=formatter),\n ))\n self.logger = daiquiri.getLogger(logger_name, subsystem='pymio')\n\n def info(self, msg):\n self.logger.info(msg)\n\n def error(self, msg):\n self.logger.error(msg)\n\n def debug(self, msg):\n self.logger.debug(msg)\n\n def warning(self, msg):\n self.logger.warning(msg)\n","sub_path":"mio/util/Logs.py","file_name":"Logs.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"300506558","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom gifts.views import signup_for_account\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'giftr.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^signup/$', 'gifts.views.signup_for_account', name='signup'),\n#not sure that login function is doing anything in the line below. Shoudl it be removed? Is it harmful?\n url(r'^$', 'django.contrib.auth.views.login', {'template_name': 'home_page.html'}, name='home_page'),\n url(r'^accounts/login/', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n\n url(r'^gifts/', include('gifts.urls')), \n url(r'^header/', 'gifts.views.header')\n)\n","sub_path":"giftr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"417904170","text":"import math\nimport numpy\nfrom itertools import chain\n\ndef randomIntegersUsingRandom(length=2):\n import random\n randTuple = [random.randint(2, 20) for i in range(length)]\n return randTuple\n\nclass point(object):\n def __init__(self, pointCords):\n self.pointCords = pointCords\n\n def GetDimensionOfPoint(self):\n self.DimenstionOfPoint = len(self.pointCords)\n #print(self.DimenstionOfPoint)\n\n @classmethod\n def distance_from_origin(self, P1, P2):\n #print(\"P1 : \" + str(P1.pointCords))\n #print(\"P2 : \" + str(P2.pointCords))\n numOfPointsInP1 = sum(1 for x in P1.pointCords if isinstance(x, list))\n numOfPointsInP2 = sum(1 for x in P2.pointCords if isinstance(x, list))\n distance = math.inf\n if numOfPointsInP1 > 0:\n if numOfPointsInP2 > 1:\n for point1 in P1.pointCords:\n for point2 in P2.pointCords:\n tempDist = 0\n for cord1, cord2 in zip(point1, point2):\n #print(\"[\" + str(cord1) + \" , \" + str(cord2) + \"]\")\n tempDist += (cord2 - cord1) ** 2\n distance = min(distance, tempDist)\n else:\n #print(\"Only P1\")\n for point1 in P1.pointCords:\n tempDist = 0\n for cord1, cord2 in zip(point1, P2.pointCords):\n #print(\"[\" + str(cord1) + \" , \" + str(cord2) + \"]\")\n tempDist += (cord2 - cord1) ** 2\n distance = min(distance, tempDist)\n\n elif numOfPointsInP2 > 0:\n #rint(\"Only P2\")\n for point2 in P2.pointCords:\n tempDist = 0\n for cord1, cord2 in zip(P1.pointCords, point2):\n #print(\"[\" + str(cord1) + \" , \" + str(cord2) + \"]\")\n tempDist += (cord2 - cord1) 
** 2\n distance = min(distance, tempDist)\n else:\n tempDist = 0\n for cord1, cord2 in zip(P1.pointCords, P2.pointCords):\n # print(\"[\" + str(cord1) + \" , \" + str(cord2) + \"]\")\n tempDist += (cord2 - cord1) ** 2\n distance = tempDist\n return(math.sqrt(distance))\n\n\ndef printMatrix(matrix):\n if len(matrix) > 1:\n print(\"\\nThe Distances Matrix is :\")\n s = [[str(e) for e in row] for row in matrix]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print('\\n'.join(table))\n\ndef distanceMatrixMaker(points, newCluster = None):\n numberOfPoints = len(points)\n distMatrix = numpy.zeros((numberOfPoints, numberOfPoints))\n numpy.fill_diagonal(distMatrix, numpy.inf)\n for i, pointCords in enumerate(points):\n curPoint = point(pointCords)\n otherPoints = points[i + 1:]\n for j, rowPointCords in enumerate(otherPoints):\n otherPointInRow = point(rowPointCords)\n #print(\"Checking between : \" + str(curPoint.pointCords) + \" and \" + str(otherPointInRow.pointCords))\n distance = point.distance_from_origin(curPoint, otherPointInRow)\n distance = format(distance, '.2f')\n distMatrix[i][j + i + 1] = distMatrix[j + i + 1][i] = distance\n #printMatrix(distMatrix)\n print(\"++++++++++++++++++\")\n return distMatrix\n\ndef clustering(clusteredMatrix, points, numberOfPoints):\n while (len(points) > 1):\n print(\"+++++++++++++++++++\\n\" + str(points))\n minIndex = numpy.argmin(clusteredMatrix)\n minRowIndex = int(minIndex / numberOfPoints)\n minColIndex = (minIndex - (minRowIndex * numberOfPoints))\n print(\"\\nMin Value is at position : [\" + str(minRowIndex) + \"][\" + str(minColIndex) + \"] = \" + str(\n clusteredMatrix[minRowIndex, minColIndex]))\n print(\"combining \" + str(points[minRowIndex]) + \" With \" + str(points[minColIndex]))\n clustersList[minRowIndex] = (clustersList[minRowIndex], clustersList[minColIndex])\n numOfListsInPoint1 = sum(1 for x in points[minRowIndex] if isinstance(x, list))\n numOfListsInPoint2 = sum(1 for x in points[minColIndex] if isinstance(x, list))\n if numOfListsInPoint1 == 0 :\n points[minRowIndex] = [points[minRowIndex][i:i + 2] for i in range(0, len(points[minRowIndex]), 2)]\n if numOfListsInPoint2 == 0:\n points[minRowIndex].append(points[minColIndex])\n else:\n points[minRowIndex] += points[minColIndex]\n elif numOfListsInPoint2 == 0:\n points[minRowIndex].append(points[minColIndex])\n else:\n points[minRowIndex] += points[minColIndex]\n print(\"New content is : \" + str(points[minRowIndex]))\n del points[minColIndex]\n del clustersList[minColIndex]\n numberOfPoints -= 1\n # print(points)\n clusteredMatrix = distanceMatrixMaker(points, points[minRowIndex])\n printMatrix(clusteredMatrix)\n return clustersList\n\nif __name__ == \"__main__\":\n numberOfPoints = 100\n points = [list(randomIntegersUsingRandom()) for i in range(numberOfPoints)]\n #for i, point in enumerate(points):\n # points[i] = [points[i][j:j + 2] for j in range(0, len(points[i]), 2)]\n #new_list = [points[i:i + 2] for i in range(0, len(points), 2)]\n clustersList = list(points)\n print (points)\n distMatrix = distanceMatrixMaker(points)\n printMatrix(distMatrix)\n print(\"+++++++++++\\n\")\n clusteredMatrix = distMatrix\n print(\"The Final Cluster is : \" + str(clustering(clusteredMatrix, points, numberOfPoints)))\n\n\n","sub_path":"DataMining-MachineLearning/Ex-3/Ex3 - Clustering.py","file_name":"Ex3 - 
Clustering.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"299866952","text":"import json\n\nfrom django.conf import settings\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import HttpResponse\n\nfrom healthcheck import (\n DjangoDBsHealthCheck, FilesDontExistHealthCheck, HealthChecker)\n\n\nclass JsonResponse(HttpResponse):\n def __init__(self, data, **kwargs):\n kwargs.setdefault('content_type', 'application/json')\n data = json.dumps(data)\n super(JsonResponse, self).__init__(content=data, **kwargs)\n\n\nclass JsonResponseServerError(JsonResponse):\n status_code = 500\n\n\n@require_http_methods(['GET'])\ndef status(request):\n checks = []\n\n if getattr(settings, 'STATUS_CHECK_DBS', True):\n checks.append(DjangoDBsHealthCheck())\n\n files_to_check = getattr(settings, 'STATUS_CHECK_FILES', None)\n if files_to_check:\n checks.append(FilesDontExistHealthCheck(\n files_to_check, check_id=\"quiesce file doesn't exist\"))\n\n ok, details = HealthChecker(checks)()\n\n if ok and not details:\n details = 'There were no checks.'\n\n if not ok:\n return JsonResponseServerError(details)\n\n return JsonResponse(details)\n","sub_path":"healthcheck/contrib/django/status_endpoint/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"146243986","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail, send_mass_mail,EmailMultiAlternatives\n# Create your views here.\nimport logging\ndef send_my_mail(req):\n title = \"阿里offer\"\n message = \"恭喜您 成为我们公司的CEO\"\n email_from = \"3369571193@qq.com\"\n recs = [\"1486412190@qq.com\",\"ichenyouzhi@163.com\",\"1439854134@qq.com\"]\n send_mail(title,message,email_from,recs)\n return HttpResponse(\"小姐姐打的\")\n#\n#\ndef send_emailss(req):\n title1 = \"腾讯offer\"\n message1 = \"恭喜你 被骗了\"\n email_from = \"3369571193@qq.com\"\n title2 = \"一封挑事邮件\"\n message2 = \"大哥大哥\"\n resc1 =[\n \"1486412190@qq.com\", \"ichenyouzhi@163.com\"\n ]\n resc2 = [\"1486412190@qq.com\",\"ichenyouzhi@163.com\"\n \"m18742863100@163.com\",\"1439854134@qq.com\"]\n senders1 = (title1,message1,email_from,resc1)\n senders2 = (title2,message2,email_from,resc2)\n send_mass_mail((senders1,senders2),fail_silently=False)\n return HttpResponse(\"ok\")\n\ndef email_html(req):\n title = \"阿里offer\"\n message = \"恭喜您 成为我们公司的CEO\"\n email_from = \"3369571193@qq.com\"\n recs = [\"1486412190@qq.com\",\"ichenyouzhi@163.com\",\"1439854134@qq.com\"]\n html_content = '
<p>This is an bbbbbbb message.</p>
'\n msg = EmailMultiAlternatives(title,message,email_from,recs)\n msg.attach_alternative(html_content,\"text/html\")\n msg.send()\n return HttpResponse(\"ok了\")\n\n# def test_log(req):\n# logger = logging.getLogger('django')\n# logger.warning(\"thi is python 1906\")\n# return HttpResponse(\"ok\")","sub_path":"t8/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"149166223","text":"#!/usr/bin/python3\nimport random\nimport hill\nimport math\nfrom scipy import linalg as sla\nimport numpy as np\n\nl = 9\nn = int(math.sqrt(l))\nkey = \"H\"*l\n\nfor _ in range(10000):\n\talp = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\ti = random.randint(0, l-1)\n\tkey = key[:i] + random.choice(alp) + key[i+1:]\n\n\tif hill.valid_key(key):\n\t\tprint(key)\n\t\tkey = [ord(x) - 65 for x in key]\n\t\tfor i in range(0, l, n):\n\t\t\tprint(key[i:i+n])\n\t\tdet = int(sla.det(np.array(\n\t\t\t\tkey, dtype=np.int16).reshape(n,n))) % hill.alphabet\n\t\tprint(\"Определитель \", det)\n\t\tbreak","sub_path":"Hill cipher/find_valid_key.py","file_name":"find_valid_key.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"534905032","text":"\"\"\"bwonline URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path, include, re_path\nimport xadmin\nfrom django.views.generic import TemplateView\nfrom apps.users.views import LoginView, RegisterView, ActiveUserView, ForgetPwdView, ModifyPwdView, ResetView, IndexView\nfrom django.views.static import serve\nfrom bwonline.settings import MEDIA_ROOT\n# from bwonline.settings import STATIC_ROOT\n\nurlpatterns = [\n    path('xadmin/', xadmin.site.urls),\n    path('', IndexView.as_view(), name='index'),\n    path('login/', LoginView.as_view(), name='login'),\n    path('register/', RegisterView.as_view(), name='register'),\n    path('captcha/',include('captcha.urls')),\n    re_path('active/(?P<active_code>.*)/', ActiveUserView.as_view(), name ='user_active'),\n    path('forget/',ForgetPwdView.as_view(),name='forget_pwd'),\n    re_path('reset/(?P<active_code>.*)/', ResetView.as_view(), name='reset_pwd'),\n    path('modify_pwd/', ModifyPwdView.as_view(), name='modify_pwd'),\n    path('org/',include('oranizations.urls', namespace='org')),\n    path('course/', include('course.urls', namespace='course')),\n    # URL for serving images: use Django's built-in serve view and point it at the configured MEDIA_ROOT\n    re_path(r'^media/(?P<path>.*)', serve, {\"document_root\": MEDIA_ROOT}),\n    # personal info\n    path('users/',include('users.urls', namespace='users')),\n    # # static files\n    # re_path(r'^static/(?P<path>.*)', serve, {\"document_root\": STATIC_ROOT}),\n\n]\n\n\n# # global 404 page config\n# handler404 = 'users.views.pag_not_found'\n# # global 500 page config\n# handler500 = 'users.views.page_error'","sub_path":"bwonline/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"371202739","text":"import pandas as pd \nimport datetime\nimport time\n\nimport numpy as np\n\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import cross_val_score, learning_curve\nfrom sklearn.metrics import homogeneity_score, roc_auc_score, confusion_matrix\n\nfrom plot_utils import plot_multi_lines, gen_plot, accumulate_subplots\nfrom etl_utils import *\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.decomposition import PCA \nfrom sklearn.decomposition import FastICA\nfrom sklearn.random_projection import SparseRandomProjection\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nfrom scipy.stats import kurtosis\n\nimport scipy.sparse as sps\nfrom scipy.linalg import pinv\n\n#general params:\nDATASETS = ('aps', 'spam') #any of ('spam', 
'aps')\n\nRANDOM_STATE = 27\nPLOT_ACTION = None # (None, 'save', 'show') - default to None to avoid issues with matplotlib depending on OS\nN_EPOCHS = 10000 # maximum number of epochs for neural network training\nN_ITER_ICA = 1000 #max number of iterations for ICA before stopping\nN_CLUSTERS = 30 #max number of clusters to try for part 1 of homework\nBALANCE_METHOD = 'downsample' # (int, 'downsample' or 'upsample') for training data\n\n\ndef reconstructionError(projections,X):\n    '''\n    stolen directly from: https://github.com/JonathanTay/CS-7641-assignment-3/blob/master/helpers.py\n    '''\n    W = projections.components_\n    if sps.issparse(W):\n        W = W.todense()\n    p = pinv(W)\n    reconstructed = ((p@W)@(X.T)).T # Unproject projected data\n    errors = np.square(X-reconstructed)\n    return np.nanmean(errors)\n\ndef generate_clustering_algorithms(id, cluster_list=[2,3,4,5,6,7,8,9,10], cluster_type='kmeans'):\n    cluster_models = {}\n    for cluster_size in cluster_list:\n        if cluster_type == 'kmeans':\n            clusterer = KMeans(n_clusters=cluster_size, random_state=RANDOM_STATE)\n        elif cluster_type == 'em':\n            clusterer = GaussianMixture(n_components=cluster_size, random_state=RANDOM_STATE)\n        cluster_models[cluster_size] = clusterer\n    return cluster_models\n\ndef run_cluster_variations(id, train_dataset, test_dataset, clustering_model_list=['kmeans', 'em'], max_num_cluster=30):\n    cluster_list = [x for x in range(2, max_num_cluster+1)]\n    \n    cluster_models_dict = {}\n    \n    for clustering_model in clustering_model_list:\n        cluster_models = generate_clustering_algorithms(id, cluster_list, cluster_type=clustering_model)\n        train_homogeneity_list = []\n        test_homogeneity_list = []\n        model_list = []\n        num_cluster_list = []\n        for num_cluster, algo in cluster_models.items():\n            \n            algo_start_time = time.time()\n            algo.fit(train_dataset.data)\n            algo_elapsed_time = round(time.time() - algo_start_time,2)\n            print('{} with {} clusters/components training time: {:.2f} s'.format(clustering_model.upper(), num_cluster, algo_elapsed_time), end='\\r', flush=True)\n            if clustering_model == 'kmeans':\n                algo.train_cluster_assign = algo.labels_\n            elif clustering_model == 'em':\n                algo.train_cluster_assign = algo.predict(train_dataset.data)\n\n            num_cluster_list.append(num_cluster)\n            algo.train_homogeneity = homogeneity_score(train_dataset.target, algo.train_cluster_assign)\n            train_homogeneity_list.append(algo.train_homogeneity)\n\n            algo.test_cluster_assign = algo.predict(test_dataset.data)\n            algo.test_homogeneity = homogeneity_score(test_dataset.target, algo.test_cluster_assign)\n            test_homogeneity_list.append(algo.test_homogeneity)\n\n            model_list.append(algo)\n        print('')\n\n        max_index = np.argmax(test_homogeneity_list)\n        max_value = test_homogeneity_list[max_index]\n        best_cluster = num_cluster_list[max_index]\n        print('best homogeneity: {:.2f} achieved using {} with {}'.format(max_value, clustering_model.upper(), best_cluster))\n\n        cluster_models_dict[clustering_model] = {'model_list':model_list, \n                                                 'cluster_list':cluster_list, \n                                                 'train_homogeneity_list':train_homogeneity_list, \n                                                 'test_homogeneity_list':test_homogeneity_list}\n\n    plot_multi_lines(cluster_models_dict, \n                     x_key='cluster_list', \n                     train_y_key='train_homogeneity_list', \n                     test_y_key='test_homogeneity_list', \n                     title_name='Homogeneity by Cluster Size',\n                     ylabel_name='Homogeneity',\n                     xlabel_name='Number of Clusters',\n                     figure_action=PLOT_ACTION, \n                     figure_path='output/'+str(id)+'/part1/figures',\n                     file_name='clustering_models')\n\n    return cluster_models_dict\n\n\ndef 
reverse_sort_by_importance(data, ranking_list, n_features=None):\n    rev_arg_sort_indices = np.argsort(ranking_list)[::-1]\n\n    reverse_sorted_data = data[:, rev_arg_sort_indices]\n    if n_features is not None:\n        return reverse_sorted_data[:, :n_features]\n    else:\n        return reverse_sorted_data\n    \n\ndef run_feature_selection(id, train_dataset, test_dataset, param_variations=None, models_to_run=['pca', 'ica', 'srp', 'rffs'], n_rp_runs=10):\n    '''\n    PCA (https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)\n    - vary the number of principal components - see this plot: https://towardsdatascience.com/an-approach-to-choosing-the-number-of-components-in-a-principal-component-analysis-pca-3b9f3d6e73fe\n    ICA (https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html)\n    Randomized Projections (https://scikit-learn.org/stable/modules/generated/sklearn.random_projection.GaussianRandomProjection.html)\n    Other feature selection algorithm (https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectFromModel.html)\n    CONSIDER ADDING THIS TO THE DATASET OBJECT, creating a `get_pca(components=4)` or a `get_ica(components=4)` or `get_rp(components=4)` or `get_dt_fs(threshold=-np.inf, max_features=4)`\n    '''\n    plot_locs = [(0,0), (0,1), (0,2), (0,3)]\n    fs_details = {}\n    if 'pca' in models_to_run:\n        pca_model = PCA(random_state=RANDOM_STATE)\n        pca_start_time = time.time()\n        pca_model.fit(train_dataset.data)\n        pca_elapsed_time = (time.time() - pca_start_time)\n        print('PCA elapsed time: {:.2f} s'.format(pca_elapsed_time))\n        cumulative_variance = np.cumsum(pca_model.explained_variance_ratio_)\n        pca_variance = pca_model.explained_variance_ratio_\n        n_components = np.linspace(1,pca_variance.shape[0],pca_variance.shape[0])\n        pca_plot_data = np.column_stack((n_components, pca_variance, cumulative_variance))\n\n        fs_details[plot_locs[0]] = {'type':'line', \n                                    'data_dict':{'PCA':{'x':pca_plot_data[:,0], 'y':pca_plot_data[:,1]}}, \n                                    'title':'PCA',\n                                    'ylabel':'Variance',\n                                    'xlabel':'Components'}\n        \n        max_index = np.argmax(pca_variance)\n        max_value = pca_variance[max_index]\n        print('PCA - highest variance: {:.2f}'.format(max_value))\n\n    \n    if 'ica' in models_to_run:\n        ica_model = FastICA(max_iter=N_ITER_ICA, tol=0.0001, random_state=RANDOM_STATE)\n        # THIS THREW AN ERROR ABOUT NaNs once\n        successful = False\n        m = 0\n        while m < 5 and not successful:\n            try:\n                ica_start_time = time.time()\n                transformed_data = ica_model.fit_transform(train_dataset.data)\n                successful=True\n                ica_elapsed_time = (time.time() - ica_start_time)\n                print('ICA elapsed time: {:.2f} s'.format(ica_elapsed_time))\n            except Exception:\n                print('ICA got an infinity or NaN value, trying again')\n            m += 1\n\n        kurtosis_score_for_all_components = kurtosis(transformed_data, fisher=False)\n        rev_arg_sort_indices = np.argsort(kurtosis_score_for_all_components)[::-1]\n        rev_sorted_kurtosis_score = kurtosis_score_for_all_components[rev_arg_sort_indices]\n\n        n_components = np.linspace(1,rev_sorted_kurtosis_score.shape[0],rev_sorted_kurtosis_score.shape[0])\n        ica_plot_data = np.column_stack((n_components, rev_sorted_kurtosis_score, rev_arg_sort_indices))\n\n        fs_details[plot_locs[1]] = {'type':'line', \n                                    'data_dict':{'ICA':{'x':ica_plot_data[:,0], 'y':ica_plot_data[:,1]}}, \n                                    'title':'ICA',\n                                    'ylabel':'Kurtosis',\n                                    'xlabel':'Components'}\n        \n        print('ICA - highest kurtosis: {:.2f}'.format(rev_sorted_kurtosis_score[0]))\n\n    if 'srp' in models_to_run:\n        srp_x_data_list = []\n        srp_y_data_list = []\n        srp_data_dict = 
{}\n for i in range(n_rp_runs):\n srp_model = SparseRandomProjection(n_components=train_dataset.data.shape[1])\n\n srp_start_time = time.time()\n transformed_data = srp_model.fit_transform(train_dataset.data)\n srp_elapsed_time = (time.time() - srp_start_time)\n print('SRP elapsed time: {:.2f} s'.format(srp_elapsed_time), end='\\r', flush=True)\n\n kurtosis_score_for_all_components = kurtosis(transformed_data, fisher=False)\n rev_sort_arg_indices = np.argsort(kurtosis_score_for_all_components)[::-1]\n rev_sorted_kurtosis_score = kurtosis_score_for_all_components[rev_sort_arg_indices]\n\n n_components = np.linspace(1,rev_sorted_kurtosis_score.shape[0],rev_sorted_kurtosis_score.shape[0])\n srp_plot_data = np.column_stack((n_components, rev_sorted_kurtosis_score, rev_sort_arg_indices))\n\n srp_data_dict['Run {}'.format(i)] = {'x':srp_plot_data[:,0], 'y':srp_plot_data[:,1]}\n \n fs_details[plot_locs[2]] = {'type':'line', \n 'data_dict':srp_data_dict, \n 'title':'RP',\n 'ylabel':'Kurtosis',\n 'xlabel':'Components'}\n print('')\n print('SRP - highest kurtosis: {:.2f}'.format(rev_sorted_kurtosis_score[0]))\n \n if 'rffs' in models_to_run:\n # random forest feature selection\n rffs_model = RandomForestClassifier(n_estimators=100,class_weight='balanced',random_state=RANDOM_STATE)\n rffs_start_time = time.time()\n feature_importances = rffs_model.fit(train_dataset.data,train_dataset.target).feature_importances_ \n rffs_elapsed_time = (time.time() - rffs_start_time)\n print('RF elapsed time: {:.2f} s'.format(rffs_elapsed_time))\n\n rev_sort_arg_indices = np.argsort(feature_importances)[::-1]\n rev_sorted_feature_importances = feature_importances[rev_sort_arg_indices]\n\n n_components = np.linspace(1,rev_sorted_feature_importances.shape[0],rev_sorted_feature_importances.shape[0])\n rfs_plot_data = np.column_stack((n_components, rev_sorted_feature_importances, rev_sort_arg_indices))\n\n fs_details[plot_locs[3]] = {'type':'line', \n 'data_dict':{'RFFS':{'x':rfs_plot_data[:,0], 'y':rfs_plot_data[:,1]}}, \n 'title':'RF',\n 'ylabel':'Feature Importance',\n 'xlabel':'Components'}\n \n print('RFFS - highest feature importance: {:.2f}'.format(rev_sorted_feature_importances[0]))\n\n accumulate_subplots(subplot_shape=(1,4), \n subplot_dict=fs_details, \n figure_action=PLOT_ACTION, \n figure_path='output/'+str(id)+'/part2/figures',\n file_name='feature_selection',\n wspace=0.3)\n\n return {'pca':pca_model, 'ica':ica_model, 'srp':srp_model, 'rffs':rffs_model}\n\n\ndef plot_feature_srp_reconstruction(id, train_dataset, n_component_ratio_list=np.linspace(0.1, 1.0, 7)):\n\n recon_error_list = []\n n_components_list = [int(x*train_dataset.data.shape[1]) for x in n_component_ratio_list]\n for n_components in n_components_list:\n fs_algo = SparseRandomProjection(random_state=RANDOM_STATE, n_components=n_components)\n fs_algo.fit(train_dataset.data)\n recon_error = reconstructionError(fs_algo, train_dataset.data)\n recon_error_list.append(recon_error)\n recon_error_dict = {'x':n_components_list,'y':recon_error_list}\n\n print('SRP - highest reconstruction error: {:.4f}'.format(np.max(recon_error_list)))\n\n gen_plot(x_data=recon_error_dict['x'], \n y_data=recon_error_dict['y'],\n title_name='Randomized Projection - Reconstruction Error',\n ylabel_name='Reconstruction Error', \n xlabel_name='# Components',\n figure_action=PLOT_ACTION, \n figure_path='output/'+str(id)+'/part2/figures',\n file_name='srp_reconstruction_error')\n \n return None\n\n\ndef etl_data(specified_dataset):\n if specified_dataset == 'spam':\n df = 
pd.read_csv('data/spam/spambasedata.csv', sep=',')\n        print('using the dataset stored in ./data/spam')\n        #shuffle data before splitting to train and test\n        resampled_df = df.loc[:,:].sample(frac=1).reset_index(drop=True)\n        train_frac = 0.8\n        train_samples = int(round(resampled_df.shape[0]*train_frac))\n        dirty_train_df = resampled_df.iloc[:train_samples,:].reset_index(drop=True)\n        dirty_test_df = resampled_df.iloc[train_samples:,:].reset_index(drop=True)\n        class_col = 'class'\n\n    elif specified_dataset == 'aps':\n        dirty_train_df = pd.read_csv('data/aps/aps_failure_training_set.csv', na_values=['na'])\n        dirty_test_df = pd.read_csv('data/aps/aps_failure_test_set.csv', na_values=['na'])\n        print('using the dataset stored in ./data/aps')\n        class_col = 'class'\n\n    #clean both datasets\n    scaler = preprocessing.MinMaxScaler()\n    train_and_test_df = clean_and_scale_dataset({'train':dirty_train_df, 'test':dirty_test_df}, scaler=scaler ,na_action=0)\n    train_df, test_df = train_and_test_df[0], train_and_test_df[1]\n\n    #prep the datasets \n    [train_dataset, test_dataset], label_encoder = prep_data({'train':train_df, 'test':test_df}, shuffle_data=True, balance_method=BALANCE_METHOD, class_col=class_col)\n    print('\\nTRAINING DATA INFORMATION')\n    print('{} maps to {}'.format(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))\n    print('size of training dataset:', train_dataset.data.shape)\n    print('class counts:\\n', train_dataset.df[class_col].value_counts(), '\\n')\n\n    return train_dataset, test_dataset, label_encoder\n\n\ndef cluster_homogeneity_by_feature_selection(id, fs_algo_dict, cluster_algo_list, train_dataset, cluster_size_list=[2,3,4,5,7,10,15,20,25,30], n_component_ratio_list=[1.0, 0.25, 0.125, 0.0625]):\n\n    plot_locs = [(0,0), (0,1), (0,2), (0,3)]\n    cluster_model_homogeneity = {}\n    \n    for cluster_algo_name in cluster_algo_list:\n        for i, (fs_name, fs_algo) in enumerate(fs_algo_dict.items()):\n            #transform the data, then cluster over every k using all of the components, then 1/4, 1/8, and 1/16 of them\n            if fs_name == 'pca':\n                fs_sorted_data = fs_algo.transform(train_dataset.data)\n            elif fs_name == 'rffs':\n                fs_algo.fit(train_dataset.data,train_dataset.target)\n                feature_importances = fs_algo.feature_importances_\n                fs_sorted_data = reverse_sort_by_importance(train_dataset.data, feature_importances)\n            elif fs_name in ('ica', 'srp'):\n                fs_data = fs_algo.transform(train_dataset.data)\n                fs_sorted_data = reverse_sort_by_importance(fs_data, kurtosis(fs_data, fisher=False))\n            \n            n_component_variation_list = [int(x*fs_sorted_data.shape[1]) for x in n_component_ratio_list]\n            iteration_data_dict = {}\n            for n_components in n_component_variation_list:\n                selected_data = fs_sorted_data[:, :n_components]\n                #loop through the candidate cluster counts\n                homogeneity_list = []\n                for n_clusters in cluster_size_list:\n                    if cluster_algo_name == 'kmeans':\n                        cluster_model = KMeans(n_clusters=n_clusters, random_state=RANDOM_STATE)\n                    elif cluster_algo_name == 'em':\n                        cluster_model = GaussianMixture(n_components=n_clusters, random_state=RANDOM_STATE)\n\n                    cluster_model.fit(selected_data)\n                    cluster_model_predictions = cluster_model.predict(selected_data)\n                    homogeneity_list.append(homogeneity_score(train_dataset.target,cluster_model_predictions))\n\n                iteration_data_dict['{}-components'.format(n_components)] = {'x':cluster_size_list, 'y':homogeneity_list}\n            cluster_model_homogeneity[plot_locs[i]] = {'type':'line', \n                                                       'data_dict':iteration_data_dict, \n                                                       'title':'{} with 
{}'.format(cluster_algo_name.upper(), fs_name.upper())}\n    \n        accumulate_subplots(subplot_shape=(1,4), \n                            subplot_dict=cluster_model_homogeneity, \n                            figure_action=PLOT_ACTION, \n                            flat_xlabel='# Clusters',\n                            flat_ylabel='Homogeneity',\n                            figure_path='output/'+str(id)+'/part3/figures',\n                            file_name='{}_homogeneity_with_feature_selection'.format(cluster_algo_name.upper()))\n\n    return None\n\n\ndef cluster_2d_by_feature_selection(id, fs_algo_dict, cluster_algo_list, train_dataset, selected_cluster_number, selected_component_number=2):\n\n    plot_locs = [(0,0), (0,1), (1,0), (1,1)]\n    cluster_model_clusters = {}\n    \n    for cluster_algo_name in cluster_algo_list:\n        for i, (fs_name, fs_algo) in enumerate(fs_algo_dict.items()):\n            #transform the data and order the components by importance before clustering\n            if fs_name == 'pca':\n                fs_sorted_data = fs_algo.transform(train_dataset.data)\n            elif fs_name == 'rffs':\n                fs_algo.fit(train_dataset.data,train_dataset.target)\n                feature_importances = fs_algo.feature_importances_\n                fs_sorted_data = reverse_sort_by_importance(train_dataset.data, feature_importances)\n            elif fs_name in ('ica', 'srp'):\n                fs_data = fs_algo.transform(train_dataset.data)\n                fs_sorted_data = reverse_sort_by_importance(fs_data, kurtosis(fs_data, fisher=False))\n\n            if cluster_algo_name == 'kmeans':\n                cluster_algo = KMeans(n_clusters=selected_cluster_number, random_state=RANDOM_STATE)\n                cluster_algo.fit(fs_sorted_data)\n                cluster_algo_predictions = cluster_algo.predict(fs_sorted_data)\n                cluster_model_clusters[plot_locs[i]] = {'type':'cluster',\n                                                        'model':cluster_algo,\n                                                        'model_type':'kmeans',\n                                                        'data':fs_sorted_data[:, :selected_component_number],\n                                                        'target':train_dataset.target,\n                                                        'predictions':cluster_algo_predictions,\n                                                        'title':'{} with {}'.format(cluster_algo_name.upper(), fs_name.upper()),\n                                                        'ylims':[-0.2, 0.2] if fs_name=='ica' else None,\n                                                        'xlims':[-0.2, 0.2] if fs_name=='ica' else None}\n            elif cluster_algo_name == 'em':\n                cluster_algo = GaussianMixture(n_components=selected_cluster_number, random_state=RANDOM_STATE)\n                cluster_algo.fit(fs_sorted_data)\n                cluster_algo_predictions = cluster_algo.predict(fs_sorted_data)\n                cluster_model_clusters[plot_locs[i]] = {'type':'cluster',\n                                                        'model':cluster_algo,\n                                                        'model_type':'em',\n                                                        'data':fs_sorted_data[:, :selected_component_number],\n                                                        'target':train_dataset.target,\n                                                        'predictions':cluster_algo_predictions,\n                                                        'title':'{} with {}'.format(cluster_algo_name.upper(), fs_name.upper()),\n                                                        'ylims':[-0.2, 0.2] if fs_name=='ica' else None,\n                                                        'xlims':[-0.2, 0.2] if fs_name=='ica' else None}\n\n        accumulate_subplots(subplot_shape=(2,2), \n                            subplot_dict=cluster_model_clusters, \n                            figure_action=PLOT_ACTION,\n                            figure_path='output/'+str(id)+'/part3/figures',\n                            file_name='{}_clusters'.format(cluster_algo_name.upper()))\n    return None \n\n\ndef run_neural_network_no_feature_selection(id, nn_model, train_dataset, test_dataset, label_encoder):\n    nn_model.fit(train_dataset.data, train_dataset.target)\n    predictions = nn_model.predict(test_dataset.data)\n    print('neural network with no feature selection - ROC-AUC: {:.2f}'.format(roc_auc_score(test_dataset.target, predictions)))\n\n    n_iters = list([i for i in range(nn_model.n_iter_)])\n    train_scores = nn_model.loss_curve_\n    val_scores = nn_model.validation_scores_\n    nn_details = {}\n    val_curve_dict = {}\n    val_curve_dict['validation'] = {'x':n_iters, 'y':val_scores}\n    nn_details[0,0] = {'type':'line', \n                       'data_dict':val_curve_dict, \n                       'title':'NN Validation Curve - No Feature 
Selection',\n 'ylabel':'Validation Score',\n 'xlabel':'Number of Epochs',\n 'ylims':[0.5, 1.0]}\n\n cm = confusion_matrix(test_dataset.target, predictions)\n normalized_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n nn_details[0,1] = {'type':'cm', \n 'cm':cm,\n 'classes':label_encoder.classes_,\n 'title':'Confusion Matrix',\n 'normalized':False,\n 'ylabel':'True Label',\n 'xlabel':'Predicted Label'}\n\n nn_details[0,2] = {'type':'cm', \n 'cm':normalized_cm,\n 'classes':label_encoder.classes_,\n 'title':'Normalized Confusion Matrix',\n 'normalized':True,\n 'ylabel':'True Label',\n 'xlabel':'Predicted Label'}\n\n accumulate_subplots(subplot_shape=(1,3), \n subplot_dict=nn_details, \n figure_action=PLOT_ACTION, \n figure_path='output/'+str(id)+'/part4/figures',\n file_name='nn_with_no_feature_selection',\n wspace=0.3)\n \n return None\n\n\ndef run_neural_network_with_feature_selection(id, nn_model, fs_algo_dict, train_dataset, test_dataset, label_encoder, n_component_list=[128,32,8]):\n plot_locs = [(0,0), (0,1), (0,2), (0,3)]\n\n for i, (fs_name, fs_algo) in enumerate(fs_algo_dict.items()):\n if fs_name == 'pca':\n fs_sorted_training_data = fs_algo.transform(train_dataset.data)\n fs_sorted_testing_data = fs_algo.transform(test_dataset.data)\n elif fs_name == 'rffs':\n fs_sorted_training_data = reverse_sort_by_importance(train_dataset.data, fs_algo.feature_importances_)\n fs_sorted_testing_data = reverse_sort_by_importance(test_dataset.data, fs_algo.feature_importances_)\n elif fs_name in ('ica', 'srp'):\n fs_training_data = fs_algo.transform(train_dataset.data)\n fs_testing_data = fs_algo.transform(test_dataset.data)\n fs_sorted_training_data = reverse_sort_by_importance(fs_training_data, kurtosis(fs_training_data, fisher=False))\n fs_sorted_testing_data = reverse_sort_by_importance(fs_testing_data, kurtosis(fs_training_data, fisher=False))\n\n nn_details = {}\n component_variation_dict = {}\n for j, n_components in enumerate(n_component_list):\n\n \n selected_fs_training_data = fs_sorted_training_data[:,:n_components]\n selected_fs_testing_data = fs_sorted_testing_data[:,:n_components]\n \n nn_model.fit(selected_fs_training_data, train_dataset.target)\n predictions = nn_model.predict(selected_fs_testing_data)\n print('neural network trained with {} - {}-components - ROC-AUC: {:.2f}'.format(fs_name.upper(), n_components, roc_auc_score(test_dataset.target, predictions)))\n\n train_scores = nn_model.loss_curve_\n val_scores = nn_model.validation_scores_\n n_iters = list([i for i in range(nn_model.n_iter_)])\n component_variation_dict['{}-components'.format(n_components)] = {'x':n_iters, 'y':val_scores}\n\n cm = confusion_matrix(test_dataset.target, predictions)\n normalized_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n nn_details[plot_locs[j+1]] = {'type':'cm', \n 'cm':normalized_cm,\n 'classes':label_encoder.classes_,\n 'title':'Normalized CM - {} Components'.format(n_components),\n 'normalized':True,\n 'ylabel':'True Label',\n 'xlabel':'Predicted Label'}\n \n nn_details[plot_locs[0]] = {'type':'line', \n 'data_dict':component_variation_dict, \n 'title':'NN Validation Curve',\n 'ylabel':'Validation Score',\n 'xlabel':'Number of Epochs',\n 'ylims':[0.5, 1.0]}\n\n accumulate_subplots(subplot_shape=(1,4), \n subplot_dict=nn_details, \n figure_action=PLOT_ACTION, \n figure_path='output/'+str(id)+'/part4/figures',\n file_name='nn_with_{}'.format(fs_name.upper()),\n wspace=0.3)\n \n return None\n\n\ndef kmeans_learning_curve(nn_model, train_dataset, test_dataset, 
cluster_type='kmeans', n_cluster_variations=[2,3,4,5,6,7,8,9,10], append_to_original_data=False):\n    train_roc_auc_list = []\n    test_roc_auc_list = []\n    cm_list = []\n    normalized_cm_list = []\n    for cluster_size in n_cluster_variations:\n        if cluster_type == 'kmeans':\n            clusterer = KMeans(n_clusters=cluster_size, random_state=RANDOM_STATE)\n            clusterer.fit(train_dataset.data)\n            train_clusters = clusterer.predict(train_dataset.data).reshape(-1, 1)\n            test_clusters = clusterer.predict(test_dataset.data).reshape(-1, 1)\n\n            cluster_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n            cluster_encoder.fit(train_clusters)\n\n            train_cluster_data = cluster_encoder.transform(train_clusters)\n            test_cluster_data = cluster_encoder.transform(test_clusters)\n\n        elif cluster_type == 'em':\n            clusterer = GaussianMixture(n_components=cluster_size, random_state=RANDOM_STATE)\n            clusterer.fit(train_dataset.data)\n            train_cluster_data = clusterer.predict_proba(train_dataset.data)\n            test_cluster_data = clusterer.predict_proba(test_dataset.data)\n        \n        if append_to_original_data: \n            new_train_data = np.column_stack((train_dataset.data, train_cluster_data))\n            new_test_data = np.column_stack((test_dataset.data, test_cluster_data))\n\n        else:\n            new_train_data = train_cluster_data\n            new_test_data = test_cluster_data\n\n        nn_model.fit(new_train_data, train_dataset.target)\n        train_predictions = nn_model.predict(new_train_data)\n        test_predictions = nn_model.predict(new_test_data)\n\n        train_roc_auc = roc_auc_score(train_dataset.target, train_predictions)\n        train_roc_auc_list.append(train_roc_auc)\n\n        test_roc_auc = roc_auc_score(test_dataset.target, test_predictions)\n        test_roc_auc_list.append(test_roc_auc)\n        print('neural network trained with {} - {}-clusters - ROC-AUC: {:.2f}'.format(cluster_type.upper(), cluster_size, test_roc_auc))\n\n        cm = confusion_matrix(test_dataset.target,test_predictions)\n        normalized_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        cm_list.append(cm)\n        normalized_cm_list.append(normalized_cm)\n\n    return n_cluster_variations, train_roc_auc_list, test_roc_auc_list, cm_list, normalized_cm_list\n\n\ndef run_neural_network_with_clustering(id, nn_model, clustering_algo_list, train_dataset, test_dataset, label_encoder, n_clusters_list=[2,3,4,5,6,7,8,9,10], cluster_size_for_cm=None):\n    \n    for clustering_algo_name in clustering_algo_list:\n        cluster_details = {} \n        not_appended_cluster_list, not_appended_train_list, not_appended_test_list, not_appended_cm, not_appended_norm_cm = kmeans_learning_curve(nn_model, train_dataset, test_dataset, cluster_type=clustering_algo_name, \n                                                                                             n_cluster_variations=n_clusters_list, append_to_original_data=False)\n        not_appended_curve_dict = {}\n        not_appended_curve_dict['train'] = {'x':not_appended_cluster_list, 'y':not_appended_train_list}\n        not_appended_curve_dict['test'] = {'x':not_appended_cluster_list, 'y':not_appended_test_list}\n        cluster_details[0,0] = {'type':'line', \n                                'data_dict':not_appended_curve_dict, \n                                'title':'NN with {} - Clusters Only'.format(clustering_algo_name.upper()),\n                                'ylabel':'ROC AUC Score',\n                                'xlabel':'Number of Clusters',\n                                'ylims':[0.5, 1.0]}\n        \n        if cluster_size_for_cm is not None:\n            cm_index = not_appended_cluster_list.index(cluster_size_for_cm)\n        else:\n            cm_index = np.argmax(not_appended_test_list)\n\n        cluster_details[0,1] = {'type':'cm', \n                                'cm':not_appended_norm_cm[cm_index],\n                                'classes':label_encoder.classes_,\n                                'title':'Normalized CM - {} 
Clusters'.format(not_appended_cluster_list[cm_index]),\n                                'normalized':True,\n                                'ylabel':'True Label',\n                                'xlabel':'Predicted Label'}\n        \n        appended_cluster_list, appended_train_list, appended_test_list, appended_cm, appended_norm_cm = kmeans_learning_curve(nn_model, train_dataset, test_dataset, cluster_type=clustering_algo_name, \n                                                                                     n_cluster_variations=n_clusters_list, append_to_original_data=True)\n        \n        appended_curve_dict = {}\n        appended_curve_dict['train'] = {'x':appended_cluster_list, 'y':appended_train_list}\n        appended_curve_dict['test'] = {'x':appended_cluster_list, 'y':appended_test_list}\n        cluster_details[1,0] = {'type':'line', \n                                'data_dict':appended_curve_dict, \n                                'title':'NN with {} - Appended'.format(clustering_algo_name.upper()),\n                                'ylabel':'ROC AUC Score',\n                                'xlabel':'Number of Clusters',\n                                'ylims':[0.5, 1.0]}\n\n        if cluster_size_for_cm is not None:\n            cm_index = appended_cluster_list.index(cluster_size_for_cm)\n        else:\n            cm_index = np.argmax(appended_test_list)\n\n        cluster_details[1,1] = {'type':'cm', \n                                'cm':appended_norm_cm[cm_index],\n                                'classes':label_encoder.classes_,\n                                'title':'Normalized CM - {} Clusters'.format(appended_cluster_list[cm_index]),\n                                'normalized':True,\n                                'ylabel':'True Label',\n                                'xlabel':'Predicted Label'}\n\n        accumulate_subplots(subplot_shape=(2,2), \n                            subplot_dict=cluster_details, \n                            figure_action=PLOT_ACTION, \n                            figure_path='output/'+str(id)+'/part5/figures',\n                            file_name='nn_using_{}'.format(clustering_algo_name.upper()),\n                            wspace=0.3)\n\n    return None\n\ndef main():\n    batch_id = str(int(datetime.datetime.now().strftime('%Y%m%d%H%M%S')))\n    for specified_dataset in DATASETS:\n        algo_batch_id = batch_id + '-' + str(specified_dataset) #set ID for one run, so all the algos have the same ID\n\n        #load dataset\n        train_dataset, test_dataset, label_encoder = etl_data(specified_dataset)\n        \n        # 1. 
Run the clustering algorithms on the datasets and describe what you see.\n        print('{} - working on part 1: Run the clustering algorithms on the datasets'.format(specified_dataset.upper()))\n        cluster_details = run_cluster_variations(algo_batch_id, train_dataset, test_dataset, max_num_cluster=N_CLUSTERS)\n\n        #part 2 - feature selection\n        print('{} - working on part 2: Apply the dimensionality reduction algorithms to the two datasets'.format(specified_dataset.upper()))\n        fs_models = run_feature_selection(algo_batch_id, train_dataset, test_dataset, n_rp_runs=8)\n        plot_feature_srp_reconstruction(algo_batch_id, train_dataset,n_component_ratio_list=np.linspace(0.03,1,10))\n\n        # part 3 - cluster after feature selection\n        print('{} - working on part 3: Reproduce your clustering experiment on the data after you have run dimensionality reduction on it'.format(specified_dataset.upper()))\n        cluster_homogeneity_by_feature_selection(algo_batch_id, fs_models, ['kmeans', 'em'], train_dataset, cluster_size_list=[2,3,4,5,7,10,15,20,25,30])\n        cluster_2d_by_feature_selection(algo_batch_id, fs_models, ['kmeans', 'em'], train_dataset, selected_cluster_number=5)\n        \n        if specified_dataset == 'aps':\n            #only run parts 4 and 5 on APS dataset\n            nn_model = MLPClassifier(hidden_layer_sizes=(100,20,), \n                                     early_stopping=True, \n                                     n_iter_no_change=50,\n                                     validation_fraction=0.3,\n                                     tol=0.0001, \n                                     random_state=RANDOM_STATE, \n                                     max_iter=N_EPOCHS, \n                                     learning_rate_init=0.1)\n\n            # part 4 - neural network with feature selection\n            print('{} - working on part 4: Apply the dimensionality reduction algorithms to one of your datasets from assignment #1 and rerun your neural network learner on the newly projected data.'.format(specified_dataset.upper()))\n            run_neural_network_no_feature_selection(algo_batch_id, nn_model=nn_model, train_dataset=train_dataset, test_dataset=test_dataset, label_encoder=label_encoder)\n            run_neural_network_with_feature_selection(algo_batch_id, nn_model=nn_model, fs_algo_dict=fs_models, train_dataset=train_dataset, test_dataset=test_dataset, label_encoder=label_encoder)\n            \n            # part 5 - neural network with clustering\n            print('{} - working on part 5: Apply the clustering algorithms to the same dataset to which you just applied the dimensionality reduction algorithms, treating the clusters as if they were new features'.format(specified_dataset.upper()))\n            run_neural_network_with_clustering(algo_batch_id, nn_model, train_dataset=train_dataset, test_dataset=test_dataset, clustering_algo_list=['kmeans', 'em'], label_encoder=label_encoder)\n        \n    return None\n\nif __name__ == '__main__': \n    main()\n    ","sub_path":"src/evaluate_models.py","file_name":"evaluate_models.py","file_ext":"py","file_size_in_byte":37400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"65197267","text":"# READ ME - IMPORTANT\n# Each integer represents a second in time\n# The priority rating is based on a scale from 1 -> 10, with 10 being the highest priority\n\n# A class that represents a process in an operating system\nclass process:\n    # priority: the priority of a process\n    # ctxs_time: the time it takes for a process to context switch\n    # burst_time: the time required by a process for CPU execution\n    # arrival_rate: the time taken by a process to arrive in the ready queue\n    # burst_clock: measures the amount of time spent on a process in the CPU\n    # total_arrival_time: measures the total time it takes for a process to enter the ready queue\n    # total_ctxs_time: measures the total time from 
context switches\n    # total_time: measures the total time a process takes to execute\n    def __init__(self, priority, ctxs_time, burst_time, arrival_rate):\n        self.priority = priority\n        self.ctxs_time = ctxs_time\n        self.burst_time = burst_time\n        self.arrival_rate = arrival_rate\n        \n        self.burst_clock = 0\n\n        self.total_arrival_time = 0\n        self.total_ctxs_time = 0\n        self.total_time = 0\n    \n    # A function that calculates the total time a process takes\n    def calc_tt(self):\n        self.total_time += self.total_arrival_time + self.total_ctxs_time\n\n# A class that represents a process scheduler\nclass scheduler:\n    def __init__(self):\n        # ready_queue: contains all of the processes/jobs to be scheduled\n        # time_slice: the interval in which the process switch algorithm occurs\n        self.ready_queue = []\n        self.time_slice = 1\n\n        # total_ctxs_time: the sum of all context switching times for each process\n        # total_arrival_time: the total time it takes for all processes to arrive in the ready queue\n        # arrival_iterate: a variable to ensure the correct process executes based on its arrival\n        # total_time: the total time for all processes to execute\n        # current_process: the current process on the CPU\n        self.total_ctxs_time = 0\n        self.total_arrival_time = 0\n        self.arrival_iterate = 0\n        self.total_time = 0\n        self.current_process = None\n    \n    # Populates the ready_queue with a list of process objects\n    def populate_queue(self, processes):\n        for process in processes:\n            self.ready_queue.append(process)\n            self.total_arrival_time += process.arrival_rate\n            process.total_arrival_time = self.total_arrival_time\n    \n    # Schedules the processes and calculates simulated runtimes for each one.\n    def schedule_processes(self):\n        while self.ready_queue:\n\n            # Selects the current process based on the other acceptable processes in the queue\n            for process in self.ready_queue:\n                if ((self.current_process is None) and (process.total_arrival_time <= self.arrival_iterate)):\n                    self.current_process = process\n                    process.total_ctxs_time += process.ctxs_time\n                \n                elif ((self.current_process is None) and (process.total_arrival_time > self.arrival_iterate)):\n                    continue\n\n                if ((process.priority > self.current_process.priority) and (process.total_arrival_time <= self.arrival_iterate)):\n                    self.current_process.total_ctxs_time += self.current_process.ctxs_time\n                    process.total_ctxs_time += process.ctxs_time\n                    self.total_ctxs_time += self.current_process.ctxs_time + process.ctxs_time\n\n                    self.current_process = process\n            \n            self.arrival_iterate += 1\n            \n            # Ups the burst count for the current process and ends it if need be\n            if (self.current_process is not None):\n                self.total_time += self.time_slice\n                self.current_process.total_time += self.time_slice\n                self.current_process.burst_clock += self.time_slice\n\n                if (self.current_process.burst_clock >= self.current_process.burst_time):\n                    self.current_process.total_ctxs_time += self.current_process.ctxs_time\n                    self.ready_queue.remove(self.current_process)\n                    self.current_process = None\n            \ndef main():\n    # Test processes for the program to run\n    p1 = process(8, 2, 3, 4)\n    p2 = process(9, 2, 6, 1)\n    p3 = process(6, 2, 9, 2)\n\n    job = [p1, p2, p3]\n\n    s1 = scheduler()\n    s1.populate_queue(job)\n    s1.schedule_processes()\n\n    for i in range(len(job)):\n        print(\"Process \" + str(i + 1) + \"\\n\")\n        print(\"Priority: \" + str(job[i].priority) + \"\\n\")\n        print(\"Burst Time: \" + str(job[i].burst_time) + \"\\n\")\n        print(\"Total Arrival Time: \" + str(job[i].total_arrival_time) + \"\\n\")\n        print(\"Total Context Switching Time: \" + 
str(job[i].total_ctxs_time) + \"\\n\")\n        \n        job[i].calc_tt()\n        print(\"Total Time: \" + str(job[i].total_time) + \"\\n\")\n    \nif __name__ == \"__main__\":\n    main()\n\n    \n\n\n    \n\n    \n","sub_path":"HW1/simulator_a.py","file_name":"simulator_a.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"515773253","text":"'''\r\n\r\n@author: Frank\r\n'''\r\nimport unittest\r\nfrom zstackwoodpecker.engine import engine\r\n\r\nclass Test(unittest.TestCase):\r\n    def testName(self):\r\n        logfd = open('/tmp/log', 'w')\r\n        engine.execute_case('test/testcase2.py', logfd)\r\n    \r\nif __name__ == \"__main__\":\r\n    #import sys;sys.argv = ['', 'Test.testName']\r\n    unittest.main()","sub_path":"zstackwoodpecker/test/test_engine.py","file_name":"test_engine.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"396884555","text":"import similarity as Sim\nimport numpy as np\n\nclass Learning:\n\n    def __init__(self, init_similarity):\n        self.similarity = init_similarity\n\n    '''\n    random walk with restart method\n    alpha is the absorbing rate\n    '''\n    def get_rwr_score(self, interaction_matrix, feature_matrix, similarity_func, alpha):\n        similarity_matrix = similarity_func(feature_matrix)\n        num = feature_matrix.shape[0]\n        score_matrix = np.dot(np.linalg.inv(np.eye(num) - alpha * similarity_matrix), \n                              (1 - alpha) * interaction_matrix)\n        return score_matrix\n\n    '''\n    collaborative filtering\n    '''\n    def get_cf_score(self, interaction_matrix, feature_matrix, similarity_func):\n        row_num, col_num = interaction_matrix.shape\n        similarity_matrix = similarity_func(feature_matrix)\n        similarity_row_sum = similarity_matrix.sum(1).reshape([row_num, 1])\n        denominator_matrix = np.dot(similarity_row_sum, np.ones([1, col_num]))\n        numerator_matrix = np.dot(similarity_matrix, interaction_matrix)\n        score_matrix = np.divide(numerator_matrix, denominator_matrix)\n        return score_matrix\n\n    '''\n    matrix factorization method\n    k is the dimension of vector, lamda controls the regularizer\n    '''\n    def get_MF_score(self, interaction_matrix, k, steps = 5000, gamma = 0.002, lamda = 0.02):\n        pass\n\n","sub_path":"learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"20205400","text":"import os\nimport pandas as pd\n\ndef symbol_to_path(symbol, base_dir=\"../data\"):\n    #Return CSV path for the ticker\n    return os.path.join(base_dir, \"{}.csv\".format(str(symbol))) \n\ndef get_data(symbols, dates):\n    #Read stock data for given symbols from CSV file\n    df = pd.DataFrame(index=dates)\n\n    for symbol in symbols:\n        df_temp = pd.read_csv(symbol_to_path(symbol), \n                              index_col='Date', \n                              parse_dates = True, \n                              usecols=['Date', 'Adj Close'],\n                              na_values=['nan'])\n        df_temp = df_temp.rename(columns={'Adj Close' : symbol})\n        df = df.join(df_temp)\n        #remove NA in the data \n        df = df.dropna(subset=[symbol])\n\n    return df","sub_path":"common/stockdata.py","file_name":"stockdata.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"276797420","text":"import base64\nimport json\nimport yaml\nimport datetime\nimport pandas as pd\nimport gspread\nimport re\n\n\nfrom pyzaim import ZaimCrawler\n\nfrom oauth2client.service_account import 
ServiceAccountCredentials\n\n\ndef cancatNote(x):\n    tmp = x['name'] + ' | ' if x['name'] != '' else ''\n    tmp += x.comment + ' | ' if x.comment != '' else ''\n    tmp += x.place\n    return tmp\n\n\ndef cleanUp(data, config, log=False):\n    pd_data = pd.DataFrame(data)  # convert the crawled records to a DataFrame\n\n    # --- data cleanup ---\n\n    # keep only transactions that should be counted (the '常に含める' flag; excludes transfers between accounts, etc.)\n    pd_data = pd_data.query(\"count=='常に含める'\")\n    # add a column for the income amount\n    pd_data['income'] = pd_data['amount'].where(pd_data['type'] == 'income', 0)\n    # for income rows, zero out the amount column\n    pd_data.loc[pd_data['type'] == 'income', ['amount']] = 0\n    # build the note from 'name', comment and place when present\n    pd_data['note'] = pd_data.apply(lambda x: cancatNote(x), axis=1)\n    # drop columns that are no longer needed\n    pd_data = pd_data.drop(['count', 'type'], axis=1)\n    # define the column order\n    sort = ['id', 'date', 'amount', 'income', 'category', 'genre',\n            'place', 'name', 'comment', 'from_account', 'to_account', 'note']\n    # reorder the columns\n    pd_data = pd_data.loc[:, sort]\n    # convert the Timestamps in the date column to strings\n    pd_data['date'] = pd_data['date'].dt.strftime('%Y/%m/%d')\n    # replace NaN with an empty string\n    pd_data = pd_data.fillna(\"\")\n\n    # --- end of data cleanup ---\n    if log:\n        # write a CSV log\n        pd_data.to_csv(\n            './log/zaim_{}{}_{}.csv'.format(config['year'], str(config['month']).zfill(2), datetime.datetime.now().strftime('%Y%m%d%H%M%S')))\n\n    return pd_data\n\n\ndef myZaim(event, context):\n    # read the request parameter\n    if 'data' in event:\n        target = json.loads(base64.b64decode(\n            event['data']).decode('utf-8'))['target']\n        print(target)\n    else:\n        print(\"parameter is empty.\")\n        return \"parameter is empty.\"\n\n    today = datetime.datetime.today()\n    if target == 'thisMonth':\n        year = today.year\n        month = today.month\n    elif target == 'lastMonth':\n        lastMonth = today.replace(day=1) - datetime.timedelta(days=1)\n        year = lastMonth.year\n        month = lastMonth.month\n    else:\n        print(\"unknown request.\")\n        return \"unknown request.\"\n\n    # load the config file\n    with open('config.yaml', 'r') as file:\n        config = yaml.load(file, Loader=yaml.SafeLoader)\n\n    # start the Chrome driver and log in to Zaim; logging in takes a little while\n    crawler = ZaimCrawler(config['id'], config['pw'], gcf=True)\n\n    try:\n        # fetch the data (this takes a while, depending on the number of records)\n        # set progress=False to hide the progress bar\n        data = crawler.get_data(\n            year, month, progress=False)\n\n    except Exception:\n        return \"failed pyZaim.\"\n\n    # close selenium\n    finally:\n        crawler.close()\n        print('close')\n\n    # clean up the data\n    pd_data = cleanUp(data, config)\n\n    # ---\n\n    # both API scopes are needed, otherwise a refresh token has to be reissued every 3600 seconds\n    scope = ['https://spreadsheets.google.com/feeds',\n             'https://www.googleapis.com/auth/drive']\n\n    # credential setup\n    # point the credentials at the downloaded JSON key file (a private key; keep it somewhere the Python file can easily load it from)\n    credentials = ServiceAccountCredentials.from_json_keyfile_name(\n        config['secret'], scope)\n\n    # log in to the Google API with the OAuth2 credentials\n    gc = gspread.authorize(credentials)\n\n    # store the key of the shared spreadsheet in SPREADSHEET_KEY\n    SPREADSHEET_KEY = config['sheet']\n\n    # open the shared spreadsheet\n    sheet_name = \"transactions\"\n    spread = gc.open_by_key(SPREADSHEET_KEY)\n    worksheets = spread.worksheet(sheet_name)\n    query = re.compile(\n        r'^{}/{}.*$'.format(str(year), str(month).zfill(2)))\n    # query = re.compile(r'^2020\\-06.*$')\n    find = worksheets.findall(query, in_column=2)\n\n    if len(find) > 0:\n        range_start = find[0].row\n        range_end = range_start\n\n        for f in find:\n            if f.row < range_start:\n                range_start = f.row\n            if f.row > range_end:\n                range_end = f.row\n\n        print('Remove range: {}-{}'.format(range_start, range_end))\n\n        worksheets.delete_rows(range_start, range_end)\n\n    worksheets.append_rows(pd_data.values.tolist())\n    worksheets.sort((2, 'asc'))\n\n    return 
\"completed.\"\n","sub_path":"GCP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"338296251","text":"#! /bin/python\nimport random\nimport sys\n\nfinput = input(\"What file are we choosing a winner from? \")\n\nfile = open(finput, 'r')\n\ndef chooser(fname):\n\n with open(finput) as f:\n i =( sum(1 for _ in f) )\n\n\n print(\" Choosing out of \", str(i), \"contestants...\")\n\n winner = random.randint(1,i)\n print(\"Winning index is\", winner)\n\n list = file.readlines()\n print(\"email is\", list[winner - 1])\n\nchooser(file)\n","sub_path":"raffle-chooser/chooser.py","file_name":"chooser.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"440200204","text":"import numpy as np\nimport random\n\ndef initMaze(L,w):\n \"\"\"\n Initialize the maze\n\n IN: L(length), w(width)\n\n OUT: Maze(matrix Lxw)\n \"\"\"\n return np.zeros((L,w))\n\ndef neighborsCheck(Maze, cellX, cellY):\n \"\"\"\n Checks if a cell has only one visited neighbor\n\n IN: Maze(matrix), cellX, cellY\n\n OUT: boolean\n \"\"\"\n # Fetching sums of neighbors cells\n lines, columns = Maze.shape\n l0, l1 = max(0, cellX-1), min(lines-1, cellX+1)\n c0, c1 = max(0, cellY-1), min(columns-1, cellY+1)\n ls = list({l0, cellX, l1})\n cs = [[c] for c in list({c0, cellY, c1})]\n # If the sum of the neighbors equals 1\n if Maze[ls, cs].sum() - Maze[cellX, cellY] == 1:\n return True\n # Else\n return False\n\ndef cellNeighbors(Maze, cellX, cellY):\n \"\"\"\n Fetch neighbors of cell\n\n IN: Maze(matrix), cellX, cellY\n\n OUT: List of cells\n \"\"\"\n # Initialize the matrix for the neighbors of cell\n listCells = [[cellX-1, cellY-1], [cellX-1, cellY], [cellX-1, cellY+1],\n [cellX, cellY-1], [cellX, cellY+1],\n [cellX+1, cellY-1], [cellX+1, cellY], [cellX+1, cellY+1]]\n # Initialize a matrix for removing any neighbors to not have any OutOfRange\n deletionCells = []\n # If cellX is in top of the Maze\n if cellX == 0:\n deletionCells.append((cellX-1, cellY-1))\n deletionCells.append((cellX-1, cellY))\n deletionCells.append((cellX-1, cellY+1))\n # If cellX is at the bottom of the Maze\n if cellX == len(Maze) - 1:\n deletionCells.append((cellX+1, cellY-1))\n deletionCells.append((cellX+1, cellY))\n deletionCells.append((cellX+1, cellY+1))\n # If cellY is at the left of the Maze\n if cellY == 0:\n deletionCells.append((cellX-1, cellY-1))\n deletionCells.append((cellX, cellY-1))\n deletionCells.append((cellX+1, cellY-1))\n # If cellY is at the right of the Maze\n if cellY == len(Maze[0]) - 1:\n deletionCells.append((cellX-1, cellY+1))\n deletionCells.append((cellX, cellY+1))\n deletionCells.append((cellX+1, cellY+1))\n # Removing any duplicates\n deletionCells = list(dict.fromkeys(deletionCells))\n # Removing the cells\n for delete in deletionCells:\n listCells.pop(listCells.index([delete[0], delete[1]]))\n return listCells\n\ndef validNeighborsCell(Maze, cellX, cellY):\n \"\"\"\n Fetch neighbors of cell that are valid\n\n IN: Maze(matrix), cellX, cellY\n\n OUT: List of cells\n \"\"\"\n # Fetching neighbors of cell\n listCells = cellNeighbors(Maze, cellX, cellY)\n # Initializing list of valid neighbors\n listValidCells = []\n # For each neighbors of cell\n for neighbor in listCells:\n # If the sum of the neighbor cells equals 1\n if neighborsCheck(Maze, neighbor[0], neighbor[1]) and Maze[neighbor[0]][neighbor[1]] == 0:\n # We 
add this cell to the valid cells\n            listValidCells.append(neighbor)\n    return listValidCells\n\ndef DFS(L, w):\n    \"\"\"\n    Do the Depth-First Search (iterative implementation) in order to generate the maze\n\n    IN: L(length), w(width)\n    \n    OUT: Maze(matrix), startingCell\n    \"\"\"\n    # Initializing the Maze\n    Maze = initMaze(L, w)\n    # We randomly choose the starting cell\n    startingCell = [random.randrange(L), random.randrange(w)]\n    # Initializing the stack\n    stack = []\n    # Adding the starting cell to the stack\n    stack.append([startingCell[0], startingCell[1]])\n    Maze[startingCell[0]][startingCell[1]] = 1\n    # While the stack is not empty\n    while len(stack) != 0:\n        # We take the last cell of the stack\n        currentCell = stack[-1]\n        # We get the valid neighbors of the current cell\n        listNextCells = validNeighborsCell(Maze, currentCell[0], currentCell[1])\n        # If there are neighbors\n        if len(listNextCells) > 0:\n            nextCell = random.choice(listNextCells)\n            Maze[nextCell[0]][nextCell[1]] = 1\n            stack.append(nextCell)\n        # Else we need to backtrack\n        else:\n            stack.pop()\n            # We avoid an IndexError when the stack empties\n            if len(stack) != 0:\n                currentCell = stack[-1]\n    return Maze, startingCell","sub_path":"Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"622212505","text":"# Time:  O(n)\n# Space: O(n)\n#\n# Given a binary tree, return the bottom-up level order traversal of its nodes' values.\n# (ie, from left to right, level by level from leaf to root).\n# \n# For example:\n# Given binary tree {3,9,20,#,#,15,7},\n#     3\n#    / \\\n#   9  20\n#     /  \\\n#    15   7\n# return its bottom-up level order traversal as:\n# [\n#   [15,7],\n#   [9,20],\n#   [3]\n# ]\n#\n# Definition for a binary tree node\n\nclass Solution(object):\n    def levelOrderBottom(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[List[int]]\n        \"\"\"\n        if not root:\n            return []\n        result = []\n        current = [root]\n        while current:\n            next = []\n            level = []\n            for node in current:\n                level.append(node.val)\n                if node.left:\n                    next.append(node.left)\n                if node.right:\n                    next.append(node.right)\n            result.insert(0, level) # Only line diff\n            current = next\n        return result\n    \n    # simply reverse\n    # https://github.com/kamyu104/LeetCode/blob/master/Python/binary-tree-level-order-traversal-ii.py\n    \n","sub_path":"binary-tree-level-order-traversal-ii-(M)-(107).py","file_name":"binary-tree-level-order-traversal-ii-(M)-(107).py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"154723782","text":"import pygame, sys, random, allobjects\nfrom pygame.locals import *\n\nFPS = 30\nFPSCLOCK = pygame.time.Clock()\n\n# Game window dimensions\nWINDOWWIDTH = 550\nWINDOWHEIGHT = 700\n\n#Min/Max enemy plane velocity\nMINSPEED = 1; MAXSPEED = 7\n\n#Space above what the player can see. 
\nSPAWNBUFFER = 414\n\n#Number of enemies\nNUMENEMIES = 30\n#Number of clouds\nNUMCLOUDS = 0\n\n#COLORS \nSTART_FC = (0,0,255)\nSTART_BG = (255,255,255,128)\nPAUSED_BG = (0,0,0,128)\nPAUSED_FC = (255,255,255)\nMAIN_BG = (56, 134, 232)\nBLACK = (0,0,0)\nWHITE = (255,255,255)\n\n#---- Main ----\ndef main():\n    \n    pygame.init()\n\n    # Create game surface\n    global DISPLAYSURF,SCREENLIMIT\n    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), RESIZABLE)\n    pygame.display.set_caption('Dog fighter')\n    SCREENLIMIT = [DISPLAYSURF.get_width(),DISPLAYSURF.get_height()]\n    \n    start_screen(DISPLAYSURF)\n\n    # Create game Sprite list\n    all_sprite_list = pygame.sprite.Group()\n    ammo_list = pygame.sprite.Group()\n    enemy_list = pygame.sprite.Group()\n    \n    # Creates the player's plane and set its location\n    player = allobjects.Player()\n    player.screenlimit(SCREENLIMIT)\n    player.rect.x = int(WINDOWWIDTH/2) - int(player.rect.width/2)\n    player.rect.y = int (3*(WINDOWHEIGHT/4 )) - (player.rect.height) \n\n    # Add player to sprite list\n    all_sprite_list.add(player)\n\n    #Create the enemy planes\n    \n    for i in range(15):\n        enemy = allobjects.Enemy(\"big\")\n        enemy.screenlimit(SCREENLIMIT)\n        pH = getRandomUpperPos()\n        enemy.rect.x = pH[0]\n        enemy.rect.y = pH[1]\n        enemy.set_speed(4)\n        enemy_list.add(enemy)\n        all_sprite_list.add(enemy)\n\n    print(allobjects.getEnemyHeight(\"big\"))\n    # Game loop\n    while True:\n        DISPLAYSURF.fill(MAIN_BG)\n        \n        # Event handling\n        for event in pygame.event.get():\n            # Event conditions and results for player movements\n            if event.type == KEYDOWN:\n                if event.key in (K_LEFT, K_a):\n                    player.fighter_direction = 'left'\n                if event.key in (K_RIGHT, K_d):\n                    player.fighter_direction = 'right'\n                if event.key in (K_UP, K_w):\n                    player.fighter_direction = 'up'\n                if event.key in (K_DOWN, K_s):\n                    player.fighter_direction = 'down'\n                # Game halting conditions\n                if event.key in (K_ESCAPE, K_p):\n                    if event.key == K_p:\n##                        showPausedSreen(DISPLAYSURF)\n                        displayLevelSummary(DISPLAYSURF)\n                    if event.key == K_ESCAPE:\n                        pygame.quit()\n                        sys.exit()\n                # Release ammunition\n                if event.key in (K_BACKSPACE,K_x):\n                    # Create 2 ammo objects\n                    ammo1 = allobjects.Ammo()\n                    ammo2 = allobjects.Ammo()\n                    # Assign ammo objects x and y coordinates\n                    ammo1.rect.x = player.rect.x + int(player.rect.width -10)\n                    ammo1.rect.y = player.rect.y\n                    ammo2.rect.x = player.rect.x \n                    ammo2.rect.y = player.rect.y\n                    all_sprite_list.add(ammo1,ammo2)\n                    ammo_list.add(ammo1,ammo2)\n\n            #Update graphics\n            all_sprite_list.update()\n            # Check for collision of ammo and enemy \n            for ammo in ammo_list:\n                enemy_hit_list = pygame.sprite.spritecollide(ammo,enemy_list,True)\n                for enemy in enemy_hit_list:\n                    enemy_list.remove(enemy)\n                    ammo_list.remove(ammo)\n                    all_sprite_list.remove(enemy,ammo)\n                # Remove ammo once it leaves the screen\n                if ammo.rect.y < -10:\n                    ammo_list.remove(ammo)\n                    all_sprite_list.remove(ammo)\n            # Remove enemies once they leave the screen\n            for enemy in enemy_list:\n                if enemy.rect.y >= 755:\n                    enemy_list.remove(enemy)\n                    all_sprite_list.remove(enemy)\n            \n            #adjustable screen event handling\n            if event.type == VIDEORESIZE:\n                DISPLAYSURF = pygame.display.set_mode(event.dict['size'], RESIZABLE)\n                SCREENLIMIT = [DISPLAYSURF.get_width(),DISPLAYSURF.get_height()]\n                player.screenlimit(SCREENLIMIT)\n            if event.type == KEYUP:\n                player.fighter_direction =''\n            if event.type == QUIT:\n                pygame.quit()\n                sys.exit()\n\n        # Update screen graphics\n        all_sprite_list.update()\n        
all_sprite_list.draw(DISPLAYSURF)\n pygame.display.flip()\n \n FPSCLOCK.tick(FPS)\n\n#Pauses the game\ndef showPausedSreen(surf):\n pausedFont = pygame.font.Font('freesansbold.ttf',18)\n while True:\n surf.fill(PAUSED_BG)\n # Surface obj that displays 'Paused'\n pausedSurf = pausedFont.render('PAUSED', True, PAUSED_FC)\n pausedRect = pausedSurf.get_rect()\n pausedRect.center = (int(surf.get_width()/2)), (int(surf.get_height()/2))\n surf.blit(pausedSurf, pausedRect)\n # Surface obj that displays how to unpause\n unPauseButtonSurf = pausedFont.render('Press \\'p\\' to continue.', True, PAUSED_FC)\n unPauseButtonRect = unPauseButtonSurf.get_rect()\n unPauseButtonRect.center = (int(surf.get_width()/2), int(surf.get_height()/2) + 25)\n surf.blit(unPauseButtonSurf, unPauseButtonRect)\n \n # Event handling loop. Similar to the one in the main loop.\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_p:\n return\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n # Uses the parameter's (surf) width and height to allow dynamic adjusting of window size \n if event.type == VIDEORESIZE:\n surf = pygame.display.set_mode(event.dict['size'], RESIZABLE)\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n \ndef start_screen(surf):\n start_font = pygame.font.Font(None,50)\n enter_font = pygame.font.Font('freesansbold.ttf',30)\n lock_var = True\n while lock_var:\n surf.fill(START_BG)\n display_message(\"Welcome to Dog Fighters\",START_FC, surf,start_font,-100)\n display_message(\"Press enter to play:\",(234,23,154), surf,enter_font)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_RETURN:\n lock_var = False\n if event.type == VIDEORESIZE:\n surf = pygame.display.set_mode(event.dict['size'], RESIZABLE)\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\n#display message to a surface object \ndef display_message(msg,color,surf,font,displacement = 0):\n temp_surf = font.render(msg, True, color)\n temp_rect = temp_surf.get_rect()\n temp_rect.center = (surf.get_width()/2, surf.get_height()/2 + displacement)\n surf.blit(temp_surf, temp_rect)\n\n#Function that creates pseudo-random spawn locations\ndef getRandomUpperPos():\n x = random.randint(55, 505)\n y = random.randint(46, 368)\n return x, y\n\n#Function that generates random velocities for the enemy planes\ndef getRandomVelocity():\n velocity = random.randint(MINSPEED, MAXSPEED)\n return velocity\n#Displays the Level summary page\ndef displayLevelSummary(surface):\n while True:\n surface.fill(BLACK)\n pygame.draw.line(surface, WHITE, (int(0.3*surface.get_rect().width), 0), (int(0.30*surface.get_rect().width),surface.get_rect().height), 4)\n pygame.draw.line(surface, WHITE, (0,int(.08*surface.get_rect().height)), (surface.get_rect().width,int(.08*surface.get_rect().height)), 2)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_c:\n return\n if event.key == K_ESCAPE:\n start_screen(surface)\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\n#Function that generates enemies after each level\ndef createEnemyFleet():\n pass \n\nif __name__ == '__main__':\n main()\n","sub_path":"compsci-fighters_v.2.1.py","file_name":"compsci-fighters_v.2.1.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} 
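A brief aside on the random-walk-with-restart scoring stated in the learning.py record above, score = (I - alpha*S)^-1 (1 - alpha) M. The following is a minimal standalone sketch of that closed form; S, M, and alpha are made-up toy values for illustration, not data taken from any of these programs, and only numpy is required:

import numpy as np

# Toy similarity (S) and interaction (M) matrices -- illustrative values only.
S = np.array([[0.0, 1.0],
              [1.0, 0.0]])
M = np.eye(2)
alpha = 0.5

# Closed-form RWR score, mirroring the expression in get_rwr_score:
# (I - alpha*S)^-1 @ (1 - alpha)*M
score = np.dot(np.linalg.inv(np.eye(2) - alpha * S), (1 - alpha) * M)
print(score)  # approx [[0.667, 0.333], [0.333, 0.667]]: each row blends a
              # node's own interactions (2/3) with its neighbor's (1/3)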
+{"seq_id":"601048940","text":"#!/usr/bin/env python3.7\n\n\"\"\"\nParsing wikipedia dump file.\n\nIt can be downloaded here: https://dumps.wikimedia.org/wikidatawiki/entities/\nFile name is `latest-lexemes.nt.bz2`\n\"\"\"\n\nimport codecs\nimport operator\nimport re\nimport stresses_lib as lib\nimport sys\n\nassert len(sys.argv) == 2, \"Wrong number of arguments\"\nlexems_fn = sys.argv[1]\n\n\nSTRESSED_WORD_PATTERN = re.compile(r'[а-яА-Я]*[ёЁ\\u0301][а-яА-ЯёЁ]*')\n\n\ndef extract_accents(raw_str):\n s = raw_str.lower()\n # Some words like 'сёгун' have other then 'ё' stressed,\n # so find explicit stress first\n accent_idx = s.find('\\u0301')\n if accent_idx == -1:\n accent_idx = s.index('ё') + 1\n clean_str = s\n else:\n clean_str = s.replace('\\u0301', '')\n # Wikipedia has some malformed string, i.e.\n # 'Плотников´Иван Васильевич' or\n # 'по поводу конкретного Ив́ан Ив́ановича Ив́анова'\n acceptable = len(clean_str) > 1 \\\n and accent_idx > 0 \\\n and lib.is_vowel(clean_str[accent_idx - 1])\n return acceptable, accent_idx, clean_str\n\n\nlc = 0\nwords = {}\nwith open(lexems_fn, 'r') as f:\n for line in f:\n lc += 1\n # if lc > 1000000:\n # break\n for acc_str in STRESSED_WORD_PATTERN.findall(line):\n acceptable, i, s = extract_accents(acc_str)\n if acceptable:\n # if s == \"иван\":\n # print(\"--------------\")\n # print(i)\n # print(line)\n poses = words.get(s, {})\n cnt = poses.get(i, 0)\n poses[i] = cnt + 1\n words[s] = poses\n\n# Now calculate most frequent stresses\nwords = {word: [max(stresses.items(), key=operator.itemgetter(1))[0]]\n for word, stresses in words.items()}\n# And finally save result\nlib.save_stresses(words)\n","sub_path":"espnet2/text/text_preparation/stress_dictionary/extracting/wikipedia-parsing.py","file_name":"wikipedia-parsing.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"606660046","text":"def add(a, b):\n print(f\"ADDING {a} + {b}\")\n return a + b\n\ndef subtract(a, b):\n print(f\"SUBTRACTING {a} - {b}\")\n return a - b\n\ndef multiply(a, b):\n print(f\"MULTIPLYING {a} * {b}\")\n return a * b\n\ndef divide(a, b):\n print(f\"DIVIDING {a} / {b}\")\n return a / b\n\nprint(\"Let's do some math with just functions!\")\n\nage = add(30, 5)\nheight = subtract(78, 4)\nweight = multiply(90, 2)\niq = divide(100, 2)\n\nprint(f\"Age: {age}, Height: {height}, Weight: {weight}, IQ: {iq}\")\n\n# a puzzle for the extra credit, type it in anyway\nprint(\"Here is a puzzle.\")\n\nwhat = multiply(age, add(height, subtract(weight, divide(iq, 2))))\n\nprint(\"That becomes: \", what, \"Can you do it by hand?\")\n\n# STUDY DRILL formula (5*10-50+100)/2\n\nwhat2 = divide(add(100, subtract(multiply(5, 10), 50)), 2)\nprint(what2) #correct\n\n# STUDY DRILL 2 formula with user inputs\nprint(\"We are going to add then multiply then we'll subtract and divide. You pick the numbers.\")\n\nwhat3 = divide(\n subtract(\n multiply(\n add(\n int(input(\"first number: \")), int(input(\"Number you're adding to it: \")))\n , int(input(\"Number you're multiplying by: \")))\n , int(input(\"Number you're subtracting: \")))\n , int(input(\"Number you're dividing by: \")))\n\nprint(\"Hurray!! It worked!! 
the answer is: \", int(what3))\n","sub_path":"mystuff/ex21.py","file_name":"ex21.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"262306725","text":"from GUI import Window, CheckBox, Label, TextField, Grid, application\nfrom testing import say\n\nitems = [\n\t[Label(\"Name\"), TextField(width = 100)],\n\t[Label(\"Age\"), TextField(width = 50)],\n\t[Label(\"Language\"), CheckBox(\"Python\")],\n]\n\ngrid = Grid(items)\n\nwin = Window(title = \"Grid\")\ngrid.position = (10, 10)\nwin.add(grid)\nwin.shrink_wrap()\nwin.show()\n\ninstructions = \"\"\"\nThere should be six components laid out in a grid of three rows\nand two columns. Each component should be centre-left aligned\nwithin its cell.\n\"\"\"\n\nsay(instructions)\napplication().run()\n","sub_path":"Tests/42-grid.py","file_name":"42-grid.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"242816726","text":"import os\nfrom uuid import uuid4\n\nfrom django.db import models\n\n\ndef path_and_rename(instance, filename):\n upload_to = 'fotos'\n ext = filename.split('.')[-1]\n # get filename\n if instance.pk:\n filename = '{}.{}'.format(f'{instance.pk}{uuid4().hex}{instance.pk}', ext)\n else:\n # set filename as random string\n filename = '{}.{}'.format(uuid4().hex, ext)\n # return the whole path to the file\n return os.path.join(upload_to, filename)\n\n\n# Create your models here.\n\n\nclass Categoria(models.Model):\n nombre = models.CharField(max_length=60, unique=True)\n ver_en_web = models.BooleanField(default=True)\n\n def __str__(self):\n return self.nombre\n\n\nclass Producto(models.Model):\n nombre = models.CharField(max_length=60, unique=True)\n descripcion = models.TextField(null=True)\n precio = models.DecimalField(decimal_places=2, max_digits=7)\n descuento = models.PositiveSmallIntegerField(null=True)\n ver_descuento = models.BooleanField(default=True)\n ver_en_web = models.BooleanField(default=True)\n\n categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"\" + self.nombre + \" | \" + self.categoria.nombre\n\n\nclass FotoProducto(models.Model):\n fecha_registro = models.DateTimeField(auto_now_add=True)\n foto = models.ImageField(blank=True, null=True, upload_to=path_and_rename)\n producto = models.ForeignKey(Producto, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"| PRODUCTO: \" + self.producto.nombre\n\n\nclass Servicio(models.Model):\n titulo = models.CharField(max_length=120)\n descripcion = models.TextField(null=True)\n precio = models.DecimalField(decimal_places=2, max_digits=7)\n descuento = models.PositiveSmallIntegerField(null=True)\n ver_descuento = models.BooleanField(default=True)\n ver_en_web = models.BooleanField(default=True)\n\n def __str__(self):\n return self.titulo\n\n\nclass FotoServicio(models.Model):\n fecha_registro = models.DateTimeField(auto_now_add=True)\n foto = models.ImageField(blank=True, null=True, upload_to=path_and_rename)\n servicio = models.ForeignKey(Servicio, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"| SERVICIO: \" + self.servicio.titulo\n\n\nclass Cliente(models.Model):\n identificacion = models.CharField(max_length=30)\n primer_nombre = models.CharField(max_length=30)\n segundo_nombre = models.CharField(max_length=30)\n primer_apellido = models.CharField(max_length=30)\n segundo_apellido = 
models.CharField(max_length=30)\n    fecha_nacimiento = models.DateField(null=True)\n    correo = models.EmailField(null=True)\n    telefono = models.CharField(max_length=30)\n    direccion = models.TextField(null=True)\n    servicios = models.ManyToManyField(Servicio, through='ServiciosCliente')\n\n\nclass ServiciosCliente(models.Model):\n    porcentaje_total = models.PositiveSmallIntegerField(default=0)\n\n    observaciones = models.TextField(null=True)\n    estado = models.CharField(max_length=30)\n\n    servicio = models.ForeignKey(Servicio, on_delete=models.CASCADE)\n    cliente = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n\nclass Seguimiento(models.Model):\n    titulo = models.CharField(max_length=120)\n    descripcion = models.TextField(null=True)\n    porcentaje = models.PositiveSmallIntegerField(default=0)\n\n    servicio_cliente = models.ForeignKey(ServiciosCliente,\n                                         on_delete=models.CASCADE)\n\n    def __str__(self):\n        return self.titulo\n\n\nclass FotoSeguimiento(models.Model):\n    fecha_registro = models.DateTimeField(auto_now_add=True)\n    foto = models.ImageField(blank=True, null=True, upload_to=path_and_rename)\n    seguimiento = models.ForeignKey(Seguimiento, on_delete=models.CASCADE)\n\n    def __str__(self):\n        return \"| Seguimiento: \" + self.seguimiento.titulo\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"397953996","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Generating simple data\nt = np.linspace(0,4*np.pi,64)\ny = np.sin(t)\n\n# plot with title and axis labels\nplt.plot(t, y, 'b-')\nplt.title('Sine function')\nplt.ylabel('sin(t)')\nplt.xlabel('Angle t (radian)')\nplt.show()\n\n\n# The range of the axes is controlled now\nplt.plot(t, y, 'b-')\nplt.title('Sine function')\nplt.ylabel('sin(t)')\nplt.xlabel('Angle t (radian)')\nplt.axis([0,13,-2,2])\nplt.show()\n\n","sub_path":"plotExamples/PlotTitleLabels.py","file_name":"PlotTitleLabels.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"31749970","text":"import connection\nimport os\nimport pandas as pd\n\n# Extract the data from the Excel sheet and convert it to a DataFrame\nos.chdir(r'c:\\kus\\neo4j-python-pandas-py2neo-v3')\ninvoice_data = pd.read_excel('./data/Invoice_data_Demo.xls', header=0, encoding='utf8')\n\n\ndef relation_extraction(data=invoice_data, col='发票名称'):\n    \"\"\"Relation data extraction\"\"\"\n\n    df = pd.melt(data, id_vars=[col], value_vars=data.columns[1:],\n                 var_name='relation', value_name='name2')\n    df = df.applymap(str)\n    df = df.rename(columns={col: 'name'})\n    print(df)\n    return df\n\n\ncreate_data = connection.DataToNeo4j()\ncreate_data.create_relation(relation_extraction())\n","sub_path":"extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"99779909","text":"from kombu import Exchange, Queue\nimport os\nBROKER_URL = os.environ['CELERY_BROKER']\nCELERY_RESULT_BACKEND = 'rpc'\nCELERY_RESULT_PERSISTENT = True\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_ACCEPT_CONTENT = ['json']\n\n\ngwas_exchange = Exchange('gwas', type='direct')\nenrichment_exchange = Exchange('enrichment',type='direct')\n\n\n\nCELERY_QUEUES = (\n    Queue('gwas.portal.worker.slow', gwas_exchange, routing_key='gwas.portal.worker.slow'),\n    Queue('gwas.portal.worker.fast', gwas_exchange, 
routing_key='gwas.portal.worker.fast'),\n    Queue('enrichment',enrichment_exchange,routing_key='enrichment')\n)\n\n    \nCELERY_ROUTES = {\n    'gwaportalpipeline.gwas.run_gwas':{'queue':'gwas.portal.worker.fast'},\n    'gwaportalpipeline.enrichment.candidate_gene_list_enrichment':{'queue':'enrichment'}\n}\n\n\n","sub_path":"conf/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"440123618","text":"from keras import backend as K\n\nimport os,sys\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nplt.switch_backend('agg')\n\nfrom PIL import Image\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nimport numpy as np\nfrom numpy import linalg\nimport skimage.util\nimport scipy.ndimage.filters\nimport scipy.misc\nimport scipy.io\nfrom scipy import stats\n\nimport utils\n\nimport keras.backend.tensorflow_backend as KTF\nKTF.set_session(utils.get_session())\n\n################### Processing the data ##################\nfrom keras.datasets import cifar10\n(x_train, _), (x_test, _) = cifar10.load_data()\n\nx_train = x_train.astype('float32')/255.\nx_train = np.reshape(x_train, (len(x_train), 32, 32, 3))\n\nx_test = x_test.astype('float32') / 255.\nx_test = np.reshape(x_test, (len(x_test), 32, 32, 3))\n\n# Adding GBLUR\nx_train, x_train_noisy = utils.cifar10_gblur(x_train)\nx_test, x_test_noisy = utils.cifar10_gblur(x_test)\n\nx_train.shape, x_train_noisy.shape, x_test.shape, x_test_noisy.shape\n\n# Displaying noisy images\nn = 13\nplt.figure(figsize=(20, 4))\nfor i in range(1, n):\n# display original\n    ax = plt.subplot(2, n, i)\n    plt.imshow(x_train[i].reshape(32, 32, 3))\n    ax.get_xaxis().set_visible(False)\n    ax.get_yaxis().set_visible(False)\n\n# display reconstruction\n    ax = plt.subplot(2, n, i + n)\n    plt.imshow(x_train_noisy[i].reshape(32, 32, 3))\n    ax.get_xaxis().set_visible(False)\n    ax.get_yaxis().set_visible(False)\nplt.show()\nplt.savefig('saves/cdA_gblur_noisy_vis.png')\n\n\"\"\"Constructing the Model\"\"\"\nimport keras\nimport tensorflow as tf\nimport keras.backend as K\nfrom keras import losses\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, add, concatenate, Lambda\n\nL = 5\nF = [16, 32, 64, 128, 256] # Number of filters at each layer\n\n# Clean Encoder\nenc_inp = Input(shape = (32, 32, 3))\nencs = []\nenc = enc_inp\nfor i in range(L):\n    enc = Conv2D(F[i], (3, 3), activation='relu', padding='same', strides=(2, 2))(enc)\n    enc = Conv2D(F[i], (3, 3), activation='relu', padding='same', strides=(1, 1))(enc)\n    encs.append(enc)\n\nencoder = Model(inputs=[enc_inp], outputs=encs)\nencoder.compile(optimizer='adadelta', loss='mean_squared_error')\n\n# Quality\ninp = Input(shape = (32, 32, 3))\n\nenc = inp\nfor i in range(L):\n    enc = Conv2D(F[i], (3, 3), activation='relu', padding='same', strides=(2, 2))(enc)\n    enc = Conv2D(F[i], (3, 3), activation='relu', padding='same', strides=(1, 1))(enc)\n    \n    if i == L-1:\n        Q = Dense(1, activation='relu')(enc)\n\nquality = Model(inputs=[inp], outputs=[Q])\nquality.compile(optimizer='adadelta', loss='mean_squared_error')\n\n# Decoder\nclean_acvns = []\nfor i in range(L):\n    if i == L-1:\n        clean_acvn = Input(shape = (None, None, F[i]+1))\n    else:\n        clean_acvn = Input(shape = (None, None, F[i]))\n    clean_acvns.append(clean_acvn)\n\nclean_dec = clean_acvns[L-1]\nfor i in range(-1, 
L-1)[::-1]:\n    u = UpSampling2D((2, 2))(clean_dec)\n    if i != -1:\n        clean_dec = Conv2D(F[i], (3, 3), activation='relu', padding='same')(u)\n        clean_dec = Conv2D(F[i], (3, 3), activation='relu', padding='same')(clean_dec)\n        clean_dec = add([clean_dec, clean_acvns[i]])\n    else:\n        recon = Conv2D(3, (3, 3), activation='relu', padding='same')(u)\n\ndecoder = Model(inputs=clean_acvns, outputs=[recon])\ndecoder.compile(optimizer='adadelta', loss='mean_squared_error')\n\n# Fitting clean and distorted images\nclean_input = Input(shape=(32, 32, 3))\ndist_input = Input(shape=(32, 32, 3))\n\nencs = encoder(clean_input)\nq = quality(dist_input)\n\ncorr_encs = encs[0 : L-1]\ncorr_enc = concatenate([encs[L-1], q], axis=-1)\ncorr_encs.append(corr_enc)\ndist_pred = decoder(corr_encs)\n\nIQA_model = Model(inputs=[clean_input, dist_input], outputs=[dist_pred])\nIQA_model.compile(optimizer='adadelta', loss=utils.loss_SSIM)\n\n######################### Model Flow Diagram ###########################\n#from keras.utils import plot_model\n#plot_model(IQA_model, to_file='my_models/cdA_gblur_model.png', show_shapes=True)\n\n########################## Training the model ##########################\nfrom keras.callbacks import TensorBoard\nimport sys\nIQA_model.fit([x_train, x_train_noisy],\n              [x_train_noisy],\n              verbose=2,\n              epochs=100,\n              batch_size=256,\n              shuffle=True,\n              validation_data=([x_test, x_test_noisy], \n                               [x_test_noisy]),\n              callbacks=[TensorBoard(log_dir='tmp/IQA_model', histogram_freq=0, \n                                     write_graph=True, write_images=True)])\n\nfrom keras.models import load_model\nIQA_model.save('my_models/cdA_gblur.h5')\n\n######################### Testing the model ############################\nfilt_imgs = IQA_model.predict([x_test[0 : 200], x_test_noisy[0 : 200]])\n\nvis_clean = utils.visualize(x_test, [32, 32], [1, 1], [10, 10], color=1, channels='last')\nvis_dist = utils.visualize(x_test_noisy, [32, 32], [1, 1], [10, 10], color=1, channels='last')\nvis_filt = utils.visualize(filt_imgs, [32, 32], [1, 1], [10, 10], color=1, channels='last')\n\nplt.figure(figsize=(25, 25))\nplt.subplot(1, 3, 1), plt.imshow(vis_clean)\nplt.subplot(1, 3, 2), plt.imshow(vis_dist)\nplt.subplot(1, 3, 3), plt.imshow(vis_filt)\nplt.show()\nplt.savefig('saves/cdA_gblur_recon_vis.png')","sub_path":"codes_ks/IQA2.2/cdA_gblur.py","file_name":"cdA_gblur.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"177811982","text":"from os import path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nimport statsmodels.stats.weightstats as sm2\nfrom scipy import stats as sc\n\nimport coda_tools as coda\nimport processing_tools as tool\n\nntrials = [2, 3, 4, 5] # NB: change the file names\npositions = ['UR', 'SP', 'UD']\nnames = ['GD', 'PDs', 'LH', 'MH']\ncolors = ['plum', 'aquamarine', 'aquamarine', 'royalblue', 'royalblue']\nsujet = {\n    \"GD\": \"Subject 1\",\n    \"LH\": \"Subject 3\",\n    \"PDs\": \"Subject 2\",\n    \"MH\": \"Subject 4\"\n}\npositionsdico = {\n    \"SP\": \"Supine\",\n    \"UD\": \"UpsideDown\",\n    \"UR\": \"UpRight\"\n}\n\nsujetcolor = {\n    \"PDs\": \"deeppink\",\n    \"MH\": \"black\",\n    \"GD\": \"green\",\n    \"LH\": \"blueviolet\"\n}\nsujetmarker = {\n    \"GD\": \"d\",\n    \"MH\": \"o\",\n    \"LH\": \"s\",\n    \"PDs\": \"*\"\n}\n\n\ndef transformpvalue(p: float):\n    if p < 0.001:\n        return \"***\"\n    elif p < 0.01:\n        return \"**\"\n    elif p < 0.05:\n        return \"*\"\n    else:\n        return 'ns'\n\n\nfig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, 
figsize=(7, 10))\ntup = (ax1, ax2, ax3)\nfile1 = open(\"stats34_delatx\", \"w\")\nfor p, ax in zip(positions, tup):\n    file1.write(\"########################%s######################\\n\" % positionsdico[p])\n    arrayopenall = []\n    arraycloseall = []\n    for name in names:\n        arrayopen = []\n        arrayclose = []\n        for n in ntrials:\n            file_path = \"../../data/Groupe_1_codas/%s_%s_coda000%d.txt\" % (name, p, n)\n            if not path.exists(file_path):\n                continue\n            else:\n                coda_df = coda.import_data(file_path)\n                time1 = coda_df.time.to_numpy()\n                markers_id = [6, 5, 8, 7]\n\n                pos = coda.manipulandum_center(coda_df, markers_id)\n                pos = pos / 1000\n                vel = tool.derive(pos, 200, axis=1)\n\n                pk = signal.find_peaks(vel[0], prominence=1, width=(100, 1000))\n                ipk = pk[0] # index\n                cycle_starts = ipk[:-1]\n                cycle_ends = ipk[1:] - 1\n\n                ecart = []\n                for k in range(len(cycle_starts)):\n                    if not np.isnan(abs(np.nanmax(pos[0][cycle_starts[k]:cycle_ends[k]]) - np.nanmin(\n                            pos[0][cycle_starts[k]:cycle_ends[k]]))):\n                        ecart.append(abs(np.nanmax(pos[0][cycle_starts[k]:cycle_ends[k]]) - np.nanmin(\n                            pos[0][cycle_starts[k]:cycle_ends[k]])))\n\n                if n == 2 or n == 3:\n                    arrayopen.append(np.nanmean(ecart))\n                if n == 4 or n == 5:\n                    arrayclose.append(np.nanmean(ecart))\n\n        arraycloseall.append(np.nanmean(arrayclose))\n        arrayopenall.append(np.nanmean(arrayopen))\n    X1 = sm2.DescrStatsW(arrayopenall)\n    X2 = sm2.DescrStatsW(arraycloseall)\n    Ttest = sm2.CompareMeans(X1, X2)\n    t2, p2 = sc.ttest_ind(arrayopenall, arraycloseall)\n    Txbis, pvalbis = sc.bartlett(arrayopenall, arraycloseall)\n    file1.write(Ttest.summary(usevar='pooled').as_text() + \"\\n\")\n    file1.write(\"the two means are: %f and %f\\n\" % (np.nanmean(arrayopenall), np.nanmean(arraycloseall)))\n    file1.write(\"p-value for the variance %f \\n\" % pvalbis)\n    file1.write(\"the two standard deviations are %f and %f\\n\" % (np.nanstd(arrayopenall), np.nanstd(arraycloseall)))\n    index = [1, 2]\n    indexgraph1 = np.linspace(index[0] - 0.25, index[0] + 0.25, 50)\n    plotarray1 = np.zeros(50) + np.nanmean(arrayopenall)\n    indexgraph2 = np.linspace(index[1] - 0.25, index[1] + 0.25, 50)\n    plotarray2 = np.zeros(50) + np.nanmean(arraycloseall)\n    indexscatter1 = np.zeros(len(arrayopenall)) + index[0]\n    indexscatter2 = np.zeros(len(arraycloseall)) + index[1]\n    ax.plot(indexgraph1,plotarray1, linestyle='dotted')\n    ax.plot(indexgraph2, plotarray2, linestyle='dotted')\n    ax.scatter(indexscatter1,arrayopenall , alpha=0.5, s=20)\n    ax.scatter(indexscatter2, arraycloseall, alpha=0.5, s=20)\n    ax.text(index[0] + 0.35, 0.48, 'mean:%s' % transformpvalue(p2), fontsize=12)\n    ax.text(index[0] + 0.375, 0.455, 'std:%s' % transformpvalue(pvalbis), fontsize=12)\n    ax.set_ylim(0.20, 0.55)\n    ax.set_xlim(0.8, 2.2)\n    ax.set_xticks([1, 2])\n    ax.set_xticklabels(['no blind', 'blind'])\n    ax.set_xlim(0.70, 2.30)\n    ax.set_title(\"%s\" % positionsdico[p],fontweight='bold')\n    ax.set_ylabel(\"Movement amplitude in X [m]\")\nfig.suptitle(\"amplitude x Errorbar all subjects\")\nplt.savefig(\"34_en_x_for_all.png\")\nfile1.close()\n","sub_path":"codepython/Amplitude en X/Just3and4.py","file_name":"Just3and4.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"490093409","text":"#SVR\n#NOTE=> SVR doesn't apply feature scaling automatically\n\n#regression template\n\n#support vector regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = 
pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values # using [:, 1] alone gives a vector; [:, 1:2] gives the same data represented as a matrix\ny = dataset.iloc[:, 2:3].values\n\n# =============================================================================\n# # Splitting the dataset into the Training set and Test set\n# from sklearn.model_selection import train_test_split\n# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n# =============================================================================\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X)\ny = sc_y.fit_transform(y)\n\n\n#fitting SVR to the dataset\nfrom sklearn.svm import SVR\nregressor = SVR(kernel='rbf')\nregressor.fit(X,y)\n\n\n\n\n#create your regressor\n\n\n#predicting a new result with SVR\ny_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(np.array([[6.5]]))))\n\n#visualising the regression results\nplt.scatter(X,y,color='red')\nplt.plot(X, regressor.predict(X), color='blue')\nplt.title('Truth or bluff (SVR)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()\n\n\n#visualising the regression results (for higher resolution and smoother curve)\nX_grid = np.arange(min(X),max(X),0.1)\nX_grid = X_grid.reshape((len(X_grid),1))\nplt.scatter(X,y,color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.title('Truth or bluff (SVR)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()\n\n\n\n","sub_path":"Regression/Regression-SVR-randomfor-decisiontree/SVR_dion.py","file_name":"SVR_dion.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"90937515","text":"name=input('Name of person whose names need filling:')\nf=open(name+'.txt','r')\nnames=f.readlines()\n#print(names)\nfor i in range(5):\n    names[i]=names[i][0:-1]\n#print(names)\nf.close()\nfor name in names:\n    g=open(name+'.txt','a')\n    g.close()\n\n","sub_path":"Programme/Test Group/Generator - Copy.py","file_name":"Generator - Copy.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"443213424","text":"import pygame, sys\nfrom settings import *\npygame.font.init()\n\n\n\nclass PlayerStats(object):\n    def __init__(self):\n        self.level = 0\n        self.amunition = 100\n        self.health = 100\n        self.points = 0\n        self.font = pygame.font.Font('freesansbold.ttf', 20)\n\n    def draw(self):\n        text_points = self.font.render(\n            'Points: {}'.format(self.points),\n            True,\n            colors['white']\n        )\n        text_amunition = self.font.render(\n            'Ammunition: {}'.format(self.amunition),\n            True,\n            colors['white']\n        )\n        text_health = self.font.render(\n            'Health: {}'.format(self.health),\n            True,\n            colors['red']\n        )\n        text_next_level = self.font.render(\n            'next level: {}pt'.format((self.level * 15) + 15),\n            True,\n            colors['white']\n        )\n        for i in range(int(self.health/2)):\n            pygame.draw.rect(\n                DISPLAYSURF,\n                colors['red'],\n                (350+(i*2), 280, 1, 25)\n            )\n        text_level = self.font.render(\n            'level: {}'.format(self.level),\n            True,\n            colors['light-green']\n        )\n        DISPLAYSURF.blit(text_amunition,(350,50))\n        DISPLAYSURF.blit(text_points,(350,150))\n        DISPLAYSURF.blit(text_health,(350,250))\n        DISPLAYSURF.blit(text_level,(350,350))\n        DISPLAYSURF.blit(text_next_level,(350,450))\n\n    def set(self, value=0, 
property=None):\n        if property!=None:\n            value += getattr(self, property)\n        setattr(self, property, value)\n\n    def get(self, property):\n        if property != None:\n            return getattr(self, property)\n\n    def clear_results(self):\n        self.level = 0\n        self.amunition = 100\n        self.health = 100\n        self.points = 0\n\n\nplayer_stats = PlayerStats()\n","sub_path":"playerstats.py","file_name":"playerstats.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"213768606","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport pymysql\n\n\nclass IpMysqlPipelines(object):\n\n\tdef process_item(self, item, spider):\n\t\t\n\t\t# open the database connection\n\t\tself.conn = pymysql.connect(host='127.0.0.1', user='root',\n                             password='root', database='lyq_db', charset='utf8')\n\t\t# get a cursor object\n\t\tself.cursor = self.conn.cursor()\n\t\t\n\t\tself.insert_db(item)\n\n\t\treturn item\n\n\tdef insert_db(self, item):\n\t\ttry:\n\t\t\t# build the SQL statement\n\t\t\tsql = 'INSERT INTO ip_list VALUES (null,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")' % (\n\t\t\t\t\titem['ip_address'],\n\t\t\t\t\titem['ip_port'],\n\t\t\t\t\titem['ip_type'],\n\t\t\t\t\titem['ip_survival_time'],\n\t\t\t\t\titem['ip_verify_time'],\n\t\t\t\t\titem['ip_location'],\n\t\t\t\t\titem['ip_create_time']\n\t\t\t\t)\n\t\t\tself.cursor.execute(sql)\n\t\t\tself.conn.commit()\n\t\t\tprint('%s: saved to the database'%(item['ip_address']))\n\t\texcept Exception as e:\n\t\t\tprint('Exception while inserting data:', e)\n\t\t\tself.conn.rollback()\n\t\tfinally:\n\t\t\tself.cursor.close()\n\t\t\tself.conn.close()\n\t\n\t\n","sub_path":"ip_spider_python/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"168218189","text":"\"\"\"\nThe first two consecutive numbers to have two distinct prime factors are:\n\n14 = 2 × 7\n15 = 3 × 5\n\nThe first three consecutive numbers to have three distinct prime factors are:\n\n644 = 2² × 7 × 23\n645 = 3 × 5 × 43\n646 = 2 × 17 × 19.\n\nFind the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers?\n\"\"\"\nimport time\nstart_time = time.time()\n\nlist_of_primes = []\ndels = []\ncount = 0\nfoundx = 0\nsecndfndx = 0\n\ndef is_prime(num):\n    d = 2\n    while d * d <= num and num % d != 0:\n        d += 1\n    return d * d > num\n\nfor x in range(2, 10000):\n    if is_prime(x):\n        list_of_primes.append(x)\n\n#for 3 primes\n# for x in range(1, 1000):\n#     foundx = 0\n#     for a in list_of_primes:\n#         if a > x / 6 or foundx == 1:\n#             break\n#         for b in list_of_primes:\n#             if b > x / (a*2) or foundx == 1:\n#                 break\n#             for c in list_of_primes:\n#                 if c > x / (a*b) or foundx == 1:\n#                     break\n#                 if x == a*b*c or x==a**2*b*c:# or x==a*b**2*c or x==a*b*c**2:# or x==a**2*b**2*c**2:\n#                     tmp_list_of_delit = [a, b, c]\n#                     if len(set(tmp_list_of_delit)) == len(tmp_list_of_delit):\n#                         foundx = 1\n#                         dels.append(x)\n#                         dels.append(sorted(tmp_list_of_delit))\n#\n#                     # print(x, a, b, c, count, tmp_list_of_delit)\n#\n#     if x == secndfndx + 1:\n#         count += 1\n#         if count > 1:\n#             print(f'count = {count}. 
divisors: {dels}')\n#     if x != secndfndx + 1:\n#         count = 0\n#         dels = []\n#     secndfndx = x\n\n#for 4 primes\nfor x in range(1, 1000000):\n    foundx = 0\n    for a in list_of_primes:\n        if a > x / 8 or foundx == 1:\n            break\n        for b in list_of_primes:\n            if b > x / (a*4) or foundx == 1:\n                break\n            for c in list_of_primes:\n                if c > x / (a*b*2) or foundx == 1:\n                    break\n                for d in list_of_primes:\n                    if d > x / (a*b*c) or foundx == 1:\n                        break\n\n                    # print(f'{x} = {a} * {b} * {c} * {d}')\n\n                    if x == a*b*c*d or x==a**2*b*c*d:# or x==a*b**2*c*d or x==a*b*c**2*d or x==a*b*c*d**2:# or x==a**2*b**2*c**2*d**2:\n                        tmp_list_of_delit = [a, b, c, d]\n\n                        if len(set(tmp_list_of_delit)) == len(tmp_list_of_delit):\n                            foundx = 1\n                            dels.append(x)\n                            dels.append(sorted(tmp_list_of_delit))\n\n                            print(x, a, b, c, d, count, tmp_list_of_delit)\n\n    if x == secndfndx + 1:\n        count += 1\n        if count > 1:\n            print(f'count = {count}. divisors: {dels}')\n    if x != secndfndx + 1:\n        count = 0\n        dels = []\n    secndfndx = x\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n# input()","sub_path":"47.py","file_name":"47.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"618796226","text":"import sys\nimport os\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nprint(curPath)\nrootPath = curPath\nfor i in range(2):\n    rootPath = os.path.split(rootPath)[0]\nprint(rootPath)\nsys.path.append(rootPath)\n\nimport tensorflow.keras as keras\nfrom sklearn.metrics import confusion_matrix\nfrom tensorflow.keras.utils import to_categorical\nfrom utils.uts_classification.utils import readmts_uci_har,transform_labels\nimport autokeras as ak\nimport numpy as np\nfrom NAS.logger import Logger\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n# Load the UCI_HAR_Dataset\nfile_name = '../../datasets/mts_data/UCI_HAR_Dataset'\nx_train, y_train, x_test, y_test = readmts_uci_har(file_name)\ndata = np.concatenate((x_train, x_test),axis=0)\nlabel = np.concatenate((y_train, y_test),axis=0)\nN = data.shape[0]\nind = int(N*0.9)\nx_train = data[:ind]\ny_train = label[:ind]\nx_test = data[ind:]\ny_test = label[ind:]\ny_train, y_test = transform_labels(y_train, y_test)\n\nclasses = ['Cla.1','Cla.2','Cla.3','Cla.4','Cla.5','Cla.6']\nNUM_CLASSES = 6\ny_train = to_categorical(y_train, NUM_CLASSES)\ny_test = to_categorical(y_test, NUM_CLASSES)\n\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\nprint(y_test.shape)\nprint(y_train[:3])\n\n# Set the run parameters\nname = 'Dense_Greedy1'\nmax_trials = 100\nif not os.path.isdir('./result/'+name):\n    os.makedirs('./result/'+name)\n\n# Initialize the classifier.\ninput_node = ak.Input()\noutput_node = ak.DenseBlock()(input_node)\noutput_node = ak.ClassificationHead(num_classes=6,dropout_rate=0)(output_node)\nclf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=max_trials,\n                   name=name,directory='result')\n\n# Logger for NAS\nfp = open('result/'+name+'/log','w')\nfp.close()\nfp = open('result/'+name+'/log_file','w')\nfp.close()\nsys.stdout = Logger('result/'+name+'/log', sys.stdout)\nsys.stderr = Logger('result/'+name+'/log_file', sys.stderr)\t\t# redirect std err, if necessary\n\nclf.tuner.search_space_summary()\n# Search for the best model.\nclf.fit(x_train, y_train, epochs=100, validation_split=0.1, batch_size=128, callbacks=[keras.callbacks.EarlyStopping(patience=10)],verbose=1)\n\nclf.tuner.results_summary()\n\n# Evaluate the best model\nbest_model = 
clf.tuner.get_best_model()[1]\nprint('*************************----best_model----*************************')\ncvconfusion = np.zeros((NUM_CLASSES, NUM_CLASSES))\nypred = best_model.predict(x_test)\nypred = np.argmax(ypred, axis=1)\nytrue = np.argmax(y_test, axis=1)\ncvconfusion[:, :] = confusion_matrix(ytrue, ypred)\nF1 = np.zeros((6, 1))\nPrecision = np.zeros((6, 1))\nRecall = np.zeros((6, 1))\nAccuracy = 0\nfor i in range(6):\n    F1[i] = 2 * cvconfusion[i, i] / (\n            np.sum(cvconfusion[i, :]) + np.sum(cvconfusion[:, i]))\n    print(\"test F1 measure for {} rhythm: {:1.4f}\".format(classes[i], F1[i, 0]))\n    Precision[i] = cvconfusion[i, i] / np.sum(cvconfusion[:, i])\n    Recall[i] = cvconfusion[i, i] / np.sum(cvconfusion[i, :])\n    Accuracy += cvconfusion[i, i] / np.sum(cvconfusion[:, :])\nprint(\"test Overall F1 measure: {:1.4f}\".format(np.mean(F1[0:6])))\n\n# Evaluate the best 20 models (only a convenience shortcut; it is recommended to retrain the models)\nbest_models = clf.tuner.get_best_models(num_models=20)\nfor j in range(20):\n    print('*************************----best_model_'+str(j)+'----*************************')\n    model = best_models[j][2]\n    cvconfusion = np.zeros((NUM_CLASSES, NUM_CLASSES))\n    ypred = model.predict(x_test)\n    ypred = np.argmax(ypred, axis=1)\n    ytrue = np.argmax(y_test, axis=1)\n    cvconfusion[:, :] = confusion_matrix(ytrue, ypred)\n    F1 = np.zeros((6, 1))\n    Precision = np.zeros((6, 1))\n    Recall = np.zeros((6, 1))\n    Accuracy = 0\n    for i in range(6):\n        F1[i] = 2 * cvconfusion[i, i] / (\n                np.sum(cvconfusion[i, :]) + np.sum(cvconfusion[:, i]))\n        print(\"test F1 measure for {} rhythm: {:1.4f}\".format(classes[i], F1[i, 0]))\n        Precision[i] = cvconfusion[i, i] / np.sum(cvconfusion[:, i])\n        Recall[i] = cvconfusion[i, i] / np.sum(cvconfusion[i, :])\n        Accuracy += cvconfusion[i, i] / np.sum(cvconfusion[:, :])\n    print(\"test Overall F1 measure: {:1.4f}\".format(np.mean(F1[0:6])))\n","sub_path":"NAS/test_uci/Dense.py","file_name":"Dense.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"90937515","text":"import json\n\nfrom django.http import HttpResponse\nfrom django.conf import settings\n\n\ndef render_json(data, code=0):\n\n    json_data = {\n        'data': data,\n        'code': code\n    }\n    if settings.DEBUG:\n        result = json.dumps(json_data, ensure_ascii=False, indent=4, sort_keys=True)\n    else:\n        result = json.dumps(json_data, ensure_ascii=False, separators=[',', ':'])\n    return HttpResponse(result)\n","sub_path":"lib/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"68030969","text":"\"\"\"\nImplementation for other.py\n\"\"\"\nfrom operator import itemgetter\nimport error\nimport database\nimport channels\nimport input_checkers\n\n@input_checkers.validate_token\ndef users_all(token):\n    \"\"\"\n    returns a list of dictionaries for all users\n    \"\"\"\n    # pylint: disable=unused-argument\n    # NB: Suppressed this warning because token is in fact used in\n    # the decorator, however pylint doesn't check for this.\n    users_list = ([usr for usr in database.get_users()\n                   if database.get_permission_dict(usr['u_id']).get('permission_id') != 66])\n\n    return {\"users\": users_list}\n\n@input_checkers.validate_token\ndef search(token, query_str):\n    \"\"\"\n    returns all messages from channels the user is in that contain the query string\n    \"\"\"\n    if query_str == \"\":\n        raise error.InputError(description=\"search received 
an empty query string\")\n # user_channels = {'channels': [], []}\n user_channels = channels.channels_list(token)\n message_list = []\n for channel in user_channels['channels']:\n channel_data = database.get_channel_data(channel['channel_id'])\n for message in channel_data['messages']:\n if query_str in message['message']:\n message_list.append(message)\n message_list.sort(key=itemgetter('time_created'), reverse=True)\n return {\"messages\" : message_list}\n","sub_path":"src/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"463635884","text":"\"\"\"\nTest all OSSFile related methods\n\"\"\"\n# pylint:disable=invalid-name\n# pylint:disable=missing-function-docstring\n# pylint:disable=protected-access\nimport io\nimport os\n\nimport pytest\n\nfiles = {\n \"LICENSE\": (\n b\" Apache License\\n\"\n b\" Version 2.0, January 2004\\n\"\n b\" http://www.apache.org/licenses/\\n\"\n ),\n \"number\": (b\"1234567890\\n\"),\n}\n\nglob_files = {\"file.dat\": b\"\", \"filexdat\": b\"\"}\n\n\ndef test_simple(ossfs, test_path):\n file = test_path + \"/test_simple/file\"\n data = os.urandom(10 * 2 ** 20)\n\n with ossfs.open(file, \"wb\") as f:\n f.write(data)\n\n with ossfs.open(file, \"rb\") as f:\n out = f.read(len(data))\n assert len(data) == len(out)\n assert out == data\n\n\ndef test_seek(ossfs, test_path):\n file = test_path + \"/test_seek/file\"\n with ossfs.open(file, \"wb\") as f:\n f.write(b\"123\")\n\n with ossfs.open(file) as f:\n f.seek(1000)\n with pytest.raises(ValueError):\n f.seek(-1)\n with pytest.raises(ValueError):\n f.seek(-5, 2)\n with pytest.raises(ValueError):\n f.seek(0, 10)\n f.seek(0)\n assert f.read(1) == b\"1\"\n f.seek(0)\n assert f.read(1) == b\"1\"\n f.seek(3)\n assert f.read(1) == b\"\"\n f.seek(-1, 2)\n assert f.read(1) == b\"3\"\n f.seek(-1, 1)\n f.seek(-1, 1)\n assert f.read(1) == b\"2\"\n for i in range(4):\n assert f.seek(i) == i\n\n\ndef test_read_small(ossfs, test_bucket_name):\n fn = test_bucket_name + \"/number\"\n with ossfs.open(fn, \"rb\", block_size=3) as f:\n out = []\n while True:\n data = f.read(2)\n if data == b\"\":\n break\n out.append(data)\n assert ossfs.cat(fn) == b\"\".join(out)\n\n\ndef test_read_ossfs_block(ossfs, test_bucket_name):\n data = files[\"LICENSE\"]\n lines = io.BytesIO(data).readlines()\n path = test_bucket_name + \"/LICENSE\"\n assert ossfs.read_block(path, 0, 10, b\"\\n\") == lines[0]\n assert ossfs.read_block(path, 40, 10, b\"\\n\") == lines[1]\n assert ossfs.read_block(path, 0, 80, b\"\\n\") == lines[0] + lines[1]\n assert ossfs.read_block(path, 0, 120, b\"\\n\") == data\n\n data = files[\"number\"]\n lines = io.BytesIO(data).readlines()\n path = test_bucket_name + \"/number\"\n assert len(ossfs.read_block(path, 0, 5)) == 5\n assert len(ossfs.read_block(path, 4, 150)) == len(data) - 4\n assert ossfs.read_block(path, 20, 25) == b\"\"\n\n assert ossfs.read_block(path, 5, None) == ossfs.read_block(path, 5, 25)\n\n\n@pytest.mark.parametrize(\"size\", [2 ** 10, 2 ** 20, 10 * 2 ** 20])\ndef test_write(ossfs, test_path, size):\n file = test_path + \"/test_write/file\"\n data = os.urandom(size)\n with ossfs.open(file, \"wb\") as f:\n f.write(data)\n assert ossfs.cat(file) == data\n assert ossfs.info(file)[\"Size\"] == len(data)\n ossfs.open(file, \"wb\").close()\n assert ossfs.info(file)[\"Size\"] == 0\n\n\ndef test_write_fails(ossfs, test_path):\n file = test_path + \"/test_write_fails/temp\"\n ossfs.touch(file)\n 
with pytest.raises(ValueError):\n ossfs.open(file, \"rb\").write(b\"hello\")\n f = ossfs.open(file, \"wb\")\n f.close()\n with pytest.raises(ValueError):\n f.write(b\"hello\")\n with pytest.raises(FileNotFoundError):\n ossfs.open(\"nonexistentbucket/temp\", \"wb\").close()\n\n\ndef test_write_blocks(ossfs, test_path):\n file = test_path + \"/test_write_blocks/temp\"\n with ossfs.open(file, \"wb\") as f:\n f.write(os.urandom(2 * 2 ** 20))\n assert f.buffer.tell() == 2 * 2 ** 20\n f.flush()\n assert f.buffer.tell() == 2 * 2 ** 20\n f.write(os.urandom(2 * 2 ** 20))\n f.write(os.urandom(2 * 2 ** 20))\n assert ossfs.info(file)[\"Size\"] == 6 * 2 ** 20\n with ossfs.open(file, \"wb\", block_size=10 * 2 ** 20) as f:\n f.write(os.urandom(15 * 2 ** 20))\n assert f.buffer.tell() == 0\n assert ossfs.info(file)[\"Size\"] == 15 * 2 ** 20\n\n\ndef test_readline(ossfs, test_bucket_name):\n all_items = files.items()\n for k, data in all_items:\n with ossfs.open(\"/\".join([test_bucket_name, k]), \"rb\") as f:\n result = f.readline()\n expected = data.split(b\"\\n\")[0] + (\n b\"\\n\" if data.count(b\"\\n\") else b\"\"\n )\n assert result == expected\n\n\ndef test_readline_empty(ossfs, test_path):\n file = test_path + \"/test_readline_empty/empty\"\n data = b\"\"\n with ossfs.open(file, \"wb\") as f:\n f.write(data)\n with ossfs.open(file, \"rb\") as f:\n result = f.readline()\n assert result == data\n\n\ndef test_readline_blocksize(ossfs, test_path):\n test_file_a = test_path + \"/test_readline_blocksize/a\"\n data = b\"ab\\n\" + b\"a\" * (10 * 2 ** 20) + b\"\\nab\"\n with ossfs.open(test_file_a, \"wb\") as f:\n f.write(data)\n with ossfs.open(test_file_a, \"rb\") as f:\n result = f.readline()\n expected = b\"ab\\n\"\n assert result == expected\n\n result = f.readline()\n expected = b\"a\" * (10 * 2 ** 20) + b\"\\n\"\n assert result == expected\n\n result = f.readline()\n expected = b\"ab\"\n assert result == expected\n\n\ndef test_next(ossfs, test_bucket_name):\n expected = files[\"LICENSE\"].split(b\"\\n\")[0] + b\"\\n\"\n with ossfs.open(test_bucket_name + \"/LICENSE\") as f:\n result = next(f)\n assert result == expected\n\n\ndef test_iterable(ossfs, test_path):\n file = test_path + \"/test_iterable/file\"\n data = b\"abc\\n123\"\n with ossfs.open(file, \"wb\") as f:\n f.write(data)\n with ossfs.open(file) as f, io.BytesIO(data) as g:\n for fromossfs, fromio in zip(f, g):\n assert fromossfs == fromio\n f.seek(0)\n assert f.readline() == b\"abc\\n\"\n assert f.readline() == b\"123\"\n f.seek(1)\n assert f.readline() == b\"bc\\n\"\n\n with ossfs.open(file) as f:\n out = list(f)\n with ossfs.open(file) as f:\n out2 = f.readlines()\n assert out == out2\n assert b\"\".join(out) == data\n\n\ndef test_file_status(ossfs, test_path):\n file = test_path + \"/test_file_status/file\"\n with ossfs.open(file, \"wb\") as f:\n assert not f.readable()\n assert not f.seekable()\n assert f.writable()\n\n with ossfs.open(file, \"rb\") as f:\n assert f.readable()\n assert f.seekable()\n assert not f.writable()\n\n\n@pytest.mark.parametrize(\"data_size\", [0, 20, 10 * 2 ** 20])\n@pytest.mark.parametrize(\"append_size\", [0, 20, 10 * 2 ** 20])\ndef test_append(ossfs, test_path, data_size, append_size):\n file = test_path + \"/test_append/file_{}_{}\".format(data_size, append_size)\n data = os.urandom(data_size)\n extra = os.urandom(append_size)\n with ossfs.open(file, \"wb\") as f:\n f.write(data)\n assert ossfs.cat(file) == data\n with ossfs.open(file, \"ab\") as f:\n f.write(extra) # append, write, small file\n assert 
ossfs.cat(file) == data + extra\n\n\ndef test_bigger_than_block_read(ossfs, test_bucket_name):\n    with ossfs.open(test_bucket_name + \"/number\", \"rb\", block_size=3) as f:\n        out = []\n        while True:\n            data = f.read(4)\n            out.append(data)\n            if len(data) == 0:\n                break\n    assert b\"\".join(out) == b\"1234567890\\n\"\n\n\ndef test_text_io__stream_wrapper_works(ossfs, test_path):\n    \"\"\"Ensure using TextIOWrapper works.\"\"\"\n    file = test_path + \"/test_text_io__stream_wrapper_works/file\"\n    with ossfs.open(file, \"wb\") as fd:\n        fd.write(\"\\u00af\\\\_(\\u30c4)_/\\u00af\".encode(\"utf-16-le\"))\n\n    with ossfs.open(file, \"rb\") as fd:\n        with io.TextIOWrapper(fd, \"utf-16-le\") as stream:\n            assert stream.readline() == \"\\u00af\\\\_(\\u30c4)_/\\u00af\"\n\n\ndef test_text_io__basic(ossfs, test_path):\n    \"\"\"Text mode is now allowed.\"\"\"\n    file = test_path + \"/test_text_io__basic/file\"\n    with ossfs.open(file, \"w\", encoding=\"utf-8\") as fd:\n        fd.write(\"\\u00af\\\\_(\\u30c4)_/\\u00af\")\n\n    with ossfs.open(file, \"r\", encoding=\"utf-8\") as fd:\n        assert fd.read() == \"\\u00af\\\\_(\\u30c4)_/\\u00af\"\n\n\ndef test_text_io__override_encoding(ossfs, test_path):\n    \"\"\"Allow overriding the default text encoding.\"\"\"\n    file = test_path + \"/test_text_io__override_encoding/file\"\n\n    with ossfs.open(file, \"w\", encoding=\"ibm500\") as fd:\n        fd.write(\"Hello, World!\")\n\n    with ossfs.open(file, \"r\", encoding=\"ibm500\") as fd:\n        assert fd.read() == \"Hello, World!\"\n\n\ndef test_readinto(ossfs, test_path):\n    file = test_path + \"/test_readinto/file\"\n\n    with ossfs.open(file, \"wb\") as fd:\n        fd.write(b\"Hello, World!\")\n\n    contents = bytearray(15)\n\n    with ossfs.open(file, \"rb\") as fd:\n        assert fd.readinto(contents) == 13\n\n    assert contents.startswith(b\"Hello, World!\")\n\n\ndef test_seek_reads(ossfs, test_path):\n    file = test_path + \"/test_seek_reads/file\"\n    with ossfs.open(file, \"wb\") as f:\n        f.write(os.urandom(5627146))\n    with ossfs.open(file, \"rb\", blocksize=100) as f:\n        f.seek(5561610)\n        f.read(65536)\n\n        f.seek(4)\n        size = 562198\n        d2 = f.read(size)\n        assert len(d2) == size\n\n        f.seek(562288)\n        size = 562187\n        d3 = f.read(size)\n        assert len(d3) == size\n","sub_path":"tests/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"108068265","text":"#if21. Given the integer coordinates of a point on the plane.\r\n#If the point coincides with the origin, print 0.\r\n#If the point does not coincide with the origin but lies on\r\n#the OX or OY axis, print 1 or 2 respectively. If the\r\n#point does not lie on either coordinate axis, print 3.\r\n\r\nimport m_if\r\n\r\ncoor = m_if.get_coor()\r\n\r\nif coor == [0, 0]:\r\n    print(0)\r\nelif coor[1] == 0 :\r\n    print(1)\r\nelif coor[0] == 0:\r\n    print(2)\r\nelse:\r\n    print(3)\r\n\r\ninput(\"Press enter to continue.\")\r\n","sub_path":"python/if/if21.py","file_name":"if21.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"303063976","text":"import simpy\nimport random\nimport statistics\n\nwait_times =[]\n\nclass Theater(object):\n    def __init__(self,env, num_cashiers, num_servers, num_ushers):\n        # more coming here\n        self.env = env\n        self.cashier = simpy.Resource(env,num_cashiers)\n        self.usher = simpy.Resource(env,num_ushers)\n        self.server = simpy.Resource(env,num_servers)\n\n    def purchase_ticket(self,moviegoer):\n        yield self.env.timeout(random.randint(1,3))\n\n    def sell_food(self,moviegoer):\n        yield self.env.timeout(random.randint(1,5))\n\n    def check_ticket(self,moviegoer):\n        yield self.env.timeout(3/60)\n\ndef go_to_movies(env, moviegoer, theater):\n    #Moviegoer arrives at the theater\n    arrival_time = env.now\n    with theater.cashier.request() as request:\n        yield request\n        yield env.process(theater.purchase_ticket(moviegoer))\n\n    with theater.usher.request() as request:\n        yield request\n        yield env.process(theater.check_ticket(moviegoer))\n\n    if random.choice([True,False]):\n        with theater.server.request() as request:\n            yield request\n            yield env.process(theater.sell_food(moviegoer))\n\n    #Moviegoer heads into the theater\n    wait_times.append(env.now - arrival_time)\n\ndef run_theater(env,num_cashiers,num_servers,num_ushers):\n    theater = Theater(env,num_cashiers,num_servers,num_ushers)\n    for moviegoer in range(3):\n        env.process(go_to_movies(env, moviegoer, theater))\n    while True:\n        yield env.timeout(0.20)\n        moviegoer += 1.0\n        env.process(go_to_movies(env,moviegoer,theater))\n\n\ndef get_average_wait_time(wait_times):\n    average_wait = statistics.mean(wait_times)\n    minutes , frac_minutes = divmod(average_wait,1)\n    seconds = frac_minutes * 60\n    return round(minutes), round(seconds)\n\n# def calculate_wait_time(arrival_times, departure_times):\n#     average_wait = statistics.mean(wait_times)\n#     minutes , frac_minutes = divmod(average_wait,1)\n#     seconds = frac_minutes * 60\n#     return round(minutes), round(seconds)\n\n\ndef get_user_input():\n    num_cashiers = input('Input # of Cashiers working: ')\n    num_servers = input('Input # of Servers working: ')\n    num_ushers = input('Input # of Ushers working: ')\n    params = [num_cashiers, num_servers, num_ushers]\n    if all(str(i).isdigit() for i in params):\n        params = [int(x) for x in params]\n    else:\n        print ('Could not parse input. 
This simulation will use the '\n              'default values: ')\n        params = [1,1,1]\n    return params\n\n\ndef main():\n    random.seed(42)\n    num_cashiers, num_servers,num_ushers = get_user_input()\n    env = simpy.Environment()\n    env.process(run_theater(env, num_cashiers, num_servers, num_ushers))\n    env.run(until=90)\n    mins, secs = get_average_wait_time(wait_times)\n    print ('Running Simulation...',\n           f\"\\nThe average wait time is {mins} minutes and {secs} seconds.\",)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"playground/theater_simpy_simulation.py","file_name":"theater_simpy_simulation.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"285247799","text":"\n\n#class header\nclass _TOPLESS():\n\tdef __init__(self,): \n\t\tself.name = \"TOPLESS\"\n\t\tself.definitions = [u'used to describe someone, usually a woman, wearing nothing on the upper part of the body, or something connected with this way of dressing: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_topless.py","file_name":"_topless.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"353125515","text":"from __future__ import division\nimport numpy as np\nimport pandas as pd\nimport random\nfrom tweet2vec.settings_char import DEBUG_MODE, AL_EQUAL_CLASS_LEARNING, GROUP_VALIDATION_MODE, GV_AVERAGE, GV_ARGMAX\n\n\ndef predict_user(single_predictions, users, n_classes, nr_top_value=None, labeldict=None):\n    \"\"\"\n    Override single tweet-predictions by the predictions of their user\n    :param single_predictions: matrix of probabilities (tweet X classes)\n    :param users: list of authors for given tweets, has to be in same order as single_predictions\n    :param n_classes: number of distinct classes\n    :param nr_top_value: number of users that are returned\n    :return: Matrix of class-predictions (all tweets of a user have the same values);\n             Boolean matrix for tweets, telling whether they should be included in the training set\n             (only when nr_top_value is not None)\n    \"\"\"\n\n    np_single_predictions = np.array(single_predictions)\n    unique_users, user_indices, user_cnt = np.unique(users, return_inverse=True, return_counts=True)\n    avg_class_probabilities = np.zeros(len(unique_users) * n_classes).reshape(len(unique_users), n_classes)\n\n    if GROUP_VALIDATION_MODE == GV_AVERAGE:\n        # get average probability for each user as an array with a value for each class\n        for i in range(len(unique_users)):\n            avg_class_probabilities[i] = [sum(x) / user_cnt[i] for x in zip(*np_single_predictions[user_indices == i])]\n    elif GROUP_VALIDATION_MODE == GV_ARGMAX:\n        # get class-index for max-classification for each tweet\n        np_single_argmax = np.argmax(np_single_predictions, axis=1)\n        # set weights according to max-classified tweets\n        for i in range(len(unique_users)):\n            avg_class_probabilities[i] = np.bincount(np_single_argmax[user_indices == i],\n                                                     minlength=n_classes).astype('float32') / user_cnt[i]\n    else:\n        raise Exception(\"Invalid validation mode (%s) - please check settings.char\" % GROUP_VALIDATION_MODE)\n\n    # order by descending probability\n    rank = 
np.argsort(avg_class_probabilities)[:, ::-1]\n\n    if nr_top_value is None:\n        # default logic\n        return rank[user_indices]\n    else:\n        # check which users should be included in the training set\n        included_user_indices = get_included_user_indices(avg_class_probabilities, nr_top_value)\n        if DEBUG_MODE:\n            pd_frame = pd.DataFrame(avg_class_probabilities, index=unique_users,\n                                    columns=np.array([u'none'] + list(labeldict.keys())))\n            print(pd_frame)\n            print(\"---- users added to training set ----\")\n            print(pd_frame.loc[unique_users[included_user_indices]])\n\n        included_user_matrix = np.isin(user_indices, included_user_indices)\n        return rank[user_indices], included_user_matrix\n\n\ndef get_included_user_indices(avg_class_probabilities, nr_top_value):\n    \"\"\"\n    depending on the evaluation method this function returns an array of indices (into unique_users)\n    of users which should be included in the training set\n    :param avg_class_probabilities: average predictions per class\n    :param nr_top_value: number of users, which should be included\n    :return: indices of included users\n    \"\"\"\n    if AL_EQUAL_CLASS_LEARNING:\n        group_amount = int(nr_top_value/(len(avg_class_probabilities[0])-1))\n        # indices for highest predictions of a given class (-> user indices)\n        idx_max_class_prediction = np.argsort(avg_class_probabilities, axis=0)[:(group_amount*-1)-1:-1]\n        # indices for highest predictions of a given user (-> class indices)\n        idx_max_user_prediction = np.argmax(avg_class_probabilities, axis=1)\n        included_user_indices = []\n        for user_idx, class_idx in enumerate(idx_max_user_prediction):\n            if user_idx in idx_max_class_prediction[:, class_idx]:\n                # user is one of the highest predicted users of this class\n                included_user_indices.append(user_idx)\n    else:\n        # max value per row (how certain can a user be classified?)\n        certainty = np.amax(avg_class_probabilities, axis=1)\n        # include only top X users\n        included_user_indices = np.argsort(certainty)[:(int(nr_top_value)*-1)-1:-1]\n\n    return included_user_indices\n\n\ndef get_equal_subclasses(df):\n    \"\"\"\n    :param df: pandas-dataframe with either 3 (mapped to [user, group, tweet]) or 2 (mapped to 
[group, tweet]) columns\n    :return: pandas-dataframe in same format, containing the same number of tweets for all classes\n    \"\"\"\n    if len(df.columns) == 3:\n        df.columns = [\"user\", \"group\", \"tweet\"]\n    else:\n        df.columns = [\"group\", \"tweet\"]\n\n    groupby_obj = df.groupby(\"group\")\n    group_cnt = groupby_obj[\"tweet\"].count()\n    if len(df.columns) != 3:\n        df_new = df.groupby(\"group\").apply(lambda x: x.sample(group_cnt.min()))\n    else:\n        dfs = []\n        # get the mean number of tweets per user for the group with the least number of tweets\n        # this is used to get a similar number of users per group\n        mean_tweet_per_user = int(groupby_obj.get_group(group_cnt.idxmin()).groupby([\"user\"])[\"tweet\"].count().mean())\n        for group_name in groupby_obj.groups:\n            df_group = groupby_obj.get_group(group_name).sample(frac=1)\n            user_groupby_obj = df_group.groupby(\"user\")\n            tweets_left = group_cnt.min()\n            start_idx = 0\n            while tweets_left > 0:\n                user_list = list(user_groupby_obj.groups.keys())\n                random.shuffle(user_list)\n                for user_name in user_list:\n                    # try getting avg number of tweets\n                    df_user = user_groupby_obj.get_group(user_name)[start_idx:\n                                                                    start_idx + min(mean_tweet_per_user, tweets_left)]\n                    dfs.append(df_user)\n                    tweets_left -= df_user[\"tweet\"].count()\n                    if tweets_left <= 0:\n                        break\n                start_idx += mean_tweet_per_user\n\n        df_new = pd.concat(dfs)\n\n    return df_new\n\n\ndef get_equal_subclasses_arr(arr_user, arr_group, arr_tweet):\n    \"\"\"\n    maps three numpy-arrays to a pandas-dataframe and calls the subsampling function\n    :return: three subsampled numpy arrays\n    \"\"\"\n    # use tweet indices instead of tweets (to cope with multidimensionality)\n    tweet_indices = np.arange(np.size(arr_tweet,0))\n    cols = {\"user\":arr_user, \"group\": arr_group, \"tweet\": tweet_indices}\n    df_tweets = pd.DataFrame(cols)\n    # restore order (dict removes order)\n    df_tweets = df_tweets[[\"user\", \"group\", \"tweet\"]]\n    df_reduced = get_equal_subclasses(df_tweets)\n    return df_reduced[\"user\"].as_matrix(), df_reduced[\"group\"].as_matrix(), arr_tweet[df_reduced[\"tweet\"],:]","sub_path":"tweet2vec/adaptive_learning.py","file_name":"adaptive_learning.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"636853826","text":"import argparse\nimport csv\nimport os\nfrom datetime import datetime\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nimport models\nfrom utils import progress_bar\nfrom torch.utils.tensorboard import SummaryWriter\nimport os\nimport sys\nsys.argv = ['']\ndel sys\n# Parse command-line arguments\nparser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training Cutmix')\nparser.add_argument('--dataset', default='CIFAR-100', type=str, help='using data set')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--resume', '-r', action='store_true',\n                    help='resume from checkpoint')\nparser.add_argument('--model', default=\"ResNet18\", type=str,\n                    help='model type (default: ResNet18)')\nparser.add_argument('--name', default='0', type=str, help='name of run')\nparser.add_argument('--seed', default=1234, type=int, help='random seed')\nparser.add_argument('--batch-size', default=50, type=int, help='batch size')\nparser.add_argument('--epoch', default=100, type=int,\n                    help='total epochs to 
run')\nparser.add_argument('--no-augment', dest='augment', action='store_false',\n                    help='use standard augmentation (default: True)')\nparser.add_argument('--decay', default=1e-4, type=float, help='weight decay')\nparser.add_argument('--alpha', default=1., type=float,\n                    help='mixup interpolation coefficient (default: 1)')\nparser.add_argument('--cutmix_prob', default=0.5, type=float,\n                    help='cutmix probability')\nargs = parser.parse_args()\n\nuse_cuda = torch.cuda.is_available()\n\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Set the random seed\nif args.seed != 0:\n    print('Set random seed:', args.seed)\n    torch.manual_seed(args.seed)\n    torch.cuda.manual_seed(args.seed)\n    np.random.seed(args.seed)\ntorch.backends.cudnn.deterministic = True\n\n# Data loading and preprocessing\nprint('==> Preparing data..')\nif args.augment:\n    transform_train = transforms.Compose([\n        transforms.RandomCrop(32, padding=4),\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n        transforms.Normalize((0.4914, 0.4822, 0.4465),\n                             (0.2023, 0.1994, 0.2010)),\n    ])\nelse:\n    transform_train = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.4914, 0.4822, 0.4465),\n                             (0.2023, 0.1994, 0.2010)),\n    ])\n\ntransform_test = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize((0.4914, 0.4822, 0.4465),\n                         (0.2023, 0.1994, 0.2010)),\n])\n\nif args.dataset == 'CIFAR-10':\n    trainset = datasets.CIFAR10(root='./data', train=True, download=False,\n                                transform=transform_train)\n    trainloader = torch.utils.data.DataLoader(trainset,\n                                              batch_size=args.batch_size,\n                                              shuffle=True, num_workers=2)\n\n    testset = datasets.CIFAR10(root='./data', train=False, download=False,\n                               transform=transform_test)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=100,\n                                             shuffle=False, num_workers=2)\n    num_classes = 10\nelif args.dataset == 'CIFAR-100':\n    trainset = datasets.CIFAR100(root='./data', train=True, download=False,\n                                 transform=transform_train)\n    trainloader = torch.utils.data.DataLoader(trainset,\n                                              batch_size=args.batch_size,\n                                              shuffle=True, num_workers=2)\n\n    testset = datasets.CIFAR100(root='./data', train=False, download=False,\n                                transform=transform_test)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=100,\n                                             shuffle=False, num_workers=2)\n    num_classes = 100\n\n# Divide the learning rate by 10 every 30 epochs\ndef adjust_learning_rate(optimizer, epoch):\n    lr = 0.1 * (0.1 ** (epoch // 30))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n# Core CutMix function\ndef cutmix_data(x, y, alpha=1.0, use_cuda=True):\n    # lambda ~ Beta(alpha, alpha)\n    if alpha > 0:\n        lam = np.random.beta(alpha, alpha)\n    else:\n        lam = 1\n    batch_size = x.size()[0]\n    # pick a random image from the batch to mix with\n    if use_cuda:\n        index = torch.randperm(batch_size).cuda()\n    else:\n        index = torch.randperm(batch_size)\n    # y_a is the original label, y_b is the label of the mixed-in image\n    y_a, y_b = y, y[index]\n    # generate the patch region\n    bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)\n    # paste the patch\n    x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]\n    # adjust lambda to the exact area ratio of the replaced patch\n    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))\n    return x, y_a, y_b, lam\n\ndef rand_bbox(size, lam):\n    # W and H are the image width and height; for CIFAR-10, W = H = 32\n    W = size[2]\n    H = size[3]\n    # width and height of the patch\n    cut_rat = np.sqrt(1. 
- lam)\n    cut_w = int(W * cut_rat)\n    cut_h = int(H * cut_rat)\n\n    cx = np.random.randint(W)\n    cy = np.random.randint(H)\n    # compute the patch corner coordinates\n    bbx1 = np.clip(cx - cut_w // 2, 0, W)\n    bby1 = np.clip(cy - cut_h // 2, 0, H)\n    bbx2 = np.clip(cx + cut_w // 2, 0, W)\n    bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n    return bbx1, bby1, bbx2, bby2\n\n# CutMix loss: a linear combination of the cross-entropy losses for the original and mixed-in labels\ndef cutmix_criterion(criterion, pred, y_a, y_b, lam):\n    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\nif __name__ == '__main__':\n\n    print('==> Building model..')\n    # build the network\n    net = models.__dict__[args.model](num_classes=num_classes)\n    if use_cuda:\n        net.cuda()\n        net = torch.nn.DataParallel(net)\n        print(torch.cuda.device_count())\n        cudnn.benchmark = True\n        print('Using CUDA..')\n    if not os.path.isdir('results'):\n        os.mkdir('results')\n    # logname = ('results/log_' + net.__class__.__name__ + '_' + args.name + '_'\n    #            + str(args.seed) + '.csv')\n    # plain cross-entropy loss, still used for non-CutMix training batches and for all test batches\n    criterion = nn.CrossEntropyLoss()\n    # use SGD as the optimizer\n    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9,\n                          weight_decay=args.decay)\n    best_acc1 = 0\n    best_acc5 = 0\n    # time the experiment\n    start_time = datetime.now()\n    print(net)\n    # create the TensorBoard log directories\n    writer_train_loss = SummaryWriter('./runs' + args.dataset + args.model + '/train/' + 'cutmix')\n    writer_test_acc = SummaryWriter('./runs' + args.dataset + args.model + '/test/' + 'cutmix')\n    writer_test_acc5 = SummaryWriter('./runs' + args.dataset + args.model + '/test_top5/' + 'cutmix')\n\n    # Training\n    for epoch in range(start_epoch, args.epoch):\n        print('\\nEpoch: %d' % (epoch + 1))\n        adjust_learning_rate(optimizer, epoch + 1)\n        net.train()\n        train_loss = 0\n        reg_loss = 0\n        correct = 0\n        total = 0\n        for batch_idx, (inputs, targets) in enumerate(trainloader):\n            if use_cuda:\n                inputs, targets = inputs.cuda(), targets.cuda()\n            r = np.random.rand(1)\n            # randomly apply CutMix to the batch\n            if args.alpha > 0 and r < args.cutmix_prob:\n                inputs, targets_a, targets_b, lam = cutmix_data(inputs, targets,\n                                                                args.alpha, use_cuda)\n                inputs, targets_a, targets_b = map(Variable, (inputs,\n                                                              targets_a, targets_b))\n                outputs = net(inputs)\n                loss = cutmix_criterion(criterion, outputs, targets_a, targets_b, lam)\n                _, predicted = torch.max(outputs.data, 1)\n                total += targets.size(0)\n                correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n                            + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n            else:\n                outputs = net(inputs)\n                loss = criterion(outputs, targets)\n                _, predicted = torch.max(outputs.data, 1)\n                total += targets.size(0)\n                correct += predicted.eq(targets.data).cpu().sum().float()\n            train_loss += loss.item()\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n            progress_bar(batch_idx, len(trainloader),\n                         'Loss: %.3f | Reg: %.5f | Acc: %.3f%% (%d/%d)'\n                         % (train_loss/(batch_idx+1), reg_loss/(batch_idx+1),\n                            100.*correct/total, correct, total))\n            if batch_idx % 100 == 99:\n                print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '\n                      % (epoch + 1, (batch_idx + 1), train_loss / (batch_idx + 1), 100. 
* correct / total))\n                writer_train_loss.add_scalar('training loss',\n                                             train_loss / (batch_idx + 1),\n                                             epoch * len(trainloader) + batch_idx)\n        # Evaluation; disable gradient computation for efficiency\n        with torch.no_grad():\n            net.eval()\n            test_loss = 0\n            correct = 0\n            total = 0\n            top1 = 0\n            top5 = 0\n            for batch_idx, (inputs, targets) in enumerate(testloader):\n                if use_cuda:\n                    inputs, targets = inputs.cuda(), targets.cuda()\n                inputs, targets = Variable(inputs), Variable(targets)\n                outputs = net(inputs)\n                loss = criterion(outputs, targets)\n\n                test_loss += loss.item()\n                _, maxk = torch.topk(outputs.data, 5, dim=-1)\n                total += targets.size(0)\n                # _, predicted = torch.max(outputs.data, 1)\n                test_labels = targets.view(-1, 1) # reshape labels from [n] to [n,1] to compare [n,k]\n\n                top1 += (test_labels == maxk[:, 0:1]).sum().item()\n                top5 += (test_labels == maxk).sum().item()\n\n                # progress_bar(batch_idx, len(testloader),\n                #              'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n                #              % (test_loss / (batch_idx + 1), 100. * correct / total,\n                #                 correct, total))\n            acc1 = 100. * top1 / total\n            acc5 = 100. * top5 / total\n            # keep track of the best accuracy\n            best_acc1 = max(best_acc1, acc1)\n            best_acc5 = max(best_acc5, acc5)\n\n            # log the test accuracy to TensorBoard once per epoch\n            writer_test_acc.add_scalar('test accuracy',\n                                       top1 / total,\n                                       epoch + 1)\n            writer_test_acc5.add_scalar('test accuracy top5',\n                                        top5 / total,\n                                        epoch + 1)\n            print('Accuracy of the network on total {} test images: @top1={}%; @top{}={}%'.format(total,\n                                                                                                  100 * top1 / total, 5,\n                                                                                                  100 * top5 / total))\n\n        # print('Test\\'s accuracy is: %.3f%%' % (100 * correct / total))\n        checkpoint_path = './checkpoint/model_{}'.format(args.dataset + args.model + '_cutmix')\n        if epoch == args.epoch - 1:\n            torch.save({'state_dict': net.state_dict()},\n                       checkpoint_path + '.pth')\n            print('Model trained to epoch {} has been saved.'.format(epoch + 1))\n    # compute the total run time\n    end_time = datetime.now()\n    run_time = end_time - start_time\n\n    print('Train has finished, total epoch is %d' % args.epoch)\n    print(run_time)\n    print('Best accuracy is: %.3f%%' % best_acc1)\n    print('Best accuracy5 is: %.3f%%' % best_acc5)\n    logname = ('results/log_' + str(args.seed) + '.csv')\n\n    with open(logname, 'a') as logfile:\n        logwriter = csv.writer(logfile, delimiter=',')\n        logwriter.writerow(\n            ['dataset:', args.dataset, 'model:', args.model, 'arg_method:', 'cutmix', 'epoch:', args.epoch,\n             'batch_size',\n             args.batch_size, 'best accuracy1:', best_acc1, 'best accuracy5:', best_acc5])\n\n","sub_path":"train_cutmix.py","file_name":"train_cutmix.py","file_ext":"py","file_size_in_byte":12897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"341572112","text":"#!/Users/kentaro/anaconda3/bin/python3\n\nimport argparse\nimport pickle\nimport sys\nimport glob\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scipy import special\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ndef get_args():\n    parser = argparse.ArgumentParser(description=\"convert sequences into a single text file\")\n    parser.add_argument(\"-d\", \"--dir\", help=\"the name of directory to store files\", default=None)\n    #parser.add_argument(\"-e\", \"--e\", help=\"level of significance for autocorr test\", type = np.float32, default = 0.01)\n    return parser.parse_args()\n\ndef get_curr_dir():\n    import os\n    return os.getcwd().rstrip(\"/\")\n\ndef create_dir(dir):\n    import os\n    try:\n        if not os.path.exists(dir):\n            os.mkdir(dir)\n        if not os.path.exists(dir+\"/autocorr\"):\n            
os.mkdir(dir+\"/autocorr\")\n if not os.path.exists(dir+\"/figures\"):\n os.mkdir(dir+\"/figures\")\n except FileExistsError:\n pass\n\ndef listup_files(dir):\n return [os.path.abspath(p) for p in glob.glob(dir+\"/*\")]\n\ndef pro(seq):\n return np.sum(seq)/len(seq)\n\ndef xor(a,b):\n return np.bitwise_xor(a, b)\n\ndef auto(a, pro_a, l=1):\n import math\n from scipy import special\n b = [a[k] for k in range(l,len(a))]\n a = [a[k] for k in range(0,len(a)-l)]\n sample_size = len(a)\n SUM = sum([xor(a[k],b[k]) for k in range(sample_size)])\n pro = pro_a + pro_a - 2*pro_a*pro_a\n obs = (SUM-pro*sample_size)/math.sqrt(sample_size*pro*(1-pro))\n pval = special.erfc(abs(obs)/math.sqrt(2))\n return obs, pval\n\ndef convert_and_filter(pvals, blackth = 0.01, whiteth = 0.1):\n \"\"\"\n this is a function to filter values of data to be plotted\n value below blackth is black, above whiteth is white\n\n \"\"\"\n cols, rows = pvals.shape\n Z = np.zeros((rows,cols,3))\n #set RGV values\n for i in range(rows):\n for j in range(cols):\n if pvals[j][i] < blackth:\n Z[i,j] = [0, 0, 0] #black\n elif pvals[j][i] >= whiteth:\n Z[i,j] = [1, 1, 1] #white\n else:\n Z[i,j] = [0.5, 0.5, 0.5]\n return Z\n\ndef main(args):\n NQBITS = len(listup_files(args.dir+\"/text\"))\n NSAMPS = len(listup_files(args.dir+\"/sequence\"))\n\n pvals = np.ones((NQBITS, NSAMPS), dtype = np.float32)\n observed = np.ones((NQBITS, NSAMPS), dtype = np.float32)\n for qnum in range(NQBITS):\n f = open(f\"{args.dir}/text/qubit{qnum}.txt\", 'r')\n sequence = list(map(int,f.read()))\n NSHOTS = len(sequence)//NSAMPS\n for index in range(NSAMPS):\n print(f\"checking temporal correlation of qubit{qnum} sample{index+1}\")\n seq = sequence[index*NSHOTS:index*NSHOTS+NSHOTS]\n prop = pro(seq)\n obs, pval = auto(seq, prop, 1)\n pvals[qnum][index] = pval\n observed[qnum][index] = obs\n f.close()\n np.savetxt(f\"{args.dir}/autocorr/1_pvals.txt\", pvals)\n np.savetxt(f\"{args.dir}/autocorr/2_obs.txt\", observed)\n\n print(\"creating temporal correlation figure\")\n\n size = 18\n blackth = 0.01\n whiteth = 0.1\n xlim, ylim = pvals.shape\n plt.figure(num=None, figsize=(15, 20), dpi=100, facecolor='w', edgecolor='k')\n plt.imshow(convert_and_filter(pvals, blackth, whiteth), aspect=\"auto\", interpolation=\"nearest\")\n #plt.imshow(df, aspect=\"auto\", interpolation=\"nearest\")\n plt.xticks(np.arange(xlim, step=1), fontsize=size)\n plt.yticks(np.arange(ylim, step=10), fontsize=size)\n plt.ylabel(\"Sample number\", fontsize=size)\n plt.xlabel(\"Qubit number\", fontsize=size)\n\n colors = [ [0,0,0], [0.5,0.5,0.5], [1,1,1] ]\n labels = [ \"p-value < %.2f\" % blackth, \" %.2f\" % blackth + r\"$\\leq$\" + \"p-value < %.2f\" % whiteth, \"%.2f\" % whiteth + \"$\\leq$\" + \"p-value\" ]\n # create a patch (proxy artist) for every color\n patches = [ mpatches.Patch(color=colors[i], label=labels[i]) for i in range(3) ]\n # put those patched as legend-handles into the legend\n plt.legend(handles=patches, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, mode=\"expand\", borderaxespad=0., shadow=False, fontsize=size)\n\n pdf = PdfPages(args.dir+\"/figures/temporal_correlation_bitmap.pdf\")\n pdf.savefig()\n pdf.close()\n plt.close()\n\nif __name__ == \"__main__\":\n args = get_args()\n if args.dir is None:\n args.dir = get_curr_dir()\n create_dir(args.dir)\n else:\n args.dir = get_curr_dir() + \"/\" + args.dir\n create_dir(args.dir)\n main(args)\n 
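# Added sketch (not part of the original script): a quick self-check of the\n # lag-1 test on i.i.d. bits, whose statistic should be approximately\n # N(0, 1), so its p-value should rarely fall below 0.01.\n demo_bits = list(np.random.randint(0, 2, 10000))\n demo_obs, demo_pval = auto(demo_bits, pro(demo_bits), 1)\n print('self-check: obs=%.3f, pval=%.3f' % (demo_obs, demo_pval))\n 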
sys.exit(\"done\")\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"482656723","text":"num0 = \"zero\\n\"\nnum1 = \"Um\\n\"\nnum2 = \"Dois\\n\"\nnum3 = \"Três\\n\"\nnum4 = \"Quatro\\n\"\nnum5 = \"Cinco\\n\"\n\nnumero = -1\n\nwhile numero != 0:\n numero = int(input(\"Para encerrar o programa digite 0 \\nDigite um numero inteiro entre 1 e 5: \"))\n\n if numero < 1 or numero > 5:\n print(\"Numero inválido! Para encerrar o programa digite 0 \\n Digite um numero entre 1 e 5: \")\n\n if numero == 0:\n print(num0)\n if numero == 1:\n print(num1)\n\n if numero == 2:\n print(num2)\n \n if numero == 3:\n print(num3)\n \n if numero == 4:\n print(num4)\n \n if numero == 5:\n print(num5)\n\nprint(\"Programa finalizado!!!\")\n","sub_path":"Num_Interiro_A2.py","file_name":"Num_Interiro_A2.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"424346362","text":"import urllib.request\nimport urllib.parse\n\npost_url = 'https://fanyi.baidu.com/sug'\nfromdata = {\n 'kw': 'like'\n}\n\n# build the request object\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',\n}\nrequest = urllib.request.Request(url=post_url,headers=headers)\n# first, urlencode the POST parameters\nfromdata = urllib.parse.urlencode(fromdata).encode('utf8')\n# print(type(fromdata))\nresponse = urllib.request.urlopen(request,data=fromdata)\nprint(response.read().decode('utf8'))\n\n\n\n\n","sub_path":"day2早上/day2/4-post.py","file_name":"4-post.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"573797771","text":"#log \r\nimport time, os\r\n\r\n# PT log\r\n # 0: no print, 1: console, 2:file\r\n\r\ndef pt_itoe(dd):\r\n return (\"%E\" % dd)\r\n\r\ndef pt_itof(d):\r\n return (\"%f\" % d)\r\n\r\ndef pt_join_list_str(*ll):\r\n if len(ll) == 1 and (type(ll[0])== tuple or type(ll[0]) == list):\r\n return \", \".join(ll[0])\r\n else:\r\n return \", \".join(ll)\r\ndef pt_join_list_dec(*ll):\r\n if len(ll) == 1 and (type(ll[0])== tuple or type(ll[0]) == list):\r\n return \", \".join(map(str, ll[0]))\r\n else:\r\n return \", \".join(map(str, ll))\r\ndef pt_join_list_E(*ll):\r\n if len(ll) == 1 and (type(ll[0])== tuple or type(ll[0]) == list):\r\n return \", \".join(map(pt_itoe, ll[0]))\r\n else:\r\n return \", \".join(map(pt_itoe, ll))\r\ndef pt_atof(a):\r\n try:\r\n ret = float(a)\r\n return ret\r\n except:\r\n return 0\r\ndef pt_atoi(a):\r\n try:\r\n ret = int(a)\r\n return ret\r\n except:\r\n return 0\r\ndef pt_format_on(s):\r\n if str(s) == \"1\" or str(s).upper() == \"ON\":\r\n return \"ON\"\r\n else:\r\n return \"OFF\"\r\ndef pt_format_rep(s):\r\n if str(s) == \"1\" or str(s).upper() == \"CONT\" or str(s).upper() == \"CONTINUOUS\":\r\n return \"CONT\"\r\n else:\r\n return \"SING\"\r\ndef pt_lib_list_all_set(cmd, rep_cmd, para_type, *para_list):\r\n fmt = \"%%s %%d, %%%s\" % para_type\r\n if len(para_list) == 1 and (type(para_list[0])== tuple or type(para_list[0]) == list):\r\n ll = para_list[0]\r\n else:\r\n ll = para_list\r\n for i in range(len(ll)):\r\n cmd = cmd + fmt % (rep_cmd, i, ll[i])\r\n if i != len(ll) - 1:\r\n cmd = cmd + \";\"\r\n return cmd\r\n\r\ndef pt_get_clock():\r\n return time.time()\r\n\r\ndef pt_get_path_name(name):\r\n if 
name.find(\":\") != -1:\r\n file_name = name[name.rfind(os.sep) + 1:]\r\n file_path = name[0:name.rfind(os.sep) + 1]\r\n else:\r\n file_name = name\r\n file_path = \".%s\" % os.sep\r\n return file_path, file_name\r\n\r\ndef pt_split_str(line):\r\n if len(line) == 0:\r\n return ''\r\n line = line.strip('\\n')\r\n line = line.strip('\\r')\r\n line = line.lstrip()\r\n line = line.rstrip()\r\n return line.split(',')\r\n \r\n\r\n############pdb debug#####################\r\n_DEBUG = True\r\ndef pt_debug_break(enable = True):\r\n if _DEBUG == True and enable == True:\r\n import pdb\r\n pdb.set_trace()\r\n\r\n","sub_path":"tools/gpib_test/lib/pt_lib_utility.py","file_name":"pt_lib_utility.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"346539375","text":"# -*- coding: utf-8 -*-\nimport random\nimport re\n\nimport pandas as pd\n\nfrom music_tools.album import Album, get_tracks_from_albums\nfrom music_tools.artist import Artist\nfrom music_tools.shuffling import smart_shuffle\nfrom music_tools.playlist_utils import (\n clear_playlist,\n shuffle_playlist,\n tracks_from_playlist,\n)\nfrom music_tools.user import User\nfrom music_tools.utils import take_x_at_a_time\n\nDUMP_ID = \"5AZxg3qZIC7cGnxWa7EuSd\"\n\nREVIEW_ID = \"6P9AB5NtkXBmQxnTqFFoZK\"\n\nQ_IDS = {\n \"q - harder\": \"5mRa71QUmE6EWavxTA22g6\",\n \"q - hop\": \"0sFhYQaTiuZlG1vMDSiFMR\",\n \"q - jazz\": \"4HQnus8hcLfX5pYtG95pKY\",\n \"q - misc\": \"7DOqATuWsl640ustK8lhhI\",\n \"q - rock\": \"1tlzpLpRdQXUicLbhIJMcM\",\n}\n\nQ_PATTERNS = [\n (\"q - hop\", \".*hop.*\"),\n (\"q - harder\", \".*(core|doom|metal|punk).*\"),\n (\"q - jazz\", \".*jazz.*\"),\n (\"q - rock\", \".*rock.*\"),\n]\n\n\ndef refresh_review_playlist(user):\n print(\"Picking random albums for 'review' playlist\")\n random_albums = random.choices(user.albums(), k=12)\n review_tracks = get_tracks_from_albums(random_albums)\n\n print(\"Clearing 'review' playlist\")\n clear_playlist(user, REVIEW_ID)\n\n print(\"Shuffling 'review' tracks\")\n shuffled = smart_shuffle(review_tracks)\n\n print(\"Adding shuffled tracks to 'review'\")\n for subset in take_x_at_a_time(shuffled, 100):\n to_add = [track.id for track in subset]\n user.sp.user_playlist_add_tracks(user.username, REVIEW_ID, to_add)\n\n\n# Identify playlists for tracks\ndef separate_dump_tracks_to_q_playlists(user):\n print(\"Identifying 'dump' tracks\")\n dump_tracks = tracks_from_playlist(DUMP_ID)(user) # XXX\n\n print(f\"Found {len(dump_tracks)} tracks in 'dump' playlist\")\n\n if not dump_tracks:\n return\n\n print(\"Mapping tracks to 'q' playlists\")\n\n dump_frame = pd.DataFrame()\n dump_frame[\"track\"] = dump_tracks\n dump_frame[\"id\"] = [track.id for track in dump_frame[\"track\"]]\n dump_frame[\"album\"] = [Album(track.album_id) for track in dump_frame[\"track\"]]\n dump_frame[\"genres\"] = [\n tuple(\n sorted(\n set(\n genre\n for artist_id in album.artist_ids\n for genre in Artist(artist_id).genres\n )\n )\n )\n for album in dump_frame[\"album\"]\n ]\n dump_frame[\"playlist\"] = None\n\n for q, pattern in Q_PATTERNS:\n print(f\"Identifying '{q}' tracks\")\n compiled = re.compile(pattern)\n dump_frame[\"playlist\"] = [\n playlist\n if pd.notna(playlist)\n else q\n if any(map(compiled.fullmatch, genres))\n else None\n for playlist, genres in zip(dump_frame[\"playlist\"], dump_frame[\"genres\"])\n ]\n\n print(\"Identifying 'q - misc' tracks\")\n dump_frame.loc[dump_frame[\"playlist\"].isna(), \"playlist\"] = \"q - 
misc\"\n\n # Add tracks to playlists\n for q, q_id in Q_IDS.items():\n playlist_tracks = set(tracks_from_playlist(q_id)(user))\n\n tracks_to_add = set(\n dump_frame.loc[dump_frame[\"playlist\"] == q, \"id\"].values\n ) - playlist_tracks\n\n print(f\"Adding {len(tracks_to_add)} tracks to '{q}'\")\n for to_add in take_x_at_a_time(tracks_to_add, 100):\n user.sp.user_playlist_add_tracks(user.username, q_id, to_add)\n\n # Clear dump\n print(\"Clearing dump\")\n clear_playlist(user, DUMP_ID)\n\n\ndef prepare_q_playlists(user):\n user_tracks = set(user.all_tracks())\n\n for q, q_id in Q_IDS.items():\n playlist_tracks = set(tracks_from_playlist(q_id)(user))\n\n already_saved = {\n track.id for track in playlist_tracks & user_tracks\n }\n\n print(f\"Removing {len(already_saved)} saved tracks from '{q}'\")\n for to_remove in take_x_at_a_time(already_saved, 100):\n user.sp.user_playlist_remove_all_occurrences_of_tracks(\n user.username, q_id, to_remove\n )\n\n print(f\"Shuffling '{q}'\")\n shuffle_playlist(user, q_id)\n\n\nif __name__ == \"__main__\":\n user = User()\n\n refresh_review_playlist(user)\n\n separate_dump_tracks_to_q_playlists(user)\n\n prepare_q_playlists(user)\n","sub_path":"q_management.py","file_name":"q_management.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"558052518","text":"import numpy as np\n\nfrom hand_crafted_models.loss_functions import mean_squared_error\nfrom hand_crafted_models.optimization import (\n gradient_descent, closed_form_linear_algebra, GradientStep, WeightsAndBias,\n)\nfrom hand_crafted_models.utils import forward_pass\n\n\ndef _step(\n x: np.ndarray,\n y: np.ndarray,\n weights: np.ndarray,\n bias: np.ndarray,\n one: np.ndarray\n) -> GradientStep:\n \"\"\"\n Mean-squared-error: Calculate gradients for a given step.\n \n :param x: Input data [Batch, Features]\n :param y: Label data [Batch, 1]\n :param weights: Feature parameters [1, Features]\n :param bias: Bias parameter [1, 1]\n :param one: (Ignore) Vector of ones\n :return: step loss, weight gradients, bias gradient\n \"\"\"\n # Get predictions\n y_hat = forward_pass(x=x, weights=weights, bias=bias)\n # Calculate total loss value for current parameter values (e.g., MSE cost fn)\n loss = mean_squared_error(y_hat=y_hat, y=y)\n # Perform back-propagation to parameters\n # The MSE derivative\n d_y = 2 * (y - y_hat) # [B, 1]\n # The Weights derivative w.r.t. MSE loss fn\n d_w = d_y.T @ x # [1, B] @ [B, N] -> [1, N]\n # The Bias derivative w.r.t. 
MSE loss fn\n d_b = d_y.T @ one # [B, 1] -> [1]\n return loss, d_w, d_b\n\n\ndef get_beta_sgd(\n x: np.ndarray,\n y: np.ndarray,\n lr: float = 0.001,\n tol: float = 1e-6,\n max_grad: float = 10.0,\n max_loops: int = 10000\n) -> WeightsAndBias:\n \"\"\"\n Fit parameters using gradient descent.\n \n :param x: Input data [Batch, Features]\n :param y: Label data [Batch, 1]\n :param lr: Learning rate (i.e., optimizer step size)\n :param tol: Tolerance for early-stopping\n :param max_grad: (Optional) Max size of gradient\n :param max_loops: Maximum number of steps to take\n :return: weight gradients, bias gradient\n \"\"\"\n return gradient_descent(\n x=x,\n y=y,\n fn=_step,\n lr=lr,\n tol=tol,\n max_grad=max_grad,\n max_loops=max_loops\n )\n\n\ndef get_beta_linalg(\n x: np.ndarray,\n y: np.ndarray,\n add_bias: bool = True\n) -> WeightsAndBias:\n \"\"\"\n Fit parameters using matrices and linear algebra.\n \n :param x: Input data [Batch, Features]\n :param y: Label data [Batch, 1]\n :param add_bias: If 'true', append a column of ones to use for the bias\n :return: weight gradients, bias gradient\n \"\"\"\n return closed_form_linear_algebra(\n x=x,\n y=y,\n add_bias=add_bias\n )\n","sub_path":"hand_crafted_models/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"191387074","text":"import argparse\nfrom cryptoker import GetData\nfrom art import tprint\n\n#GetData.get_price(\"ETH\", \"USD\")\nparser = argparse.ArgumentParser(\n prog=\"cryptoker\",\n description= \"cryptoker - A simple crypto price tool.\"\n)\nparser.add_argument(\"Crypto\", choices=GetData.crypto_currencies, metavar=\"Crypto\", type=str.upper)\nparser.add_argument(\"Fiat\", default=\"USD\", choices=GetData.fiat, metavar=\"Fiat\", type=str.upper)\n\n# will maybe work on later\n# parser.add_argument(\"-l\", \"--list\", required=False, action=\"store_true\")\n\nparsed_args = parser.parse_args()\n\ndef main():\n #GetData.get_price(parsed_args.Crpyto, parsed_args.Fiat_Currency)\n print()\n tprint(parsed_args.Crypto, font=\"rand\")\n print(GetData.get_price(parsed_args.Crypto, parsed_args.Fiat))\n print()\n\nif __name__==\"__main__\":\n main()","sub_path":"src/cryptoker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"642318085","text":"#\n# 421. 
Maximum XOR of Two Numbers in an Array\n#\n# Q: https://leetcode.com/problems/maximum-xor-of-two-numbers-in-an-array/\n# A: https://leetcode.com/problems/maximum-xor-of-two-numbers-in-an-array/discuss/849679/Javascript-Python3-C%2B%2B-Trie-%2B-Greedy-Alternative-Path\n#\n\nfrom typing import List\n\nclass Solution:\n def findMaximumXOR(self, A: List[int], best = 0) -> int:\n root = {} # 🌲 trie\n for x in A:\n xor = 0\n cur = root # 👀 current path in trie for inserting binary representation of x\n alt = root # 🤔 alternative path for pre-existing values in trie\n for i in range(31, -1, -1):\n p = 1 if 0 < (x & (1 << i)) else 0 # 🚙 direction p and opposite 🚗 direction q\n q = p ^ 1\n cur[p] = cur[p] if p in cur else {} # 🚙 add direction p to 👀 current path (as needed)\n cur = cur[p]\n if q in alt: # 🚗 diff direction q for 🤔 alternative path (💰 greedily take this path whenever possible)\n alt = alt[q]; xor ^= (1 << i)\n else: # 🚙 same direction p for 🤔 alternative path\n alt = alt[p]\n best = max(best, xor) # 🎯 max xor\n return best\n","sub_path":"421_max_xor_two_nums.py","file_name":"421_max_xor_two_nums.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"508189137","text":"import json\nfrom tests.basetest import BaseTest\n\nmeetup = {\n\t\"topic\": \"Petetr\",\n\t\"location\": \"Gichia\",\n\t\"happeningOn\": \"peter@test.com\",\n\t\"tags\": [\"Web Design\"]\n}\n\nclass MeetupsTest(BaseTest):\n '''Class to add all meetup tests'''\n\n def test_post_meetup(self):\n url = \"/api/v2/meetups\"\n\n response = self.post(url, meetup)\n result = json.loads(response.data.decode(\"UTF-8\"))\n\n self.assertEqual(result[\"status\"], 201)\n","sub_path":"tests/test_meetups.py","file_name":"test_meetups.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"246434867","text":"'''\nThis program selects a random number between 1 and 20,\n and asks the user to guess it in at most 6 tries while giving hints after every wrong answer\n '''\nimport random #imports the \"random\" module\n#nothing\nguesses_taken = 0# initialize guesses_taken var at value of 0, it is a counter variable\n#nothing\nprint('Hello! 
What is your name?')#prints a string\nmyName = input()#assigns user input to myName variable\n#nothing\nnumber = random.randint(1, 20)#selects a random number between 1 and 20, assigns it to \"number\" variable\nprint('Well, ' + myName + ', I am thinking of a number between 1 and 20.')#prints a string with myName value inside it\n#nothing\nwhile guesses_taken < 6:#starting while loop with condition based on \"guesses_taken\" value, it will run until it reaches a value of 6 or greater\n print('Take a guess.')#prints a string\n guess = input()#assigns user input to guess variable\n guess = int(guess)#changes guess var type from str to int\n#nothing\n guesses_taken += 1#increases guesses_taken value by 1\n#nothing\n if guess < number:#conditional statement, checks if guess value is lower than number value\n print('Your guess is too low.')#prints a string, it runs only if condition above is fulfilled\n#nothing\n if guess > number:#conditional statement, checks if guess value is higher than number value\n print('Your guess is too high.')#prints a string, it runs only if condition above is fulfilled\n#nothing\n if guess == number:#conditional statement, checks if guess value and number value are equal\n break#terminates while loop, regardless of its condition, it runs only if condition above is fulfilled\n#nothing, but we are out of the loop now\nif guess == number:#conditional statement, checks if guess value and number value are equal\n guesses_taken = str(guesses_taken)#changes guesses_taken var type from int to str\n print('Good job, ' + myName + '! You guessed my number in ' + guesses_taken + ' guesses!')#prints a string with myName and guesses_taken values inside it\n#nothing\nif guess != number:#conditional statement, checks if guess value and number value are unequal\n number = str(number)#changes number var type from int to str\n print('Nope. The number I was thinking of was ' + number)#prints a string with number value at the end\n#nothing\n","sub_path":"comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"395485858","text":"import argparse\nimport csv\nimport sys\nimport pprint\n\nimport numpy as np\nfrom scipy import linalg\n\n\ndef read_csv_into_2darray(csv_filepath):\n \"\"\"\n Read data from CSV file.\n\n The data should be organized in a 2D matrix, separated by comma. Each row\n corresponds to a PVS; each column corresponds to a subject. If a vote is\n missing, a 'nan' is put in place.\n\n :param csv_filepath: filepath to the CSV file.\n :return: the numpy array in 2D.\n \"\"\"\n with open(csv_filepath, 'rt') as datafile:\n datareader = csv.reader(datafile, delimiter=',')\n data = [row for row in datareader]\n return np.array(data, dtype=np.float64)\n\n\ndef weighed_nanmean_2d(a, wts, axis):\n \"\"\"\n Compute the weighted arithmetic mean along the specified axis, ignoring\n NaNs. 
It is similar to numpy's nanmean function, but with a weight.\n\n :param a: 1D array.\n :param wts: 1D array carrying the weights.\n :param axis: either 0 or 1, specifying the dimension along which the means\n are computed.\n :return: 1D array containing the mean values.\n \"\"\"\n\n assert len(a.shape) == 2\n assert axis in [0, 1]\n d0, d1 = a.shape\n if axis == 0:\n return np.divide(\n np.nansum(np.multiply(a, np.tile(wts, (d1, 1)).T), axis=0),\n np.nansum(np.multiply(~np.isnan(a), np.tile(wts, (d1, 1)).T), axis=0)\n )\n elif axis == 1:\n return np.divide(\n np.nansum(np.multiply(a, np.tile(wts, (d0, 1))), axis=1),\n np.nansum(np.multiply(~np.isnan(a), np.tile(wts, (d0, 1))), axis=1),\n )\n else:\n assert False\n\n\ndef one_or_nan(x):\n \"\"\"\n Construct a \"mask\" array with the same dimension as x, with element NaN\n where x has NaN at the same location; and element 1 otherwise.\n\n :param x: array_like\n :return: an array with the same dimension as x\n \"\"\"\n y = np.ones(x.shape)\n y[np.isnan(x)] = float('nan')\n return y\n\n\ndef get_sos_j(sig_r_j, o_ji):\n \"\"\"\n Compute SOS (standard deviation of score) for PVS j\n :param sig_r_j: \n :param o_ji: \n :return: array containing the SOS for PVS j\n \"\"\"\n den = np.nansum(one_or_nan(o_ji) /\n np.tile(sig_r_j ** 2, (o_ji.shape[1], 1)).T, axis=1)\n s_j_std = 1.0 / np.sqrt(np.maximum(0., den))\n return s_j_std\n\n\ndef run_alternating_projection(o_ji):\n \"\"\"\n Run Alternating Projection (AP) algorithm.\n\n :param o_ji: 2D numpy array containing raw votes. The first dimension\n corresponds to the PVSs (j); the second dimension corresponds to the\n subjects (i). If a vote is missing, the element is NaN.\n\n :return: dictionary containing results keyed by 'mos_j', 'sos_j', 'bias_i'\n and 'inconsistency_i'.\n \"\"\"\n J, I = o_ji.shape\n\n # video by video, estimate MOS by averaging over subjects\n psi_j = np.nanmean(o_ji, axis=1) # mean marginalized over i\n\n # subject by subject, estimate subject bias by comparing with MOS\n b_ji = o_ji - np.tile(psi_j, (I, 1)).T\n b_i = np.nanmean(b_ji, axis=0) # mean marginalized over j\n\n MAX_ITR = 1000\n DELTA_THR = 1e-8\n EPSILON = 1e-8\n\n itr = 0\n while True:\n\n psi_j_prev = psi_j\n\n # subject by subject, estimate subject inconsistency by averaging the\n # residue over stimuli\n r_ji = o_ji - np.tile(psi_j, (I, 1)).T - np.tile(b_i, (J, 1))\n sig_r_i = np.nanstd(r_ji, axis=0)\n sig_r_j = np.nanstd(r_ji, axis=1)\n\n # video by video, estimate MOS by averaging over subjects, inversely\n # weighted by residue variance\n w_i = 1.0 / (sig_r_i ** 2 + EPSILON)\n # mean marginalized over i:\n psi_j = weighed_nanmean_2d(o_ji - np.tile(b_i, (J, 1)), wts=w_i, axis=1)\n\n # subject by subject, estimate subject bias by comparing with MOS,\n # inversely weighted by residue variance\n b_ji = o_ji - np.tile(psi_j, (I, 1)).T\n # mean marginalized over j:\n b_i = np.nanmean(b_ji, axis=0)\n\n itr += 1\n\n delta_s_j = linalg.norm(psi_j_prev - psi_j)\n\n msg = 'Iteration {itr:4d}: change {delta_psi_j}, psi_j {psi_j}, ' \\\n 'b_i {b_i}, sig_r_i {sig_r_i}'.format(\n itr=itr, delta_psi_j=delta_s_j, psi_j=np.mean(psi_j),\n b_i=np.mean(b_i), sig_r_i=np.mean(sig_r_i))\n\n sys.stdout.write(msg + '\\r')\n sys.stdout.flush()\n\n if delta_s_j < DELTA_THR:\n break\n\n if itr >= MAX_ITR:\n break\n\n psi_j_std = get_sos_j(sig_r_j, o_ji)\n sys.stdout.write(\"\\n\")\n\n mean_b_i = np.mean(b_i)\n b_i -= mean_b_i\n psi_j += mean_b_i\n\n return {\n 'mos_j': list(psi_j),\n 'sos_j': list(psi_j_std),\n 'bias_i': list(b_i),\n 
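# (added comment, not in the original) sig_r_i is each subject's residual\n # standard deviation; the key below reports it as that subject's\n # inconsistency.\n 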
'inconsistency_i': list(sig_r_i),\n }\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--input-csv\", dest=\"input_csv\", nargs=1, type=str,\n help=\"Filepath to input CSV file. The data should be organized in a 2D \"\n \"matrix, separated by comma. The rows correspond to PVSs; the \"\n \"columns correspond to subjects. If a vote is missing, input 'nan'\"\n \" instead.\", required=True)\n\n args = parser.parse_args()\n input_csv = args.input_csv[0]\n\n o_ji = read_csv_into_2darray(input_csv)\n\n ret = run_alternating_projection(o_ji)\n\n pprint.pprint(ret)\n\n\n","sub_path":"itut_p910_demo/demo_p910.py","file_name":"demo_p910.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"115044563","text":"import utils\n\nimport numpy as np\nimport pyqtgraph as pg\nimport pyaudio\nfrom PyQt5 import QtCore, QtGui\n\nimport matplotlib.pyplot as plt\n\nclass SpectrogramWidget(pg.PlotWidget):\n\n read_collected = QtCore.pyqtSignal(np.ndarray)\n\n def __init__(self, spectrum_analyzer, max_freq=22000):\n super(SpectrogramWidget, self).__init__()\n\n self.max_freq = max_freq\n\n freqs = spectrum_analyzer.get_freqs()\n nyquist_freq = freqs[-1]\n print('Nyquist freq:', nyquist_freq)\n print('Raw number of freqs:', len(freqs))\n\n self.crop_index = len(freqs)\n if max_freq < nyquist_freq:\n self.crop_index = int(len(freqs) * max_freq / nyquist_freq)\n freqs = freqs[:self.crop_index]\n\n print('Max freq:', freqs[-1])\n print('Number of freqs:', len(freqs))\n\n # Make image\n self.img = pg.ImageItem()\n self.addItem(self.img)\n\n # Get chunk size and sample rate from spectrum analyzer\n nsamples = spectrum_analyzer.nsamples\n sample_rate = spectrum_analyzer.sample_rate\n\n # Instantiate image array\n self.img_array = np.zeros((150, self.crop_index))\n\n # Get colormap\n np_cmap = plt.get_cmap('viridis')\n cmap = utils.get_pyqt_cmap(np_cmap)\n lut = cmap.getLookupTable(0.0, 1.0, 256, alpha=False)\n\n # Set colormap\n self.img.setLookupTable(lut)\n self.img.setLevels([15,60])\n\n # Setup the correct scaling for y-axis\n yscale = freqs[-1] / self.img_array.shape[1]\n self.img.scale(nsamples / sample_rate, yscale)\n\n self.setLabel('left', 'Frequency', units='Hz')\n\n self.show()\n\n def update(self, chunk):\n\n spectrum = self.get_spectrum(chunk)[:self.crop_index]\n\n p10 = np.percentile(spectrum, 10)\n p90 = np.percentile(spectrum, 90)\n\n print('p10:', p10, '\\tp90:', p90, '\\t\\tmin:', spectrum.min(), '\\tmax:', spectrum.max())\n\n # Roll down one and replace leading edge with new data\n self.img_array = np.roll(self.img_array, -1, 0)\n self.img_array[-1:] = spectrum\n\n self.img.setImage(self.img_array, autoLevels=False)\n","sub_path":"lib/renderers/pyqt/spectrogram_widget_async.py","file_name":"spectrogram_widget_async.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"216520","text":"import gym\r\n\r\nfrom DQN import *\r\nenv = gym.make('CartPole-v0')\r\nRL = DeepQNetwork(2, 4,\r\n learning_rate=0.001,\r\n reward_decay=0.9,\r\n replace_target_iter=200,\r\n memory_size=2000,\r\n output_graph=True\r\n )\r\n\r\nresult = []\r\n# checkpoint = tf.train.get_checkpoint_state('models')\r\n# RL.saver = tf.train.import_meta_graph(checkpoint.model_checkpoint_path + '.meta')\r\n# RL.saver.restore(RL.sess, checkpoint.model_checkpoint_path)\r\nfor i_episode in range(180):\r\n 
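# (added comment, not in the original) the first 100 episodes only fill the\r\n # replay memory; RL.learn() runs once i_episode reaches 100, per the\r\n # condition below.\r\n 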
observation = env.reset()\r\n for t in range(10000):\r\n env.render()\r\n action = RL.choose_action(observation)\r\n observation_, reward, done, info = env.step(action)\r\n RL.store_transition(observation, action, reward, observation_)\r\n observation = observation_\r\n if i_episode >= 100:\r\n RL.learn()\r\n if done:\r\n result.append(t)\r\n print(i_episode,\" Finished in {} timesteps\".format(t+1))\r\n break\r\nRL.save_model()\r\nenv.close()","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"429700145","text":"from easygraphics.turtle import *\nfrom easygraphics import *\nimport math\nimport random\n\n\ndef distance(x1, y1, x2, y2):\n dx = x2 - x1\n dy = y2 - y1\n return math.hypot(dx, dy)\n\n\nlast_x = 0\nlast_y = 0\n\n\ndef smell(x, y):\n global last_x, last_y\n current_x = get_x()\n current_y = get_y()\n if (distance(current_x, current_y, x, y) > distance(last_x, last_y, x, y)):\n result = -1\n else:\n result = 1\n last_x = current_x\n last_y = current_y\n return result\n\n\ndef find_by_smell(x, y, angle=1):\n while is_run():\n fd(1)\n if smell(x, y) == -1:\n rt(angle)\n\n\ndef find_by_smell2(x, y, d1, d2, smell_turn, rand_turn):\n while is_run():\n fd(random.randint(d1, d2))\n lt(random.randint(-rand_turn, rand_turn))\n if smell(x, y) == -1:\n rt(smell_turn)\n\n\ncreate_world(800, 600)\nset_speed(100)\nrandom.seed(100)\n\nset_fill_color(\"red\")\nfill_circle(200, 200, 4)\n\n# find_by_smell(200,200)\n# find_by_smell(200,200,20)\n# find_by_smell(200,200,60)\nfind_by_smell(200, 200, 120)\n# find_by_smell2(200,200,1,2,60,10)\n# find_by_smell2(200,200,1,2,60,30)\n# find_by_smell2(200,200,1,2,60,120)\n\npause()\nclose_world()\n","sub_path":"examples/introduction.to.programming.with.turtle/windows_only/6-2-1.smell.py","file_name":"6-2-1.smell.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"127898130","text":"# partyFormFiller.py - Automatically fills in the form (English character).\n# entries that are not attending skip straight to the later steps\n# exit full-screen mode once the form has been filled in\n\nimport pyautogui, time, csv\n\n# Set these to the correct coordinates for your particular computer.\nschoolField = (439, 486)\nsubmitButton = (474, 544)\nsubmitButtonColor = (49, 123, 253)\nsubmitAnotherLink = (478, 422)\n\n# customer list from csv file\ncustomers = []\n\n# convert to dictionary variable\npartyFile = open('partyEnglish.csv', encoding='utf-8')\npartyReader = csv.reader(partyFile)\n\n# read data from csv file\nfor row in partyReader:\n if partyReader.line_num == 1:\n continue\n\n food = row[3]\n foods = food.split()\n customer = {\n 'school': row[0],\n 'attend': row[1],\n 'headcount': row[2],\n 'foodOption': foods,\n 'taboos': row[4],\n 'contact': row[5]\n }\n customers.append(customer)\npartyFile.close()\n\n# slow down pace of pyautogui action\npyautogui.PAUSE = 1\n\n# get form window on focus\npyautogui.click((100, 100))\n\n# full screen\npyautogui.hotkey('ctrl', 'command', 'f')\n\n\n# automate filling in form\nfor customer in customers:\n\n # To confirm the form is ready to fill in\n\n time.sleep(1)\n\n # To bottom of the page\n pyautogui.hotkey('command', 'down')\n\n # Wait until the form page has loaded.\n while not pyautogui.pixelMatchesColor(submitButton[0], submitButton[1], submitButtonColor):\n time.sleep(0.5)\n\n # To top of the page for input\n pyautogui.hotkey('command', 'up')\n\n schoolList = ['少林', '武当', '昆仑', '峨眉', '五岳', '逍遥', '丐帮', '全真', '古墓', '移花宫']\n school = int(customer['school'])\n print('Entering {} info...'.format(schoolList[school-1]))\n\n # Click the first field\n pyautogui.click(schoolField[0], schoolField[1])\n\n # Fill out the school (门派) field.\n for order in range(school):\n pyautogui.press('down')\n pyautogui.press('enter')\n pyautogui.press('\\t')\n\n # Fill out the attendance (是否参加) field.\n if customer['attend'] == '1':\n\n pyautogui.typewrite(['space', '\\t'])\n\n # Fill out the headcount (多少人) field.\n pyautogui.typewrite(customer['headcount'] + '\\t', 0.25)\n\n # Fill out the food (食物) field.\n # 1 - rougamo; 2 - Shandong pancake; 3 - hand-torn pancake; 4 - jianbing; 5 - steamed bun; 6 - meat bun; 7 - vegetarian bun\n\n if '1' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n if '2' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n if '3' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n if '4' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n if '5' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n if '6' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n if '7' in customer['foodOption']:\n pyautogui.typewrite(['space', '\\t'])\n else:\n pyautogui.press('\\t')\n\n # Fill out the dietary restrictions (禁忌) field.\n pyautogui.typewrite(customer['taboos'] + '\\t', 0.25)\n\n # Fill out the phone number (手机号码) field.\n pyautogui.typewrite(customer['contact'] + '\\t', 0.25)\n\n # Click Submit.\n pyautogui.press('enter')\n\n elif customer['attend'] == '2':\n pyautogui.typewrite(['down', 'space'])\n pyautogui.click(100, 100)\n # To bottom of the page\n pyautogui.hotkey('command', 'down')\n pyautogui.click(submitButton)\n\n elif customer['attend'] == '3':\n pyautogui.typewrite(['down', 'down', 'space'])\n pyautogui.click(100, 100)\n # To bottom of the page\n pyautogui.hotkey('command', 'down')\n pyautogui.click(submitButton)\n\n # Wait until form page has loaded.\n print('Clicked Submit.')\n\n # Click the Submit another response link.\n pyautogui.click(submitAnotherLink[0], submitAnotherLink[1])\n\n# exit full screen mode\npyautogui.hotkey('ctrl', 'command', 'f')\n","sub_path":"Python_ABC/2-19autoGUI/3partyFormFiller4.0(English).py","file_name":"3partyFormFiller4.0(English).py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"125130136","text":"\"\"\"Remove reads from a bam\"\"\"\nimport argparse, pysam, sys\n\ndef load_reads(rm_file):\n reads = {}\n with open(rm_file) as f:\n for line in f:\n reads[line.strip()] = True\n return reads\n\ndef main(args):\n reads = load_reads(args.rm_file)\n ext = args.sam_file.split('.')[-1]\n if ext == 'sam':\n samfile = pysam.AlignmentFile(args.sam_file, \"r\")\n elif ext == 'bam':\n samfile = pysam.AlignmentFile(args.sam_file, \"rb\")\n else:\n raise ValueError('unsupported extension: ' + ext)\n\n bam = pysam.AlignmentFile(args.out_file, \"wb\", template=samfile)\n\n for read in samfile:\n name = read.query_name\n if name not in reads:\n bam.write(read)\n samfile.close()\n bam.close()\n \nif __name__ == \"__main__\":\n desc = 'rm reads'\n parser = argparse.ArgumentParser(description=desc)\n argLs = ('sam_file', 'rm_file', 'out_file')\n for param in argLs:\n parser.add_argument(param)\n args = parser.parse_args()\n 
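# Usage sketch (added comment; the file names are hypothetical):\n #   python rm_cross_species.py input.bam names_to_remove.txt filtered.bam\n 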
main(args)\n","sub_path":"code/scripts/rm_cross_species.py","file_name":"rm_cross_species.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"478616366","text":"from flask import current_app\nfrom app import db\n\nclass Goal(db.Model):\n goal_id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String)\n tasks = db.relationship(\"Task\", backref=\"goal\", lazy=True)\n\n def create_response(self):\n return{\n \"id\": self.goal_id,\n \"title\": self.title,\n }\n \n def return_tasks(self):\n return {\n \"id\": self.goal_id,\n \"task_ids\": self.tasks\n }\n def return_goal_tasks(self):\n tasks_list = []\n for task in self.tasks:\n tasks_list.append(task.make_json())\n return{\n \"id\": self.goal_id,\n \"title\": self.title,\n \"tasks\": tasks_list\n }","sub_path":"app/models/goal.py","file_name":"goal.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"244696307","text":"#Import Modules\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport helper as hlp\r\n\r\n# Loading data\r\ndata = np.load('data2D.npy')\r\n#data = np.load('data100D.npy')\r\n[num_pts, dim] = np.shape(data)\r\nis_valid = 1\r\n\r\n# For Validation set\r\nif is_valid:\r\n valid_batch = int(num_pts / 3.0)\r\n np.random.seed(45689)\r\n rnd_idx = np.arange(num_pts)\r\n np.random.shuffle(rnd_idx)\r\n val_data = data[rnd_idx[:valid_batch]]\r\n data = data[rnd_idx[valid_batch:]]\r\n\r\n# Distance function for K-means\r\ndef distanceFunc(X, MU):\r\n # Inputs - X: is an NxD matrix (N observations and D dimensions), MU: is an KxD matrix (K means and D dimensions)\r\n # Outputs - pair_dist: is the pairwise distance matrix (NxK)\r\n \r\n X_squared = tf.reduce_sum(tf.square(X),axis=1,keepdims=True) #Nx1\r\n MU_squared = tf.reduce_sum(tf.square(MU),axis=1) #Kx1\r\n X_MU = tf.matmul(X,MU,transpose_b=True) #NxK matrix\r\n \r\n pair_dist = X_squared-2*X_MU+MU_squared\r\n \r\n return pair_dist\r\n\r\n#Model Parameters\r\nK = 5 #Number of clusters\r\nN = num_pts-valid_batch #Number of training data points\r\nD = dim # Dimension of data\r\nMAX_ITERS = 1000\r\nLEARNING_RATE = 0.01\r\nnp.random.seed(421) \r\n\r\n#Build K-means Graph\r\n#Reset to defaultgraph\r\ntf.reset_default_graph()\r\n\r\nif D==2:\r\n points = tf.placeholder(dtype=tf.float64, shape=[None,D],name='points')\r\n centroid_init = tf.truncated_normal(shape=[K,D],dtype = tf.float64)\r\n centroids = tf.get_variable(dtype = tf.float64,initializer = centroid_init, name = \"centroids\")\r\nelif D==100:\r\n point = tf.placeholder(tf.float32,shape=[None,D],name='points')\r\n centroid_init = tf.truncated_normal(shape=[K,D],dtype = tf.float32)\r\n centroids = tf.get_variable(dtype = tf.float32,initializer = centroid_init, name = \"centroids\")\r\n\r\n\r\ndistances = distanceFunc(points,centroids)\r\nassignment = tf.argmin(distances,axis=1)\r\n\r\nloss = tf.reduce_sum(tf.reduce_min(distances,axis=1,keepdims=True))\r\n\r\noptimizer = tf.train.AdamOptimizer(LEARNING_RATE,beta1=0.9,beta2=0.99,epsilon=1e-5).minimize(loss)\r\n\r\n#Train and Test K-means Model\r\nwith tf.Session() as sess:\r\n #Run the initializer\r\n init = tf.global_variables_initializer()\r\n sess.run(init) \r\n trainLoss = []\r\n \r\n #Train Model\r\n for epoch in range(MAX_ITERS): \r\n opt,current_trainLoss = sess.run([optimizer,loss], feed_dict={points:data})\r\n 
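# (added comment, not in the original) keep one scalar loss per epoch so the\r\n # learning-curve plots further down have a point for every iteration.\r\n 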
trainLoss.append(current_trainLoss)\r\n\r\n print(\"Optimization finished!\")\r\n \r\n #Test Model\r\n trainAssignment,trainCentroids = sess.run([assignment,centroids],feed_dict={points:data})\r\n validAssignment,validLoss = sess.run([assignment,loss],feed_dict={points:val_data})\r\n print(\"Validation Loss: \" +str(validLoss))\r\n clusterPct = np.zeros(K)\r\n for i in range(valid_batch):\r\n clusterPct[validAssignment[i]]+=1\r\n clusterPct*=100/valid_batch\r\n for i in range(K):\r\n print(\"Cluster\"+str(i+1)+\": \"+\"{0:.2f}\".format(round(clusterPct[i],2))+\"%\")\r\n\r\n#Print out plot\r\nif D==2:\r\n plt.figure(figsize=(10,10)) #figsize=(width,height)\r\n\r\n #Plot 1 - 2D Scatter\r\n plt.subplot(211)\r\n c_assignment = np.asarray(trainAssignment,dtype=np.float32).reshape(np.shape(data[:,0]))\r\n plt.scatter(data[:,0],data[:,1],c=c_assignment,cmap='jet',marker='.')\r\n plt.scatter(trainCentroids[:,0],trainCentroids[:,1],c='k',marker = 'o')\r\n plt.title(\"Kmeans Learning with K = \"+str(K))\r\n plt.ylabel('x1')\r\n plt.xlabel('x2')\r\n \r\n\r\n #Plot 2 - Loss\r\n plt.subplot(212)\r\n x_axis = np.arange(MAX_ITERS)+1\r\n plt.plot(x_axis,trainLoss,color='c',linewidth=2.0,label=\"Training\")\r\n plt.ylabel('Loss')\r\n plt.xlabel('Epochs')\r\n plt.legend()\r\n\r\nelse:\r\n #Plot 1 - Loss\r\n plt.figure(figsize=(10,5)) #figsize=(width,height)\r\n x_axis = np.arange(MAX_ITERS)+1\r\n plt.plot(x_axis,trainLoss,color='c',linewidth=2.0,label=\"Training\")\r\n plt.ylabel('Loss')\r\n plt.xlabel('Epochs')\r\n plt.title(\"Kmeans Learning with K = \"+str(K))\r\n plt.legend()\r\n \r\n \r\nplt.show()\r\n \r\n\r\n\r\n","sub_path":"a3/starter_kmeans.py","file_name":"starter_kmeans.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"430259848","text":"\"\"\"\nInfluxDB submission / communication implementations.\n\n\"\"\"\n\nimport socket\nimport time\nimport rospy\nfrom lg_common.logger import get_logger\nlogger = get_logger('stats_submitters')\n\n\nclass Submitter(object):\n \"\"\"\n LG Stats - base submission class for sending stats messages.\n\n \"\"\"\n\n def __init__(self):\n raise RuntimeError(\"Can't instantiate this base class directly.\")\n\n def get_data_for_influx(self, msg, measurement_name):\n raise RuntimeError(\"Base class method called, not implemented.\")\n\n def write_stats(self, data):\n raise RuntimeError(\"Base class method called, not implemented.\")\n\n @staticmethod\n def get_timestamp():\n raise RuntimeError(\"Base class method called, not implemented.\")\n\n\nclass InfluxDirect(Submitter):\n \"\"\"\n Direct connection to InfluxDB database.\n\n \"\"\"\n def __init__(self, host=None, port=None, database=None):\n from influxdb import InfluxDBClient\n self._client = InfluxDBClient(host=host, port=port, database=database)\n logger.info(\"InfluxDB (direct) client initialized (%s:%s/%s).\" % (host, port, database))\n\n @staticmethod\n def get_data_for_influx(msg, measurement_name):\n \"\"\"\n Prepare data for InfluxDB based on the ROS topic message that\n is sent to the debug topic, it contains all stats pertinent details.\n Direct InfluxDB submitter talks JSON.\n\n \"\"\"\n influx_dict = dict(tags=dict(measurement=measurement_name,\n topic=msg.src_topic,\n field_name=msg.field_name,\n type=msg.type,\n metadata=msg.metadata,\n span=msg.span,\n value=msg.value),\n # timestamp may be added here or will be added by the server\n # \"time\": \"2015-11-10T23:00:00Z\",\n # fields must be of type 
float\n fields=dict(value=0.0))\n return influx_dict\n\n def write_stats(self, data):\n \"\"\"\n Send data to InfluxDB database.\n The Python Influx library converts the Python dictionary to\n the default *line_protocol* before submitting to Influx.\n\n \"\"\"\n self._client.write_points([data])\n\n\nclass InfluxTelegraf(Submitter):\n \"\"\"\n Handles connection to InfluxDB via Telegraf submission agent.\n Telegraf accepts data through its tcp_listener.\n It accepts text message in the form of Influx line protocol via plain socket.\n\n Debugging:\n echo \"application,application=someapplication1,type=event value=0.0\" | nc localhost 8094\n (sent right to the telegraf tcp_listener port)\n\n Another format possibility is JSON, was not successful with\n sending JSON, still getting parsing errors.\n\n \"\"\"\n def __init__(self, host=None, port=None, database=None):\n self.host = host\n self.port = port\n logger.info(\"InfluxDB (telegraf-socket) client initialized (%s:%s).\" % (host, port))\n\n @staticmethod\n def get_data_for_influx(msg, measurement_name):\n \"\"\"\n Accept Event message as an input\n Return a string ready for influx submission\n Value is always a float\n \"\"\"\n try:\n influx_str = (\"\"\"%s topic_name=\"%s\",field_name=\"%s\",type=\"%s\",metadata=\"%s\",span=%s,value=%s %s\"\"\" %\n (measurement_name,\n msg.src_topic,\n msg.field_name,\n msg.type,\n msg.metadata,\n float(msg.span),\n float(msg.value),\n InfluxTelegraf.get_timestamp()))\n except TypeError:\n return ''\n\n return influx_str\n\n @staticmethod\n def get_timestamp():\n return rospy.Time.now().to_nsec()\n\n def write_stats(self, data):\n \"\"\"\n Input is a text message in the form of Influx line protocol.\n\n A socket connection connection and close is performed at each send operation.\n\n It's impossible to tell whether all data was sent or not.\n \"\"\"\n logger.debug(\"Going to write: '%s' to influx\" % data)\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (self.host, self.port)\n sock.settimeout(5.0)\n sock.connect(server_address)\n sock.sendall(data.encode('utf-8'))\n logger.debug(\"Wrote: '%s' to influx\" % data)\n except Exception as ex:\n logger.error(\"Socket error while sending data '%s' to %s, reason: %s\" %\n (data, server_address, ex))\n finally:\n sock.close()\n\n\nclass InfluxMock(Submitter):\n \"\"\"\n Mock test class which doesn't submit anything\n and does not report any Connection refused and stuff from the tests.\n\n \"\"\"\n def __init__(self, host=None, port=None, database=None):\n self.messages = []\n logger.info(\"InfluxDB Mock client initialized ... 
won't do anything.\")\n\n @staticmethod\n def get_data_for_influx(msg, measurement_name):\n logger.debug(\"%s called, received msg: '%s'\" % (InfluxMock.__class__.__name__, msg))\n influx_str = (\"\"\"%s topic_name=\"%s\",field_name=\"%s\",type=\"%s\",metadata=\"%s\",value=%s %s\"\"\" %\n (measurement_name,\n msg.src_topic,\n msg.field_name,\n msg.type,\n msg.metadata,\n float(msg.value),\n InfluxMock.get_timestamp()))\n return influx_str\n\n def write_stats(self, data):\n logger.debug(\"%s called, received msg: '%s'\" % (self.__class__.__name__, data))\n self.messages.append(data)\n\n @staticmethod\n def get_timestamp():\n \"\"\"\n This doesn't require ROS init_node.\n\n \"\"\"\n return time.time() * int(1e9)\n","sub_path":"lg_stats/src/lg_stats/submitters.py","file_name":"submitters.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"3142919","text":"# coding=utf-8\nimport random\n\nimport src.crawler.tb_crawler\nfrom src.crawler.util.cr_logging import cr_logger\n\nbaseUrlBeiguo = r'http://tieba.baidu.com/mo/m?kw=%E8%83%8C%E9%94%85'\nbaseUrlKangya = r'http://tieba.baidu.com/mo/m?kw=%E6%8A%97%E5%8E%8B'\nbaseUrlKangyaTemp = r'http://tieba.baidu.com/mo/m?kw=%E6%8A%97%E5%8E%8B&pn={0}'\nbaseUrlKangyaTemp = 'http://tieba.baidu.com/mo/m?pnum={0}&kw=%E6%8A%97%E5%8E%8B&lp=5009&pinf=1_2_0&sub=%E8%B7%B3%E9%A1%B5'\n\ncurrentMaxPage = 74363\n\n\ndef run_crawler():\n pnList = list(range(0, 25000))\n listLen = len(pnList)\n\n cr = src.crawler.tb_crawler.TbCrawler()\n\n while listLen > 0:\n index = pnList[random.randrange(0, len(pnList))]\n cr_logger.info(\"will process index: \" + str(index))\n print(\"index: \" + str(index))\n\n pageUrl = str.format(baseUrlKangyaTemp, str(index))\n try:\n cr.getPage(pageUrl)\n\n cr_logger.info('finish on page: ' + pageUrl)\n # break\n except BaseException as ex:\n cr_logger.warning('getPage error detail: ' + str(ex))\n\n pnList.remove(index)\n listLen = len(pnList)\n cr.close()\n\n\nif __name__ == '__main__':\n print(\"start crawling\")\n run_crawler()\n","sub_path":"run_crawler.py","file_name":"run_crawler.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"276279710","text":"from sequence import NeuriteSequence\n\nclass TreeIndexLogic():\n ''' \n A class which captures tree-specific logic given two characters.\n @param char1: First character\n @param char2: Second character\n '''\n def __init__(self, char1, char2):\n self.char1 = char1\n self.char2 = char2\n \n def get(self):\n if (self.char1 == 'A' and self.char2 == 'C') or (self.char2 == 'A' and self.char1 == 'C'):\n return 'A'\n if (self.char1 == 'A' and self.char2 == '-') or (self.char2 == 'A' and self.char1 == '-'):\n return 'A'\n if (self.char1 == 'T' and self.char2 == '-') or (self.char2 == 'T' and self.char1 == '-'):\n return 'T'\n if (self.char1 == 'C' and self.char2 == '-') or (self.char2 == 'C' and self.char1 == '-'):\n return 'C'\n raise Exception(\"Improper character alignment: \"+self.char1+\" with \"+self.char2)\n\nclass TreeLogicFactory():\n '''\n Parses and processes the composite string to ultimately yield a single\n string which encapsulates the pairwise alignment.\n '''\n def __init__(self, str1, str2):\n self.str1 = str1\n self.str2 = str2\n \n def get_alignment(self):\n ''' \n Simple function to merge two strings and produce a composite.\n @return: string representing the composite sequence.\n '''\n composite = ''\n for idx, char1 in enumerate(self.str1):\n char2 = self.str2[idx]\n if char1 == self.str2[idx]:\n composite += char1\n else:\n # Apply neuronal logic given two specific characters.\n composite += TreeIndexLogic(char1, char2).get()\n return composite\n","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"518393621","text":"from .simplewalk import simple_walk\n\nfrom errno import ENOTEMPTY\nfrom os.path import dirname\nfrom os import remove, rmdir\n\ndef clean_dir(root, condition):\n def try_rmdir(directory):\n try:\n rmdir(directory)\n except OSError as e:\n if e.errno != ENOTEMPTY:\n raise\n for path, filename in simple_walk(root):\n if not condition(path, filename):\n continue\n remove(path)\n try_rmdir(dirname(path))\n try_rmdir(root)","sub_path":"x19290/cleandir.py","file_name":"cleandir.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"94466443","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 11 16:17:04 2020\n\n@author: nhatten\n\"\"\"\n\n\"\"\"\n\nRevision history\n2018-12-18; Noble Hatten; begun\n\n\"\"\"\n\nfrom mathUtilities import mathUtilities\nfrom posVel2BPlaneOut import posVel2BPlaneOut\nfrom BPlane2PosVelOut import BPlane2PosVelOut\nimport autograd.numpy as np # use the autograd version of np\nfrom autograd import grad\nfrom autograd import jacobian\n\nif __name__ == \"__main__\":\n np.set_printoptions(precision=20)\n mathUtil = mathUtilities()\n\n # initial conditions\n #r = np.array([7000.0, 1000.0, -5000.0])\n #v = np.array([-1.0, 22.0, 7.0])\n \n # outgoing asymptote (TA = ~10 deg)\n r = np.array([-1389.182993860459, -6830.153393624416, -1183.630020141228])\n v = np.array([10.080073981938, -3.269814704898709, 1.400774599839738])\n \n \n # incoming asymptote (TA = ~350 deg)\n #r = np.array([-3636.648315948511, -5882.749052462134, -1466.239205353441])\n #v = np.array([9.416328692291867, -4.948766622733928, 1.050814939799521])\n \n \n mu = 3.986e5\n\n bplane = posVel2BPlaneOut()\n\n e = bplane.eVector(r, v, mu)\n h = bplane.hVector(r, v)\n S = bplane.sVector(r, v, mu)\n B = bplane.bVector(r, v, mu)\n R = bplane.rVector(r, v, mu)\n T = bplane.tVector(r, v, mu)\n BR = bplane.bDotR(r, v, mu)\n BT = bplane.bDotT(r, v, mu)\n theta = bplane.bTheta(r, v, mu)\n Bmag = bplane.bScalar(r, v, mu)\n rp = bplane.rPeri(r, v, mu)\n vinf = bplane.vInfMag(r, v, mu)\n RA = bplane.vInfRA(r, v, mu)\n Dec = bplane.vInfDec(r, v, mu)\n trueAnomaly = bplane.trueAnomaly(r, v, mu)\n\n # autograd derivatives\n \n # true anomaly\n d_trueAnomaly_d_r_func = jacobian(bplane.trueAnomaly, 0)\n d_trueAnomaly_d_r_ad = d_trueAnomaly_d_r_func(r, v, mu)\n d_trueAnomaly_d_v_func = jacobian(bplane.trueAnomaly, 1)\n d_trueAnomaly_d_v_ad = d_trueAnomaly_d_v_func(r, v, mu)\n \n d_trueAnomaly_d_x = bplane.trueAnomaly_derivs(r, v, mu)\n \n # e cross r\n d_eCrossR_d_r_func = jacobian(bplane.eCrossR, 0)\n d_eCrossR_d_r_ad = d_eCrossR_d_r_func(r, v, mu)\n d_eCrossR_d_v_func = jacobian(bplane.eCrossR, 1)\n d_eCrossR_d_v_ad = d_eCrossR_d_v_func(r, v, mu)\n \n d_eCrossR_d_x = bplane.eCrossR_derivs(r, v, mu)\n\n # eVector\n d_eVector_d_r_func = jacobian(bplane.eVector, 0)\n d_eVector_d_r_ad = d_eVector_d_r_func(r, v, mu)\n d_eVector_d_v_func = jacobian(bplane.eVector, 1)\n d_eVector_d_v_ad = d_eVector_d_v_func(r, v, mu)\n\n d_eVector_d_x = 
bplane.eVector_derivs(r, v, mu)\n\n # nVector\n d_nVector_d_r_func = jacobian(bplane.nVector, 0)\n d_nVector_d_r_ad = d_nVector_d_r_func(r, v, mu)\n d_nVector_d_v_func = jacobian(bplane.nVector, 1)\n d_nVector_d_v_ad = d_nVector_d_v_func(r, v, mu)\n\n d_nVector_d_x = bplane.nVector_derivs(r, v, mu)\n\n # hVector\n d_hVector_d_r_func = jacobian(bplane.hVector, 0)\n d_hVector_d_r_ad = d_hVector_d_r_func(r, v)\n d_hVector_d_v_func = jacobian(bplane.hVector, 1)\n d_hVector_d_v_ad = d_hVector_d_v_func(r, v)\n\n d_hVector_d_x = bplane.hVector_derivs(r, v)\n \n # hVector v2\n d_hVector_d_x_v2 = bplane.hVector_derivs_v2(r, v)\n \n\n # sVector\n d_sVector_d_r_func = jacobian(bplane.sVector, 0)\n d_sVector_d_r_ad = d_sVector_d_r_func(r, v, mu)\n d_sVector_d_v_func = jacobian(bplane.sVector, 1)\n d_sVector_d_v_ad = d_sVector_d_v_func(r, v, mu)\n\n d_sVector_d_x = bplane.sVector_derivs(r, v, mu)\n\n # tVector\n d_tVector_d_r_func = jacobian(bplane.tVector, 0)\n d_tVector_d_r_ad = d_tVector_d_r_func(r, v, mu)\n d_tVector_d_v_func = jacobian(bplane.tVector, 1)\n d_tVector_d_v_ad = d_tVector_d_v_func(r, v, mu)\n\n d_tVector_d_x = bplane.tVector_derivs(r, v, mu)\n\n # rVector\n d_rVector_d_r_func = jacobian(bplane.rVector, 0)\n d_rVector_d_r_ad = d_rVector_d_r_func(r, v, mu)\n d_rVector_d_v_func = jacobian(bplane.rVector, 1)\n d_rVector_d_v_ad = d_rVector_d_v_func(r, v, mu)\n\n d_rVector_d_x = bplane.rVector_derivs(r, v, mu)\n\n # bVector\n d_bVector_d_r_func = jacobian(bplane.bVector, 0)\n d_bVector_d_r_ad = d_bVector_d_r_func(r, v, mu)\n d_bVector_d_v_func = jacobian(bplane.bVector, 1)\n d_bVector_d_v_ad = d_bVector_d_v_func(r, v, mu)\n\n d_bVector_d_x = bplane.bVector_derivs(r, v, mu)\n\n # bScalar\n d_bScalar_d_r_func = jacobian(bplane.bScalar, 0)\n d_bScalar_d_r_ad = d_bScalar_d_r_func(r, v, mu)\n d_bScalar_d_v_func = jacobian(bplane.bScalar, 1)\n d_bScalar_d_v_ad = d_bScalar_d_v_func(r, v, mu)\n\n d_bScalar_d_x = bplane.bScalar_derivs(r, v, mu)\n\n # bDotR\n d_bDotR_d_r_func = jacobian(bplane.bDotR, 0)\n d_bDotR_d_r_ad = d_bDotR_d_r_func(r, v, mu)\n d_bDotR_d_v_func = jacobian(bplane.bDotR, 1)\n d_bDotR_d_v_ad = d_bDotR_d_v_func(r, v, mu)\n\n d_BdotR_d_x = bplane.bDotR_derivs(r, v, mu)\n\n # bDotT\n d_bDotT_d_r_func = jacobian(bplane.bDotT, 0)\n d_bDotT_d_r_ad = d_bDotT_d_r_func(r, v, mu)\n d_bDotT_d_v_func = jacobian(bplane.bDotT, 1)\n d_bDotT_d_v_ad = d_bDotT_d_v_func(r, v, mu)\n\n d_BdotT_d_x = bplane.bDotT_derivs(r, v, mu)\n\n # bTheta\n d_bTheta_d_r_func = jacobian(bplane.bTheta, 0)\n d_bTheta_d_r_ad = d_bTheta_d_r_func(r, v, mu)\n d_bTheta_d_v_func = jacobian(bplane.bTheta, 1)\n d_bTheta_d_v_ad = d_bTheta_d_v_func(r, v, mu)\n\n d_bTheta_dx = bplane.bTheta_derivs(r, v, mu)\n\n # rPeri\n d_rPeri_d_r_func = jacobian(bplane.rPeri, 0)\n d_rPeri_d_r_ad = d_rPeri_d_r_func(r, v, mu)\n d_rPeri_d_v_func = jacobian(bplane.rPeri, 1)\n d_rPeri_d_v_ad = d_rPeri_d_v_func(r, v, mu)\n\n d_rPeri_d_x = bplane.rPeri_derivs(r, v, mu)\n\n # vInfMag\n d_vInfMag_d_r_func = jacobian(bplane.vInfMag, 0)\n d_vInfMag_d_r_ad = d_vInfMag_d_r_func(r, v, mu)\n d_vInfMag_d_v_func = jacobian(bplane.vInfMag, 1)\n d_vInfMag_d_v_ad = d_vInfMag_d_v_func(r, v, mu)\n\n d_vInfMag_d_x = bplane.vInfMag_derivs(r, v, mu)\n\n # vInfRA\n d_vInfRA_d_r_func = jacobian(bplane.vInfRA, 0)\n d_vInfRA_d_r_ad = d_vInfRA_d_r_func(r, v, mu)\n d_vInfRA_d_v_func = jacobian(bplane.vInfRA, 1)\n d_vInfRA_d_v_ad = d_vInfRA_d_v_func(r, v, mu)\n\n d_vInfRA_d_x = bplane.vInfRA_derivs(r, v, mu)\n\n # vInfDec\n d_vInfDec_d_r_func = jacobian(bplane.vInfDec, 0)\n 
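# (added comment, not in the original) as in the blocks above, the autograd\n # jacobian is evaluated here so it can be checked against the hand-coded\n # vInfDec_derivs result below.\n 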
d_vInfDec_d_r_ad = d_vInfDec_d_r_func(r, v, mu)\n d_vInfDec_d_v_func = jacobian(bplane.vInfDec, 1)\n d_vInfDec_d_v_ad = d_vInfDec_d_v_func(r, v, mu)\n\n d_vInfDec_d_x = bplane.vInfDec_derivs(r, v, mu)\n\n # hCross\n d_hCross_d_r_func = jacobian(bplane.hCross, 0)\n d_hCross_d_r_ad = d_hCross_d_r_func(r, v)\n d_hCross_d_v_func = jacobian(bplane.hCross, 1)\n d_hCross_d_v_ad = d_hCross_d_v_func(r, v)\n\n # periapsis position\n rp = bplane.periapsisPositionVector(r, v, mu)\n\n # periapsis velocity\n vp = bplane.periapsisVelocityVector(r, v, mu)\n\n ## print values\n #print('S = ', S)\n #print('B = ', B)\n #print('R = ', R)\n #print('T = ', T)\n #print('BR = ', BR)\n #print('BT = ', BT)\n #print('theta (deg) = ', np.rad2deg(theta))\n #print('b = ', Bmag)\n #print('rp = ', rp)\n #print('vinf = ', vinf)\n #print('vinf RA = ', np.rad2deg(RA))\n #print('vinf Dec = ', np.rad2deg(Dec))\n #print('')\n\n ## print derivatives\n # print('')\n # print('true anomaly = ', trueAnomaly)\n # print('d_trueAnomaly_d_r difference = \\n', d_trueAnomaly_d_r_ad - d_trueAnomaly_d_x[0:3]) # correct\n # print('d_trueAnomaly_d_v difference = \\n', d_trueAnomaly_d_v_ad - d_trueAnomaly_d_x[3:6]) # correct\n # print('d_trueAnomaly_d_r_ad = \\n', d_trueAnomaly_d_r_ad)\n # print('d_trueAnomaly_d_v_ad = \\n', d_trueAnomaly_d_v_ad)\n # print('d_trueAnomaly_d_r = \\n', d_trueAnomaly_d_x[0:3])\n # print('d_trueAnomaly_d_v = \\n', d_trueAnomaly_d_x[3:6])\n # print('')\n # print('d_eCrossR_d_r_ad = \\n', d_eCrossR_d_r_ad - d_eCrossR_d_x[0:3,0:3]) # correct\n # print('d_eCrossR_d_v_ad = \\n', d_eCrossR_d_v_ad - d_eCrossR_d_x[0:3,3:6]) # correct\n # print('d_eCrossR_d_r_ad = \\n', d_eCrossR_d_r_ad)\n # print('d_eCrossR_d_v_ad = \\n', d_eCrossR_d_v_ad)\n # print('d_eCrossR_d_r = \\n', d_eCrossR_d_x[0:3,0:3])\n # print('d_eCrossR_d_v = \\n', d_eCrossR_d_x[0:3,3:6])\n # print('')\n #print('d_eVector_d_r_ad = \\n', d_eVector_d_r_ad - d_eVector_d_x[0:3,0:3])\n #print('d_eVector_d_v_ad = \\n', d_eVector_d_v_ad - d_eVector_d_x[0:3,3:6])\n #print('d_eVector_d_r = \\n', d_eVector_d_x[0:3,0:3])\n #print('d_eVector_d_v = \\n', d_eVector_d_x[0:3,3:6])\n #print('')\n #print('d_nVector_d_r_ad = \\n', d_nVector_d_r_ad - d_nVector_d_x[0:3,0:3])\n #print('d_nVector_d_v_ad = \\n', d_nVector_d_v_ad - d_nVector_d_x[0:3,3:6])\n #print('d_nVector_d_r = \\n', d_nVector_d_x[0:3,0:3])\n #print('d_nVector_d_v = \\n', d_nVector_d_x[0:3,3:6])\n #print('')\n #print('d_hVector_d_r_ad = \\n', d_hVector_d_r_ad - d_hVector_d_x[0:3,0:3])\n #print('d_hVector_d_v_ad = \\n', d_hVector_d_v_ad - d_hVector_d_x[0:3,3:6])\n #print('d_hVector_d_r = \\n', d_hVector_d_x[0:3,0:3])\n #print('d_hVector_d_v = \\n', d_hVector_d_x[0:3,3:6])\n #print('d_hVector_d_r v2 = \\n', d_hVector_d_x_v2[0:3,0:3])\n #print('d_hVector_d_v v2 = \\n', d_hVector_d_x_v2[0:3,3:6])\n # print('')\n # print('d_sVector_d_r diff = \\n', d_sVector_d_r_ad - d_sVector_d_x[0:3,0:3])\n # print('d_sVector_d_v diff = \\n', d_sVector_d_v_ad - d_sVector_d_x[0:3,3:6])\n # print('d_sVector_d_r_ad = \\n', d_sVector_d_r_ad)\n # print('d_sVector_d_v_ad = \\n', d_sVector_d_v_ad)\n # print('d_sVector_d_r = \\n', d_sVector_d_x[0:3,0:3])\n # print('d_sVector_d_v = \\n', d_sVector_d_x[0:3,3:6])\n # print('')\n #print('d_tVector_d_r_ad = \\n', d_tVector_d_r_ad - d_tVector_d_x[0:3,0:3])\n #print('d_tVector_d_v_ad = \\n', d_tVector_d_v_ad - d_tVector_d_x[0:3,3:6])\n #print('d_tVector_d_r = \\n', d_tVector_d_x[0:3,0:3])\n #print('d_tVector_d_v = \\n', d_tVector_d_x[0:3,3:6])\n #print('')\n #print('d_rVector_d_r_ad = \\n', 
d_rVector_d_r_ad - d_rVector_d_x[0:3,0:3])\n #print('d_rVector_d_v_ad = \\n', d_rVector_d_v_ad - d_rVector_d_x[0:3,3:6])\n #print('d_rVector_d_r = \\n', d_rVector_d_x[0:3,0:3])\n #print('d_rVector_d_v = \\n', d_rVector_d_x[0:3,3:6])\n # print('')\n # print('d_bVector_d_r diff = \\n', d_bVector_d_r_ad - d_bVector_d_x[0:3,0:3])\n # print('d_bVector_d_v diff = \\n', d_bVector_d_v_ad - d_bVector_d_x[0:3,3:6])\n # print('d_bVector_d_r_ad = \\n', d_bVector_d_r_ad)\n # print('d_bVector_d_v_ad = \\n', d_bVector_d_v_ad)\n # print('d_bVector_d_r = \\n', d_bVector_d_x[0:3,0:3])\n # print('d_bVector_d_v = \\n', d_bVector_d_x[0:3,3:6])\n # print('')\n #print('d_bScalar_d_r_ad = \\n', d_bScalar_d_r_ad - d_bScalar_d_x[0:3])\n #print('d_bScalar_d_v_ad = \\n', d_bScalar_d_v_ad - d_bScalar_d_x[3:6])\n #print('d_bScalar_d_r = \\n', d_bScalar_d_x[0:3])\n #print('d_bScalar_d_v = \\n', d_bScalar_d_x[3:6]) \n #print('')\n #print('d_bDotR_d_r_ad = \\n', d_bDotR_d_r_ad - d_BdotR_d_x[0:3])\n #print('d_bDotR_d_v_ad = \\n', d_bDotR_d_v_ad - d_BdotR_d_x[3:6])\n #print('d_bDotR_d_r = \\n', d_BdotR_d_x[0:3])\n #print('d_bDotR_d_v = \\n', d_BdotR_d_x[3:6])\n #print('')\n #print('d_bDotT_d_r_ad = \\n', d_bDotT_d_r_ad - d_BdotT_d_x[0:3])\n #print('d_bDotT_d_v_ad = \\n', d_bDotT_d_v_ad - d_BdotT_d_x[3:6])\n #print('d_bDotT_d_r = \\n', d_BdotT_d_x[0:3])\n #print('d_bDotT_d_v = \\n', d_BdotT_d_x[3:6])\n # print('')\n # print('d_bTheta_d_r_ad = \\n', d_bTheta_d_r_ad - d_bTheta_dx[0:3]) # correct\n # print('d_bTheta_d_v_ad = \\n', d_bTheta_d_v_ad - d_bTheta_dx[3:6]) # correct\n # print('d_bTheta_d_r = \\n', d_bTheta_dx[0:3])\n # print('d_bTheta_d_v = \\n', d_bTheta_dx[3:6])\n # print('')\n # print('d_rPeri_d_r_ad = \\n', d_rPeri_d_r_ad - d_rPeri_d_x[0:3]) # correct\n # print('d_rPeri_d_v_ad = \\n', d_rPeri_d_v_ad - d_rPeri_d_x[3:6]) # correct\n # print('d_rPeri_d_r = \\n', d_rPeri_d_x[0:3])\n # print('d_rPeri_d_v = \\n', d_rPeri_d_x[3:6])\n #print('')\n # print('d_vInfMag_d_r_ad = \\n', d_vInfMag_d_r_ad - d_vInfMag_d_x[0:3]) # correct\n # print('d_vInfMag_d_v_ad = \\n', d_vInfMag_d_v_ad - d_vInfMag_d_x[3:6]) # correct\n # print('d_vInfMag_d_r = \\n', d_vInfMag_d_x[0:3])\n # print('d_vInfMag_d_v = \\n', d_vInfMag_d_x[3:6])\n #print('')\n # print('d_vInfRA_d_r_ad = \\n', d_vInfRA_d_r_ad - d_vInfRA_d_x[0:3]) # correct\n # print('d_vInfRA_d_v_ad = \\n', d_vInfRA_d_v_ad - d_vInfRA_d_x[3:6]) # correct\n # print('d_vInfRA_d_r = \\n', d_vInfRA_d_x[0:3])\n # print('d_vInfRA_d_v = \\n', d_vInfRA_d_x[3:6])\n #print('')\n # print('d_vInfDec_d_r_ad = \\n', d_vInfDec_d_r_ad - d_vInfDec_d_x[0:3]) # correct\n # print('d_vInfDec_d_v_ad = \\n', d_vInfDec_d_v_ad - d_vInfDec_d_x[3:6]) # correct\n # print('d_vInfDec_d_r = \\n', d_vInfDec_d_x[0:3])\n # print('d_vInfDec_d_v = \\n', d_vInfDec_d_x[3:6])\n \n\n # now, go back the other way\n TA0 = 0.0 # fix at periapsis\n \n # true anomaly not fixed at periapsis, but taken from initial Cartesian state\n TA = trueAnomaly\n \n x = np.array([vinf, RA, Dec, Bmag, theta, TA])\n print(x)\n xp = np.array([vinf, RA, Dec, Bmag, theta, TA0]) # periapsis\n #x = xp\n \n xDeg = np.zeros((6))\n xDeg[0] = x[0]\n xDeg[1] = x[1] * 180./np.pi\n xDeg[2] = x[2] * 180./np.pi\n xDeg[3] = x[3]\n xDeg[4] = x[4] * 180./np.pi\n xDeg[5] = x[5] * 180./np.pi\n \n # print('B plane state = \\n', x)\n # print(\"\\n\")\n # print('B plane state (deg) = \\n', xDeg)\n # print(\"\\n\")\n \n back = BPlane2PosVelOut()\n\n # calculate the stuff\n eMagBack = back.eMag(x, mu)\n sBack = back.sVector(x)\n hMagBack = back.hMag(x)\n tBack = 
back.tVector(x)\n rBack = back.rVector(x)\n BRBack = back.bDotR(x)\n BTBack = back.bDotT(x)\n BBack = back.bVector(x)\n hUnitBack = back.hUnit(x)\n hBack = back.hVector(x)\n eBack = back.eVector(x, mu)\n rpBack = back.positionVector(xp, mu)\n vpBack = back.velocityVector(xp, mu)\n rBack = back.positionVector(x, mu)\n vBack = back.velocityVector(x, mu)\n\n # autograd derivatives\n\n # eMag\n d_eMag_d_x_func = jacobian(back.eMag, 0)\n d_eMag_d_x_ad = d_eMag_d_x_func(x, mu)\n d_eMag_d_x = back.eMag_derivs(x, mu)\n\n # hMag\n d_hMag_d_x_func = jacobian(back.hMag, 0)\n d_hMag_d_x_ad = d_hMag_d_x_func(x)\n d_hMag_d_x = back.hMag_derivs(x)\n\n # sVector\n d_sVector_d_x_func = jacobian(back.sVector, 0)\n d_sVector_d_x_ad = d_sVector_d_x_func(x)\n d_sVector_d_x = back.sVector_derivs(x)\n\n # tVector\n d_tVector_d_x_func = jacobian(back.tVector, 0)\n d_tVector_d_x_ad = d_tVector_d_x_func(x)\n d_tVector_d_x = back.tVector_derivs(x)\n\n # rVector\n d_rVector_d_x_func = jacobian(back.rVector, 0)\n d_rVector_d_x_ad = d_rVector_d_x_func(x)\n d_rVector_d_x = back.rVector_derivs(x)\n\n # bVector\n d_bVector_d_x_func = jacobian(back.bVector, 0)\n d_bVector_d_x_ad = d_bVector_d_x_func(x)\n d_bVector_d_x = back.bVector_derivs(x)\n\n # hUnit\n d_hUnit_d_x_func = jacobian(back.hUnit, 0)\n d_hUnit_d_x_ad = d_hUnit_d_x_func(x)\n d_hUnit_d_x = back.hUnit_derivs(x)\n\n # hVector\n d_hVector_d_x_func = jacobian(back.hVector, 0)\n d_hVector_d_x_ad = d_hVector_d_x_func(x)\n d_hVector_d_x = back.hVector_derivs(x)\n\n # eUnitVector\n d_eUnitVector_d_x_func = jacobian(back.eUnitVector, 0)\n d_eUnitVector_d_x_ad = d_eUnitVector_d_x_func(x, mu)\n d_eUnitVector_d_x = back.eUnitVector_derivs(x, mu)\n\n # eVector\n d_eVector_d_x_func = jacobian(back.eVector, 0)\n d_eVector_d_x_ad = d_eVector_d_x_func(x, mu)\n d_eVector_d_x = back.eVector_derivs(x, mu)\n\n # TAinf\n d_TAinf_d_x_func = jacobian(back.TAinf, 0)\n d_TAinf_d_x_ad = d_TAinf_d_x_func(x, mu)\n d_TAinf_d_x = back.TAinf_derivs(x, mu)\n\n # position wrt ang mo\n d_position_d_h_func = jacobian(back.positionVectorFromheTA, 0)\n d_position_d_h_ad = d_position_d_h_func(hBack, eBack, x[5], mu)\n d_position_d_h = back.dPositionVectordh(x, mu)\n\n # position wrt ecc vec\n d_position_d_e_func = jacobian(back.positionVectorFromheTA, 1)\n d_position_d_e_ad = d_position_d_e_func(hBack, eBack, x[5], mu)\n d_position_d_e = back.dPositionVectorde(x, mu)\n\n # position wrt ecc vec\n d_position_d_TA_func = jacobian(back.positionVectorFromheTA, 2)\n d_position_d_TA_ad = d_position_d_TA_func(hBack, eBack, x[5], mu)\n d_position_d_TA = back.dPositionVectordTA(x, mu)\n\n # velocity wrt ang mo\n d_velocity_d_h_func = jacobian(back.velocityVectorFromheTA, 0)\n d_velocity_d_h_ad = d_velocity_d_h_func(hBack, eBack, x[5], mu)\n d_velocity_d_h = back.dVelocityVectordh(x, mu)\n\n # velocity wrt ecc vec\n d_velocity_d_e_func = jacobian(back.velocityVectorFromheTA, 1)\n d_velocity_d_e_ad = d_velocity_d_e_func(hBack, eBack, x[5], mu)\n d_velocity_d_e = back.dVelocityVectorde(x, mu)\n\n # velocity wrt TA\n d_velocity_d_TA_func = jacobian(back.velocityVectorFromheTA, 2)\n d_velocity_d_TA_ad = d_velocity_d_TA_func(hBack, eBack, x[5], mu)\n d_velocity_d_TA = back.dVelocityVectordTA(x, mu)\n\n # positionVector\n d_positionVector_d_x_func = jacobian(back.positionVector, 0)\n d_positionVector_d_x_ad = d_positionVector_d_x_func(x, mu)\n d_positionVector_d_x = back.positionVector_derivs(x, mu)\n\n # velocityVector\n d_velocityVector_d_x_func = jacobian(back.velocityVector, 0)\n d_velocityVector_d_x_ad = 
d_velocityVector_d_x_func(x, mu)\n d_velocityVector_d_x = back.velocityVector_derivs(x, mu)\n\n ## print derivatives\n print('BPlane2PosVel derivatives')\n print('')\n #print('d_eMag_d_x_ad = \\n', d_eMag_d_x_ad - d_eMag_d_x) # correct\n #print('d_eMag_d_x = \\n', d_eMag_d_x) # correct\n #print('')\n #print('d_hMag_d_x_ad = \\n', d_hMag_d_x_ad - d_hMag_d_x) # correct\n #print('d_hMag_d_x = \\n', d_hMag_d_x) # correct\n #print('')\n #print('d_sVector_d_x_ad = \\n', d_sVector_d_x_ad - d_sVector_d_x) # correct\n #print('d_sVector_d_x = \\n', d_sVector_d_x) # correct\n #print('')\n #print('d_tVector_d_x_ad = \\n', d_tVector_d_x_ad - d_tVector_d_x) # correct\n #print('d_tVector_d_x = \\n', d_tVector_d_x) # correct\n #print('')\n #print('d_rVector_d_x_ad = \\n', d_rVector_d_x_ad - d_rVector_d_x) # correct\n #print('d_rVector_d_x = \\n', d_rVector_d_x) # correct\n # print('')\n # print('d_bVector_d_x_ad = \\n', d_bVector_d_x_ad - d_bVector_d_x) # doesn't look exactly correct, but that is because of cross product terms that should cancel not EXACTLY canceling\n # ## this occurs in rVector_derivs, actually: print(crossProduct[0]*dRdxNotUnit[0,1], crossProduct[1]*dRdxNotUnit[1,1])\n # print('d_bVector_d_x = \\n', d_bVector_d_x) # correct\n # print('')\n # print('d_TAinf_d_x diff = \\n', d_TAinf_d_x_ad - d_TAinf_d_x) # correct\n # print('d_TAinf_d_x_ad = \\n', d_TAinf_d_x_ad) # correct\n # print('d_TAinf_d_x = \\n', d_TAinf_d_x) # correct\n # print('')\n #print('d_hUnit_d_x_ad = \\n', d_hUnit_d_x_ad - d_hUnit_d_x) # correct\n #print('d_hUnit_d_x = \\n', d_hUnit_d_x) # correct\n # print('')\n # print('d_hVector_d_x_ad = \\n', d_hVector_d_x_ad - d_hVector_d_x) # should be correct, except there are scaling issues that result in small errors in cross product cancellations in hUnitVector_derivs resulting in slightly larger errors later\n # print('d_hVector_d_x = \\n', d_hVector_d_x) # correct\n # print('')\n # print('d_eUnitVector_d_x diff = \\n', d_eUnitVector_d_x_ad - d_eUnitVector_d_x) # correct\n # print('d_eUnitVector_d_x_ad = \\n', d_eUnitVector_d_x_ad) # correct\n # print('d_eUnitVector_d_x = \\n', d_eUnitVector_d_x) # correct\n # print('')\n # print('d_eVector_d_x_ad = \\n', d_eVector_d_x_ad - d_eVector_d_x) # correct\n # print('d_eVector_d_x = \\n', d_eVector_d_x) # correct\n # print('')\n # print('d_positionVector_d_h difference = \\n', d_position_d_h_ad - d_position_d_h) # correct\n # print('d_positionVector_d_h_ad = \\n', d_position_d_h_ad) # correct\n # print('d_positionVector_d_h = \\n', d_position_d_h) # correct\n # print('')\n # print('d_positionVector_d_e difference = \\n', d_position_d_e_ad - d_position_d_e) # correct\n # print('d_positionVector_d_e_ad = \\n', d_position_d_e_ad) # correct\n # print('d_positionVector_d_e = \\n', d_position_d_e) # correct\n # print('')\n # print('d_positionVector_d_TA_ad = \\n', d_position_d_TA_ad - d_position_d_TA) # correct\n # print('d_positionVector_d_TA = \\n', d_position_d_TA) # correct\n print('')\n print('d_positionVector_d_x difference = \\n', d_positionVector_d_x_ad - d_positionVector_d_x) # wrong\n print('d_positionVector_d_x_ad = \\n', d_positionVector_d_x_ad) # wrong\n print('d_positionVector_d_x = \\n', d_positionVector_d_x) # wrong\n print('')\n # print('d_velocityVector_d_h_ad = \\n', d_velocity_d_h_ad - d_velocity_d_h) # correct\n # print('d_velocityVector_d_h = \\n', d_velocity_d_h) # correct\n #print('')\n #print('d_velocityVector_d_e_ad = \\n', d_velocity_d_e_ad - d_velocity_d_e) # correct\n #print('d_velocityVector_d_e = \\n', 
d_velocity_d_e) # correct\n #print('')\n #print('d_velocityVector_d_TA_ad = \\n', d_velocity_d_TA_ad - d_velocity_d_TA) # correct\n #print('d_velocityVector_d_TA = \\n', d_velocity_d_TA) # correct\n print('')\n print('dvelocityVector_d_x difference = \\n', d_velocityVector_d_x_ad - d_velocityVector_d_x) # correct\n print('dvelocityVector_d_x_ad = \\n', d_velocityVector_d_x_ad) # correct\n print('dvelocityVector_d_x = \\n', d_velocityVector_d_x) # correct\n\n\n # print the stuff\n #print('')\n #print('*****************************')\n #print('*****************************')\n #print('*****************************')\n #print('')\n #print('eMag forward = ', np.linalg.norm(e) - eMagBack)\n #print('eMag back = ', eMagBack)\n #print('')\n #print('s vector forward = ', S - sBack)\n #print('s vector backward = ', sBack)\n #print('')\n #print('hMag forward = ', np.linalg.norm(h) - hMagBack)\n #print('hMag backward = ', hMagBack)\n #print('')\n #print('tVector forward = ', T - tBack)\n #print('tVector backward = ', tBack)\n #print('')\n #print('rVector forward = ', R - rBack)\n #print('rVector backward = ', rBack)\n #print('')\n #print('bDotT forward = ', BT - BTBack)\n #print('bDotT backward = ', BTBack)\n #print('')\n #print('bDotR forward = ', BR - BRBack)\n #print('bDotR backward = ', BRBack)\n #print('')\n #print('BVector forward = ', B - BBack)\n #print('BVector backward = ', BBack)\n #print('')\n #print('hUnit forward = ', h/np.linalg.norm(h) - hUnitBack)\n #print('hUnit backward = ', hUnitBack)\n #print('')\n #print('h forward = ', h - hBack)\n #print('h backward = ', hBack)\n #print('')\n #print('e forward = ', e - eBack)\n #print('e backward = ', eBack)\n # print('')\n # print('periapsis position fwd - bwd = ', rp - rpBack)\n # print('periapsis position backward = ', rpBack)\n # print('')\n # print('periapsis velocity fwd - bwd = ', vp - vpBack)\n # print('periapsis velocity backward = ', vpBack)\n # print('')\n # print('position fwd - bwd = ', r - rBack)\n # print('position forward = ', r)\n # print('position backward = ', rBack)\n # print('')\n # print('velocity fwd - bwd = ', v - vBack)\n # print('velocity forward = ', v)\n # print('velocity backward = ', vBack)\n # print('')\n\n print(\"Done!\")\n","sub_path":"emtg/docs/bplane_parameterization/bplane_math_driver_out.py","file_name":"bplane_math_driver_out.py","file_ext":"py","file_size_in_byte":23163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"443758373","text":"import fractions\nN, M = map(int, input().split())\nS = input()\nT = input()\n\nG = N * M // fractions.gcd(N, M)\nslist = [i*(G//N) for i in range(N)]\nmlist = [i*(G//M) for i in range(M)]\nsm = set(slist) & set(mlist)\nfor i in sm:\n si = slist.index(i)\n mi = mlist.index(i)\n if S[si] != T[mi]:\n print(-1)\n exit()\n\nprint(G)","sub_path":"Python_codes/p03231/s674124725.py","file_name":"s674124725.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"642145302","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\n# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://spdx.org/licenses/BSD-3-Clause.html\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport torch\r\nfrom torch.optim.optimizer import Optimizer, required\r\nfrom collections import defaultdict\r\nfrom pycls.core.combine_tensors import combine_npu\r\n\r\nclass NpuFusedSGD(Optimizer):\r\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\r\n\r\n Currently NPU-only. Requires Apex to be installed via\r\n ``pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--npu_float_status\" ./``.\r\n\r\n This version of fused SGD implements 1 fusions.\r\n\r\n * A combine-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.\r\n\r\n :class:`apex.optimizers.NpuFusedSGD` may be used as a drop-in replacement for ``torch.optim.SGD``::\r\n\r\n opt = apex.optimizers.NpuFusedSGD(model.parameters(), lr = ....)\r\n ...\r\n opt.step()\r\n\r\n :class:`apex.optimizers.FusedSGD` should be used with Amp. Currently, if you wish to use :class:`NpuFusedSGD` with Amp,\r\n only ``opt_level O2`` can be choosed::\r\n\r\n opt = apex.optimizers.NpuFusedSGD(model.parameters(), lr = ....)\r\n model, opt = amp.initialize(model, opt, opt_level=\"O2\")\r\n ...\r\n opt.step()\r\n\r\n Nesterov momentum is based on the formula from\r\n `On the importance of initialization and momentum in deep learning`__.\r\n\r\n Args:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups\r\n lr (float): learning rate\r\n momentum (float, optional): momentum factor (default: 0)\r\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\r\n dampening (float, optional): dampening for momentum (default: 0)\r\n nesterov (bool, optional): enables Nesterov momentum (default: False)\r\n\r\n Example:\r\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\r\n >>> optimizer.zero_grad()\r\n >>> loss_fn(model(input), target).backward()\r\n >>> optimizer.step()\r\n\r\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\r\n\r\n .. note::\r\n The implementation of SGD with Momentum/Nesterov subtly differs from\r\n Sutskever et. al. and implementations in some other frameworks.\r\n\r\n Considering the specific case of Momentum, the update can be written as\r\n\r\n .. math::\r\n \\begin{aligned}\r\n v_{t+1} & = \\mu * v_{t} + g_{t+1}, \\\\\r\n p_{t+1} & = p_{t} - \\text{lr} * v_{t+1},\r\n \\end{aligned}\r\n\r\n where :math:`p`, :math:`g`, :math:`v` and :math:`\\mu` denote the \r\n parameters, gradient, velocity, and momentum respectively.\r\n\r\n This is in contrast to Sutskever et. al. and\r\n other frameworks which employ an update of the form\r\n\r\n .. 
math::\r\n \\begin{aligned}\r\n v_{t+1} & = \\mu * v_{t} + \\text{lr} * g_{t+1}, \\\\\r\n p_{t+1} & = p_{t} - v_{t+1}.\r\n \\end{aligned}\r\n\r\n The Nesterov version is analogously modified.\r\n \"\"\"\r\n\r\n def __init__(self, params, lr=required, momentum=0, dampening=0,\r\n weight_decay=0, nesterov=False):\r\n if lr is not required and lr < 0.0:\r\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\r\n if momentum < 0.0:\r\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\r\n if weight_decay < 0.0:\r\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\r\n\r\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\r\n weight_decay=weight_decay, nesterov=nesterov)\r\n if nesterov and (momentum <= 0 or dampening != 0):\r\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\r\n super(NpuFusedSGD, self).__init__(params, defaults)\r\n\r\n def __setstate__(self, state):\r\n super(NpuFusedSGD, self).__setstate__(state)\r\n for group in self.param_groups:\r\n group.setdefault('nesterov', False)\r\n\r\n def combine_param_state_by_group(self, momentum_buffer_in_state_before):\r\n if not hasattr(self, \"_amp_stash\"):\r\n raise RuntimeError('apex.optimizers.NpuFusedSGD should be used with AMP.')\r\n\r\n momentum_buffer_in_state_before = True\r\n\r\n stash = self._amp_stash\r\n if stash.param_state_combined:\r\n return\r\n \r\n for group in self.param_groups:\r\n weight_decay = group['weight_decay']\r\n momentum = group['momentum']\r\n if momentum == 0:\r\n state_combined = defaultdict(dict)\r\n state_combined['momentum_buffer'] = None\r\n stash.param_state_combined_list.append(state_combined)\r\n continue\r\n\r\n momentum_buffer_list = []\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n\r\n d_p = p.grad\r\n state = self.state[p]\r\n if 'momentum_buffer' not in state:\r\n momentum_buffer_in_state_before = False\r\n if weight_decay != 0:\r\n d_p = d_p.add(p, alpha=weight_decay)\r\n state['momentum_buffer'] = torch.clone(d_p).detach()\r\n else:\r\n temp = torch.clone(d_p).detach()\r\n temp.copy_(state['momentum_buffer'])\r\n state['momentum_buffer'] = temp\r\n\r\n momentum_buffer_list.append(state['momentum_buffer'])\r\n\r\n momentum_buffer_combined = None\r\n if len(momentum_buffer_list) > 0:\r\n momentum_buffer_combined = combine_npu(momentum_buffer_list)\r\n \r\n state_combined = defaultdict(dict)\r\n state_combined['momentum_buffer'] = momentum_buffer_combined\r\n stash.param_state_combined_list.append(state_combined)\r\n\r\n stash.param_state_combined = True\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\r\n\r\n Arguments:\r\n closure (callable, optional): A closure that reevaluates the model\r\n and returns the loss.\r\n \"\"\"\r\n if not hasattr(self, \"_amp_stash\"):\r\n raise RuntimeError('apex.optimizers.NpuFusedSGD should be used with AMP.')\r\n\r\n momentum_buffer_in_state_before = True\r\n self._combine_params_and_grads_by_group()\r\n self.combine_param_state_by_group(momentum_buffer_in_state_before)\r\n\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n stash = self._amp_stash\r\n\r\n for i, group in enumerate(self.param_groups):\r\n weight_decay = group['weight_decay']\r\n momentum = group['momentum']\r\n dampening = group['dampening']\r\n nesterov = group['nesterov']\r\n\r\n params_combined = stash.params_combined_list[i]\r\n grads_combined = 
stash.grads_combined_list[i]\r\n if params_combined is None or grads_combined is None:\r\n continue\r\n \r\n if weight_decay != 0:\r\n grads_combined = grads_combined.add(params_combined, alpha=weight_decay)\r\n if momentum != 0:\r\n param_state = stash.param_state_combined_list[i]\r\n buf = param_state['momentum_buffer']\r\n if momentum_buffer_in_state_before:\r\n buf.mul_(momentum).add_(grads_combined, alpha=1 - dampening)\r\n\r\n if nesterov:\r\n grads_combined = grads_combined.add(buf, alpha=momentum)\r\n else:\r\n grads_combined = buf\r\n\r\n params_combined.add_(grads_combined, alpha=-group['lr'])\r\n\r\n return loss\r\n","sub_path":"PyTorch/contrib/cv/classification/EfficientNet-B1/pycls/core/sgd.py","file_name":"sgd.py","file_ext":"py","file_size_in_byte":8624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"316723955","text":"import nltk\nfrom nltk.tokenize import PunktSentenceTokenizer\nfrom nltk.corpus import state_union\nfrom nltk.sem import relextract\n\ntrain_text = state_union.raw(\"2006-GWBush.txt\")\n\ncustom_sent_tokenizer = PunktSentenceTokenizer(train_text)\n\n\ndef ie_preprocess(document):\n sentences = nltk.sent_tokenize(document)\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n sentences = [nltk.pos_tag(sent) for sent in sentences]\n return sentences\n\n\ndef get_entities(sentences):\n entities = []\n for word in sentences:\n entities.append(nltk.ne_chunk(word))\n return entities \n\n# sentences = ie_preprocess(\"Throwaway time! What's your secret that could literally ruin your life if it came out?\")\n# result = get_entities(sentences)\n# grammar = \"NP:{
?*+}\"\n# cp = nltk.RegexpParser(grammar)\n# result = cp.parse(sentence)\n# print(result)\n# tree = next(x for x in trees if type(x) == nltk.tree.Tree)\n# pairs = relextract.tree2semi_rel(result)\n# print(pairs)\n# reldicts = relextract.semi_rel2reldict(pairs)\n# print(reldicts)\n# for k, v in sorted(reldicts[0].items()):\n# print(k, '=>', v)\n","sub_path":"stats/helper/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"621960907","text":"class Solution(object):\n\tdef countDigitOne(self, n):\n\t\t\"\"\"\n\t\t:type n: int\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tif(n <= 0):\n\t\t\treturn 0\n\t\telse:\n\t\t\ts = str(n)\n\t\t\tlength = len(s)\n\t\t\tl = self.countDigitOne1(length)\n\t\t\treturn self.countDigitOneWithString(s, l)\n\tdef countDigitOne1(self, length):\n\t\t# l[i] is the result when n = 10**i - 1\n\t\tcnt, tmp = 0, 0\n\t\tl = [0, ]\n\t\twhile(cnt < length):\n\t\t\tcnt += 1\n\t\t\ttmp = 10 * tmp + 10**(cnt - 1)\n\t\t\tl.append(tmp)\n\t\t# print l\n\t\treturn l\t\n\tdef countDigitOneWithString(self, s, l):\n\t\tif(s == '' or s == '0'):\n\t\t\treturn 0\n\t\telse:\n\t\t\trestString = s[1:].lstrip('0')\n\t\t\tlength = len(s)\n\t\t\trest = self.countDigitOneWithString(restString, l)\n\t\t\t# print(restString, rest)\n\t\t\tif(s[0] == '1'):\n\t\t\t\treturn rest + l[length - 1] + self.convertToNum(restString) + 1\n\t\t\telse:\n\t\t\t\treturn rest + int(s[0])*l[length - 1] + 10**(length - 1)\n\tdef convertToNum(self, s):\n\t\tif(s == ''):\n\t\t\treturn 0\n\t\treturn int(s)\n\nif(__name__ == '__main__'):\n\ts = Solution()\n\tprint(s.countDigitOne(100))","sub_path":"LeetCode/233M_Number_of_Digit_One.py","file_name":"233M_Number_of_Digit_One.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"634012481","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nimport matplotlib.patches as mpatches\n\n# Classifier Libraries\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport collections\n\n# Other Libraries\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report\nfrom sklearn.model_selection import KFold, StratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[2]:\n\n\ndf = pd.read_csv('creditcard.csv')\ndf.head(3)\n\n\n# In[3]:\n\n\ndf.shape\n\n\n# In[4]:\n\n\n# Toutes les variables sont mises à l'échelle hormis les variables Amount et Time. Nous les traitons à l'aide\n# RobustScaler qui est moins sensible aux outliers que StandardScaler\nfrom sklearn.preprocessing import RobustScaler\n\nrob_scaler = RobustScaler()\n\ndf['scaled_amount'] = rob_scaler.fit_transform(df['Amount'].values.reshape(-1,1))\ndf['scaled_time'] = rob_scaler.fit_transform(df['Time'].values.reshape(-1,1))\n\ndf.drop(['Time','Amount'], axis=1, inplace=True)\n\n\n# In[5]:\n\n\n#seuls 0.17% des transactions sont frauduleuses. 
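# ------------------------------------------------------------------
# Hedged aside (illustration only, not part of this notebook's
# pipeline): RobustScaler centers on the median and scales by the
# interquartile range, x_scaled = (x - median) / (Q3 - Q1), which is
# why it is less sensitive to extreme transaction amounts than
# StandardScaler. Quick numeric check on made-up data:
import numpy as np
from sklearn.preprocessing import RobustScaler

demo_amounts = np.array([1.0, 2.0, 3.0, 4.0, 1000.0]).reshape(-1, 1)
scaled = RobustScaler().fit_transform(demo_amounts)

q1, med, q3 = np.percentile(demo_amounts, [25, 50, 75])
assert np.allclose(scaled, (demo_amounts - med) / (q3 - q1))
# (The value_counts call below confirms the ~0.17% fraud rate noted
# in the comment above.)
# ------------------------------------------------------------------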
\ndf['Class'].value_counts(normalize=True)\n\n\n# In[6]:\n\n\n#aucune donnée n'est manquante\ndf.isnull().sum().max()\n\n\n# In[7]:\n\n\n#les variables suivent à peu près la loi normale. Nous ne leur appliquerons aucune transformation pour l'instant:\nfig, axes = plt.subplots(ncols=5, nrows=6,figsize=(18,20))\ncolumns = ['V1','V2','V3','V4','V5','V6','V7','V8','V9', \n 'V10','V11','V12','V13','V14','V15','V16','V17','V18','V19','V20','V21','V22','V23','V24','V25','V26','V27','V28']\nfor i, ax in zip(columns, axes.flat):\n sns.distplot(df[i], ax=ax)\nplt.show()\n\n\n# In[8]:\n\n\n#Nous visualisons également les outliers à l'aide des boxplots:\nfig, axes = plt.subplots(ncols=3, nrows=9,figsize=(18,20))\ncolumns = ['V1','V2','V3','V4','V5','V6','V7','V8','V9', \n 'V10','V11','V12','V13','V14','V15','V16','V17','V18','V19','V20','V21','V22','V23','V24','V25','V26','V27','V28']\nfor i, ax in zip(columns, axes.flat):\n sns.boxplot(df[i], ax=ax)\nplt.show()\n\n\n# In[9]:\n\n\n#Pourcentage d'outliers pour chacune des variables:\n\ncolumns = ['V1','V2','V3','V4','V5','V6','V7','V8','V9', \n 'V10','V11','V12','V13','V14','V15','V16','V17','V18','V19','V20','V21','V22','V23','V24','V25','V26','V27','V28']\nfor i in columns:\n Q1 = df[i].quantile(0.25)\n Q3 = df[i].quantile(0.75)\n IQR = Q3 - Q1\n print(\"Pourcentage d'outliers dans\",i ,\"est de\", (len(df[((df[i] < (Q1 - 1.5 * IQR)) | (df[i] > (Q3 + 1.5 * IQR)))].index))/len(df[i])*100,\"%\")\n\n\n# In[10]:\n\n\n#les variables les plus corrélées à la variable réponse sont: V17, V14 et V12\nplt.figure(figsize=(25,15))\nsns.heatmap(df.corr(), annot=True, cmap='viridis_r');\nplt.title('correlation heatmap')\n\n\n# In[11]:\n\n\n#V17 est la variable la plus corrélée à la variable cible. Pour éviter le surapprentissage et étant donné que notre dataset est déséquilibré avec très peu de transactions frauduleuses,\n#nous effectuerons un échantillonnage sur les transactions non frauduleuses pour aboutir à un équilibre d'environ 50%/50%. \n#Nous nous servirons de la nouvelle variable V17_categorical pour réaliser un échantillonnage stratifié.\n#Pour cela, on utilise un découpage par quantiles empiriques:\nr = [df['V17'].quantile(0), df['V17'].quantile(0.25), df['V17'].quantile(0.5), df['V17'].quantile(0.75), df['V17'].quantile(1)]\ng = [1,2,3,4]\ndf['V17_categorical'] = pd.cut(df['V17'], bins=r, labels=g, include_lowest=True)\n\n\n# In[12]:\n\n\n#On va faire de l'échantillonnage stratifié du dataframe en fonction de la variable V17_categorcial\nimport seaborn as sns\nsns.distplot(df.V17_categorical)\nplt.show()\n\n\n# In[13]:\n\n\ndf['V17_categorical'].value_counts().sort_index()\n\n\n# In[14]:\n\n\n#Pour cela, nous utilisons la fonction StratifiedShuffleSplit afin d'équilibrer les classes 0 et 1. 
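# ------------------------------------------------------------------
# Hedged aside (sketch on synthetic data, not this notebook's
# dataframe): the quartile binning of V17 built above with pd.cut and
# hand-collected quantile edges can be written more directly with
# pd.qcut, which computes the same empirical quartile edges
# internally. Up to ties landing exactly on a quantile edge, the two
# should agree:
import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(1000))
manual = pd.cut(s,
                bins=[s.quantile(q) for q in (0, 0.25, 0.5, 0.75, 1)],
                labels=[1, 2, 3, 4], include_lowest=True)
direct = pd.qcut(s, q=4, labels=[1, 2, 3, 4])
assert (manual.astype(int) == direct.astype(int)).all()
# ------------------------------------------------------------------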
\n#Une autre solution et qui serait plus pertinente serait de procéder à l'oversampling à l'aide de la méthode \n#SMOTE permettant de faire du suréchantillonnage avec remplacement plutôt que d'utiliser les méthodes aléatoires.\n#D'autres méthodes d'undersampling sont également possibles.\nfrom sklearn.model_selection import StratifiedShuffleSplit\nsplit = StratifiedShuffleSplit(test_size=len(df[df['Class']==1]) / len(df[df['Class']==0]))\n\nfor df_index, test_index in split.split(df, df['V17_categorical']):\n strat_df = df.loc[df_index]\n strat_test_df = df.loc[test_index] \n\n\n# In[15]:\n\n\n#On voit que l'échantillonnage réalisé maintient une bonne répartition des données \nstrat_test_df['V17_categorical'].value_counts().sort_index()\n\n\n# In[16]:\n\n\n#Nous créons un nouveau dataframe résultat de la concaténation du dataframe stratifié est du dataframe original (pour\n#les transactions frauduleuses)\nnew_df = pd.concat([strat_test_df, df[df['Class']==1]])\n\n\n# In[17]:\n\n\nnew_df['Class'].value_counts()\n\n\n# In[18]:\n\n\nnew_df = new_df.drop('V17_categorical', axis=1)\n\n\n# In[19]:\n\n\n#les variables les plus corrélées à la variable réponse sont: V17, V14 et V12\nplt.figure(figsize=(25,15))\nsns.heatmap(new_df.corr(), annot=True, cmap='viridis_r');\nplt.title('correlation heatmap')\n\n\n# In[20]:\n\n\nf, axes = plt.subplots(ncols=4, figsize=(20,4))\n\n#Nous visualisons les plus fortes corrélations négatives avec la cible. Plus les valeurs sont faibles pour la classe 1,\n#plus il y'a de chance qu'elles correspondent à des transactions frauduleuses:\nsns.boxplot(x=\"Class\", y=\"V16\", data=new_df, ax=axes[0])\naxes[0].set_title('V16 vs Class Negative Correlation')\n\nsns.boxplot(x=\"Class\", y=\"V14\", data=new_df, ax=axes[1])\naxes[1].set_title('V14 vs Class Negative Correlation')\n\n\nsns.boxplot(x=\"Class\", y=\"V12\", data=new_df, ax=axes[2])\naxes[2].set_title('V12 vs Class Negative Correlation')\n\n\nsns.boxplot(x=\"Class\", y=\"V10\", data=new_df, ax=axes[3])\naxes[3].set_title('V10 vs Class Negative Correlation')\n\nplt.show()\n\n\n# In[21]:\n\n\nf, axes = plt.subplots(ncols=3, figsize=(20,4))\n\n#A l'inverse, nous visualisons ici les plus fortes corrélations positives avec la cible: \nsns.boxplot(x=\"Class\", y=\"V2\", data=new_df, ax=axes[0])\naxes[0].set_title('V16 vs Class Positive Correlation')\n\nsns.boxplot(x=\"Class\", y=\"V4\", data=new_df, ax=axes[1])\naxes[1].set_title('V14 vs Class Positive Correlation')\n\n\nsns.boxplot(x=\"Class\", y=\"V11\", data=new_df, ax=axes[2])\naxes[2].set_title('V12 vs Class Positive Correlation')\n\n\nplt.show()\n\n\n# In[22]:\n\n\nX = new_df.drop('Class', axis=1)\ny = new_df['Class']\n\n\n# In[23]:\n\n\n# Nous implémentons la méthode T-SNE pour la visualisation: \nX_reduced_tsne = TSNE(n_components=2, random_state=42).fit_transform(X.values)\n\n# Nous implémentons également l'ACP\nX_reduced_pca = PCA(n_components=2, random_state=42).fit_transform(X.values)\n\n\n# In[24]:\n\n\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(24,6))\n# labels = ['No Fraud', 'Fraud']\nf.suptitle('Clusters using Dimensionality Reduction', fontsize=14)\n\n\nblue_patch = mpatches.Patch(color='#0A0AFF', label='No Fraud')\nred_patch = mpatches.Patch(color='#AF0000', label='Fraud')\n\n\n# t-SNE scatter plot\nax1.scatter(X_reduced_tsne[:,0], X_reduced_tsne[:,1], c=(y == 0), cmap='coolwarm', label='No Fraud', linewidths=2)\nax1.scatter(X_reduced_tsne[:,0], X_reduced_tsne[:,1], c=(y == 1), cmap='coolwarm', label='Fraud', linewidths=2)\nax1.set_title('t-SNE', 
fontsize=14)\n\nax1.grid(True)\n\nax1.legend(handles=[blue_patch, red_patch])\n\n\n# PCA scatter plot\nax2.scatter(X_reduced_pca[:,0], X_reduced_pca[:,1], c=(y == 0), cmap='coolwarm', label='No Fraud', linewidths=2)\nax2.scatter(X_reduced_pca[:,0], X_reduced_pca[:,1], c=(y == 1), cmap='coolwarm', label='Fraud', linewidths=2)\nax2.set_title('PCA', fontsize=14)\n\nax2.grid(True)\n\nax2.legend(handles=[blue_patch, red_patch])\n\n\nplt.show()\n\n\n# In[25]:\n\n\n# Nous séparons les données d'apprentissage et données tests (30% du dataset)\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n\n# In[26]:\n\n\n# Nous passons les données sous forme d'arrays pour alimenter le modèle\nX_train = X_train.values\nX_test = X_test.values\ny_train = y_train.values\ny_test = y_test.values\n\n\n# In[27]:\n\n\n#Ici nous testons 4 algorithmes de classification et en retiendrons un seul\n\nclassifiers = {\n \"LogisiticRegression\": LogisticRegression(),\n \"KNearest\": KNeighborsClassifier(),\n \"Support Vector Classifier\": SVC(),\n \"DecisionTreeClassifier\": DecisionTreeClassifier()\n}\n\n\n# In[28]:\n\n\n#Logistic regression comme à l'attendu fournit de bonnes performances\nfrom sklearn.model_selection import cross_val_score\n\nfor key, classifier in classifiers.items():\n classifier.fit(X_train, y_train)\n training_score = cross_val_score(classifier, X_train, y_train, cv=5)\n print(classifier.__class__.__name__, \"a un score d'apprentissage de\", round(training_score.mean(), 2) * 100, \"% accuracy score\")\n\n\n# In[29]:\n\n\n# Nous utilisons GridSearch pour déterminer les meilleurs paramètres.\nfrom sklearn.model_selection import GridSearchCV\n\nlog_reg_params = {\"penalty\": ['l1', 'l2'], 'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}\n\n\ngrid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\ngrid_log_reg.fit(X_train, y_train)\n#Nous obtenons le modèle de régression logistique avec les meilleurs paramètres:\nlog_reg = grid_log_reg.best_estimator_\n\n\n# In[30]:\n\n\nfrom sklearn.metrics import roc_curve\nfrom sklearn.model_selection import cross_val_predict\n# Nous créons un dataframe avec tous les scores et classifiers\n\nlog_reg_pred = cross_val_predict(log_reg, X_train, y_train, cv=5,\n method=\"decision_function\")\n\n\n# In[31]:\n\n\nfrom sklearn.metrics import roc_auc_score\n\nprint('Logistic Regression: ', roc_auc_score(y_train, log_reg_pred))\n\n\n# In[32]:\n\n\n#Nous visualisons la courbe ROC qui mesure les performances du modèle. Nous retiendrons ici la régression logistique\n#à appliquer sur nos données\nlog_fpr, log_tpr, log_thresold = roc_curve(y_train, log_reg_pred)\n\ndef graph_roc_curve_multiple(log_fpr, log_tpr):\n plt.figure(figsize=(16,8))\n plt.title('ROC Curve \\n Logistic Regression Classifier', fontsize=18)\n plt.plot(log_fpr, log_tpr, label='Logistic Regression Classifier Score: {:.4f}'.format(roc_auc_score(y_train, log_reg_pred)))\n plt.plot([0, 1], [0, 1], 'k--')\n plt.axis([-0.01, 1, 0, 1])\n plt.xlabel('False Positive Rate', fontsize=16)\n plt.ylabel('True Positive Rate', fontsize=16)\n plt.annotate('Minimum ROC Score of 50% \\n (This is the minimum score to get)', xy=(0.5, 0.5), xytext=(0.6, 0.3),\n arrowprops=dict(facecolor='#6E726D', shrink=0.05),\n )\n plt.legend()\n \ngraph_roc_curve_multiple(log_fpr, log_tpr)\nplt.show()\n\n\n# In[33]:\n\n\nlog_reg.fit(X_train,y_train)\ny_pred=log_reg.predict(X_test)\n\n\n# In[34]:\n\n\n#Matrice de confusion. 
Ici le plus important est de détecter le maximum d'opérations frauduleuses:\n#le but est de trouver le modèle qui minimise les false negatives sur les transactions frauduleuses.\nfrom sklearn.metrics import confusion_matrix \n\ncm=confusion_matrix(y_test,y_pred)\nf, ax = plt.subplots(figsize =(5,5))\nsns.heatmap(cm,annot = True,linewidths=0.5,linecolor=\"red\",fmt = \".0f\",ax=ax)\nplt.title(\"Test for Test Dataset\")\nplt.xlabel(\"predicted y values\")\nplt.ylabel(\"real y values\")\nplt.ylim([-3, 3])\nplt.show()\n\n\n# In[35]:\n\n\n#Nous séparons les variables discriminantes de la variable cible pour le dataset initial:\noriginal_X = df.drop('Class', axis=1)\noriginal_y = df['Class']\n\n\n# In[36]:\n\n\n#Nous appliquons le modèle au dataset initial:\nlog_reg.fit(original_X,original_y)\ny_pred=log_reg.predict(original_X)\n\n\n# In[37]:\n\n\n#172/492 (34.9%) transactions frauduleuses sont mal classifiées par le modèle. Ce ratio peut être amélioré.\nfrom sklearn.metrics import confusion_matrix \n\ncm=confusion_matrix(original_y,y_pred)\nf, ax = plt.subplots(figsize =(5,5))\nsns.heatmap(cm,annot = True,linewidths=0.5,linecolor=\"red\",fmt = \".0f\",ax=ax)\nplt.title(\"Test for Test Dataset\")\nplt.xlabel(\"predicted y values\")\nplt.ylabel(\"real y values\")\nplt.ylim([-3, 3])\nplt.show()\n\n\n# In[38]:\n\n\n#Ici nous allons évaluer les performances de la régression logistique appliquée directement aux données initiales \n#après découpage (train, test) afin d'évaluer l'apport du sous-échantillonnage réalisé précédemment:\nX = df.drop('Class', axis=1)\ny = df['Class']\n\n\n# In[39]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n\n# In[40]:\n\n\nclf = LogisticRegression()\nclf.fit(X_train,y_train)\ny_pred=clf.predict(X_test)\n\n\n# In[41]:\n\n\n#52.8% des transactions frauduleuses sont mal classifiées par le modèle. 
L'over-sampling nous a donc permis d'améliorer\n#grandement notre modèle, même si les résultats peuvent encore être améliorés notamment la minimisation des\n#false negatives\nfrom sklearn.metrics import confusion_matrix \n\ncm=confusion_matrix(y_test,y_pred)\nf, ax = plt.subplots(figsize =(5,5))\nsns.heatmap(cm,annot = True,linewidths=0.5,linecolor=\"red\",fmt = \".0f\",ax=ax)\nplt.title(\"Test for Test Dataset\")\nplt.xlabel(\"predicted y values\")\nplt.ylabel(\"real y values\")\nplt.ylim([-3, 3])\nplt.show()\n\n","sub_path":"Fraud.py","file_name":"Fraud.py","file_ext":"py","file_size_in_byte":13553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"622098429","text":"import datetime\nfrom urllib.request import urlopen\nimport webbrowser\n\n\nnow = datetime.datetime.now()\ncurrentMinute = 0 # last successfull request\ncurrentSize = 0\n\nurl = \"http://feeds.feedburner.com/jExam\"\nwhile (True):\n now = datetime.datetime.now()\n if (((now.minute - currentMinute) >= 10) or ((now.minute - currentMinute) < 0)):\n print(\"New Intervall at \" + str(now.hour) + \":\" + str(now.minute) + \" Uhr\")\n\n html = urlopen(url).read().decode('utf-8')\n length = len(html)\n print(length)\n if (length > currentSize or length < currentSize):\n print(\"New Content\")\n webbrowser.open(\"https://jexam.inf.tu-dresden.de/de.jexam.web.v4.5/spring/welcome\")\n currentSize = length\n\n now = datetime.datetime.now()\n currentMinute = now.minute\n","sub_path":"website_spy.py","file_name":"website_spy.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"375843053","text":"# coding=utf-8\nimport heapq\nimport unittest\nfrom collections import Counter\n\n\ndef most_common(paragraph, banned):\n # normalize the paragraph by removing\n # special characters\n freq_heap = []\n word_count = Counter([s.strip(',.') for s in paragraph.lower().split()])\n for word, freq in word_count.items():\n heapq.heappush(freq_heap, (-freq, word))\n\n while freq_heap:\n word = heapq.heappop(freq_heap)[1]\n if word not in banned:\n return word\n\n return None\n\n\nclass TestMostCommon(unittest.TestCase):\n def setUp(self):\n self.input = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\n self.banned = ['hit']\n\n def test_valid_input(self):\n self.assertEqual(most_common(self.input, self.banned), 'ball')\n\n def test_banned_word_not_in_input(self):\n self.assertEqual(most_common(self.input, ['abc']), 'hit')\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"leetcode/819_most_common.py","file_name":"819_most_common.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"8835913","text":"import cv2\nimport pickle\nimport os\nfrom multiprocessing import Pool\nimport time\n\n\ndef get_top_k_result(match_list=None, k=10):\n\tresult = (sorted(match_list, key=lambda l: l[1], reverse=True))\n\treturn result[:k]\n\n\ndef extract(img, sift):\n\t_, des = sift.detectAndCompute(img, None)\n\treturn des\n\n\ndef read(featurepath):\n\twith open(featurepath, \"rb\") as dump:\n\t\tdes = pickle.load(dump)\n\treturn des\n\n\ndef func(params):\n\tmatch_list = []\n\tfeature_path = os.path.join('./sift', params[1])\n\tfeatures = read(feature_path)\n\tif (features.all()) == None:\n\t\treturn\n\tbf = cv2.BFMatcher()\n\tmatches = bf.knnMatch(params[0], features, k=2)\n\tsimilar_list = []\n\tfor 
m, n in matches:\n\t\tif m.distance < 0.75 * n.distance:\n\t\t\tsimilar_list.append([m])\n\tmatch_list.append([params[1], len(similar_list)])\n\treturn match_list\n\n\ndef search(query_path):\n\tsift = cv2.xfeatures2d.SIFT_create()\n\tquery_img = cv2.imread(query_path, 0)\n\tquery_des = extract(query_img, sift)\n\tmatch_list = []\n\tindexed_list = os.listdir('./sift')\n\tfor idx, feature_file in enumerate(indexed_list):\n\t\tfeature_path = os.path.join('./sift', feature_file)\n\t\tfeatures = read(feature_path)\n\t\tif (features.all()) == None:\n\t\t\tcontinue\n\t\tbf = cv2.BFMatcher()\n\t\tmatches = bf.knnMatch(query_des, features, k=2)\n\t\tsimilar_list = []\n\t\tfor m, n in matches:\n\t\t\tif m.distance < 0.75 * n.distance:\n\t\t\t\tsimilar_list.append([m])\n\t\tmatch_list.append([feature_file, len(similar_list)])\n\n\t\tdel features, similar_list\n\n\tresult = get_top_k_result(match_list=match_list, k=5)\n\tprint (query_path.split('_')[2], result)\n\treturn result\n\n\ndef search1(query_path):\n\tsift = cv2.xfeatures2d.SIFT_create()\n\tquery_img = cv2.imread(query_path, 0)\n\tquery_des = extract(query_img, sift)\n\tindexed_list = os.listdir('./sift')\n\tstart_time = time.time()\n\tpool = Pool(4)\n\tpool.map(func, [query_des, indexed_list])\n\tprint (\"--- {} seconds ---\".format(time.time() - start_time))\n\n\ndef search2(query_path, indexed_file):\n\tsift = cv2.xfeatures2d.SIFT_create()\n\tquery_img = cv2.imread(query_path, 0)\n\tquery_des = extract(query_img, sift)\n\tfeature_path = os.path.join('./sift', indexed_file)\n\tfeatures = read(feature_path)\n\tif (features.all()) == None:\n\t\treturn\n\tbf = cv2.BFMatcher()\n\tmatches = bf.knnMatch(query_des, features, k=2)\n\tsimilar_list = []\n\tmatch_list = []\n\tfor m, n in matches:\n\t\tif m.distance < 0.75 * n.distance:\n\t\t\tsimilar_list.append([m])\n\tmatch_list.append([indexed_file, len(similar_list)])\n\treturn match_list\n\n\ndef multiprocessing_search(data):\n\tquery_des = data[0]\n\tid = data[1]\n\tindexed_des = data[2]\n\t# start_time2 = time.time()\n\tbf = cv2.BFMatcher()\n\tmatches = bf.knnMatch(query_des, indexed_des, k=2)\n\tsimilar_list = []\n\tfor m, n in matches:\n\t\tif m.distance < 0.75 * n.distance:\n\t\t\tsimilar_list.append([m])\n\tret = [id, len(similar_list)]\n\treturn ret\n\n\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"88778997","text":"from flask import Flask, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef odczyt():\n # spróbuj: http://127.0.0.1:5000?a=123&b=456\n\n argumenty = request.args\n for klucz, wartosc in argumenty.items():\n print(f'{klucz}: {wartosc}')\n return f'argumenty: {argumenty}'\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"Python - advanced/zajecia05/querystr2.py","file_name":"querystr2.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"297685879","text":"\nfrom PySide2.QtWidgets import QApplication, QWidget\nimport sys\nfrom myprog import Ui_Form\nfrom alg import factorial\n\n__author__ = \"Kytsenko\"\n\nclass MainWindow(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.ui = Ui_Form()\n self.ui.setupUi(self)\n self.ui.pushButton.clicked.connect(self.rashet)\n\n def rashet(self):\n self.ui.lineEdit_2.setText(str(factorial(int(self.ui.lineEdit.text()))))\n\n\nif __name__ == 
'__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"Masha/factorial/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"545560415","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ntyps = {'Noun': 1, 'Verb': 2, 'Adjective': 3, 'Adverb': 4}\n\nclass Word:\n\tdef __init__(self, ob, rl):\n\t\tself.rl = rl\n\t\tself.ob = False\n\t\tself.rats = {}\n\t\tfor typ in typs:\n\t\t\tself.rats[typ] = 0\n\t\tself.json = False\n\t\tself.setOb(ob)\n\t\tself.ratsSumm = 1.1\n\t\tself.onlyTyp = False\n\n\tdef setOb(self, ob):\n\t\tself.ob = ob\n\t\tself.text = ob['text']\n\t\tfor typ in typs:\n\t\t\tif typ in ob:\n\t\t\t\tself.rats[typ] = 10\n\n\tdef autogenJSON(self):\n\t\tself.json = [self.text, 0, 0, 0]\n\t\tif 'Noun' in self.ob and self.ob['Noun']:\n\t\t\tself.genNounJSON()\n\t\telif 'Verb' in self.ob and self.ob['Verb']:\n\t\t\tself.genVerbJSON()\n\t\telif 'Adjective' in self.ob and self.ob['Adjective']:\n\t\t\tself.genAdjectiveJSON()\n\t\telif 'Adverb' in self.ob and self.ob['Adverb']:\n\t\t\tself.genAdverbJSON()\n\n\tdef updateRatsSumm(self):\n\t\tself.ratsSumm = 1.1\n\t\tfor typ in typs:\n\t\t\tself.ratsSumm += self.rats[typ]\n\n\tdef autosetOnlyTyp(self):\n\t\tmaxV = -1.1\n\t\tonlyTyp = False\n\t\tfor typ in typs:\n\t\t\tif self.rats[typ] > maxV:\n\t\t\t\tmaxV = self.rats[typ]\n\t\t\t\tonlyTyp = typ\n\t\tif onlyTyp:\n\t\t\tif not self.onlyTyp:\n\t\t\t\tself.onlyTyp = onlyTyp\n\t\tfor typ in typs:\n\t\t\tif typ != self.onlyTyp:\n\t\t\t\tself.ob[typ] = False","sub_path":"parsDict/Word.py","file_name":"Word.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"67560469","text":"import networkx as nx\nimport commons\nimport rumor_centrality\n\n\nclass Single_source:\n def __init__(self):\n pass\n\n\n\n '''\n 多个独立观察图,然后进行联合单源定位。\n '''\n\n def mutiple_Observation(self, infectG, siG, siG2, source_ture):\n # 将图构造成两个list,一个是感染点list,一个是感染和它的邻居点构造成的list\n infect_node = []\n infect_neighbour_list = []\n print(infectG.number_of_nodes())\n # random_node = random.choice(list(siG.nodes()))\n # subinfectG = nx.bfs_tree(siG, source=random_node)\n subinfectG = siG\n # who_infected = [[] for i in range(infectG.number_of_nodes())]\n # 找出最大的id数目。\n maxs = 0\n for node_index in list(infectG.nodes):\n if node_index > maxs:\n maxs = node_index\n print('maxs', maxs)\n for node in list(subinfectG.nodes()):\n infect_node.append(node)\n who_infected = [[] for i in range(maxs + 1)]\n\n i = 0\n for node_temp in infect_node:\n neighbour_list = list(nx.neighbors(subinfectG, node_temp))\n neighbour_list_index = []\n for neighbour in neighbour_list:\n neighbour_list_index.append(infect_node.index(neighbour))\n who_infected[i] = neighbour_list_index\n i += 1\n\n print('infect_node', infect_node)\n print('who_infected', who_infected)\n rumor_center_object = rumor_centrality.rumor_center()\n\n rumor_center, center = rumor_center_object.rumor_centrality(who_infected)\n\n print('rumor_center', rumor_center)\n print('center', center)\n return [infect_node[rumor_center]]\n\n '''\n 设计本类用来做单源 定位。\n # '''\n\n def main(self, filename):\n\n # #拿到图\n initG = commons.get_networkByFile(filename)\n max_sub_graph = commons.judge_data(initG)\n # source_list = product_sourceList(max_sub_graph, 2)\n source_list = commons.product_sourceList(max_sub_graph, 1)\n # 
print('两个节点的距离', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))\n infectG, T = commons.propagation1(max_sub_graph, source_list)\n infectG1, T = commons.propagation1(max_sub_graph, source_list)\n subinfectG = commons.get_subGraph_true(infectG) # 只取感染点,为2表示,真实的感染图。\n # 将在这里进行单源测试。\n\n subinfectG1 = commons.get_subGraph_true(infectG1)\n\n # 多个观察点\n result_node = self.mutiple_Observation(infectG, subinfectG,subinfectG1, source_list[0])\n\n\n\n\n\n\n\n print('真实源是', source_list[0])\n print('预测源是', result_node[0])\n distance = nx.shortest_path_length(subinfectG, source=source_list[0], target=result_node[0])\n print('结果是', distance)\n return distance\n\n\n'''\n\n\n'''\nimport time\n\nif __name__ == '__main__':\n test = Single_source()\n sum = 0\n # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')\n # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')\n # initG = commons.get_networkByFile('../../data/4_regular_graph_3000_data.txt')\n # initG = commons.get_networkByFile(filename)\n # filname = '../../../data/4_regular_graph_3000_data.txt'\n # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')\n # filname = '../../../data/CA-GrQc.txt'\n filname = '../../../data/3regular_tree9.txt'\n # method ='distan+ covage'\n # method = 'jardan_center'\n # method ='distance'\n method = '乔丹中心性'\n\n for i in range(0, 20):\n tempresult = test.main(filname)\n sum += tempresult # 跑实验\n with open('result.txt', \"a\") as f:\n # f.write(\"这是个测试!\") # 这句话自带文件关闭功能,不需要再写f.close()\n f.write(str(time.asctime(time.localtime(time.time()))) + '\\n')\n f.write('每一步的结果是 ' + str(tempresult) + ' 数据集' + '方法' + str(method) + str(filname) + '\\n')\n with open('result.txt', \"a\") as f:\n f.write('数据集' + str(filname) + '方法' + str(method) + '总结果 ' + str(sum / 20) + '\\n')\n f.write('\\n')\n print('result', sum / 20)\n print(sum / 20)\n\n\n\n\n","sub_path":"jarden_center/main_code/single-multiple-source/change_world/Conditional_experiment/single_source_detection/mutiple_observation/mutiple_observation.py","file_name":"mutiple_observation.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"468505711","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport yaml\nimport codecs\nimport datetime\nimport subprocess\n\nimport VenC.core\nimport VenC.pattern\n\ndef entry(argv):\n\n if len(argv) < 1:\n print(\"VenC: \"+VenC.core.Messages.missingParams.format(\"--new-entry\"))\n return\n \n if VenC.core.blogConfiguration == None:\n print(\"VenC: \"+VenC.core.Messages.noBlogConfiguration)\n return\n \n content = {\"authors\":\t\"\",\n\t\t\"tags\":\t\t\"\",\n\t\t\"categories\":\t\"\"}\n try:\n wd = os.listdir(os.getcwd())\n except OSError:\n print(VenC.core.Messages.cannotReadIn.format(os.getcwd()))\n return\n\n date = datetime.datetime.now()\n\n entry = VenC.core.SetNewEntryMetadata(date, argv[0])\n\n content[\"entry_name\"] = argv[0]\n\n outputFilename = os.getcwd()+'/entries/'+str(entry[\"EntryID\"])+\"__\"+str(date.month)+'-'+str(date.day)+'-'+str(date.year)+'-'+str(date.hour)+'-'+str(date.minute)+\"__\"+content[\"entry_name\"].replace(' ','_')\n stream = codecs.open(outputFilename,'w',encoding=\"utf-8\")\n if len(argv) == 1:\n output = yaml.dump(content, default_flow_style=False, allow_unicode=True) + \"---\\n\"\n else:\n try:\n output = open(os.getcwd()+'/templates/'+argv[1], 'r').read()\n patternProcessor = VenC.pattern.processor(\".:\",\":.\",\"::\")\n patternProcessor.SetWholeDictionnary(entry)\n output = patternProcessor.parse(output)\n\n except FileNotFoundError as e:\n print(\"VenC: \"+VenC.core.Messages.fileNotFound.format(os.getcwd()+\"/templates/\"+argv[1]))\n return\n \n stream.write(output)\n stream.close()\n try:\n subprocess.call([VenC.core.blogConfiguration[\"textEditor\"], outputFilename]) \n except:\n pass\n\ndef blog(argv):\n default_configuration =\t{\"blog_name\":\t\t\tVenC.core.Messages.blogName,\n \"textEditor\": \"nano\",\n \"date_format\": \"%A %d. 
%B %Y\",\n\t\t\t\t\"author_name\":\t\t\tVenC.core.Messages.yourName,\n\t\t\t\t\"blog_description\":\t\tVenC.core.Messages.blogDescription,\n\t\t\t\t\"blog_keywords\":\t\tVenC.core.Messages.blogKeywords,\n\t\t\t\t\"author_description\":\t\tVenC.core.Messages.aboutYou,\n\t\t\t\t\"license\":\t\t\tVenC.core.Messages.license,\n\t\t\t\t\"url\":\t\t\t\tVenC.core.Messages.blogUrl,\n\t\t\t\t\"blog_language\":\t\tVenC.core.Messages.blogLanguage,\n\t\t\t\t\"email\":\t\t\tVenC.core.Messages.yourEmail,\n\t\t\t\t\"path\":\t\t\t\t{\"index_file_name\":\t\t\t\"index{page_number}.html\",\n\t\t\t\t\t\t\t\t\"category_directory_name\":\t\t\"{category}\",\n\t\t\t\t\t\t\t\t\"dates_directory_name\":\t\t\t\"%Y-%m\",\n\t\t\t\t\t\t\t\t\"entry_file_name\":\t\t\t\"entry{entry_id}.html\",\n\t\t\t\t\t\t\t\t\"archives_overview_directory_name\":\t\"overview\"},\n\t\t\t\t\t\t\t\t\"rss_file_name\":\t\t\t\"feed.xml\",\n\t\t\t\t\"entries_per_pages\":\t\t10,\n\t\t\t\t\"columns\":\t\t\t1,\n\t\t\t\t\"rss_thread_lenght\":\t\t5,\n\t\t\t\t\"thread_order\":\t\t\t\"latest first\"}\n for folder_name in argv:\n try:\n os.mkdir(folder_name)\n except OSError:\n print(\"VenC: \"+VenC.core.Messages.fileAlreadyExists.format(\"--new-blog\",os.getcwd()+'/'+folder_name))\n return\n os.mkdir(folder_name+'/'+\"blog\")\n os.mkdir(folder_name+'/'+\"entries\")\n os.mkdir(folder_name+'/'+\"theme\")\n os.mkdir(folder_name+'/'+\"extra\")\n os.mkdir(folder_name+'/'+\"templates\")\n stream = codecs.open(folder_name+'/'+'blog_configuration.yaml', 'w',encoding=\"utf-8\")\n yaml.dump(default_configuration, stream, default_flow_style=False, allow_unicode=True)\n","sub_path":"src/VenC/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"336419800","text":"#!/bin/python \n\nimport os\nimport sys\n\nproject_path, x = os.path.split(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(project_path)\n\nimport tensorflow as tf\nfrom keras.optimizers import Adam, Adadelta\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger\nfrom keras import metrics\nimport numpy as np\n\nfrom mach.model import create_optical_flow_model, MobileNetSlim, create_mobilenet_plus_model, recurrent_net, create_simple_optical_flow_model, recurrent_mobilenet\nfrom mach.util import isAWS, upload_s3, stop_instance, full_path\nfrom mach.data import create_mobilenet_generators, create_recurrent_generators, create_orig_recurrent_generator\n\nclass Config():\n\tdef __init__(self, folder, max_epochs, batch_size, min_delta, patience, alpha, is_recurrent):\n\t\tself.folder = folder\n\t\tself.max_epochs = max_epochs\n\t\tself.batch_size = batch_size\n\t\tself.min_delta = int(min_delta*1000)\n\t\tself.patience = patience\n\t\tself.alpha = int(alpha*100)\n\t\tif is_recurrent:\n\t\t\tself.is_recurrent = \"recurrent\"\n\t\telse:\n\t\t\tself.is_recurrent = \"non_recurrent\"\n\n\tdef model_name(self):\n\t\treturn \"{}_{}_{}_{}_{}_{}_{}\".format(self.folder, self.max_epochs, self.batch_size, self.min_delta, self.patience, self.alpha, self.is_recurrent)\n\n\tdef model_checkpoint(self):\n\t\treturn \"{}.ckpt\".format(self.model_name())\n\n\tdef csv_log_file(self):\n\t\treturn \"{}.csv\".format(self.model_name())\t\t\t\t\n\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('max_epochs', 1, 'Number of training examples.')\nflags.DEFINE_integer('model_file', None, 'If defined loaded a saved model and continue 
training.')\nflags.DEFINE_integer('batch_size', 32, 'The batch size for the generator')\nflags.DEFINE_integer('num_images', 2, 'The number of images used to make the opticlal flow.')\nflags.DEFINE_string('folder', 'optical_flow_2_augmented_5', 'The folder inside the data folder where the images and labels are.')\nflags.DEFINE_float('alpha', 0.75, 'The alpha param for MobileNet.')\nflags.DEFINE_boolean('debug', False, 'If this is for debugging the model/training process or not.')\nflags.DEFINE_integer('verbose', 0, 'Whether to use verbose logging when constructing the data object.')\nflags.DEFINE_boolean('stop', True, 'Stop aws instance after finished running.')\nflags.DEFINE_float('min_delta', 0.005, 'Early stopping minimum change value.')\nflags.DEFINE_integer('patience', 20, 'Early stopping epochs patience to wait before stopping.')\nflags.DEFINE_boolean('is_recurrent', False, 'If this is processing for a recurrent model or not.')\n\ndef main(_):\n\tconfig = Config(FLAGS.folder, FLAGS.max_epochs, FLAGS.batch_size, FLAGS.min_delta, FLAGS.patience, FLAGS.alpha, FLAGS.is_recurrent)\n\tprint(\"Training model named\", config.model_name())\n\n\tfolder_path = full_path(\"data/{}\".format(FLAGS.folder))\n\n\tif FLAGS.is_recurrent:\n\t\t# train, valid, input_shape = create_recurrent_generators(folder_path, FLAGS.batch_size, FLAGS.num_images, FLAGS.debug)\n\t\ttrain, valid, input_shape = create_orig_recurrent_generator(FLAGS.batch_size, FLAGS.num_images, FLAGS.debug)\n\telse:\n\t\ttrain, valid, input_shape = create_mobilenet_generators(folder_path, FLAGS.batch_size, FLAGS.num_images, FLAGS.debug)\n\n\ttrain_steps_per_epoch, train_generator = train\n\tvalid_steps_per_epoch, valid_generator = valid\n\n\tprint(\"Creating model with input\", input_shape) \n\n\tif FLAGS.model_file:\n\t\tmodel = load_model(full_path(FLAGS.model_file))\n\telif FLAGS.is_recurrent:\n\t\t# model = recurrent_net(input_shape, FLAGS.num_images, FLAGS.alpha)\n\t\tmodel = recurrent_mobilenet(input_shape, FLAGS.num_images, FLAGS.alpha)\n\telse:\n\t\t# model = create_optical_flow_model(input_shape, FLAGS.alpha)\n\t\t# model = MobileNetSlim(input_shape, FLAGS.alpha)\n\t\t# model = create_mobilenet_plus_model(input_shape, FLAGS.num_images, FLAGS.alpha)\n\t\t# model = create_optical_flow_model(input_shape, FLAGS.num_images, FLAGS.alpha)\n\t\tmodel = create_simple_optical_flow_model(input_shape)\n\n\tif FLAGS.debug:\n\t\tprint(model.summary())\n\t\tcallbacks = None\n\telse:\n\t\tcallbacks = [\n\t\t\tModelCheckpoint(config.model_checkpoint(), verbose=FLAGS.verbose, save_best_only=True),\n\t\t\tCSVLogger(config.csv_log_file()),\n\t\t\tEarlyStopping(monitor='val_loss', min_delta=FLAGS.min_delta, patience=FLAGS.patience, verbose=1)\n\t\t]\n\n\tprint(\"Compiling model.\")\n\tmodel.compile(\n\t\toptimizer= Adam(lr=0.0001),#, clipnorm=15.),\n\t\tloss={'speed': 'mean_squared_error'},\n\t\tmetrics=['mean_absolute_error', 'mean_squared_error'])\n\n\tprint(\"Starting model train process.\")\n\tmodel.fit_generator(train_generator,\n\t\ttrain_steps_per_epoch,\n\t\tepochs=FLAGS.max_epochs,\n\t\tverbose=FLAGS.verbose,\n\t\tcallbacks=callbacks,\n\t\tvalidation_data=valid_generator,\n\t\tvalidation_steps=valid_steps_per_epoch)\n\n\tprint(\"Finished training model.\")\n\n\tif isAWS() and FLAGS.debug == False:\n\t\tupload_s3(config.model_checkpoint())\n\t\tupload_s3(config.csv_log_file())\n\n\tif isAWS() and FLAGS.stop:\n\t\tstop_instance()\n\n\nif __name__ == 
'__main__':\n\ttf.app.run()\n","sub_path":"bin/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"265608308","text":"#\n# Copyright 2017 by Delphix\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n# This class has been automatically generated from:\n# /delphix-host-os.json\n#\n# Do not edit this file manually!\n#\n\nfrom delphixpy.v1_9_0.web.vo.TypedObject import TypedObject\nfrom delphixpy.v1_9_0 import common\n\nclass __Undef(object):\n def __repr__(self):\n return \"undef\"\n\n_UNDEFINED = __Undef()\n\nclass HostOS(TypedObject):\n \"\"\"\n *(extends* :py:class:`v1_9_0.web.vo.TypedObject` *)* The operating system\n information for the host.\n \"\"\"\n def __init__(self, undef_enabled=True):\n super(HostOS, self).__init__()\n self._type = (\"HostOS\", True)\n self._kernel = (self.__undef__, True)\n self._name = (self.__undef__, True)\n self._version = (self.__undef__, True)\n self._release = (self.__undef__, True)\n self._timezone = (self.__undef__, True)\n self._distribution = (self.__undef__, True)\n\n API_VERSION = \"1.9.0\"\n\n @classmethod\n def from_dict(cls, data, dirty=False, undef_enabled=True):\n obj = super(HostOS, cls).from_dict(data, dirty, undef_enabled)\n obj._kernel = (data.get(\"kernel\", obj.__undef__), dirty)\n if obj._kernel[0] is not None and obj._kernel[0] is not obj.__undef__:\n assert isinstance(obj._kernel[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._kernel[0]))\n common.validate_format(obj._kernel[0], \"None\", None, None)\n obj._name = (data.get(\"name\", obj.__undef__), dirty)\n if obj._name[0] is not None and obj._name[0] is not obj.__undef__:\n assert isinstance(obj._name[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._name[0]))\n common.validate_format(obj._name[0], \"None\", None, None)\n obj._version = (data.get(\"version\", obj.__undef__), dirty)\n if obj._version[0] is not None and obj._version[0] is not obj.__undef__:\n assert isinstance(obj._version[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._version[0]))\n common.validate_format(obj._version[0], \"None\", None, None)\n obj._release = (data.get(\"release\", obj.__undef__), dirty)\n if obj._release[0] is not None and obj._release[0] is not obj.__undef__:\n assert isinstance(obj._release[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._release[0]))\n common.validate_format(obj._release[0], \"None\", None, None)\n obj._timezone = (data.get(\"timezone\", obj.__undef__), dirty)\n if obj._timezone[0] is not None and obj._timezone[0] is not obj.__undef__:\n assert isinstance(obj._timezone[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._timezone[0]))\n common.validate_format(obj._timezone[0], \"None\", None, None)\n obj._distribution = (data.get(\"distribution\", obj.__undef__), dirty)\n if obj._distribution[0] is not None and 
obj._distribution[0] is not obj.__undef__:\n assert isinstance(obj._distribution[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._distribution[0]))\n common.validate_format(obj._distribution[0], \"None\", None, None)\n return obj\n\n def to_dict(self, dirty=False):\n dct = super(HostOS, self).to_dict(dirty)\n\n def dictify(obj):\n if isinstance(obj, list):\n return [dictify(o) for o in obj]\n elif hasattr(obj, \"to_dict\"):\n return obj.to_dict()\n else:\n return obj\n if \"kernel\" == \"type\" or (self.kernel is not self.__undef__ and not (dirty and not self._kernel[1])):\n dct[\"kernel\"] = dictify(self.kernel)\n if \"name\" == \"type\" or (self.name is not self.__undef__ and not (dirty and not self._name[1])):\n dct[\"name\"] = dictify(self.name)\n if \"version\" == \"type\" or (self.version is not self.__undef__ and not (dirty and not self._version[1])):\n dct[\"version\"] = dictify(self.version)\n if \"release\" == \"type\" or (self.release is not self.__undef__ and not (dirty and not self._release[1])):\n dct[\"release\"] = dictify(self.release)\n if \"timezone\" == \"type\" or (self.timezone is not self.__undef__ and not (dirty and not self._timezone[1])):\n dct[\"timezone\"] = dictify(self.timezone)\n if \"distribution\" == \"type\" or (self.distribution is not self.__undef__ and not (dirty and not self._distribution[1])):\n dct[\"distribution\"] = dictify(self.distribution)\n return dct\n\n def dirty(self):\n return self.from_dict(self.to_dict(dirty=False), dirty=True)\n\n def force_dirty(self):\n self._kernel = (self._kernel[0], True)\n self._name = (self._name[0], True)\n self._version = (self._version[0], True)\n self._release = (self._release[0], True)\n self._timezone = (self._timezone[0], True)\n self._distribution = (self._distribution[0], True)\n\n def is_dirty(self):\n return any([self._kernel[1], self._name[1], self._version[1], self._release[1], self._timezone[1], self._distribution[1]])\n\n def __eq__(self, other):\n if other is None:\n return False\n if not isinstance(other, HostOS):\n return False\n return super(HostOS, self).__eq__(other) and \\\n self.kernel == other.kernel and \\\n self.name == other.name and \\\n self.version == other.version and \\\n self.release == other.release and \\\n self.timezone == other.timezone and \\\n self.distribution == other.distribution\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return common.generate_repr_string(self)\n\n @property\n def kernel(self):\n \"\"\"\n The OS kernel.\n\n :rtype: ``basestring``\n \"\"\"\n return self._kernel[0]\n\n @kernel.setter\n def kernel(self, value):\n self._kernel = (value, True)\n\n @property\n def name(self):\n \"\"\"\n The OS name.\n\n :rtype: ``basestring``\n \"\"\"\n return self._name[0]\n\n @name.setter\n def name(self, value):\n self._name = (value, True)\n\n @property\n def version(self):\n \"\"\"\n The OS version.\n\n :rtype: ``basestring``\n \"\"\"\n return self._version[0]\n\n @version.setter\n def version(self, value):\n self._version = (value, True)\n\n @property\n def release(self):\n \"\"\"\n The OS release.\n\n :rtype: ``basestring``\n \"\"\"\n return self._release[0]\n\n @release.setter\n def release(self, value):\n self._release = (value, True)\n\n @property\n def timezone(self):\n \"\"\"\n The OS timezone.\n\n :rtype: ``basestring``\n \"\"\"\n return self._timezone[0]\n\n @timezone.setter\n def timezone(self, value):\n self._timezone = (value, True)\n\n @property\n def distribution(self):\n \"\"\"\n The OS 
distribution.\n\n        :rtype: ``basestring``\n        \"\"\"\n        return self._distribution[0]\n\n    @distribution.setter\n    def distribution(self, value):\n        self._distribution = (value, True)\n\n","sub_path":"src/main/resources/delphixpy/v1_9_0/web/vo/HostOS.py","file_name":"HostOS.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"34341015","text":"import pickle\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.probability import ELEProbDist, FreqDist\nfrom nltk import NaiveBayesClassifier\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\nimport nltk\nimport re\nfrom flask import Flask,jsonify,request\nfrom flask_restful import reqparse\nimport json\n\n\nf = open('my_classifier.pickle', 'rb')\nclassifier = pickle.load(f)\nf.close()\n\nteks = open('data_latih_hasil_praproses.json', 'r')\nteks_open = teks.read()\nclean_tweet= json.loads(teks_open)\nteks.close()\n\ntweets=[]\nfor i in range(len(clean_tweet)):\n\tprint(clean_tweet[i])\n\t# each record is a (text, sentiment) pair\n\tword,sentimen = clean_tweet[i]\n\twords_filtered = [e.lower() for e in word.split()]\n\ttweets.append((words_filtered, sentimen))\n\ndef get_words_in_tweets(tweets):\n    all_words = []\n    for (words, sentiment) in tweets:\n        all_words.extend(words)\n    return all_words\n\n\ndef get_word_features(wordlist):\n    wordlist = nltk.FreqDist(wordlist)\n    word_features = wordlist.keys()\n    return word_features\n\n\nword_features = get_word_features(get_words_in_tweets(tweets))\n\ndef extract_features(document):\n\tdocument_words = set(document)\n\tfeatures = {}\n\tfor word in word_features:\n\t    features['contains(%s)' % word] = (word in document_words)\n\treturn features\n\napp = Flask(__name__)\n@app.route('/predict', methods=[\"POST\"])\n\ndef predict():\n\ttry:\n\t\n\t\t# Parse the needed arguments on POST request\n\t\tparser = reqparse.RequestParser()\n\t\tparser.add_argument('idalat', type=str, help='idalat')\n\t\targs = parser.parse_args()\n\t\tteks = args['idalat']\n\n\t\ti = teks\n\t\tsentimen= classifier.classify(extract_features(i.split()))\n\n\t\treturn jsonify({\"tasks\" : sentimen})\n\n\texcept Exception as e:\n\t\treturn {'error': str(e)}\n\t\n\t\n\t#return \"hello world\"+idalat\nif __name__==\"__main__\":\n    app.run(host= '0.0.0.0', port=33, debug=True)\n","sub_path":"hitung akurasi/beres/jamur/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"596875409","text":"from __future__ import division, print_function\nimport sys, os, glob, time, warnings, gc\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.table import Table, vstack, hstack\nimport fitsio\nfrom astropy.io import fits\n\nfrom multiprocessing import Pool\nfrom pathlib import Path\n\nparams = {'legend.fontsize': 'large',\n          'axes.labelsize': 'large',\n          'axes.titlesize': 'large',\n          'xtick.labelsize': 'large',\n          'ytick.labelsize': 'large',\n          'figure.facecolor': 'w'}\nplt.rcParams.update(params)\n\ntime_start_all = time.time()\n\nn_processes = 64\n\nkeep_status = True\n\nccdnamenumdict = {'S1': 25, 'S2': 26, 'S3': 27, 'S4': 28,\n                  'S5': 29, 'S6': 30, 'S7': 31,\n                  'S8': 19, 'S9': 20, 'S10': 21, 'S11': 22, 'S12': 23,\n                  'S13': 24,\n                  'S14': 13, 'S15': 14, 'S16': 15, 'S17': 16, 'S18': 17,\n                  'S19': 18,\n                  'S20': 8, 'S21': 9, 'S22': 10, 'S23': 11, 'S24': 12,\n                  'S25': 4, 'S26': 5, 'S27': 6, 'S28': 7,\n
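                  # ('S'/'N' prefixes denote the two halves of the DECam focal plane;\n                  # the 1-62 values are presumably the standard DECam CCDNUM convention,\n                  # and ccdnamenumdict_inv below simply inverts this mapping)\n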
'S29': 1, 'S30': 2, 'S31': 3,\n 'N1': 32, 'N2': 33, 'N3': 34, 'N4': 35,\n 'N5': 36, 'N6': 37, 'N7': 38,\n 'N8': 39, 'N9': 40, 'N10': 41, 'N11': 42, 'N12': 43,\n 'N13': 44,\n 'N14': 45, 'N15': 46, 'N16': 47, 'N17': 48, 'N18': 49,\n 'N19': 50,\n 'N20': 51, 'N21': 52, 'N22': 53, 'N23': 54, 'N24': 55,\n 'N25': 56, 'N26': 57, 'N27': 58, 'N28': 59,\n 'N29': 60, 'N30': 61, 'N31': 62,\n }\nccdnamenumdict_inv = {aa: bb for bb, aa in ccdnamenumdict.items()}\n\nfringe_dr9_dir = '/global/cfs/cdirs/desi/users/rongpu/dr9/fringe/DECam_CP-Fringe-Normed'\nfringe_dir = '/global/cfs/cdirs/desi/users/rongpu/dr10dev/fringe/data/fringe_templates'\n\nimage_dir = '/global/cfs/cdirs/cosmo/staging/'\nsurveyccd_path = '/global/cfs/cdirs/desi/users/rongpu/dr10dev/deep_fields/survey-ccds-dr10-deep-fields-v1.fits'\n\nblob_dir_dr9 = '/global/cfs/cdirs/desi/users/rongpu/dr9/decam_ccd_blob_mask'\nblob_dir_dr10 = '/global/cfs/cdirs/desi/users/rongpu/dr10dev/decam_ccd_blob_mask'\n\nfrgscale_dir = '/global/cfs/cdirs/desi/users/rongpu/dr10dev/fringe/data/frgscale'\n\nimage_output_dir = '/pscratch/sd/r/rongpu/dr10dev/fringe_corrected_images'\n\nstatus_dir = '/global/homes/r/rongpu/temp/status'\n\n\n##############################################################################################################################\n\n# Load CCD list\n# ccd_columns = ['image_filename', 'image_hdu', 'expnum', 'ccdname', 'filter', 'ccd_cuts', 'mjd_obs']\n# ccd = fitsio.read(surveyccd_path, columns=ccd_columns)\nccd = fitsio.read(surveyccd_path)\nccd = Table(ccd)\nmask = ccd['filter']=='z' # include only z-band images\nccd = ccd[mask]\nprint(len(ccd), len(np.unique(ccd['expnum'])))\nccd['ccdnum'] = [ccdnamenumdict[ccd['ccdname'][ii].strip()] for ii in range(len(ccd))]\n\nexpnum_list = np.unique(ccd['expnum'])\n\n# # Remove exposures that are already done\n# expnum_list_done = np.zeros(len(expnum_list), dtype=bool)\n# for index, expnum in enumerate(expnum_list):\n# # Find an arbitrary CCD in the exposure to get the image filename\n# ccd_index = np.where((ccd['expnum']==expnum))[0][0]\n# img_fn_write = os.path.join(image_output_dir, ccd['image_filename'][ccd_index].strip())\n# if os.path.isfile(img_fn_write):\n# expnum_list_done[index] = True\n# print('Done Not-done Done/Not-done')\n# print(np.sum(expnum_list_done), np.sum(~expnum_list_done), np.sum(expnum_list_done)/len(expnum_list_done))\n# expnum_list = expnum_list[~expnum_list_done]\n# print('Expsoures left to process: ', len(expnum_list))\n\n# shuffle\nnp.random.seed(12345)\n# DO NOT USE NP.RANDOM.SHUFFLE\nexpnum_list = np.random.choice(expnum_list, size=len(expnum_list), replace=False)\n\n# Load DR9 fringe templates\nfringe_templates_dr9 = {}\nfor ccdnum in range(1, 63):\n ccdname = ccdnamenumdict_inv[ccdnum]\n fringe_template_path = os.path.join(fringe_dr9_dir, 'DECam_z_frg_{}_CCD{}.fits'.format(ccdname, str(ccdnum).zfill(2)))\n if os.path.isfile(fringe_template_path):\n fringe_tmp = fitsio.read(fringe_template_path)\n fringe_tmp = fringe_tmp[1:4095, 1:2047] # remove the edge pixels\n fringe_templates_dr9[ccdnum] = fringe_tmp.copy()\n\n# Load initial fringe templates\nfringe_templates = {}\nfor ccdnum in range(1, 63):\n ccdname = ccdnamenumdict_inv[ccdnum]\n fringe_template_path = os.path.join(fringe_dir, 'DECam_z_frg_{}_CCD{}.fits'.format(ccdname, str(ccdnum).zfill(2)))\n if os.path.isfile(fringe_template_path):\n fringe_tmp = fitsio.read(fringe_template_path)\n fringe_tmp = fringe_tmp[1:4095, 1:2047] # remove the edge pixels\n fringe_templates[ccdnum] = 
fringe_tmp.copy()\nprint('fringe_templates', len(fringe_templates))\n\nfringe_table = Table(fitsio.read('/global/cfs/cdirs/desi/users/rongpu/dr10dev/fringe/data/frgscale/survey-ccds-dr10-deep-fields-v1-frgscales.fits'))\nfringe_table_all = fringe_table.copy()\n\nfringe_stats = Table.read('/global/cfs/cdirs/desi/users/rongpu/dr10dev/fringe/data/frgscale/survey-ccds-dr10-deep-fields-v1-frgscales-stats.txt', format='ascii.commented_header')\n\n##############################################################################################################################\n\n\ndef save_image(expnum):\n\n time_start = time.time()\n\n print('expnum:', expnum)\n\n # Find an arbitrary CCD in the exposure to get the image filename\n ccd_index = np.where((ccd['expnum']==expnum))[0][0]\n\n # frgscale_path = os.path.join(frgscale_dir, ccd['image_filename'][ccd_index].strip().replace('.fits.fz', '.txt'))\n img_fn_write = os.path.join(image_output_dir, ccd['image_filename'][ccd_index].strip())\n img_fn = os.path.join(image_dir, ccd['image_filename'][ccd_index].strip())\n print(img_fn_write)\n\n if os.path.isfile(img_fn_write):\n print(img_fn_write+' already exists!')\n return None\n\n if keep_status:\n status_fn = os.path.join(status_dir, str(expnum))\n Path(status_fn).touch()\n\n if not os.path.exists(os.path.dirname(img_fn_write)):\n try:\n os.makedirs(os.path.dirname(img_fn_write))\n except:\n pass\n\n # if not os.path.isfile(frgscale_path):\n # print('Frgscale does not exist', frgscale_path)\n # return None\n # elif (os.stat(frgscale_path).st_size==0):\n # print('Frgscale is empty', frgscale_path)\n # return None\n # # fringe_table = Table.read(frgscale_path, format='ascii.commented_header')\n\n mask = fringe_table_all['expnum']==expnum\n fringe_table = fringe_table_all[mask]\n mask = (fringe_table['ccdname']!='S7') & (fringe_table['ccdname']!=['S30'])\n n_ccd = np.sum(mask)\n\n if n_ccd<10:\n print('Only {} CCDs available'.format(n_ccd))\n\n hdul_r = fitsio.FITS(img_fn, mode='r')\n hdul_w = fitsio.FITS(img_fn_write, mode='rw', clobber=True)\n\n for hdu_index in range(len(hdul_r)):\n if hdu_index==0:\n hdr = hdul_r[hdu_index].read_header()\n hdul_w.write(data=None, header=hdr)\n else:\n hdr = hdul_r[hdu_index].read_header()\n img = hdul_r[hdu_index].read()\n ccdname = hdul_r[hdu_index].get_extname()\n ccdnum = ccdnamenumdict[ccdname.strip()]\n\n # Back out the exisiting fringe correction\n if 'FRGSCNEW' in hdr:\n frgscale_dr9 = hdr['FRGSCNEW']\n img += fringe_templates_dr9[ccdnum] * frgscale_dr9\n\n mask = (fringe_table['ccdname']==ccdname)\n if np.sum(mask)==1:\n frgscale = fringe_table['frgscale_apply'][mask][0]\n else:\n fringe_stats_index = np.where(fringe_stats['ccdname']==ccdname)[0][0]\n frgscale = fringe_table['frgscale_median'][0] * fringe_stats['median_ratio'][fringe_stats_index]\n\n img -= frgscale * fringe_templates[ccdnum]\n new_key = {'name': 'FRGSCLV2', 'value': frgscale, 'comment': 'New (v2) fringe-correction scale'}\n hdr.add_record(new_key)\n\n hdul_w.write(data=img, header=hdr, extname=ccdname, compress='rice')\n\n hdul_r.close()\n hdul_w.close()\n\n gc.collect()\n\n if keep_status:\n os.remove(status_fn)\n\n print('Done!', time.strftime('%H:%M:%S', time.gmtime(time.time() - time_start)))\n\n return None\n\n\nwith Pool(processes=n_processes) as pool:\n res = pool.map(save_image, expnum_list)\n\nprint('All!', time.strftime('%H:%M:%S', time.gmtime(time.time() - 
time_start_all)))\n\n\n","sub_path":"dr10/ccd_fringe/save_fringe_corrected_images.py","file_name":"save_fringe_corrected_images.py","file_ext":"py","file_size_in_byte":8569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"340218042","text":"#!/usr/bin/env python3\n\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom anndata import AnnData\nfrom collections import namedtuple\nfrom enum import Enum\nfrom scipy.sparse import csr_matrix\nfrom typing import Any, Dict, List, Literal, Optional, Tuple, Union\n\nfrom .._logging import logger\n\n_TypeAnnDataFields = Literal[\"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"uns\"]\n_TypePairwiseSlice = Dict[Tuple[int, int], Tuple[np.ndarray, np.ndarray]]\n_TypeTotalSlice = List[np.ndarray]\n_TypePersistLevels = Literal[\"layer\", \"pipeline\", \"output\"]\n\n\ndef generate_features_slice(features: np.ndarray, selected: np.ndarray) -> np.ndarray:\n \"\"\"\n Returns an integer selector to slice {features} to {selected}\n \"\"\"\n assert (\n np.unique(features).shape[0] == features.shape[0]\n ), \"Duplicated features detected. Please fix this ambiguity.\"\n assert (\n np.unique(selected).shape[0] == selected.shape[0]\n ), \"Duplicated selected features detected. Please report this issue.\"\n fslice = np.zeros(selected.shape).astype(int)\n for i, fname in enumerate(selected):\n feature_idx = np.where(features == fname)[0]\n assert feature_idx.shape[0] == 1, f\"Missing features: {fname}.\"\n fslice[i] = np.where(features == fname)[0] # Yuk, could be done better\n return fslice\n\n\ndef get_pairwise_feature_slices(datasets: List[AnnData]) -> _TypePairwiseSlice:\n \"\"\"\n Returns a dictionary where index (i, j) corresponds to integer\n slices to use to put datasets i and j to the same feature space.\n \"\"\"\n result: _TypePairwiseSlice = {}\n for i, adata_i in enumerate(datasets):\n features_i = adata_i.var_names.to_numpy()\n slice_ii = generate_features_slice(\n features=adata_i.var_names.to_numpy(),\n selected=np.sort(adata_i.var_names.to_numpy()),\n )\n result[i, i] = (slice_ii, slice_ii)\n for j, adata_j in enumerate(datasets):\n if j <= i:\n continue\n features_j = adata_j.var_names.to_numpy()\n common_features = np.intersect1d(features_i, features_j)\n common_features = np.sort(common_features)\n slice_i = generate_features_slice(\n features=features_i,\n selected=common_features,\n )\n slice_j = generate_features_slice(\n features=features_j,\n selected=common_features,\n )\n result[i, j] = (slice_i, slice_j)\n result[j, i] = (slice_j, slice_i)\n return result\n\n\ndef get_total_feature_slices(datasets: List[AnnData]) -> _TypeTotalSlice:\n \"\"\"\n Returns a list where array at index i corresponds to boolean\n slice to use to slice dataset i in a common feature space.\n \"\"\"\n result: _TypeTotalSlice = []\n if len(datasets) == 0:\n return []\n common_features = datasets[0].var_names.to_numpy()\n for adata in datasets[1:]:\n common_features = np.intersect1d(\n common_features,\n adata.var_names.to_numpy(),\n )\n common_features = np.sort(common_features)\n for adata in datasets:\n result.append(\n generate_features_slice(adata.var_names.to_numpy(), common_features)\n )\n return result\n\n\ndef slice_common_features(datasets: List[AnnData]) -> List[np.ndarray]:\n \"\"\"\n Returns a list of AnnData objects in a common features space.\n \"\"\"\n slices = get_total_feature_slices(datasets)\n return [adata.X[:, sl] for adata, sl in zip(datasets, slices)]\n\n\n# FIXME: this 
will cause issues in the future.\n# It was bad design\nclass AnnDataKeyIdentifiers(Enum):\n \"\"\"\n String constants to pass to AnnDataManager to easily manage\n data storing in AnnData objects.\n\n tr_... suggests the key is internal to the engine, and\n helps avoiding key collisions with other packages.\n \"\"\"\n\n # Default representation keys\n BaseRepresentation = \"tr_base_representation\"\n TransmorphRepresentation = \"X_transmorph\"\n\n # AnnData metadata\n AnnDataId = \"tr_adata_id\"\n IsReference = \"tr_is_reference\"\n Metric = \"tr_default_metric\"\n MetricKwargs = \"tr_default_metric_kwargs\"\n\n # Structural keys\n DistanceMatrix = \"tr_distance_matrix\"\n SimilarityMatrix = \"tr_similarity_matrix\"\n SubsamplingAnchors = \"tr_ssp_anchors\"\n SubsamplingReferences = \"tr_ssp_references\"\n\n # Plotting keys\n PlotRepresentation = \"tr_plot_representation\"\n\n\nAnnDataKey = namedtuple(\"AnnDataKey\", [\"identifier\", \"field\", \"persist\"])\n\n\nclass AnnDataManager:\n \"\"\"\n This class allows to safely handle AnnData objects, either through\n its static methods or using the global anndata manager object.\n \"\"\"\n\n def __init__(self):\n adataid_str = AnnDataKeyIdentifiers.AnnDataId.value\n self.keys: Dict[str, AnnDataKey] = {\n adataid_str: AnnDataKey(adataid_str, \"uns\", \"pipeline\")\n }\n self.current_id = 0\n\n @staticmethod\n def log(msg: str, level: int = logging.DEBUG) -> None:\n logger.log(level, f\"ADManager > {msg}\")\n\n @staticmethod\n def gen_keystring(key: Union[str, AnnDataKeyIdentifiers]) -> str:\n \"\"\"\n Adds a prefix to a given key to decrease collision cases\n with other packages.\n \"\"\"\n if isinstance(key, AnnDataKeyIdentifiers):\n return key.value\n return key\n\n @staticmethod\n def to_delete(query: _TypePersistLevels, target: _TypePersistLevels) -> bool:\n \"\"\"\n Returns true if query level <= target level.\n \"\"\"\n levels = [\"layer\", \"pipeline\", \"output\"]\n assert query in levels, f\"Unknown query level: {query}\"\n assert target in levels, f\"Unknown target level: {target}\"\n if target == \"output\":\n return True\n if target == \"pipeline\":\n return query != \"output\"\n if target == \"layer\":\n return query == target\n raise ValueError(f\"Unknown target persist level: {target}.\")\n\n @staticmethod\n def get(field: Union[pd.DataFrame, Dict], str_key: str) -> Any:\n \"\"\"\n Retrieves information for an AnnData field, returns None if\n not present.\n \"\"\"\n if str_key not in field:\n return None\n return field[str_key]\n\n @staticmethod\n def delete(field: Union[pd.DataFrame, Dict], str_key: str) -> None:\n \"\"\"\n Deletes an AnnData entry if it is present, and logs it.\n \"\"\"\n if str_key not in field:\n return\n del field[str_key]\n\n @staticmethod\n def get_field_from_str(adata: AnnData, field_str: str) -> Union[pd.DataFrame, Dict]:\n \"\"\"\n Returns field {field_str} of {adata}. 
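A minimal usage sketch (illustrative only, assuming ``adata`` is any\n        AnnData instance):\n\n            obsm = AnnDataManager.get_field_from_str(adata, \"obsm\")  # adata.obsm\n            uns = AnnDataManager.get_field_from_str(adata, \"uns\")    # adata.uns[\"transmorph\"], created if absent\n\n        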
Raises an exception if {field_str} is\n        invalid.\n        \"\"\"\n        if field_str == \"obs\":\n            return adata.obs\n        if field_str == \"var\":\n            return adata.var\n        if field_str == \"obsm\":\n            return adata.obsm\n        if field_str == \"varm\":\n            return adata.varm\n        if field_str == \"obsp\":\n            return adata.obsp\n        if field_str == \"varp\":\n            return adata.varp\n        if field_str == \"uns\":\n            if \"transmorph\" not in adata.uns:\n                adata.uns[\"transmorph\"] = {}\n            return adata.uns[\"transmorph\"]\n        raise ValueError(f\"Unknown field {field_str}.\")\n\n    def set_value(\n        self,\n        adata: AnnData,\n        key: Union[str, AnnDataKeyIdentifiers],\n        field: _TypeAnnDataFields,\n        value: Any,\n        persist: _TypePersistLevels = \"pipeline\",\n    ) -> None:\n        \"\"\"\n        Stores information in an AnnData object, with a few sanity\n        checks.\n\n        Parameters\n        ----------\n        adata: AnnData\n            AnnData object to store information in.\n\n        key: AnnDataKeys\n            AnnDataKey identifier to safely store the information.\n\n        value: Any\n            Data to store.\n\n        field: Literal[\"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"uns\"]\n            AnnData subfield to store the information.\n            - \"obs\" vector-like data to label observations\n            - \"var\" vector-like data to label features\n            - \"obsm\" matrix-like data to represent observations\n            - \"varm\" matrix-like data to represent features\n            - \"obsp\" matrix-like data containing pairwise information\n              between observations\n            - \"varp\" matrix-like data containing pairwise information\n              between features\n            - \"uns\" any type of data to be stored (parameters, metadata,\n              local metric...)\n\n        persist: Literal[\"layer\", \"pipeline\", \"output\"], default = \"pipeline\"\n            Life duration of the information.\n            - \"layer\": Information is deleted at the end of the layer that\n              created it.\n            - \"pipeline\": Information is deleted at the end of the pipeline.\n            - \"output\": Information is not deleted at the end of the pipeline.\n        \"\"\"\n        str_key = AnnDataManager.gen_keystring(key)\n        if str_key not in self.keys:\n            self.keys[str_key] = AnnDataKey(str_key, field, persist)\n        else:  # We check consistency\n            _, old_field, old_persist = self.keys[str_key]\n            assert old_field == field\n            assert old_persist == persist\n        field_obj = AnnDataManager.get_field_from_str(adata, field)\n        field_obj[str_key] = value\n        str_descriptor = value\n        if isinstance(value, (np.ndarray, csr_matrix)):\n            str_descriptor = f\"{type(value)} ({value.shape})\"\n        AnnDataManager.log(f\"Inserting {str_descriptor} in {field}[{str_key}]\")\n\n    def get_value(\n        self,\n        adata: AnnData,\n        key: Union[str, AnnDataKeyIdentifiers],\n        field_str: Optional[_TypeAnnDataFields] = None,\n    ) -> Optional[Any]:\n        \"\"\"\n        Retrieves value previously stored. Returns None if nothing is found.\n\n        Parameters\n        ----------\n        adata: AnnData\n            AnnData object to retrieve information from.\n\n        key: AnnDataKeys\n            Key to retrieve. If it was stored by transmorph, the field will\n            be remembered. 
Otherwise, you must provide it explicitly.\n\n field_str: Literal[\"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"uns\"]\n AnnData subfield containing the information.\n - \"obs\" vector-like data to label observations\n - \"var\" vector-like data to label features\n - \"obsm\" matrix-like data to represent observations\n - \"varm\" matrix-like data to represent features\n - \"obsp\" matrix-like data containing pairwise information\n between observations\n - \"varp\" matrix-like data containing pairwise information\n between features\n - \"uns\" any type of data to be stored (parameters, metadata,\n local metric...)\n \"\"\"\n str_key = AnnDataManager.gen_keystring(key)\n\n # By default, ADKI.BaseRepresentation returns .X if not set.\n base_repr = AnnDataKeyIdentifiers.BaseRepresentation.value\n if str_key == base_repr and AnnDataManager.get(adata.obsm, base_repr) is None:\n return adata.X\n\n if str_key in self.keys:\n field_str = self.keys[str_key].field\n if field_str is None:\n return None\n\n field = AnnDataManager.get_field_from_str(adata, field_str)\n return AnnDataManager.get(field, str_key)\n\n def isset_value(\n self,\n adata: AnnData,\n key: Union[str, AnnDataKeyIdentifiers],\n field: Optional[_TypeAnnDataFields] = None,\n ) -> bool:\n \"\"\"\n Detects if a key is stored in an AnnData object.\n\n Parameters\n ----------\n adata: AnnData\n AnnData object to retrieve information from.\n\n key: AnnDataKeys\n Key to retrieve. if it was stored by transmorph, the field will\n be remembered. Otherwise, you must provide it explicitly.\n\n field_str: Literal[\"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"uns\"]\n AnnData subfield containing the information.\n - \"obs\" vector-like data to label observations\n - \"var\" vector-like data to label features\n - \"obsm\" matrix-like data to represent observations\n - \"varm\" matrix-like data to represent features\n - \"obsp\" matrix-like data containing pairwise information\n between observations\n - \"varp\" matrix-like data containing pairwise information\n between features\n - \"uns\" any type of data to be stored (parameters, metadata,\n local metric...)\n \"\"\"\n return self.get_value(adata, key, field) is not None\n\n def clean(\n self,\n datasets: Union[AnnData, List[AnnData]],\n level: _TypePersistLevels = \"pipeline\",\n ) -> None:\n \"\"\"\n Deletes transmorph keys of the given persist level and below.\n\n Parameters\n ----------\n datasets: Union[AnnData, List[AnnData]]\n AnnData object(s) to clean.\n\n level: Literal[\"output\", \"pipeline\", \"layer\"]\n All values with persist below this {level} are deleted.\n \"\"\"\n if isinstance(datasets, AnnData):\n datasets = [datasets]\n\n for adata in datasets:\n for admkey in self.keys.values():\n key, field_str, persist = admkey\n str_key = AnnDataManager.gen_keystring(key)\n if not AnnDataManager.to_delete(persist, level):\n continue\n AnnDataManager.log(f\"Deleting entry {field_str}[{str_key}].\")\n field = AnnDataManager.get_field_from_str(adata, field_str)\n AnnDataManager.delete(field, str_key)\n if field_str == \"uns\" and field == {}:\n del adata.uns[\"transmorph\"]\n\n def get_anndata_id(self, adata: AnnData) -> int:\n \"\"\"\n Creates a new identifier for specified AnnData if necessary,\n then retrieves its identifier.\n \"\"\"\n adata_id = self.get_value(adata, AnnDataKeyIdentifiers.AnnDataId)\n if adata_id is None:\n adata_id = self.current_id\n self.set_value(\n adata,\n AnnDataKeyIdentifiers.AnnDataId,\n \"uns\",\n adata_id,\n )\n self.current_id 
+= 1\n        return adata_id\n\n\nanndata_manager = AnnDataManager()\n","sub_path":"src/transmorph/utils/anndata_manager.py","file_name":"anndata_manager.py","file_ext":"py","file_size_in_byte":14522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"556252730","text":"# -*- coding: UTF-8 -*-\n\n# Define the node structure\nclass Node(object):\n\tdef __init__(self,number):\n\t\tself.number = number # the node's number\n\t\tself.rchild = None\t\t\t# right child of the node\n\t\tself.lchild = None\t\t\t# left child of the node\n\n'''\nSince this is a plain binary tree rather than a binary search tree, we do not\nneed to care about the ordering of values. That means insertions can simply\nfollow arrival order: root, root->left, root->right. The benefit of this design\nis that after root is inserted, both of its children are empty; root->left is\ninserted next and then root->right, which guarantees that both children of root\nreceive values, and that an anomalous insertion such as root->left->left can\nnever happen before root->right has been filled.\nBased on this design, we use a list to store every node that still has an empty\nchild slot; in other words, as long as a node does not yet have both children,\nit remains a candidate for the next insertion. Concretely, the list simulates a\nqueue: root, root->left, root->right are enqueued in order.\n\nThe next core question is how to add a new node; the first step, of course, is\nto check whether the tree is empty.\n\nThe basic logic of add(): the queue keeps the nodes that are not yet full; we\nrepeatedly take the queue head and attach left/right children to it, enqueuing\neach new child as well. After an insertion we check whether the head now has\nboth children: if not, it stays at the head of the queue; once it is full, it\nis dequeued and the next head is used.\n--------------------- \nAuthor: jchen104 \nSource: CSDN \nOriginal: https://blog.csdn.net/wzngzaixiaomantou/article/details/81294915 \nCopyright notice: this is the blogger's original article; please include a\nlink to the original post when reposting.\n'''\n\nclass Tree(object):\n\tlis = []\t\t\t\t\t\t\t\t# list that stores the nodes\n\tdef __init__(self):\n\t\tself.root = None\t\t\t\t\t# empty value, initialization\n\t\t\n\tdef add(self, number):\n\t\tnode = Node(number)\t\t\t\t\t# instantiate a node\n\t\tif self.root == None: \t\t\t\t# if the tree has no root yet, this node becomes the root\n\t\t\tself.root = node\t\t\t\t# the root node is set\n\t\t\tTree.lis.append(self.root) \t\t# --enqueue-- store the root node in the queue\n\t\telse:\t\t\t\t\t\t \t\t# not the root node\n\t\t\twhile True:\n\t\t\t\tpoint = Tree.lis[0] \t\t# the first node in the queue is the parent\n\t\t\t\tif point.lchild == None:\n\t\t\t\t\tpoint.lchild = node\n\t\t\t\t\tTree.lis.append(point.lchild) \t# keep enqueuing\n\t\t\t\t\treturn\t\t\t\t\t\t\t# exit the function\n\t\t\t\telif point.rchild == None:\n\t\t\t\t\tpoint.rchild = node\n\t\t\t\t\tTree.lis.append(point.rchild) \t# keep enqueuing\n\t\t\t\t\tTree.lis.pop(0)\t\t\t\t\t# --dequeue-- this node is full, pop it; FIFO queue behavior, unlike a LIFO stack\n\t\t\t\t\treturn\t\t\t\t\t\t\t# exit the add function\n\t\t\t\t\t\n\t\t\nif __name__ ==\"__main__\":\n\tt = Tree()\n\tL = [1,2,3,4,5,6,7]\n\tfor x in L :\n\t\tt.add(x)\n\t\t\n\t\tprint(\"success\")\n\t\t\nprint(t)\n\t\t\n\t\t\n","sub_path":"Program_lunjingfanyan/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"327946416","text":"def unique(values):\n    result = []\n    for i in values:\n        # keep only the first occurrence of each value, preserving order\n        if i not in result:\n            result.append(i)\n\n    print(result)\n    return result\n\nassert [1, 5, 3, 6, 7, 2, 4] == unique([1, 5, 3, 5, 6, 7, 2, 1, 4, 1, 5])","sub_path":"lab_2/tasks/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"617943202","text":"\ndef decorFunc(func):\n    \"\"\"\n    Write a decorator function that squares the values\n    returned by the function it is applied to.\n    \"\"\"\n\n    def wrapperOne(arg1, arg2, arg3):\n        varOne = func(arg1, arg2, arg3)\n        result = [i**2 for i in varOne]\n        return result\n    return wrapperOne\n\n\n@decorFunc\ndef my_func(val1,val2, val3):\n    var_result = [i for i in range (val1, val2, val3)]\n    print()\n    print(\"List of values: \", var_result, '\\n')\n    return var_result\n\nprint(\"*\"*50)\n\nprint(\"Specify the range of numbers to be squared.\\n\")\nvar1 = int(input(\"Enter the start value: \"))\nvar2 = int(input(\"Enter the end value: \"))\nvar3 = int(input(\"Enter the step between values: \"))\n\nprint(\"Values squared: \", my_func(var1, var2, var3), '\\n')\n","sub_path":"Lesson_12_DZ_Nichipurenko_A.V/Lesson_12_DZ_Nichipurenko_A.V.py","file_name":"Lesson_12_DZ_Nichipurenko_A.V.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"447781861","text":"def fib(xRange):\n    lastNum = 1\n    lastlastNum = 1\n    for x in range(1, xRange + 1):\n        if x <= 2:\n            print(1)\n        else:\n            currentNum = lastNum + lastlastNum\n            lastlastNum = lastNum\n            lastNum = currentNum\n            print(currentNum)\n\n\nxRange = int(input(\"How many Fibonacci numbers? \"))\nfib(xRange)","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"179563954","text":"# Given n non-negative integers representing the histogram's bar height where the width of each bar is 1, \r\n# find the area of largest rectangle in the histogram.\r\n\r\n# Above is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].\r\n# The largest rectangle is shown in the shaded area, which has area = 10 unit.\r\n\r\n# Example:\r\n# Input: [2,1,5,6,2,3]\r\n# Output: 10\r\n\r\nfrom typing import List\r\n\r\n# Method 1: brute force\r\nclass Solution:\r\n    def largestRectangleArea(self, height: List[int]) -> int:\r\n        if not height:\r\n            return 0\r\n        \r\n        max_area = 0\r\n        for i in range(len(height)):\r\n            min_height = float('inf')\r\n            for j in range(i, len(height)):\r\n                min_height = min(min_height, height[j]) # height: height[j]\r\n                max_area = max(max_area, min_height*(j-i+1)) # width: j-i+1\r\n        return max_area\r\n    \r\n# Time: O(N^2)\r\n# Space: O(1)\r\n\r\n\r\n# Method 2: stack to record position\r\nclass Solution:\r\n    def largestRectangleArea(self, heights: List[int]) -> int:\r\n        ans = 0\r\n        stack = []\r\n        heights = [0] + heights + [0]\r\n        \r\n        for i in range(len(heights)):\r\n            while stack and heights[i] < heights[stack[-1]]: # i: R index with smaller height\r\n                curr = stack.pop()\r\n                ans = max(ans, (i-stack[-1]-1)*heights[curr])\r\n            stack.append(i) # append it into stack if heights[i] > heights[stack[-1]]\r\n        return ans\r\n\r\n# Time: O(n). n numbers are pushed and popped.\r\n# Space: O(n). Stack is used.","sub_path":"01 Array/84. Largest Rectangle in Histogram.py","file_name":"84. Largest Rectangle in Histogram.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"508856897","text":"'''\nanalyze_past_run.py allows different state estimation techniques to be tried on \na history.p collection. 
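(Usage sketch, not from the original file: assuming `data` is the\nload_history object described below, one call might look like\n\n    J_est, R_est, angle_est = get_state_estimate(data, 'abc', start_pt=10)\n\nwhere 'abc' matches the model_estimation mode configured below and start_pt\nis the index at which q_initial was recorded -- both placeholder values.)\n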
\n\nexpects to receive in the form of a load_history object\n\nneeds to load a model based robot_state and loop through feeding in \nthe x and q values, recording the saved ones...\n'''\n\nimport sys, time\nimport numpy as np\nsys.path.append('..')\n\nfrom robot.model.model_communication import RobotModelInterface\nfrom robot.robot_state import robot_state\nfrom functions.state_estimation import jacobian_update\n\nrobot_com = RobotModelInterface(use_leader = True, \n use_sheath = False,\n sensor_noise = [0,0,0],\n use_obstacle = False,\n use_heartbeat = False,\n use_lung_task = False,\n bend_constraint = np.pi,\n touch_wall = False)\nrobot = robot_state(robot_com = robot_com,\n xbox = [], \n global_variables = [], \n use_leader = True, \n use_sheath = False,\n use_orientation = False,\n use_low_pass_filter = True,\n model_estimation = 'abc',\n motion_control = 'jinv',\n dx_threshold = 25.,\n q_threshold = 1e-5,\n trajectory = [])\n\n\ndef get_state_estimate(data, model_estimation, start_pt): \n # model_estimation string\n # start_pt is the index for q_initial (when wires were tensioned)\n N = len(data.time_pts)\n robot.model_estimation = model_estimation\n robot.q_initial = data.q_desired[start_pt,:]\n robot.x_initial = data.x_sensed[start_pt,:]\n J_estimate = np.zeros((N, robot.x_num, robot.q_num))\n R_estimate = np.zeros((N, 3, 3))\n angle_estimate = np.zeros((N, 3))\n robot.dx_expected = np.zeros(3)\n # set x_sensed and q_desired for each loop\n for i in range(start_pt,N):\n robot.q_desired = data.q_desired[i,:]\n robot.x_sensed = data.x_sensed[i,:]\n dq = data.q_desired[i,robot.q_list] - data.q_desired[i-1,robot.q_list]\n R = data.R[i,:,:]\n J = data.J[i,:,:]\n robot.dx_expected += R.dot(J.dot(dq))\n jacobian_update(robot)\n J_estimate[i,:,:] = robot.J.copy()\n R_estimate[i,:,:] = robot.R.copy()\n angle_estimate[i,:] = robot.angles_sensed.copy()\n return J_estimate, R_estimate, angle_estimate\n\n","sub_path":"catheter_simulation/data_analysis/analyze_past_run.py","file_name":"analyze_past_run.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"164988780","text":"import random\nimport time\n# Declaration of proxies\n\nhidemyass =\t\t{\t'url': 'https://www.hidemyass.com/proxy', \n\t\t\t\t\t'text_field': '//*[@id=\"form_url\"]', \n\t\t\t\t\t'button' : ''\n\t\t\t\t}\n\nhidester =\t\t{\t'url': 'https://hidester.com/proxy/', \n\t\t\t\t\t'text_field': '/html/body/div/main/div[1]/div/div[3]/form/div[2]/input', \n\t\t\t\t\t'button' : \"//input[@type='submit' and @value = 'Surf anonymously']\",\n\t\t\t\t}\n\nproxy_site = \t{\t'url': 'https://www.proxysite.com/', \n\t\t\t\t\t'text_field': '//div[@id=\"url-form-wrap\"]//input[@name=\"d\"]', \n\t\t\t\t\t'button' : '//div[@id=\"url-form-wrap\"]//button',\n\t\t\t\t}\n\nvpn_book = \t\t{\t'url': 'https://www.vpnbook.com/webproxy', \n\t\t\t\t\t'text_field': '//*[@id=\"input\"]', \n\t\t\t\t\t'button' : '/html/body/div/article/div/div[2]/form/input[2]',\n\t\t\t\t}\n \nmegaproxy = \t{\t'url': 'https://www.megaproxy.com/freesurf/', \n\t\t\t\t\t'text_field': '/html/body/table[2]/tbody/tr[4]/td[1]/table/tbody/tr[3]/td/form/input[1]', \n\t\t\t\t\t'button' : '/html/body/table[2]/tbody/tr[4]/td[1]/table/tbody/tr[3]/td/form/input[2]',\n\t\t\t\t}\n\nforever_proxy = {\t'url': 'https://www.4everproxy.com/', \n\t\t\t\t\t'text_field': '/html/body/section/section[1]/div[1]/div[2]/div/form/div[1]/input[1]', \n\t\t\t\t\t'button' : 
'/html/body/section/section[1]/div[1]/div[2]/div/form/div[1]/button',\n\t\t\t\t}\n\nblew_pass = \t{\t'url': 'https://www.blewpass.com/', \n\t\t\t\t\t'text_field': '/html/body/div[2]/div[2]/form/div[1]/div[2]/input[1]', \n\t\t\t\t\t'button' : '/html/body/div[2]/div[2]/form/div[1]/div[3]/input',\n\t\t\t\t}\n\ndont_filter = \t{\t'url': 'http://www.dontfilter.us', \n\t\t\t\t\t'text_field': '//*[@id=\"searchinginput\"]', \n\t\t\t\t\t'button' : '//*[@id=\"submitarea\"]',\n\t\t\t\t}\n\nfilter_bypass = \t{\t'url': 'https://www.filterbypass.me/', \n\t\t\t\t\t'text_field': '//*[@id=\"prform\"]', \n\t\t\t\t\t'button' : '/html/body/div[1]/div[3]/div[2]/form/input[2]',\n\t\t\t\t}\n\navailable_proxies = [hidemyass,hidester,proxy_site,vpn_book,megaproxy,forever_proxy,blew_pass,dont_filter,filter_bypass]\n\ndef run_proxy(driver,proxy,url):\n\ttry:\n\t\tdriver.get(proxy['url'])\n\t\t\n\t\tif (proxy['url'] == 'https://www.hidemyass.com/proxy'):\n\t\t\tdriver.switch_to.frame(\"proxyIframe\")\n\t\t\ttext_field = driver.find_element_by_xpath(proxy['text_field'])\n\t\t\tif (text_field):\n\t\t\t\ttext_field.click()\n\t\t\t\ttext_field.clear()\n\t\t\t\ttext_field.send_keys(url)\n\t\t\t\tdriver.execute_script('document.getElementById(\"submitButton\").click()')\n\t\t\t\tdriver.switch_to.default_content()\n\t\t\telse:\n\t\t\t\tprint (\"Error when writing url\")\n\t\t\n\t\ttext_field = driver.find_element_by_xpath(proxy['text_field'])\n\t\tbutton = driver.find_element_by_xpath(proxy['button'])\n\t\tif (text_field):\n\t\t\ttext_field.click()\n\t\t\ttext_field.clear()\n\t\t\ttext_field.send_keys(url)\n\t\t\tif (button):\n\t\t\t\tbutton.click()\n\t\telse:\n\t\t\tprint (\"error\")\n\t\t\treturn 1\n\t\treturn 0\n\texcept:\n\t\tprint (\"Error\")\n\t\treturn 1\n\ndef random_proxy():\n\treturn random.choice(available_proxies)\n\ndef test_proxies(driver,url):\n\tfor proxy in available_proxies:\n\t\trun_proxy(driver,proxy,url)\n\t\ttime.sleep(15)\n\t","sub_path":"proxies_gate/proxies_gate_module.py","file_name":"proxies_gate_module.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"605951376","text":"import webapp2\n\nfrom src.config import Url\nfrom src.controllers import json_controller, main, worker\n\nAPPLICATION = webapp2.WSGIApplication([\n    (Url.Main.LATEST_POSTS, main.Statistics),\n\n    (Url.Json.LATEST_POSTS, json_controller.JsonFeed),\n\n    (Url.Worker.Participants.WORKER, worker.ParticipantsWorker),\n    (Url.Worker.Participants.GET_ALL_PARTICIPANTS_TASK, worker.ParticipantsWorker.GetAllParticipantsTask),\n    (Url.Worker.Participants.GET_FEED_URL_TASK, worker.ParticipantsWorker.GetFeedUrlTask),\n\n    (Url.Worker.ScrapFeed.WORKER, worker.ScrapWorker),\n    (Url.Worker.ScrapFeed.ENQUEUE_SCRAP_TASKS, worker.ScrapWorker.EnqueueFeedScrapTask),\n    (Url.Worker.ScrapFeed.SCRAP_TASK, worker.ScrapWorker.ScrapSingleFeedTask)\n], debug=True)\n\n\ndef run():\n    # renamed from main() to avoid shadowing the imported `main` controller\n    # module; webapp2 applications expose their own run() entry point\n    APPLICATION.run()\n\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"DajSiePoznacFeed-Server/crawler/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"297232967","text":"class Girl:\n\n    gender = 'female' #class variable\n\n    def __init__(self, name):\n        self.name = name #name unique to each object - instance variable\n\nr= Girl(\"Rachel\")\ns= Girl(\"Stinky\")\nprint(r.gender)\nprint(s.gender)\nprint(r.name)\nprint(s.name)\n","sub_path":"python/oop/class_and_instance_variables.py","file_name":"class_and_instance_variables.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"34431751","text":"import math\nimport copy\n\nimport torch\nfrom . import utils as ut\n\n\nclass EG_LIP_LS(torch.optim.Optimizer):\n    ''' PyTorch Implementation of SEG with Lipschitz line-search\n    '''\n    def __init__(self, params, max_epochs, batch_size, init_step_size=1, n=1, reset_option=0, sigma=0.5, beta=0.5, beta_2=None, bound_step_size=False):\n\n        defaults = dict(max_epochs=max_epochs, batch_size=batch_size, init_step_size=init_step_size, n=n, sigma=sigma, beta=beta, reset_option=reset_option, beta_2=beta_2, bound_step_size=bound_step_size)\n        super().__init__(params, defaults)\n\n        self.state['step'] = 0\n        self.state['step_size'] = init_step_size\n\n        # book-keeping for the Lipschitz line-search\n        self.state['x_prev'] = copy.deepcopy(self.param_groups)\n\n        self.state['n_forwards'] = 0\n        self.state['n_backwards'] = 0\n\n    def step(self, closure):\n\n        # step-size reset policy (assumed to live in the package's shared\n        # utils, matching the `from . import utils as ut` import above)\n        step_size = ut.reset_step(self.state, self.defaults)\n\n        # call the closure to get loss and compute gradients\n        loss = closure()\n        loss.backward()\n\n        self.state['n_forwards'] += 1\n        self.state['n_backwards'] += 1\n\n        # save the current parameters:\n        x_current = copy.deepcopy(self.param_groups)\n        self.state['x_prev'] = copy.deepcopy(self.param_groups)\n\n        # save the gradient at the current parameters, grouped to mirror param_groups\n        gradient = [[p.grad for p in group['params']] for group in self.param_groups]\n        grad_norm = ut.compute_grad_norm([g for grads in gradient for g in grads])\n\n        # only do the check if the gradient norm is big enough\n        if grad_norm >= 1e-8:\n\n            # check if condition is satisfied\n            found = 0\n\n            for e in range(100):\n                # try a prospective step\n                _try_update(self.param_groups, step_size, x_current, gradient)\n\n                # compute the loss at the next step; Lipschitz line-search requires\n                # new gradients (normalized to the closure() + backward() convention used above)\n                loss_temp = closure()\n                loss_temp.backward()\n\n                self.state['n_forwards'] += 1\n                self.state['n_backwards'] += 1\n\n                gradient_temp = [[p.grad for p in group['params']] for group in self.param_groups]\n\n                g_norm = _compute_grad_diff(self.param_groups, gradient_temp, gradient)\n                x_norm = _compute_iter_diff(self.param_groups, x_current)\n\n                # implements the lipschitz condition in the paper\n                c = (step_size / self.defaults['sigma'])**2\n                break_condition = float(c * g_norm - x_norm)\n                if (break_condition <= 0):\n\n                    found = 1\n                    break\n\n                else:\n\n                    # decrease the step-size by a multiplicative factor\n                    step_size = step_size * self.defaults['beta']\n\n            if found == 0:\n                _try_update(self.param_groups, 1e-6, x_current, gradient)\n\n            else:\n                _try_update(self.param_groups, step_size, x_current, gradient)\n\n        # save the new step-size\n        self.state['step_size'] = step_size\n        self.state['step'] = self.state['step'] + 1\n\n        # take the extra gradient step.\n        self.EG_step(closure)\n\n        return loss\n\n    def EG_step(self, closure):\n\n        # call the closure to get loss and compute gradients at the extrapolated point.\n        loss = closure()\n        loss.backward()\n\n        self.state['n_forwards'] += 1\n        self.state['n_backwards'] += 1\n\n        # gradient at the extrapolated parameters:\n        gradient = [[p.grad for p in group['params']] for group in self.param_groups]\n\n        _try_update(self.param_groups, self.state['step_size'], self.state['x_prev'], gradient)\n\n        return loss\n\n\ndef _try_update(param_groups, step_size, x_current, gradient):\n\n    with torch.no_grad():\n        for i, group in enumerate(param_groups):\n            for j, p in enumerate(group['params']):\n                # update models parameters using SGD update\n
p.data = x_current[i]['params'][j] - step_size * gradient[i][j]\n\n\ndef _compute_iter_diff(param_groups, x_current):\n\n x_norm = 0\n with torch.no_grad():\n for i, group in enumerate(param_groups):\n for j, p in enumerate(group['params']):\n iter_diff = x_current[i]['params'][j] - p.data\n x_norm = x_norm + torch.sum(torch.mul(iter_diff, iter_diff))\n\n return x_norm\n\ndef _compute_grad_diff(param_groups, g_current, g_prev):\n\n g_norm = 0\n with torch.no_grad():\n for i, group in enumerate(param_groups):\n for j, p in enumerate(group['params']):\n g_diff = g_current[i][j] - g_prev[i][j]\n g_norm = g_norm + torch.sum(torch.mul(g_diff, g_diff))\n\n return g_norm","sub_path":"sls/eg_lip_ls.py","file_name":"eg_lip_ls.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"351237872","text":"\"\"\" View functions for generating IPTT Reports (HTML and Excel)\"\"\"\n\nimport bisect\nfrom collections import OrderedDict\nfrom dateutil import rrule, parser\nfrom django.utils import timezone\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.db.models import Sum, Avg, Subquery, OuterRef, Case, When, Q, F, Max\nfrom django.views.generic import TemplateView, FormView\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib import messages\nfrom openpyxl import Workbook\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.styles import Font, PatternFill, Alignment\nfrom openpyxl.worksheet.cell_range import CellRange\n\nfrom tola.util import formatFloat\nfrom tola.l10n_utils import l10n_date, l10n_monthly_date\nfrom workflow.models import Program\nfrom ..models import Indicator, CollectedData, Level, PeriodicTarget\nfrom ..forms import IPTTReportQuickstartForm, IPTTReportFilterForm\nfrom ..templatetags.mytags import symbolize_change, symbolize_measuretype\n\n\nclass IPTT_Mixin(object):\n \"\"\"\n A mixin that abstracts all of the common functionality for IPTT reports\n \"\"\"\n template_name = 'indicators/iptt_report.html'\n REPORT_TYPE_TIMEPERIODS = 'timeperiods'\n REPORT_TYPE_TARGETPERIODS = 'targetperiods'\n\n MONTHS_PER_MONTH = 1\n MONTHS_PER_QUARTER = 3\n MONTHS_PER_TRIANNUAL = 4\n MONTHS_PER_SEMIANNUAL = 6\n MONTHS_PER_YEAR = 12\n\n FROM = 'from'\n TO = 'to'\n\n def __init__(self, **kwargs):\n self.program = None\n self.annotations = {}\n self.filter_form_initial_data = {}\n\n @staticmethod\n def _get_num_months(period):\n \"\"\"\n Returns the number of months for a given time-period\n \"\"\"\n try:\n return {\n Indicator.ANNUAL: IPTT_Mixin.MONTHS_PER_YEAR,\n Indicator.SEMI_ANNUAL: IPTT_Mixin.MONTHS_PER_SEMIANNUAL,\n Indicator.TRI_ANNUAL: IPTT_Mixin.MONTHS_PER_TRIANNUAL,\n Indicator.QUARTERLY: IPTT_Mixin.MONTHS_PER_QUARTER,\n Indicator.MONTHLY: IPTT_Mixin.MONTHS_PER_MONTH\n }[period]\n except KeyError:\n return 0\n\n @staticmethod\n def _get_period_name(period):\n \"\"\"\n Returns the name of the period\n \"\"\"\n try:\n return {\n Indicator.ANNUAL: _('Year'),\n Indicator.SEMI_ANNUAL: _('Semi-annual'),\n Indicator.TRI_ANNUAL: _('Tri-annual'),\n Indicator.QUARTERLY: _('Quarter'),\n Indicator.MONTHLY: _('Month')\n }[period]\n except KeyError:\n return 0\n\n def _get_first_period(self, start_date, num_months_in_period):\n # TODO: Delete it\n if start_date is None:\n num_months_in_period = 0\n\n if num_months_in_period == 
IPTT_Mixin.MONTHS_PER_MONTH:\n # if interval is monthly, set the start_date to the first of the month\n period_start_date = start_date.replace(day=1)\n elif num_months_in_period == IPTT_Mixin.MONTHS_PER_QUARTER:\n # if interval is quarterly, set period_start_date to first calendar quarter\n quarter_start = [start_date.replace(month=month, day=1) for month in (1, 4, 7, 10)]\n index = bisect.bisect(quarter_start, start_date)\n period_start_date = quarter_start[index - 1]\n elif num_months_in_period == IPTT_Mixin.MONTHS_PER_TRIANNUAL:\n # if interval is tri-annual, set period_start_date to first calendar tri-annual\n tri_annual_start = [start_date.replace(month=month, day=1) for month in (1, 5, 9)]\n index = bisect.bisect(tri_annual_start, start_date)\n period_start_date = tri_annual_start[index - 1]\n elif num_months_in_period == IPTT_Mixin.MONTHS_PER_SEMIANNUAL:\n # if interval is semi-annual, set period_start_date to first calendar semi-annual\n semi_annual = [start_date.replace(month=month, day=1) for month in (1, 7)]\n index = bisect.bisect(semi_annual, start_date)\n period_start_date = semi_annual[index - 1]\n elif num_months_in_period == IPTT_Mixin.MONTHS_PER_YEAR:\n # if interval is annual, set period_start_date to first calendar year\n period_start_date = start_date.replace(month=1, day=1)\n else:\n period_start_date = None\n\n return period_start_date\n\n def _generate_annotations(self, timeperiods, period, reporttype):\n \"\"\"\n Generates queryset annotation(sum, avg, last data record). All three annotations are calculated\n because one of these three values will be used depending on how an indicator is configured.\n \"\"\"\n i = 0\n if period == Indicator.LOP:\n self.annotations = {}\n elif period == Indicator.MID_END:\n # Create annotations for MIDLINE TargetPeriod\n last_data_record = CollectedData.objects.filter(\n indicator=OuterRef('pk'),\n periodic_target__period=PeriodicTarget.MIDLINE) \\\n .order_by('-date_collected', '-pk')\n midline_sum = Sum(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.NUMBER) &\n Q(collecteddata__periodic_target__period=PeriodicTarget.MIDLINE),\n then=F('collecteddata__achieved')\n )\n )\n )\n # midline_avg = Avg(\n # Case(\n # When(\n # Q(unit_of_measure_type=Indicator.PERCENTAGE) &\n # Q(is_cumulative=False) &\n # Q(collecteddata__periodic_target__period=PeriodicTarget.MIDLINE),\n # then=F('collecteddata__achieved')\n # )\n # )\n # )\n midline_last = Max(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.PERCENTAGE) &\n # Q(is_cumulative=True) &\n Q(collecteddata__periodic_target__period=PeriodicTarget.MIDLINE),\n then=Subquery(last_data_record.values('achieved')[:1])\n )\n )\n )\n # Get the midline target value\n midline_target = Max(\n Case(\n When(\n Q(collecteddata__periodic_target__period=PeriodicTarget.MIDLINE),\n then=Subquery(last_data_record.values('periodic_target__target')[:1])\n # Q(periodictargets__period=PeriodicTarget.MIDLINE),\n # then=F('periodictargets__target')\n )\n )\n )\n\n # Create annotations for ENDLINE TargetPeriod\n last_data_record = CollectedData.objects.filter(\n indicator=OuterRef('pk'),\n periodic_target__period=PeriodicTarget.ENDLINE) \\\n .order_by('-date_collected', '-pk')\n endline_sum = Sum(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.NUMBER) &\n Q(collecteddata__periodic_target__period=PeriodicTarget.ENDLINE),\n then=F('collecteddata__achieved')\n )\n )\n )\n # endline_avg = Avg(\n # Case(\n # When(\n # Q(unit_of_measure_type=Indicator.PERCENTAGE) &\n # Q(is_cumulative=False) &\n # 
Q(collecteddata__periodic_target__period=PeriodicTarget.ENDLINE),\n # then=F('collecteddata__achieved')\n # )\n # )\n # )\n endline_last = Max(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.PERCENTAGE) &\n # Q(is_cumulative=True) &\n Q(collecteddata__periodic_target__period=PeriodicTarget.ENDLINE),\n then=Subquery(last_data_record.values('achieved')[:1])\n )\n )\n )\n # Get the endline target value\n endline_target = Max(\n Case(\n When(\n Q(collecteddata__periodic_target__period=PeriodicTarget.ENDLINE),\n then=Subquery(last_data_record.values('periodic_target__target')[:1])\n # Q(periodictargets__period=PeriodicTarget.ENDLINE),\n # then=F('periodictargets__target')\n )\n )\n )\n self.annotations[\"Midline_target\"] = midline_target\n self.annotations[\"Endline_target\"] = endline_target\n self.annotations['Midline_sum'] = midline_sum\n # self.annotations['Midline_avg'] = midline_avg\n self.annotations['Midline_last'] = midline_last\n self.annotations['Endline_sum'] = endline_sum\n # self.annotations['Endline_avg'] = endline_avg\n self.annotations['Endline_last'] = endline_last\n else:\n for k, v in timeperiods.items():\n start_date = datetime.strftime(v[0], '%Y-%m-%d') # TODO: localize this date\n end_date = datetime.strftime(v[1], '%Y-%m-%d') # TODO: localize this date\n\n last_data_record = CollectedData.objects.filter(\n indicator=OuterRef('pk'),\n date_collected__gte=start_date,\n date_collected__lte=end_date) \\\n .order_by('-date_collected', '-pk')\n\n # 1.) If the indicator is NUMBER and CUMULATIVE then do include all data\n # for the first period up to the first period's end_date. In other words,\n # do not limit data records by the current period's start date because if a\n # user selected most_recent=2 then we want the first most_recent period to include\n # all of the data from the periods that the user excluded by specifying num_recents=2\n # since it is a cumulative indicator.\n # 2.) If it is not a cumulative indicator then restrict it both by start_date and end_date\n # 3.) 
If it is not the first target_period then still restrict it by both start_date and\n # end_date otherwise it will double count the data prior to the first_period's start_date\n # for cumulative indicators\n if i == 0:\n annotation_sum = Sum(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.NUMBER) &\n Q(is_cumulative=True) &\n Q(collecteddata__date_collected__lte=end_date),\n then=F('collecteddata__achieved')\n ),\n When(\n Q(unit_of_measure_type=Indicator.NUMBER) &\n Q(collecteddata__date_collected__gte=start_date) &\n Q(collecteddata__date_collected__lte=end_date),\n then=F('collecteddata__achieved')\n )\n )\n )\n else:\n annotation_sum = Sum(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.NUMBER) &\n Q(collecteddata__date_collected__gte=start_date) &\n Q(collecteddata__date_collected__lte=end_date),\n then=F('collecteddata__achieved')\n )\n )\n )\n i += 1\n # annotation_avg = Avg(\n # Case(\n # When(\n # Q(unit_of_measure_type=Indicator.PERCENTAGE) &\n # Q(is_cumulative=False) &\n # Q(collecteddata__date_collected__gte=start_date) &\n # Q(collecteddata__date_collected__lte=end_date),\n # then=F('collecteddata__achieved')\n # )\n # )\n # )\n annotation_last = Max(\n Case(\n When(\n Q(unit_of_measure_type=Indicator.PERCENTAGE) &\n # Q(is_cumulative=True) &\n Q(collecteddata__date_collected__gte=start_date) &\n Q(collecteddata__date_collected__lte=end_date),\n then=Subquery(last_data_record.values('achieved')[:1])\n )\n )\n )\n\n # if this is targetperiods IPTT report then get the target value for each period\n if reporttype == self.REPORT_TYPE_TARGETPERIODS:\n annotation_target = Max(\n Case(\n When(\n Q(collecteddata__date_collected__gte=start_date) &\n Q(collecteddata__date_collected__lte=end_date),\n then=Subquery(last_data_record.values('periodic_target__target')[:1])\n # Q(periodictargets__start_date__gte=start_date) &\n # Q(periodictargets__end_date__lte=end_date),\n # then=F('periodictargets__target')\n )\n )\n )\n self.annotations[\"{}_target\".format(k)] = annotation_target\n\n # the following becomes annotations for the queryset\n # e.g.\n # Year 1_sum=..., Year 2_sum=..., etc.\n # Year 1_avg=..., Year 2_avg=..., etc.\n # Year 1_last=..., Year 2_last=..., etc.\n #\n self.annotations[\"{}_sum\".format(k)] = annotation_sum\n # self.annotations[\"{}_avg\".format(k)] = annotation_avg\n self.annotations[\"{}_last\".format(k)] = annotation_last\n return self.annotations\n\n @staticmethod\n def _get_num_periods(start_date, end_date, period):\n \"\"\"\n Returns the number of periods, in months, depending on the period\n \"\"\"\n num_months_in_period = IPTT_Mixin._get_num_months(period)\n total_num_months = len(list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, until=end_date)))\n try:\n num_periods = total_num_months / num_months_in_period\n remainder_months = total_num_months % num_months_in_period\n if remainder_months > 0:\n num_periods += 1\n except ZeroDivisionError:\n num_periods = 0\n return num_periods\n\n def _generate_targetperiods(self, program, filter_start_date, filter_end_date, period, show_all, num_recents):\n targetperiods = OrderedDict()\n today = datetime.today().date()\n # today = datetime.strptime('2020-02-23', '%Y-%m-%d').date()\n\n # All indicators within a program that have the same target_frequency (annual, monthly, etc)\n # have the same number of target periods with the same start and end dates, thus we can just\n # get the first indicator that is within this program and have the same target_frequency(period)\n # and fetch the related set of 
periodic_targets\n ind = Indicator.objects.filter(program__in=[program.id], target_frequency=period).first()\n periodic_targets = PeriodicTarget.objects.filter(indicator=ind) \\\n .values(\"id\", \"period\", \"target\", \"start_date\", \"end_date\")\n\n try:\n start_date = parser.parse(self.filter_form_initial_data['start_date']).date()\n end_date = parser.parse(self.filter_form_initial_data['end_date']).date()\n periodic_targets = periodic_targets.filter(start_date__gte=start_date, end_date__lte=end_date)\n except (KeyError, ValueError):\n pass\n\n for pt in periodic_targets:\n # if it is LOP Target then do not show any target periods becaseu there are none.\n if pt['period'] == Indicator.TARGET_FREQUENCIES[0][1]:\n continue\n targetperiods[pt['period']] = [pt['start_date'], pt['end_date'], pt['target'], pt['id']]\n\n # save the unfiltered targetperiods into the global variable so that\n # it be used to populate the periods dropdown\n all_date_ranges = targetperiods\n\n # Update the report_end_date with the last reporting_period's end_date\n try:\n report_end_date = targetperiods[targetperiods.keys()[-1]][1]\n except (TypeError, IndexError):\n report_end_date = self.program.reporting_period_end\n\n # this check is necessary becasue mid/end line do not have start/end dates\n if report_end_date is None:\n report_end_date = self.program.reporting_period_end\n\n if num_recents is not None and num_recents > 0 and period not in [Indicator.LOP, Indicator.MID_END]:\n # filter out those timeperiods whose end_dates are larger than today's date\n targetperiods_less_than_today = filter(lambda v: v[1][0] <= today, targetperiods.items())\n\n if len(targetperiods_less_than_today) > num_recents:\n # filter out dates that are outside of the most_recent index specified by user\n most_recent_targetperiods = targetperiods_less_than_today[(\n len(targetperiods_less_than_today) - num_recents):]\n else:\n most_recent_targetperiods = targetperiods_less_than_today\n\n # convert to oredered dictionary to preserve order (IMPORTANT!)\n targetperiods = OrderedDict((k, v) for k, v in most_recent_targetperiods)\n elif show_all == 0 and filter_start_date is not None and filter_end_date is not None:\n filtered_targetperiods = OrderedDict()\n # TODO: localize the following dates\n filter_start_date = datetime.strptime(filter_start_date, \"%Y-%m-%d\").date()\n filter_end_date = datetime.strptime(filter_end_date, \"%Y-%m-%d\").date()\n for k, v in targetperiods.items():\n start_date = v[0]\n end_date = v[1]\n if start_date >= filter_start_date and filter_end_date >= end_date:\n filtered_targetperiods[k] = [start_date, end_date]\n return (report_end_date, all_date_ranges, filtered_targetperiods)\n return (report_end_date, all_date_ranges, targetperiods)\n\n def _generate_timeperiods(self, filter_start_date, filter_end_date, frequency, show_all, num_recents):\n timeperiods = OrderedDict()\n today_date = datetime.today().date()\n # today_date = datetime.strptime('2020-02-23', '%Y-%m-%d').date()\n\n period_name = self._get_period_name(frequency)\n num_months_in_period = self._get_num_months(frequency)\n\n num_periods = IPTT_Mixin._get_num_periods(self.program.reporting_period_start,\n self.program.reporting_period_end, frequency)\n\n start_date = self.program.reporting_period_start\n\n # bump up num_periods by 1 because the loop starts from 1 instead of 0\n num_periods += 1\n for i in range(1, num_periods):\n if i > 1:\n # if it is not the first period then advance the\n # start_date by the correct number of months.\n 
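# A worked example of the arithmetic below (a sketch, assuming a quarterly\n                # frequency, i.e. num_months_in_period == 3): a period starting 2019-01-01\n                # ends 2019-03-31 (start + 3 months - 1 day), and the next period starts\n                # 2019-04-01, so consecutive periods tile the reporting range with no\n                # gaps or overlaps.\n                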
start_date = start_date + relativedelta(months=+num_months_in_period)\n\n end_date = start_date + relativedelta(months=+num_months_in_period) + relativedelta(days=-1)\n # print('start_date={}, end_date={}'.format(start_date, end_date))\n if frequency == Indicator.MONTHLY:\n period_name = datetime.strftime(start_date, \"%b %Y\") #\n timeperiods[\"{}\".format(period_name)] = [start_date, end_date]\n else:\n timeperiods[\"{} {}\".format(period_name, i)] = [start_date, end_date]\n\n # save the unfiltered targetperiods into the global variable so that\n # it be used to populate the periods dropdown\n all_date_ranges = timeperiods\n\n # Update the report_end_date with the last reporting_period's end_date\n try:\n report_end_date = timeperiods[timeperiods.keys()[-1]][1]\n except (TypeError, IndexError):\n report_end_date = self.program.reporting_period_end\n\n if num_recents is not None and num_recents > 0:\n # filter out those timeperiods whose end_dates are larger than today's date\n timeperiods_less_than_today = filter(lambda v: v[1][0] <= today_date, timeperiods.items())\n if len(timeperiods_less_than_today) > num_recents:\n # filter out dates that are outside of the most_recent index specified by user\n most_recent_timeperiods = timeperiods_less_than_today[(\n len(timeperiods_less_than_today) - num_recents):]\n else:\n most_recent_timeperiods = timeperiods_less_than_today\n # convert to oredered dictionary to preserve order (IMPORTANT!)\n timeperiods = OrderedDict((k, v) for k, v in most_recent_timeperiods)\n elif show_all == 0 and filter_start_date is not None and filter_end_date is not None:\n filtered_timeperiods = OrderedDict()\n # TODO: localize the following dates\n filter_start_date = datetime.strptime(filter_start_date, \"%Y-%m-%d\").date()\n filter_end_date = datetime.strptime(filter_end_date, \"%Y-%m-%d\").date()\n for k, v in timeperiods.items():\n start_date = v[0]\n end_date = v[1]\n if start_date >= filter_start_date and filter_end_date >= end_date:\n filtered_timeperiods[k] = [start_date, end_date]\n return (report_end_date, all_date_ranges, filtered_timeperiods)\n\n return (report_end_date, all_date_ranges, timeperiods)\n\n def _update_filter_form_initial(self, formdata):\n self.filter_form_initial_data = {}\n for k in formdata:\n v = formdata.getlist(k)\n if k == 'csrfmiddlewaretoken' or k == 'program':\n continue\n if isinstance(v, list) and len(v) == 1:\n v = v[0]\n\n if k == self.REPORT_TYPE_TIMEPERIODS or k == self.REPORT_TYPE_TARGETPERIODS:\n try:\n v = int(v)\n except ValueError:\n v = int(Indicator.ANNUAL) # defaults to annual\n\n if k == 'numrecentperiods':\n try:\n v = int(v)\n except ValueError:\n continue\n # print(\"{} = {}\".format(k, v))\n self.filter_form_initial_data[k] = v\n\n def _get_filters(self, data):\n filters = {}\n try:\n filters['level__in'] = data['level'] if isinstance(data['level'], list) else [data['level']]\n except KeyError:\n pass\n\n try:\n filters['sector__in'] = data['sector'] if isinstance(data['sector'], list) else [data['sector']]\n except KeyError:\n pass\n\n try:\n filters['indicator_type__in'] = data['ind_type'] if isinstance(data['ind_type'], list) else [\n data['ind_type']]\n except KeyError:\n pass\n\n try:\n filters['collecteddata__site__in'] = data['site'] if isinstance(data['site'], list) else [data['site']]\n except KeyError:\n pass\n\n try:\n filters['id__in'] = data['indicators'] if isinstance(data['indicators'], list) else [data['indicators']]\n except KeyError:\n pass\n\n return filters\n\n def prepare_indicators(self, 
reporttype, period, periods_date_ranges, indicators):\n # Calculate the cumulative sum across timeperiods for indicators that are NUMBER and CUMULATIVE\n for i, ind in enumerate(indicators):\n running_total = 0\n # process indicator number\n if ind['number'] is None:\n ind['number'] = ''\n\n # process level\n if ind['lastlevel'] is None:\n ind['lastlevel'] = ''\n\n # process unit_of_measure\n if ind['unit_of_measure'] is None:\n ind['unit_of_measure'] = ''\n\n # process direction_of_change\n ind['direction_of_change'] = symbolize_change(ind['direction_of_change'])\n\n # process indicator is_cumulative status\n if ind['target_frequency'] == Indicator.LOP:\n ind['cumulative'] = _(\"N/A\")\n elif ind['is_cumulative'] is True:\n ind['cumulative'] = _(\"Cumulative\")\n elif ind['is_cumulative'] is False:\n ind['cumulative'] = _(\"Non-cumulative\")\n\n # process indicator_unit_type\n ind['unittype'] = symbolize_measuretype(ind['unit_of_measure_type'])\n\n # process baseline\n if ind['baseline_na'] is True:\n ind['baseline'] = _(\"N/A\")\n else:\n if ind['baseline'] is None:\n ind['baseline'] = ''\n\n # process lop_target\n try:\n lop_target = float(ind['lop_target'])\n if ind['unit_of_measure_type'] == Indicator.PERCENTAGE:\n ind['lop_target'] = \"{}%\".format(formatFloat(lop_target))\n else:\n ind['lop_target'] = formatFloat(lop_target)\n except (ValueError, TypeError):\n lop_target = ''\n ind['lop_target'] = lop_target\n\n # process lop_actual\n lop_actual = ''\n percent = ''\n if ind['unit_of_measure_type'] == Indicator.NUMBER:\n if ind['actualsum'] is not None:\n lop_actual = float(ind['actualsum'])\n elif ind['unit_of_measure_type'] == Indicator.PERCENTAGE:\n if ind['lastdata'] is not None:\n lop_actual = float(ind['lastdata'])\n percent = \"%\"\n try:\n ind['lop_actual'] = \"{}{}\".format(formatFloat(lop_actual), percent)\n except TypeError:\n ind['lop_actual'] = ''\n\n # process lop_percent_met\n try:\n ind['lop_percent_met'] = \"{}%\".format(formatFloat(round(lop_actual / lop_target * 100)))\n except TypeError:\n ind['lop_percent_met'] = ''\n except ZeroDivisionError:\n ind['lop_percent_met'] = _('N/A')\n\n if period in [Indicator.ANNUAL, Indicator.SEMI_ANNUAL, Indicator.TRI_ANNUAL, Indicator.QUARTERLY,\n Indicator.MONTHLY, Indicator.MID_END]:\n # if the frequency (period) is periodic, i.e., time-aware then go through each period\n # and calculate the cumulative total achieved across date ranges (periods)\n for k, v in periods_date_ranges.items():\n if ind['unit_of_measure_type'] == Indicator.NUMBER and ind['is_cumulative'] is True:\n current_sum = ind[\"{}_sum\".format(k)]\n if current_sum is not None:\n # current_sum = 0\n key = \"{}_rsum\".format(k)\n running_total = running_total + current_sum\n ind[key] = running_total\n\n # process target_period actual value\n actual = '{}_actual'.format(k)\n actual_val = ''\n percent_sign = ''\n if ind['unit_of_measure_type'] == Indicator.NUMBER:\n if ind['is_cumulative'] is True:\n try:\n actual_val = ind[\"{}_rsum\".format(k)]\n except KeyError:\n actual_val = ''\n else: # if it is not set to cumulative then default to non-cumulative even it is it not set\n actual_val = ind[\"{}_sum\".format(k)]\n elif ind['unit_of_measure_type'] == Indicator.PERCENTAGE:\n percent_sign = '%'\n actual_val = ind[\"{}_last\".format(k)]\n\n if actual_val is not None and actual_val != '':\n ind[actual] = \"{}{}\".format(formatFloat(actual_val), percent_sign)\n else:\n ind[actual] = ''\n\n if reporttype == self.REPORT_TYPE_TARGETPERIODS:\n # process target_period 
target value\n target_key = \"{}_target\".format(k)\n if ind[target_key] is None:\n target_val = ''\n else:\n target_val = formatFloat(float(ind[target_key]))\n\n if ind['unit_of_measure_type'] == Indicator.PERCENTAGE:\n if target_val > 0 and target_val != '':\n ind['{}_period_target'.format(k)] = \"{}%\".format(target_val)\n else:\n ind['{}_period_target'.format(k)] = ''\n else:\n ind['{}_period_target'.format(k)] = target_val\n\n # process target_period percent_met value\n try:\n percent_met = '{}_percent_met'.format(k)\n target = float(ind[\"{}_target\".format(k)])\n if ind['unit_of_measure_type'] == Indicator.NUMBER:\n if ind['is_cumulative'] is True:\n rsum = float(ind[\"{}_rsum\".format(k)])\n percent_met_val = formatFloat(round(rsum / target * 100))\n else:\n percent_met_val = formatFloat(round(float(ind[\"{}_sum\".format(k)]) / target * 100))\n ind[percent_met] = \"{}%\".format(percent_met_val)\n elif ind['unit_of_measure_type'] == Indicator.PERCENTAGE:\n percent_met_val = formatFloat(round(float(ind[\"{}_last\".format(k)]) / target * 100))\n ind[percent_met] = \"{}%\".format(percent_met_val)\n except (TypeError, KeyError):\n ind[percent_met] = ''\n except ZeroDivisionError:\n ind[percent_met] = _(\"N/A\")\n return indicators\n\n def prepare_iptt_period_dateranges(self, period, periods_date_ranges, from_or_to):\n \"\"\"\n formats date_ranges with optgroup by year for all target_frequencies\n except ANNUAL.\n \"\"\"\n start_date_choices = []\n choices = []\n start = None\n for i, name in enumerate(periods_date_ranges):\n start = periods_date_ranges[name][0]\n if i == 0:\n prev_start = start\n\n # For annual period (frequency) do not create optgrp\n if period != Indicator.ANNUAL and start.year != prev_start.year:\n start_date_choices.append((prev_start.year, tuple(choices)))\n prev_start = start\n choices = []\n\n # TODO: localize the following dates\n if period == Indicator.MONTHLY:\n # this is the value printed to IPTT:\n value = \"{}\".format(l10n_monthly_date(periods_date_ranges[name][0]))\n else:\n value = \"{} ({} - {})\".format(\n name,\n l10n_date(periods_date_ranges[name][0]),\n l10n_date(periods_date_ranges[name][1])\n )\n if from_or_to == self.FROM:\n key = periods_date_ranges[name][0]\n else:\n key = periods_date_ranges[name][1]\n choices.append((key, value))\n\n if period == Indicator.ANNUAL:\n start_date_choices = choices\n else:\n if start:\n # now add the last set of choices from the last iteration\n start_date_choices.append((start.year, tuple(choices)))\n return start_date_choices\n\n def get_context_data(self, **kwargs):\n context = super(IPTT_Mixin, self).get_context_data(**kwargs)\n reporttype = kwargs.get('reporttype')\n program_id = kwargs.get('program_id')\n\n try:\n self.program = Program.objects.get(pk=program_id)\n except Program.DoesNotExist:\n context['redirect'] = reverse_lazy('iptt_quickstart')\n messages.info(self.request, _(\"Please select a valid program.\"))\n return context\n\n self._update_filter_form_initial(self.request.GET)\n filters = self._get_filters(self.filter_form_initial_data)\n\n if reporttype == self.REPORT_TYPE_TIMEPERIODS:\n period = self.filter_form_initial_data[self.REPORT_TYPE_TIMEPERIODS]\n else:\n period = self.filter_form_initial_data[self.REPORT_TYPE_TARGETPERIODS]\n\n try:\n num_recents = self.filter_form_initial_data['numrecentperiods']\n except KeyError:\n num_recents = 0\n\n try:\n show_all = self.filter_form_initial_data['timeframe']\n except KeyError:\n show_all = 0\n\n # calculate aggregated actuals (sum, avg, last) per 
reporting period\n # (monthly, quarterly, tri-annually, seminu-annualy, and yearly) for each indicator\n lastlevel = Level.objects.filter(indicator__id=OuterRef('pk')).order_by('-id')\n last_data_record = CollectedData.objects.filter(indicator=OuterRef('pk')).order_by('-date_collected')\n indicators = Indicator.objects.filter(program__in=[program_id], **filters) \\\n .annotate(actualsum=Sum('collecteddata__achieved'),\n actualavg=Avg('collecteddata__achieved'),\n lastlevel=Subquery(lastlevel.values('name')[:1]),\n lastlevelcustomsort=Subquery(lastlevel.values('customsort')[:1]),\n lastdata=Subquery(last_data_record.values('achieved')[:1])) \\\n .values(\n 'id', 'number', 'name', 'program', 'target_frequency', 'lastlevel', 'unit_of_measure',\n 'direction_of_change', 'unit_of_measure_type', 'is_cumulative', 'baseline', 'baseline_na',\n 'lop_target', 'actualsum', 'actualavg', 'lastdata', 'lastlevelcustomsort')\n\n start_period = self.request.GET.get('start_period')\n end_period = self.request.GET.get('end_period')\n\n if reporttype == self.REPORT_TYPE_TIMEPERIODS:\n # Update the report_end_date to make sure it ends with the last period's end_date\n # Also, get the all of the periodic date ranges based on the selected period\n report_end_date, all_date_ranges, periods_date_ranges = self._generate_timeperiods(\n start_period, end_period, period, show_all, num_recents)\n\n elif reporttype == self.REPORT_TYPE_TARGETPERIODS:\n target_frequencies = Indicator.objects \\\n .filter(program__in=[program_id], target_frequency__isnull=False) \\\n .exclude(target_frequency=Indicator.EVENT) \\\n .values_list('target_frequency') \\\n .distinct() \\\n .order_by('target_frequency')\n\n if (period,) not in target_frequencies:\n period = target_frequencies[0][0]\n\n report_end_date, all_date_ranges, periods_date_ranges = self._generate_targetperiods(\n self.program, start_period, end_period, period, show_all, num_recents)\n indicators = indicators.filter(target_frequency=period)\n else:\n context['redirect'] = reverse_lazy('iptt_quickstart')\n messages.info(self.request, _(\"Please select a valid report type.\"))\n return context\n\n if period == Indicator.MID_END or period == Indicator.LOP:\n reporting_sdate = l10n_date(self.program.reporting_period_start)\n reporting_edate = l10n_date(self.program.reporting_period_end)\n all_periods_start = ((self.program.reporting_period_start, reporting_sdate,),)\n all_periods_end = ((self.program.reporting_period_end, reporting_edate),)\n\n period_start_initial = None # self.program.reporting_period_start\n period_end_initial = None # self.program.reporting_period_end\n else:\n try:\n period_start_initial = periods_date_ranges[periods_date_ranges.keys()[0]][0]\n period_end_initial = periods_date_ranges[periods_date_ranges.keys()[-1]][1]\n except IndexError:\n period_start_initial = None\n period_end_initial = None\n all_periods_start = self.prepare_iptt_period_dateranges(period, all_date_ranges, self.FROM)\n all_periods_end = self.prepare_iptt_period_dateranges(period, all_date_ranges, self.TO)\n # TODO: localize the following dates ?\n self.filter_form_initial_data['period_choices_start'] = tuple(all_periods_start)\n self.filter_form_initial_data['period_choices_end'] = tuple(all_periods_end)\n self.filter_form_initial_data['period_start_initial'] = period_start_initial\n self.filter_form_initial_data['period_end_initial'] = period_end_initial\n\n self.annotations = self._generate_annotations(periods_date_ranges, period, reporttype)\n # update the queryset with 
annotations for timeperiods\n indicators = indicators.annotate(**self.annotations).order_by('lastlevelcustomsort', 'number', 'name')\n indicators = self.prepare_indicators(reporttype, period, periods_date_ranges, indicators)\n\n context['report_end_date_actual'] = report_end_date\n context['report_start_date'] = self.program.reporting_period_start\n context['report_end_date'] = report_end_date\n context['report_date_ranges'] = periods_date_ranges\n context['indicators'] = indicators\n context['program'] = self.program\n context['reporttype'] = reporttype\n return context\n\n\nclass IPTT_ExcelExport(IPTT_Mixin, TemplateView):\n # TODO: should be localize dates in the Excel format\n\n def get_filename(self, reporttype):\n report = 'TvA'\n if reporttype == self.REPORT_TYPE_TIMEPERIODS:\n report = \"Actuals only\"\n filename = 'IPTT {} report {}.xlsx'.format(report, timezone.now().strftime('%b %d, %Y'))\n return filename\n\n def style_range(self, ws, cell_range, font, fill):\n # first_cell = ws[cell_range.split(\":\")[0]]\n\n rows = ws[cell_range]\n for row in rows:\n for cell in row:\n if fill:\n cell.fill = fill\n if font:\n cell.font = font\n\n def add_headers(self, ws, data):\n report_header_font = Font(size=18)\n headers_font = Font(bold=True)\n\n alignment = Alignment(horizontal='center',\n vertical='bottom',\n text_rotation=0,\n wrap_text=False,\n shrink_to_fit=False,\n indent=0)\n alignment_right = Alignment(horizontal='right')\n\n bgcolor = PatternFill('solid', \"EEEEEE\")\n ws['A1'] = \"Indicator Performance Tracking Report\"\n ws['A1'].font = report_header_font\n ws.merge_cells('A1:H1')\n\n ws['A2'] = \"{0} - {1}\".format(datetime.strftime(data['report_start_date'], \"%b %d, %Y\"),\n datetime.strftime(data['report_end_date'], \"%b %d, %Y\"))\n ws['A2'].font = report_header_font\n ws.merge_cells('A2:H2')\n\n ws['A3'] = data['program'].name\n ws['A3'].font = report_header_font\n ws.merge_cells('A3:H3')\n\n ws['A4'] = 'No.'\n ws['B4'] = 'Indicator'\n ws['C4'] = 'Level'\n ws['D4'] = 'Unit of measure'\n ws['E4'] = 'Change'\n ws['F4'] = 'C / NC'\n ws['G4'] = '# / %'\n ws['H4'] = 'Baseline'\n\n ws.merge_cells(start_row=3, start_column=9, end_row=3, end_column=11)\n ws.cell(row=3, column=9).value = 'Life of Program'\n ws.cell(row=3, column=9).alignment = alignment\n ws.cell(row=3, column=9).font = headers_font\n\n ws['I4'] = 'Target'\n ws['I4'].alignment = alignment_right\n ws['J4'] = 'Actual'\n ws['J4'].alignment = alignment_right\n ws['K4'] = '% Met'\n ws['K4'].alignment = alignment_right\n periods = data['report_date_ranges']\n col_offset = 0\n col = 0\n if data['reporttype'] == self.REPORT_TYPE_TARGETPERIODS:\n for name, period in periods.items():\n col = 12 + col_offset\n\n # processs period date ranges\n try:\n start_date = datetime.strftime(period[0], '%b %d, %Y')\n end_date = datetime.strftime(period[1], '%b %d, %Y')\n\n # process period name\n ws.merge_cells(start_row=2, start_column=col, end_row=2, end_column=col + 2)\n ws.cell(row=2, column=col).value = name\n ws.cell(row=2, column=col).alignment = alignment\n ws.cell(row=2, column=col).font = headers_font\n\n ws.merge_cells(start_row=3, start_column=col, end_row=3, end_column=col + 2)\n ws.cell(row=3, column=col).value = \"{} - {}\".format(start_date, end_date)\n ws.cell(row=3, column=col).alignment = alignment\n ws.cell(row=3, column=col).font = headers_font\n\n except TypeError:\n start_date = ''\n end_date = ''\n ws.merge_cells(start_row=3, start_column=col, end_row=3, end_column=col + 2)\n ws.cell(row=3, column=col).value = 
name\n ws.cell(row=3, column=col).alignment = alignment\n ws.cell(row=3, column=col).font = headers_font\n\n ws.cell(row=4, column=col).value = 'Target'\n ws.cell(row=4, column=col).alignment = alignment_right\n ws.cell(row=4, column=col + 1).value = 'Actual'\n ws.cell(row=4, column=col + 1).alignment = alignment_right\n ws.cell(row=4, column=col + 2).value = '% Met'\n ws.cell(row=4, column=col + 2).alignment = alignment_right\n col_offset += 3\n col += 2\n elif data['reporttype'] == self.REPORT_TYPE_TIMEPERIODS:\n for name, period in periods.items():\n col = 12 + col_offset\n ws.cell(row=2, column=col).value = name\n ws.cell(row=2, column=col).alignment = alignment\n ws.cell(row=2, column=col).font = headers_font\n ws.column_dimensions[get_column_letter(col)].width = 30\n\n start_date = datetime.strftime(period[0], '%b %d, %Y')\n end_date = datetime.strftime(period[1], '%b %d, %Y')\n ws.cell(row=3, column=col).value = \"{} - {}\".format(start_date, end_date)\n ws.cell(row=3, column=col).alignment = alignment\n ws.cell(row=3, column=col).font = headers_font\n\n ws.cell(row=4, column=col).value = \"Actual\"\n ws.cell(row=4, column=col).alignment = alignment_right\n col_offset += 1\n\n header_range = CellRange(min_col=1, min_row=4, max_col=col, max_row=4).coord\n self.style_range(ws, header_range, headers_font, bgcolor)\n return ws\n\n def add_data(self, wb, ws, context):\n alignment = Alignment(wrap_text=True)\n indicators = context['indicators']\n periods = context['report_date_ranges']\n row = 5\n for indicator in indicators:\n wb.guess_types = False\n ws.cell(row=row, column=1).value = indicator['number'].encode('UTF-8')\n ws.cell(row=row, column=2).value = indicator['name'].encode('UTF-8')\n ws.cell(row=row, column=2).alignment = alignment\n ws.cell(row=row, column=3).value = indicator['lastlevel'].encode('UTF-8')\n ws.cell(row=row, column=4).value = indicator['unit_of_measure'].encode('UTF-8')\n ws.cell(row=row, column=4).alignment = alignment\n ws.cell(row=row, column=5).value = indicator['direction_of_change'].encode('UTF-8')\n\n try:\n ws.cell(row=row, column=6).value = indicator['cumulative'].encode('UTF-8')\n except KeyError:\n ws.cell(row=row, column=6).value = ''\n\n ws.cell(row=row, column=7).value = indicator['unittype'].encode('UTF-8')\n wb.guess_types = True\n ws.cell(row=row, column=8).value = indicator['baseline'].encode('UTF-8')\n ws.cell(row=row, column=9).value = indicator['lop_target'].encode('UTF-8')\n ws.cell(row=row, column=10).value = indicator['lop_actual'].encode('UTF-8')\n ws.cell(row=row, column=11).value = indicator['lop_percent_met'].encode('UTF-8')\n\n # ws.cell(row=row, column=11).number_format = \"$\"\n col_offset = 0\n col = 0\n if context['reporttype'] == self.REPORT_TYPE_TARGETPERIODS:\n for k, v in periods.items():\n col = 12 + col_offset\n target = \"{}_period_target\".format(k)\n ws.cell(row=row, column=col).value = indicator[target]\n\n actual = \"{}_actual\".format(k)\n ws.cell(row=row, column=col + 1).value = indicator[actual]\n\n percent_met = \"{}_percent_met\".format(k)\n try:\n ws.cell(row=row, column=col + 2).value = indicator[percent_met].encode('UTF-8')\n except ValueError:\n pass\n\n col_offset += 3\n elif context['reporttype'] == self.REPORT_TYPE_TIMEPERIODS:\n for k, v in periods.items():\n col = 12 + col_offset\n actual = \"{}_actual\".format(k)\n ws.cell(row, col).value = indicator[actual]\n col_offset += 1\n row += 1\n return ws\n\n def set_column_widths(self, ws):\n widths = [10, 100, 12, 40, 8, 12]\n for i, w in enumerate(widths):\n 
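# Each entry in widths styles one spreadsheet column, A through F:\n            # No., Indicator, Level, Unit of measure, Change, C / NC.\n            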
ws.column_dimensions[get_column_letter(i + 1)].width = w\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n wb = Workbook()\n # wb.guess_types = True\n ws = wb.active\n\n ws = self.add_headers(ws, context)\n ws = self.add_data(wb, ws, context)\n self.set_column_widths(ws)\n\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(self.get_filename(context['reporttype']))\n wb.save(response)\n return response\n\n\nclass IPTT_ReportIndicatorsWithVariedStartDate(TemplateView):\n template_name = \"indicators/iptt_indicators_varied_startdates.html\"\n\n def get_context_data(self, **kwargs):\n context = super(IPTT_ReportIndicatorsWithVariedStartDate, self).get_context_data(**kwargs)\n program_id = kwargs.get('program_id')\n\n try:\n program = Program.objects.get(pk=program_id)\n except Program.DoesNotExist:\n context['redirect'] = reverse_lazy('iptt_quickstart')\n messages.info(self.request, _(\"Please select a valid program.\"))\n return context\n\n if program.do_periodictargets_match_reporting_date is True:\n context['redirect'] = reverse_lazy('iptt_quickstart')\n context['program'] = program\n context['indicators'] = program.get_indicators_in_need_of_targetperiods_fixing\n return context\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n try:\n redirect_url = context['redirect']\n return HttpResponseRedirect(redirect_url)\n except KeyError:\n pass\n return self.render_to_response(context)\n\n\nclass IPTTReportQuickstartView(FormView):\n template_name = 'indicators/iptt_quickstart.html'\n form_class = IPTTReportQuickstartForm\n FORM_PREFIX_TIME = 'timeperiods'\n FORM_PREFIX_TARGET = 'targetperiods'\n\n def get_context_data(self, **kwargs):\n context = super(IPTTReportQuickstartView, self).get_context_data(**kwargs)\n # Add two instances of the same form to context if they're not present\n if 'form' not in context:\n context['form'] = self.form_class(request=self.request, prefix=self.FORM_PREFIX_TIME)\n if 'form2' not in context:\n context['form2'] = self.form_class(request=self.request, prefix=self.FORM_PREFIX_TARGET)\n return context\n\n def get_form_kwargs(self):\n kwargs = super(IPTTReportQuickstartView, self).get_form_kwargs()\n kwargs['request'] = self.request\n return kwargs\n\n def post(self, request, *args, **kwargs):\n targetprefix = request.POST.get('%s-formprefix' % self.FORM_PREFIX_TARGET)\n timeprefix = request.POST.get('%s-formprefix' % self.FORM_PREFIX_TIME)\n program_id = request.POST.get('targetperiods-program', None)\n if program_id:\n program = Program.objects.get(pk=program_id)\n if program.do_periodictargets_match_reporting_date is False:\n return HttpResponseRedirect(reverse_lazy('iptt_redirect', kwargs={'program_id': program_id}))\n\n # set prefix to the current form\n if targetprefix is not None:\n prefix = targetprefix\n else:\n prefix = timeprefix\n\n form = IPTTReportQuickstartForm(self.request.POST, prefix=prefix, request=self.request)\n\n # call the form_valid/invalid with the correct prefix and form\n if form.is_valid():\n return self.form_valid(**{'form': form, 'prefix': prefix})\n else:\n return self.form_invalid(**{'form': form, 'prefix': prefix})\n\n def form_valid(self, **kwargs):\n context = self.get_context_data()\n form = kwargs.get('form')\n prefix = kwargs.get('prefix')\n\n if prefix == self.FORM_PREFIX_TARGET:\n period = form.cleaned_data.get('targetperiods')\n context['form2'] = form\n 
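# Whichever quickstart form was submitted is echoed back under its own\n            # context key, while a fresh unbound form is rebuilt for the other\n            # prefix so that both forms always render.\n            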
context['form'] = self.form_class(request=self.request,\n prefix=self.FORM_PREFIX_TIME)\n else:\n prefix = self.FORM_PREFIX_TIME\n period = form.cleaned_data.get('timeperiods')\n context['form'] = form\n context['form2'] = self.form_class(request=self.request,\n prefix=self.FORM_PREFIX_TARGET)\n\n program = form.cleaned_data.get('program')\n num_recents = form.cleaned_data.get('numrecentperiods')\n timeframe = form.cleaned_data.get('timeframe')\n redirect_url = reverse_lazy('iptt_report', kwargs={'program_id': program.id, 'reporttype': prefix})\n\n redirect_url = \"{}?{}={}&timeframe={}\".format(redirect_url, prefix, period, timeframe)\n if num_recents:\n redirect_url = \"{}&numrecentperiods={}\".format(redirect_url, num_recents)\n return HttpResponseRedirect(redirect_url)\n\n def form_invalid(self, form, **kwargs):\n context = self.get_context_data()\n form = kwargs.get('form')\n if kwargs.get('prefix') == self.FORM_PREFIX_TARGET:\n context['form2'] = form\n context['form'] = self.form_class(request=self.request, prefix=self.FORM_PREFIX_TIME)\n else:\n context['form'] = form\n context['form2'] = self.form_class(request=self.request, prefix=self.FORM_PREFIX_TARGET)\n return self.render_to_response(context)\n\n\nclass IPTT_ReportView(IPTT_Mixin, TemplateView):\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n\n form_kwargs = {'request': request, 'program': context['program']}\n context['form'] = IPTTReportFilterForm(initial=self.filter_form_initial_data, **form_kwargs)\n\n context['report_wide'] = True\n if context.get('redirect', None):\n return HttpResponseRedirect(reverse_lazy('iptt_quickstart'))\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n filterdata = request.POST.copy()\n # no need to include this token in querystring\n del (filterdata['csrfmiddlewaretoken'])\n url_kwargs = {\n 'program_id': filterdata['program'],\n 'reporttype': kwargs['reporttype'],\n }\n # do not include it in the querystring because it is already part of the url kwargs\n del filterdata['program']\n\n # if show_all or most_recent is specified then do not filter\n # by period_start or period_end dates.\n if filterdata.get('timeframe', None) is not None:\n try:\n del (filterdata['start_period'])\n del (filterdata['end_period'])\n except KeyError:\n pass\n\n redirect_url = \"{}?{}\".format(reverse_lazy('iptt_report', kwargs=url_kwargs),\n filterdata.urlencode())\n return HttpResponseRedirect(redirect_url)\n","sub_path":"indicators/views/views_reports.py","file_name":"views_reports.py","file_ext":"py","file_size_in_byte":54349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"73914945","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport seaborn as sns\nimport matplotlib as mpl\nfrom simtools.Analysis.AnalyzeManager import AnalyzeManager\nfrom simtools.Analysis.BaseAnalyzers import BaseAnalyzer\nfrom simtools.SetupParser import SetupParser\nfrom scipy import interpolate\nimport os\n\nmpl.rcParams['pdf.fonttype'] = 42\nif not SetupParser.initialized:\n SetupParser.init('HPC')\n\nwdir = os.path.join(os.getcwd(),'output')\n\n\ndef smooth(x, window_len=10, window='hanning'):\n \"\"\"smooth the data using a window with requested size.\n\n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal\n (with the window size) in both ends so 
that transient parts are minimized\n in the begining and end part of the output signal.\n\n input:\n x: the input signal\n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output:\n the smoothed signal\n\n example:\n\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n\n see also:\n\n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.lfilter\n\n TODO: the window parameter could be the window itself if an array instead of a string\n NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y\n\nclass PfPR_Analyzer(BaseAnalyzer):\n def __init__(self, output_fname):\n super(PfPR_Analyzer, self).__init__()\n self.filenames = ['output/InsetChart.json']\n self.output_fname = output_fname\n self.channels = ['PCR Parasite Prevalence']\n\n\n def select_simulation_data(self, data, simulation):\n simdata = pd.DataFrame()\n\n year_to_report = 27\n\n for channel in self.channels:\n\n value = data[self.filenames[0]]['Channels'][channel]['Data'][year_to_report*365::]\n simdata[channel] = pd.Series(value)\n\n\n # simdata[self.tag] = simulation.tags[self.tag]\n # simdata['annual EIR'] = [x*365 for x in simdata['Daily EIR']]\n simdata['id'] = simulation.id\n # simdata['LHM'] = simulation.tags['larval_habitat_multiplier']\n simdata['seed'] = simulation.tags['Run_Number']\n simdata['period'] = simulation.tags['Period']\n simdata['infection_number'] = simulation.tags['Infection number']\n\n return simdata\n\n def finalize(self, all_data):\n\n selected = [data for sim, data in all_data.items()]\n if len(selected) == 0:\n print(\"No data have been returned... 
Exiting...\")\n return\n\n cmap = cm.get_cmap('viridis',10).colors\n\n fig, axes = plt.subplots(ncols=1, nrows=len(self.channels))\n cmap = cm.get_cmap('viridis',len(selected)).colors\n\n for i,sim in enumerate(selected):\n for j,ch in enumerate(self.channels):\n axes.plot(smooth(sim[ch]), color=cmap[i],alpha = 0.5)\n axes.set_ylabel(ch)\n fig_name = f'base_results'\n # plt.xlabel('day')\n # axes.set_ylabel('PfPR')\n # ax.set_ylim(0, 1)\n\n plt.tight_layout()\n # fig.suptitle('Seasonal scenario with IRS')\n plt.savefig(\n rf'C:\\Users\\jorussell\\Dropbox (IDM)\\Malaria Team Folder\\projects\\parasite_genetics\\DTK\\VarGenes\\outputs\\memory_sweep\\{fig_name}.eps')\n plt.savefig(\n rf'C:\\Users\\jorussell\\Dropbox (IDM)\\Malaria Team Folder\\projects\\parasite_genetics\\DTK\\VarGenes\\outputs\\memory_sweep\\{fig_name}.png')\n plt.show()\n\n\n # df = pd.concat(selected).reset_index(drop=True)\n # df.to_csv('%s_inset_chart.csv' % self.output_fname)\n\nif __name__ == '__main__' :\n # import json\n # with open('C:\\git\\magude\\pickup_realistic\\inputs\\Demographics\\demo_exe_224.json') as json_file:\n # datafile = json.load(json_file)\n # print([x['NodeID'] for x in datafile['Nodes']])\n\n\n expids = ['eea8e1c8-8218-eb11-a2c7-c4346bcb1553']\n\n simids = ['f1a8e1c8-8218-eb11-a2c7-c4346bcb1553']\n\n expnames = ['base_demo']\n channel_name = 'PCR Parasite Prevalence'\n for expname, simid in zip(expnames, simids) :\n output_fname = os.path.join(wdir, simid,expname)\n am = AnalyzeManager(sim_list = simids,\n analyzers=PfPR_Analyzer(output_fname))\n am.analyze()\n","sub_path":"immunity_investigations/analyzers/prevalence_analyzer.py","file_name":"prevalence_analyzer.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"55062879","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport re\nimport getopt\nimport os\nimport sys\nimport datetime\nimport pandas as pd\nimport xlsxwriter\n\n\n\n\ndef auto_str_number(text, suffix=''):\n pattern = re.compile(r'^\\s*[+-]?\\d*[.]\\d+$|^\\s*[+-]?\\s*\\d+$')\n match = pattern.match(text)\n if match:\n if '.' in text:\n return float(text)\n else:\n return int(text)\n else:\n return text.strip('\\n').strip(' ') + suffix\n\n\ndef collect_csv_files(dir_path):\n files = os.listdir(dir_path)\n results = dict()\n for f in files:\n if os.path.isfile(dir_path + '/' + f) \\\n and 'insert' not in f.lower() and 'update' not in f.lower() and 'read' not in f.lower():\n key = f.split('.')[0]\n if key not in results.keys():\n results[key] = list()\n results[key].append('.' 
+ '.'.join(f.split('.')[1:]))\n pass\n return results\n\n\ndef add_csv_to_sheet(worksheet, csv_file, start_col, suffix=''):\n row_idx = 0\n col_idx = 0\n for line in open(csv_file).readlines():\n col_idx = start_col\n cols = line.split(',')\n for col in cols:\n if row_idx == 0:\n worksheet.write(row_idx, col_idx, auto_str_number(col, suffix))\n else:\n worksheet.write(row_idx, col_idx, auto_str_number(col))\n col_idx += 1\n row_idx += 1\n return col_idx\n\n\ndef add_sheet_to_workbook(workbook, dir_path, files, share_name,sheets,dbsize,dbsize_sheet,suffix=''):\n count = 0\n for file_name in files.keys():\n if not file_name.startswith('prepare') and 'redolog' not in file_name:\n count += 1\n sht_name = '{0}-{1}'.format(file_name.replace(wlprefix, ''), share_name)\n worksheet = workbook.add_worksheet(sht_name[:31])\n if 'dbsz' in sht_name: # dbsz has the different way to add into summary\n dbszfile = os.path.join(dir_path, '{0}{1}'.format(file_name, '.csv'))\n dbszinfo = pd.read_csv(dbszfile, usecols=['workload']).to_dict(orient='dict')\n for k, v in dbszinfo['workload'].items():\n dbsize.update({v: k + 2})\n dbsize_sheet.append(sht_name)\n else:\n sheets.append(sht_name[:31])\n col = 0\n for ext in sorted(files[file_name], reverse=True):\n sfile = os.path.join(dir_path, '{}{}'.format(file_name, ext))\n col = add_csv_to_sheet(worksheet, sfile, col) + 2\n return count\n\n\ndef get_prefix(dir_name):\n prefix = 'sb-20200202_020202'\n\n if dir_name.startswith('sb'):\n prefix = 'sb-20200202_020202'\n elif dir_name.startswith('tpcc'):\n prefix = 'tpcc-20200202_020202'\n elif dir_name.startswith('sysbench'):\n prefix = 'sysbench-20200202_020202'\n elif dir_name.startswith('ycsb'):\n prefix = 'ycsb_200202_020202'\n\n return prefix\n\n\ndef process_args(argv):\n help_str = 'sb-result.py -d path1,path2 -s suffix1,suffix2 -o output_xlsx_path [-t sysbench|ycsb]'\n try:\n opts, args = getopt.getopt(argv[1:], 'hd:s:o:t:')\n except getopt.GetoptError:\n print(help_str)\n sys.exit(1)\n\n result_dir_list = list()\n suffix_list = list()\n out_file = ''\n data_type = 'sysbench'\n\n for opt, arg in opts:\n if opt == '-h':\n print(help_str)\n sys.exit()\n elif opt == '-d':\n result_dir_list = arg.split(',')\n elif opt == '-s':\n suffix_list = arg.split(',')\n elif opt == '-o':\n out_file = arg\n elif opt == '-t':\n if arg.lower() in ('sysbench', 'ycsb'):\n data_type = arg\n\n if len(result_dir_list) == 0 or len(suffix_list) == 0 or len(out_file) == 0:\n print(help_str)\n sys.exit(2)\n\n if len(result_dir_list) != len(suffix_list):\n print(help_str)\n sys.exit(3)\n\n tuple_list = list()\n for result_dir, suffix in zip(result_dir_list, suffix_list):\n tuple_list.append((result_dir, suffix))\n\n return tuple_list, out_file, data_type\n\n\n## there are 5 parts for every workloads,\n# like: oltp_read_only.iostat.all_part.csv oltp_read_only.iostat.cpu.csv\n# oltp_read_only.iostat.csv oltp_read_only.result.csv oltp_read_only.time.csv\n#wlprefix=\"oltp_\"\nwlprefix=\"\"\npg_fixwls = [\n 'dbsz.csv',\n 'prepare.result.csv',\n 'prepare.time.csv'\n ]\n# pg_workloads = [\n# 'oltp_read_only',\n# 'oltp_update_non_index',\n# 'oltp_update_index',\n# 'oltp_point_select',\n# 'oltp_read_write',\n# 'oltp_write_only',\n# ]\n\npg_workloads = [\n # 'read_only',\n # 'update_non_index',\n # 'update_index',\n # 'point_select',\n # 'read_write',\n # 'write_only',\n 'select_random_points',\n 'select_random_ranges'\n ]\n\npg_workload_suffix = [\n 'iostat.all_part.csv',\n 'iostat.cpu.csv',\n 'iostat.csv',\n 'result.csv',\n 'time.csv'\n]\n\ndef 
collect_result_files(dir_path):\n    f = os.listdir(dir_path)\n    results = dict()\n    # rule = {pg_workloads[0]: 0, pg_workloads[1]: 1, pg_workloads[2]: 6, pg_workloads[3]: 11, pg_workloads[4]: 16,\n    #         pg_workloads[5]: 21, pg_workloads[6]: 26}\n    rule = {pg_fixwls[0]: 0}\n    for wl in pg_workloads:\n        for suffix in pg_workload_suffix:\n            item='{0}{1}.{2}'.format(wlprefix,wl,suffix)\n            rule[item]=len(rule)\n\n    rule[pg_fixwls[1]] = len(rule)\n    rule[pg_fixwls[2]] = len(rule)\n\n    wlf = sorted(f, key=lambda x: rule[x])\n    for f in wlf:\n        if os.path.isfile(os.path.join(dir_path,f)):\n            key = f.split('.')[0]\n            if key not in results.keys():\n                results[key] = list()\n            results[key].append('.' + '.'.join(f.split('.')[1:]))\n    return results\n\n\ndef fill_summary_postgres(workbook, sheet,row_idx,sheetname,dbsize,dbsize_sheet,suffix):\n    num_format = workbook.add_format()\n    num_format.set_num_format('#,##0.0')\n\n    formula_average = '=AVERAGE(\\'{0}\\'!{1}2:{1}4000)'\n    formula_average_percent = '=100-AVERAGE(\\'{0}\\'!{1}2:{1}4000)'\n    formula_size = '=\\'{0}\\'!{1}'\n    formula_size_sector = '=\\'{0}\\'!{1}/2/1024/1024'\n    formula_storage_saving = '=(D{0}-C{0})/D{0}' # temporary value =(C2-D2)/C2\n\n    parts_interval = 3\n    columns_ss = [\n        ['storage saving', '', formula_storage_saving]\n    ]\n    columns_sz = [\n        ['DB size physical (GB)', 'B', formula_size_sector],\n        ['DB size logical (GB)', 'C', formula_size_sector],\n        ['comp_ratio', 'D', formula_size]\n    ]\n    columns = [\n        ['tps', 'E', formula_average],\n        ['qps', 'F', formula_average],\n        ['%99 latency', 'G', formula_average],\n        ['Read throughput (MB/s)', 'O', formula_average],\n        ['Write throughput (MB/s)', 'P', formula_average],\n        ['avgrq-sz', 'Q', formula_average],\n        ['avgqu-sz', 'R', formula_average],\n        ['%util i/o', 'W', formula_average],\n        ['%user cpu', 'Z', formula_average],\n        ['%sys cpu', 'AA', formula_average],\n        ['%iowait cpu', 'AB', formula_average],\n        ['%cpu', 'AC', formula_average_percent]\n    ]\n\n    ssl=len(columns_ss)\n    szl=len(columns_sz)\n    # add the first column of the header\n    sheet.write(row_idx, 0, suffix)\n\n    # add head of storage saving\n    for i in range(0, len(columns_ss)):\n        sheet.write(row_idx, i + 1, columns_ss[i][0])\n    # add head of szinfo\n    for i in range(0, len(columns_sz)):\n        sheet.write(row_idx, i + 1+ssl, columns_sz[i][0])\n    # add the other data headers\n    for i in range(0, len(columns)):\n        sheet.write(row_idx, i + 1+ssl+szl, columns[i][0])\n\n    for i in range(0, len(sheetname)):\n        wksheet = sheetname[i]\n        # get db size from dbsize sheet\n        prename,suffixname=wksheet.split('-',1)\n        prename = '{0}{1}'.format(wlprefix, prename)\n        dbsz_sheet=dbsize_sheet[0]\n        dblogical_cell='{0}{1}'.format('C',dbsize[prename])\n        dbphy_cell='{0}{1}'.format('B',dbsize[prename])\n        columns_sz[0][1]=dbphy_cell\n        columns_sz[1][1]=dblogical_cell\n        columns_sz[2][1]='{0}{1}'.format('D',dbsize[prename])\n        if 'intel' in wksheet: # for Intel or Micron SSDs, logical size = physical size\n            columns_sz[0][1] = dblogical_cell\n            columns_sz[0][2] = formula_size\n            columns_sz[1][2] = formula_size\n        # get db size from dbsize sheet\n\n        # write workload name to the first column\n        sheet.write(row_idx + i + 1, 0, wksheet)\n\n        # add db size data first\n        for j in range(0, len(columns_sz)):\n            column = columns_sz[j]\n            value = column[2].format(dbsz_sheet, column[1])\n            if not value:\n                value = 0\n            sheet.write(row_idx + i + 1, j + 1+ssl, value, num_format)\n        # add storage saving data second\n        if 'intel-none' in wksheet:\n            # no storage saving to compute here\n            pass\n        elif 'vanda' in wksheet:\n            for j in range(0, ssl):\n                column = columns_ss[j]\n                column_index=i+2 # +2: the dbsize cells start at the 3rd column; the first 2 columns hold the workload and the storage saving\n                value = column[2].format(column_index)\n                if not value:\n                    value = 0\n                sheet.write(row_idx + i + 1, j + 1, value, num_format)\n        elif 'intel-snappy' in wksheet:\n            # this needs the storage size obtained for intel-none first, so it is computed at the end, when the summary is merged\n            for j in range(0, ssl):\n                columns_ss[0][2]='=(C{0}-C{1})/C{0}'\n                column = columns_ss[j]\n                column_index=i+2 # +2: the dbsize cells start at the 3rd column; the first 2 columns hold the workload and the storage saving\n                value = column[2].format(column_index,column_index+7)\n                if not value:\n                    value = 0\n                sheet.write(row_idx + i + 1, j + 1, value, num_format)\n        elif 'intel-zlib' in wksheet:\n            # this needs the storage size obtained for intel-none first, so it is computed at the end, when the summary is merged\n            for j in range(0, ssl):\n                columns_ss[0][2]='=(C{0}-C{1})/C{0}'\n                column = columns_ss[j]\n                column_index=i+2 # +2: the dbsize cells start at the 3rd column; the first 2 columns hold the workload and the storage saving\n                value = column[2].format(column_index,column_index+14)\n                if not value:\n                    value = 0\n                sheet.write(row_idx + i + 1, j + 1, value, num_format)\n        #add comp_ratio\n        # for j in range(0, len(columns_sz)):\n        #     column = columns_sz[j]\n        #     value = column[2].format(dbsz_sheet, column[1])\n        #     if not value:\n        #         value = 0\n        #     sheet.write(row_idx + i + 1, j + 1+ssl, value, num_format)\n        for j in range(0, len(columns)):\n            column = columns[j]\n            value = column[2].format(wksheet, column[1])\n            if not value:\n                value = 0\n            sheet.write(row_idx + i + 1, j + 1+ssl+szl, value, num_format)\n    return row_idx + len(sheetname) + parts_interval\n\n\nimport glob, time\ndef search_all_files_return_by_time_reversed(path, reverse=True):\n    return sorted(glob.glob(os.path.join(path, '*')),\n                  key=lambda x: time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(os.path.getctime(x))),\n                  reverse=reverse)\n\n\nif __name__ == '__main__':\n    # ## debug search csv files by modified sequence\n    # data=search_all_files_return_by_time_reversed(\"F:\\\\PostgreSQL\\\\benchmarks\\\\4.139\\\\1029\\\\pg-20191028_082618_vanda_128.18750000G_ff100\\\\csv\")\n    # print(data)\n    # ## debug search csv files by modified sequence\n\n    # result_dirs, out_file, data_type = process_args(argv)\n    if len(sys.argv) == 1:\n        print(\"Please input the csv folder\")\n        exit(0)\n    result_dirs = [\n        sys.argv[1],\n    ]\n    suffix=''\n    if len(sys.argv) == 3:\n        suffix=sys.argv[2]\n\n    st=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n    out_file = os.path.join(result_dirs[0], '{0}_{1}{2}'.format(st,'comparison', '.xlsx'))\n    workbook = xlsxwriter.Workbook(out_file)\n    summary_sheet=workbook.add_worksheet('summary')\n    if not workbook:\n        print('Failed to create Excel workbook!')\n        sys.exit(10)\n\n    summary_row_idx = 0\n    for result_dir in result_dirs:\n        dir_list = os.listdir(result_dir)\n        for d in dir_list:\n            pp = os.path.join(result_dir, d)\n            dir_path = os.path.join(pp, 'csv')\n            if os.path.exists(dir_path) and os.path.isdir(dir_path):\n                # read mgod.opts.log to get some re-configuration values\n                if suffix == \"bfo\":\n                    reconfigfile = os.path.join(os.path.dirname(dir_path), \"mgod.opts.log\")\n                    if os.path.isfile(reconfigfile):\n                        with open(reconfigfile) as mf:\n                            key = \"Reconfiguring\"\n                            for ones in mf.readlines():\n                                if key in ones:\n                                    confi_info=ones.split('\"')\n                                    pmin = re.compile(r'.*threads_min=([0-9]+).*')\n                                    match = pmin.match(ones)\n                                    if match:\n                                        evc_min=match.group(1)\n                                    pmax = re.compile(r'.*threads_max=([0-9]+).*')\n                                    match = pmax.match(ones)\n                                    if match:\n                                        evc_max=match.group(1)\n                                    pdirty = re.compile(r'.*eviction_dirty_target=([0-9]+).*')\n                                    match = pdirty.match(ones)\n                                    if match:\n                                        evc_dirty=match.group(1)\n                                    
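# These patterns pull the WiredTiger eviction settings out of the \"Reconfiguring\" log line.\n                                    # A sketch of the assumed line shape (the exact format of mgod.opts.log is an assumption here):\n                                    #   Reconfiguring ... \"eviction=(threads_min=4,threads_max=8),eviction_dirty_target=5,eviction_target=80,eviction_trigger=95\"\n                                    # which would yield evc_min='4', evc_max='8', evc_dirty='5', and so on.\n                                    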
ptarget = re.compile(r'.*eviction_target=([0-9]+).*')\n match = ptarget.match(ones)\n if match:\n evc_target=match.group(1)\n ptrigger = re.compile(r'.*eviction_trigger=([0-9]+).*')\n match = ptrigger.match(ones)\n if match:\n evc_trigger=match.group(1)\n suffix='evcInfo_{}_{}-{}.{}.{}'.format(evc_min,evc_max,evc_trigger,evc_target,evc_dirty)\n\n dbsize_sheet = []\n sheets_list = []\n dbsize = {}\n # get the ssd_name coompression_mode\n share_name=ssd = ''\n comp = ''\n dbsz = ''\n maxleafsz = ''\n kvsize = ''\n benchfp = os.path.join(os.path.dirname(dir_path), \"bench.info\")\n if os.path.isfile(benchfp):\n with open(benchfp) as fw:\n rt = fw.readline().split()\n ssd = rt[0].split('=')[1][:-4]\n dbsz = dbszinfo = rt[1].split('=')[1]\n # dbsz = dbszinfo.split('_')\n # tblist=dbszinfo.split('.')\n # if len(tblist) == 1:\n # table_size =0\n # table=0\n # else:\n # table=tblist[0]\n # table_size=tblist[1]\n # tb_size=table_size\n # if tb_size != 0:\n # table_size=tb_size.split('_')[0].rstrip('G')\n # # dbsz=int(eval(table)*eval(table_size)*200/1024/1024/1024)\n # dbsz='{0}{1}'.format(int(table*table_size*200/1024/1024/1024),'G')\n # if dbsz == 0:\n # tblist = dbszinfo.split('_')\n # if len(tblist) != 1:\n # dbsz = tblist[0]\n # share_name = '{0}-{1}G'.format(ssd, dbsz)\n share_name = '{0}{1}'.format(ssd, dbsz)\n if dbsz == '2048G':\n dbsz='2T'\n share_name = share_name.lstrip('.')\n share_name = share_name.rstrip('.')\n files = collect_result_files(dir_path)\n count = add_sheet_to_workbook(workbook, dir_path, files,share_name,sheets_list, dbsize, dbsize_sheet)\n summary_row_idx = fill_summary_postgres(workbook,summary_sheet,summary_row_idx,sheets_list,\n dbsize,dbsize_sheet,'{0}-{1}'.format(kvsize,suffix))\n workbook.close()\n","sub_path":"sysbh-summary.py","file_name":"sysbh-summary.py","file_ext":"py","file_size_in_byte":17007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"443985604","text":"class Node:\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n def insert(self, data):\n # Compare the new value with the parent node\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data\n\n \n # Print the tree\n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print( self.data),\n if self.right:\n self.right.PrintTree()\n\n # Inorder traversal\n # Left -> Root -> Right\n def inorderTraversal(self, root):\n res = []\n if root:\n res = self.inorderTraversal(root.left)\n res.append(root.data)\n res = res + self.inorderTraversal(root.right)\n return res\n\n # Preorder traversal\n # Root -> Left ->Right\n def PreorderTraversal(self, root):\n res = []\n if root:\n res.append(root.data)\n res = res + self.PreorderTraversal(root.left)\n res = res + self.PreorderTraversal(root.right)\n return res\n\n # Postorder traversal\n # Left ->Right -> Root\n def PostorderTraversal(self, root):\n res = []\n if root:\n res = self.PostorderTraversal(root.left)\n res = res + self.PostorderTraversal(root.right)\n res.append(root.data)\n return res\n\n\nprint('--- start ---')\n#root = Node(27)\n#\n#root.insert(14)\n#root.insert(35)\n#root.insert(10)\n#root.insert(19)\n#root.insert(31)\n#root.insert(42)\n\nroot = 
Node('S')\n\nroot.insert('VP')\nroot.insert('VB')\nroot.insert('see')\nroot.insert('NP')\nroot.insert('NP')\nroot.insert('NP')\nroot.insert('NNP')\nroot.insert('bob')\nroot.insert('VP')\nroot.insert('VB')\nroot.insert('run')\nroot.insert('PP')\nroot.insert('IN')\nroot.insert('in')\nroot.insert('NP')\nroot.insert('DT')\nroot.insert('the')\nroot.insert('NN')\nroot.insert('park')\n\nprint('--- tree constructed ---')\nprint(' PrintTree:')\nroot.PrintTree()\n\nprint(' inorderTraversal:')\nprint(root.inorderTraversal(root))\n\nprint(' PreorderTraversal:')\nprint(root.PreorderTraversal(root))\n\nprint(' PostorderTraversal:')\nprint(root.PostorderTraversal(root))\n\n\nprint('--- End ---')\n","sub_path":"oop/binaryTree.py","file_name":"binaryTree.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"408927460","text":"import numpy as np\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport csv\n\n# Open the earthquake data file.\ndata = '/Users/jamesevers/Desktop/earthquake_data.csv'\n\n# Create empty lists for the latitudes and longitudes.\nlats, lons = [], []\n\n\nwith open(data) as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n lats.append(float(row[1]))\n lons.append(float(row[2]))\n \n\nquake_map = Basemap(projection='robin', resolution= 'l', area_thresh=1000.0, lat_0=0, lon_0=-40)\nquake_map.drawcoastlines()\nquake_map.drawcountries()\nquake_map.fillcontinents(color='gray')\nquake_map.drawmapboundary()\nquake_map.drawmeridians(np.arange(0,360,30))\nquake_map.drawparallels(np.arange(-90,90,30))\n\nx,y= quake_map(lons,lats)\nquake_map.plot(x,y, 'ro', markersize=6)\n\nplt.show()\n\n\n\n","sub_path":"quakes_map.py","file_name":"quakes_map.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"462470698","text":"from Crypto.Cipher import AES\n\ndef phash(mes, a, b, n, d):\n\n def inner_func(msg, i):\n i = bytes(bin(i), \"utf-8\")\n while len(i) < b:\n i = b'0' + i\n aes_ = AES.new(i, AES.MODE_CFB)\n cipher = aes_.encrypt(bytes(msg, \"utf-8\"))\n return cipher\n\n def XOR(s1, s2):\n h = \"\".join([chr(ord(c1) ^ ord(c2)) for (c1,c2) in zip(s1,s2)])\n return h\n\n def chunks(x, n):\n for i in range(0, len(x), n):\n yield x[i:i + n]\n\n def m_tree(x, d):\n sub_t = []\n s = 0\n for i in x:\n i = inner_func(i, s)\n s += 1\n for i in chunks(x, d):\n if len(i) == 1:\n continue\n xor_ = XOR(i[0], i[1])\n if len(i) == d:\n for j in range(1, d):\n xor_ = XOR(xor_, i[j])\n else:\n for j in range(1, len(i)):\n xor_ = XOR(xor_, i[j])\n sub_t.append(xor_)\n if len(sub_t) == 1:\n return sub_t[0].encode(\"utf8\").hex()\n r = abs(a+b-n) // len(sub_t[0])\n if r == 1:\n return m_tree(sub_t, d)\n C = []\n h = ''\n for i in sub_t:\n h += i\n while len(h) % (a+b-n) != 0:\n h += '0'\n C = [h[i:i + abs(a+b-n)] for i in range(0, len(h), abs(a+b-n))]\n while len(C[-1]) < len(C[0]):\n C[-1] += '0'\n return m_tree(C, d)\n\n lm = len(mes)\n if lm % (a+b-n) != 0:\n mes += '1'\n while len(mes) % (a+b-n - lm) != 0:\n mes += '0'\n mes += str(lm)\n M = [mes[i:i+abs(a+b-n)] for i in range(0, len(mes), abs(a+b-n))]\n while len(M[-1]) < len(M[0]):\n M[-1] += '0'\n return m_tree(M, d)\n\none_block = 128\nkey = 32\nmax = 128\nfan = 3\nprint(phash(\"NVCXdfgfdgJOnvdfgfjkfghgfxzwedgjdjdfg\"*6, one_block, key, max, 
fan))\n","sub_path":"phash.py","file_name":"phash.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"540763238","text":"from .ParkingSpot import ParkingSpot\nfrom .utils import Size\n\nclass Level(object):\n    def __init__(self, id):\n        self.id = id\n        # Stacks of different available Parking Spots of various sizes\n        # 50 of each in each level\n        self.s_spots = [ParkingSpot('{0}-{1}-s'.format(self.id, i), Size.S) for i in range(50)]\n        self.m_spots = [ParkingSpot('{0}-{1}-m'.format(self.id, i), Size.M) for i in range(50)]\n        self.l_spots = [ParkingSpot('{0}-{1}-l'.format(self.id, i), Size.L) for i in range(50)]\n        self.xl_spots = [ParkingSpot('{0}-{1}-xl'.format(self.id, i), Size.XL) for i in range(50)]\n\n    def get_empty_spot(self, size):\n        if size == Size.S:\n            return self.s_spots.pop()\n        if size == Size.M:\n            return self.m_spots.pop()\n        if size == Size.L:\n            return self.l_spots.pop()\n        if size == Size.XL:\n            return self.xl_spots.pop()","sub_path":"system-design/parking-lot/Level.py","file_name":"Level.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"550005122","text":"\nimport numpy as np\nimport quantipy as qp\n\nfrom xlsxwriter import Workbook\nfrom weakref import WeakValueDictionary\nfrom properties import Properties\nfrom sheet import Sheet\nfrom box import Box\nfrom copy import copy\n\nclass Excel(Workbook):\n\n    _clusters_cache = WeakValueDictionary()\n\n    def __init__(self, filename):\n        super(Excel, self).__init__()\n        self.filename = filename\n        self.in_memory = True\n\n    def __repr__(self):\n        return 'Excel(%r)' % self.filename\n\n    def __str__(self):\n        return '%s' % self.filename\n\n    def __del__(self):\n        # delete all instances of ExcelFormats\n        pass\n\n    def add_sheet(self, cluster, sheet_name=None):\n        if sheet_name is None:\n            sheet_name = cluster.keys()[0]\n        self._clusters_cache[sheet_name] = cluster\n        return self.__add_sheet(sheet_name)\n\n    def add_sheets(self, clusters):\n        for cluster in clusters:\n            self.add_sheet(cluster, cluster.name)\n\n    def __add_sheet(self, sheet_name):\n        # Utility for shared code in add_worksheet() and add_chartsheet().\n\n        sheet_index = len(self.worksheets_objs)\n\n        # Initialization data to pass to the worksheet.\n        init_data = {\n            'name': sheet_name,\n            'index': sheet_index,\n            'str_table': self.str_table,\n            'worksheet_meta': self.worksheet_meta,\n            'optimization': self.optimization,\n            'tmpdir': self.tmpdir,\n            'date_1904': self.date_1904,\n            'strings_to_numbers': self.strings_to_numbers,\n            'strings_to_formulas': self.strings_to_formulas,\n            'strings_to_urls': self.strings_to_urls,\n            'nan_inf_to_errors': self.nan_inf_to_errors,\n            'default_date_format': self.default_date_format,\n            'default_url_format': self.default_url_format,\n            'excel2003_style': self.excel2003_style,\n            # 'remove_timezone': self.remove_timezone,\n        }\n\n        worksheet = Sheet(sheet_name)\n        worksheet._initialize(init_data)\n\n        self.worksheets_objs.append(worksheet)\n        self.sheetnames[sheet_name] = worksheet\n\n        self.__write_sheet(sheet_name)\n\n        return worksheet\n\n    def __write_sheet(self, sheet_name):\n        worksheet = self.sheetnames[sheet_name]\n        for chain in self._iter_objs(self._clusters_cache[sheet_name], 'Chain'):\n            for yk in chain.content_of_axis:\n                worksheet.row = copy(worksheet.start_row)\n                for first, last, view in self._is_first_last(self._get_views(chain, yk)):\n\n                    metas = (view.meta(), )\n                    frames = 
(view.dataframe, ) # should cope with more than one frame for grouped views\n\n box = Box(worksheet.row, worksheet.col, metas, frames)\n for (row, col), value, properties in box:\n xf = self.add_format(properties.xf_attrs)\n worksheet.write(row, col, value, xf)\n worksheet.row += box.values.shape[0] # x-orientation\n worksheet.col += box.values.shape[1] # x-orientation\n del self._clusters_cache[sheet_name]\n\n @classmethod\n def _iter_objs(cls, obj, cls_name):\n for value in obj.itervalues():\n if value.__class__.__name__ == cls_name:\n yield value\n else:\n for item in cls._iter_objs(value, cls_name):\n yield item\n\n @classmethod\n def _get_views(cls, chain, yk):\n obj = chain[chain.data_key][chain.filter][chain.source_name][yk]\n return cls._iter_objs(obj, 'View')\n\n @staticmethod\n def _is_first_last(obj):\n # flag-based tracking, so a later element that merely compares equal\n # to the first one is not misreported as first\n it = obj.__iter__()\n first = True\n element = it.next()\n while True:\n try:\n next_ = it.next()\n yield (first, False, element)\n first = False\n element = next_\n except StopIteration:\n yield (first, True, element)\n break\n","sub_path":"scripts/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"148932997","text":"import unittest\n\nimport numpy\n\nimport chainer\nfrom chainer.backends import cuda\nimport chainer.functions as F\nfrom chainer import testing\nfrom chainer.testing import attr\n\n\nclass UnaryFunctionsTestBase(unittest.TestCase):\n\n def make_data(self):\n raise NotImplementedError\n\n def setUp(self):\n self.eps = 1e-3\n while True:\n self.x, self.gy = self.make_data()\n if (numpy.abs(self.x - numpy.round(self.x)) > self.eps * 10).all():\n break\n\n def check_forward(self, op, op_xp, x_data):\n x = chainer.Variable(x_data)\n y = op(x)\n self.assertEqual(x.data.dtype, y.data.dtype)\n v = op_xp(x_data)\n testing.assert_allclose(\n v, y.data, atol=1e-7, rtol=1e-7)\n\n def check_forward_cpu(self, op, op_xp):\n self.check_forward(op, op_xp, self.x)\n\n def check_forward_gpu(self, op, op_xp):\n self.check_forward(op, op_xp, cuda.to_gpu(self.x))\n\n\n@testing.parameterize(*testing.product({\n 'shape': [(3, 2), ()],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\nclass TestCeil(UnaryFunctionsTestBase):\n\n def make_data(self):\n x = numpy.random.uniform(-10.0, 10.0, self.shape).astype(self.dtype)\n gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)\n return x, gy\n\n def test_forward_cpu(self):\n self.check_forward_cpu(F.ceil, numpy.ceil)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward_gpu(F.ceil, cuda.cupy.ceil)\n\n\ntesting.run_module(__name__, __file__)\n","sub_path":"tests/chainer_tests/functions_tests/math_tests/test_ceil.py","file_name":"test_ceil.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"364144168","text":"__author__ = 'Administrator'\n__author__ = 'xny'\n# date : 2018/6/28\nimport json\n\nimport requests\n\n# Step 1: log in to the education bureau's student enrollment management system\nurl_login = \"http://127.0.0.1:8090/recruit.students/login/in?account=admin&pwd=660B8D2D5359FF6F94F8D3345698F88C\"\n# Keep only the request headers that matter and drop the rest\nheaders1 = {\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n \"Referer\": \"http://127.0.0.1:8090/recruit.students/login/view\",\n }\n\n# Use a session so the login cookie (JSESSIONID) is reused by later requests\nsession = requests.session()\n\n# Send the login GET request\nr1 =
session.get(url_login, headers=headers1)\n#print(r1.text)\n\n# Step 2: enable the school\nurl_DisableSchool = \"http://127.0.0.1:8090/recruit.students/school/manage/enableOrDisableSchool\"\n# Keep only the request headers that matter; the session already carries the cookie\nheaders2 = {\n\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:62.0) Gecko/20100101 Firefox/62.0\",\n\"Referer\": \"http://127.0.0.1:8090/recruit.students/school/manage/index\",\n\"Content-Type\": \"application/json\",\n\"X-Requested-With\": \"XMLHttpRequest\",\n\"Connection\": \"keep-alive\",\n}\n# The endpoint expects a JSON body, so serialize the payload with json.dumps()\n# to match the application/json Content-Type declared above\nformdata = {\"id\":\"820890\",\"disable\":1,\"schoolId\":\"251\"}\npostdata = json.dumps(formdata)\n# Print the serialized payload\nprint(postdata)\n\n# Send the request; the session supplies the login cookie obtained above\nr2 = session.post(url_DisableSchool, headers=headers2, data=postdata)\nhtml = r2.text\nprint(html)\n\n# Check the response status code\nprint(r2.status_code)\n","sub_path":"接口项目实战一(WEB项目)/接口测试用例/启用学校.py","file_name":"启用学校.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"255446573","text":"import unittest\nfrom os import path\n\nfrom ansig.configurator import ConfigParser\n\n\nclass TestConfigParser(unittest.TestCase):\n def setUp(self):\n self.rootdir = path.abspath(path.dirname(path.dirname(__file__)))\n\n def test_default_values(self):\n config = ConfigParser()\n self.assertEqual(\n config.get('aws', 'regions'), u'all')\n\n def test_custom_values(self):\n filename = path.join(self.rootdir, 'contrib/ansig/config.ini')\n config = ConfigParser(filename=filename)\n self.assertEqual(\n config.get('aws', 'regions'), u'us-east-1')\n","sub_path":"tests/test_configurator.py","file_name":"test_configurator.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"170112899","text":"#14. A gas station is selling fuel with the following discount table:\r\n#1. Alcohol:\r\n#· up to 20 liters, 3% discount\r\n#· above 20 liters, 5% discount\r\n#2.
Gasoline:\r\n#· up to 20 liters, 4% discount\r\n#· above 20 liters, 6% discount.\r\n#Write an algorithm that reads the number of liters sold and the fuel type (coded as\r\n#A-alcohol, G-gasoline), then computes and prints the amount the customer must pay,\r\n#given that gasoline costs R$ 2.50 per liter and alcohol costs R$ 1.90 per liter.\r\n\r\ndef main():\r\n print('Choose the fuel type:\\n')\r\n print('A - alcohol, price per liter: R$1.90')\r\n print('G - gasoline, price per liter: R$2.50')\r\n tipo = input('\\nDesired type: ').strip().upper()\r\n quant = float(input('How many liters do you want to buy: '))\r\n\r\n pagamento(tipo, quant)\r\n\r\ndef pagamento(tipo, quant):\r\n preço_gas = quant * 2.5\r\n preço_alc = quant * 1.9\r\n\r\n if tipo == 'A':\r\n if quant <= 20:\r\n # 3% discount applies to the price, not to the liter count\r\n desc_alc = preço_alc * 3 / 100\r\n total = preço_alc - desc_alc\r\n print(f'\\nAmount to pay: R${total:.2f}')\r\n\r\n else:\r\n desc_alc = preço_alc * 5 / 100\r\n total = preço_alc - desc_alc\r\n print(f'\\nAmount to pay: R${total:.2f}')\r\n\r\n elif tipo == 'G':\r\n if quant <= 20:\r\n # the gasoline branch discounts the gasoline price\r\n desc_gas = preço_gas * 4 / 100\r\n total = preço_gas - desc_gas\r\n print(f'\\nAmount to pay: R${total:.2f}')\r\n\r\n else:\r\n desc_gas = preço_gas * 6 / 100\r\n total = preço_gas - desc_gas\r\n print(f'\\nAmount to pay: R${total:.2f}')\r\n\r\n else:\r\n print('Invalid type.')\r\n\r\nmain()\r\n","sub_path":"Fabio_lista 02_P 02/Fabio_2b_Q 14.py","file_name":"Fabio_2b_Q 14.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}