diff --git "a/3605.jsonl" "b/3605.jsonl" new file mode 100644--- /dev/null +++ "b/3605.jsonl" @@ -0,0 +1,608 @@ +{"seq_id":"527624726","text":"import supervisely_lib as sly\nfrom functools import partial\nimport globals as g\n\ndef update_progress(count, api: sly.Api, task_id, progress: sly.Progress):\n count = min(count, progress.total - progress.current)\n progress.iters_done(count)\n if progress.need_report():\n progress.report_progress()\n\n\ndef get_progress_cb(api, task_id, message, total, is_size=False, func=update_progress):\n progress = sly.Progress(message, total, is_size=is_size)\n progress_cb = partial(func, api=api, task_id=task_id, progress=progress)\n progress_cb(0)\n return progress_cb","sub_path":"src/dl_progress.py","file_name":"dl_progress.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"389543626","text":"from django import forms\nfrom .models import Cadastro, Empresa\n\nclass CadastroForm(forms.ModelForm):\n nome = forms.CharField(widget=forms.TextInput(\n attrs ={\n 'class': 'input1',\n 'placeholder': 'Nome',\n 'maxlength': '200',\n 'required': ''\n }\n ))\n\n email = forms.EmailField(widget=forms.TextInput(\n attrs ={\n 'class': 'input1',\n 'placeholder': 'E-mail',\n 'required': ''\n }\n ))\n\n telefone = forms.CharField(widget=forms.TextInput(\n attrs ={\n 'class': 'input1',\n 'placeholder': 'Telefone',\n 'maxlength': '20',\n 'type': 'number',\n }\n ))\n LOJA_CHOICES = [\n [1, \"Centerbox Genibau\"],\n [2, \"Centerbox Parque São José\"],\n [6, \"Centerbox Messejana\"],\n [8, \"Centerbox José Bastos\"],\n [9, \"Centerbox Conceito\"],\n [10, \"Centerbox Barra do Ceará\"],\n [11, \"Centerbox Expedicionarios\"],\n [12, \"Centerbox Washington Soares\"],\n ]\n \"\"\"\n LOJA_CHOICES = [\n [1, \"01-GENIBAU\"],\n [2, \"02-PARQUE SÃO JOSE\"],\n [3, \"03-JOÃO XXIII\"],\n [4, \"04-JARDIM IRACEMA\"],\n [5, \"05-SANTOS DUMONT\"],\n [6, \"06-MESSEJANA\"],\n [7, \"07-PARQUELANDIA\"],\n [8, \"08-JOSE BASTOS\"],\n [9, \"09-CONCEITO\"],\n [10, \"10-BARRA DO CEARA\"],\n [11, \"11-EXPEDICIONARIOS\"],\n [12, \"12-WASHINGTON SOARES\"],\n [13, \"13-CAUCAIA\"],\n [14, \"14-BARRA DO CEARA II\"],\n ]\n \"\"\"\n lojacompra = forms.IntegerField(widget=forms.Select(\n choices=LOJA_CHOICES,\n attrs ={\n 'class': 'input1',\n 'placeholder': 'Loja de retirada do kit'\n }\n ))\n\n dtnascimento = forms.DateField(widget=forms.DateInput(\n attrs ={\n 'class': 'input1',\n 'type': 'date',\n 'placeholder': 'Data de nascimento'\n }\n ))\n\n ALUGAR_CHOICES = [\n ['S', 'sim, vou alugar.'],\n ['N', 'não, já tenho minha bike.'],\n ]\n\n alugar = forms.CharField(widget=forms.RadioSelect(\n choices=ALUGAR_CHOICES\n ))\n\n SN_CHOICES = [\n ['S', 'Sim'],\n ['N', 'Não'],\n ]\n outeventos = forms.CharField(widget=forms.RadioSelect(\n choices=SN_CHOICES\n ))\n\n class Meta:\n model = Cadastro\n fields = [\"nome\", \"email\", \"telefone\", \"lojacompra\", \"outeventos\", \"alugar\"]\n\n \"\"\"\n fields = [\"nome\", \"email\", \"telefone\", \"lojacompra\", \"sexo\"\n , \"outeventos\", \"alugar\"]\n \"\"\"\n\nclass EmpresaForm(forms.ModelForm):\n class Meta:\n model = Empresa\n fields = [\"seqempresa\", \"nomeempresa\"]","sub_path":"inscricao/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"315946178","text":"\"\"\"deCONZ scene platform tests.\"\"\"\nfrom unittest.mock import Mock, patch\n\nfrom 
homeassistant import config_entries\nfrom homeassistant.components import deconz\nfrom homeassistant.setup import async_setup_component\n\nimport homeassistant.components.scene as scene\n\nfrom tests.common import mock_coro\n\n\nGROUP = {\n \"1\": {\n \"id\": \"Group 1 id\",\n \"name\": \"Group 1 name\",\n \"state\": {},\n \"action\": {},\n \"scenes\": [{\n \"id\": \"1\",\n \"name\": \"Scene 1\"\n }],\n }\n}\n\n\nENTRY_CONFIG = {\n deconz.const.CONF_ALLOW_CLIP_SENSOR: True,\n deconz.const.CONF_ALLOW_DECONZ_GROUPS: True,\n deconz.config_flow.CONF_API_KEY: \"ABCDEF\",\n deconz.config_flow.CONF_BRIDGEID: \"0123456789\",\n deconz.config_flow.CONF_HOST: \"1.2.3.4\",\n deconz.config_flow.CONF_PORT: 80\n}\n\n\nasync def setup_gateway(hass, data):\n \"\"\"Load the deCONZ scene platform.\"\"\"\n from pydeconz import DeconzSession\n loop = Mock()\n session = Mock()\n\n config_entry = config_entries.ConfigEntry(\n 1, deconz.DOMAIN, 'Mock Title', ENTRY_CONFIG, 'test',\n config_entries.CONN_CLASS_LOCAL_PUSH)\n gateway = deconz.DeconzGateway(hass, config_entry)\n gateway.api = DeconzSession(loop, session, **config_entry.data)\n gateway.api.config = Mock()\n hass.data[deconz.DOMAIN] = gateway\n\n with patch('pydeconz.DeconzSession.async_get_state',\n return_value=mock_coro(data)):\n await gateway.api.async_load_parameters()\n\n await hass.config_entries.async_forward_entry_setup(config_entry, 'scene')\n # To flush out the service call to update the group\n await hass.async_block_till_done()\n\n\nasync def test_platform_manually_configured(hass):\n \"\"\"Test that we do not discover anything or try to set up a gateway.\"\"\"\n assert await async_setup_component(hass, scene.DOMAIN, {\n 'scene': {\n 'platform': deconz.DOMAIN\n }\n }) is True\n assert deconz.DOMAIN not in hass.data\n\n\nasync def test_no_scenes(hass):\n \"\"\"Test that scenes can be loaded without scenes being available.\"\"\"\n await setup_gateway(hass, {})\n assert len(hass.data[deconz.DOMAIN].deconz_ids) == 0\n assert len(hass.states.async_all()) == 0\n\n\nasync def test_scenes(hass):\n \"\"\"Test that scenes works.\"\"\"\n with patch('pydeconz.DeconzSession.async_put_state',\n return_value=mock_coro(True)):\n await setup_gateway(hass, {\"groups\": GROUP})\n assert \"scene.group_1_name_scene_1\" in hass.data[deconz.DOMAIN].deconz_ids\n assert len(hass.states.async_all()) == 1\n\n await hass.services.async_call('scene', 'turn_on', {\n 'entity_id': 'scene.group_1_name_scene_1'\n }, blocking=True)\n\n\nasync def test_unload_scene(hass):\n \"\"\"Test that it works to unload scene entities.\"\"\"\n await setup_gateway(hass, {\"groups\": GROUP})\n\n await hass.data[deconz.DOMAIN].async_reset()\n\n assert len(hass.states.async_all()) == 0\n","sub_path":"tests/components/scene/test_deconz.py","file_name":"test_deconz.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247227852","text":"# %%\nimport torch\n# %% [markdown]\n## Getting started\n# %%\nx = torch.arange(12)\nprint(x.shape)\nprint(x.numel())\n# %%\nX = x.reshape(3, 4)\nprint(X.shape)\n# %%\nx = torch.zeros(2, 3, 4)\nprint(x.shape)\nx\n# %%\nx = torch.ones(2, 3, 4)\nprint(x.shape)\nx\n# %%\nx = torch.rand(3, 4)\nx\n# %%\nx = torch.tensor([[0, 1, 2], [3, 4, 5]])\nprint(x.shape)\nx\n# %% [markdown]\n## Operations\n# %%\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\n# %%\ntorch.exp(x)\n# %%\nX==Y\n# %%\nX = torch.arange(12, 
dtype=torch.float32).reshape(3, 4)\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nprint(torch.cat((X, Y), dim=0))\nprint()\nprint(torch.cat((X, Y), dim=1))\n# %%\nprint(X.sum())\nprint(X.sum(axis=1))\nprint(X.sum(axis=1, keepdims=True))\n# %%\nx = torch.arange(4, dtype=torch.float32)\ny = torch.ones(4)\nx, y, torch.dot(x, y)\n# %%\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nx = torch.ones(4)\nA, x, torch.mv(A, x)\n# %%\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB= torch.ones(4, 3)\ntorch.mm(A, B)\n\n# %% [markdown]\n## Broadcasting Mechanism\n# %%\na = torch.arange(3).reshape(3, 1)\nb = torch.arange(2).reshape(1, 2)\na, b\n# %%\na+b\n# %% [markdown]\n## Indexing and Slicing\n# %%\nX, X[-1], X[1:3]\n\n# %%\nX[0:2, :] = 12\nX\n# %% [markdown]\n## Saving Memory\n\n# %%\nbefore = id(Y)\nY = Y + X\nafter = id(Y)\nbefore==after\n# %%\nZ = torch.zeros_like(Y)\nprint(id(Z))\nZ[:] = Y + X\nprint(id(Z))\n# %%\nbefore = id(Y)\nY += X\nafter = id(Y)\nbefore==after\n# %% [markdown]\n## Conversion to Other Python Objects\n\n# %%\nA = X.numpy()\nB = torch.tensor(A)\ntype(A), type(B)\n# %%\na = torch.tensor([3.5])\na, a.item(), float(a), int(a)\n","sub_path":"02-01-Data Manipulation.py","file_name":"02-01-Data Manipulation.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"531656438","text":"import threading\n\nfrom flask import request, Blueprint, jsonify\nfrom infrastructure import log\n\nlogger = log.get_logger(\"stocks_api\")\n\ndef get_stocks_blueprint(domain, job, time_series):\n stocks_blueprint = Blueprint('stocks_api', __name__)\n\n @stocks_blueprint.route('', methods=['POST'])\n def add_stock():\n response = None\n new_stock = request.get_json()\n logger.info(\"post: %s\", new_stock)\n if new_stock is None:\n response = jsonify({ \"error\": \"Please provide a stock in the request body. It should have a name, a symbol and a stock market\" }), 400\n return response\n name = new_stock.get(\"name\", None)\n quote = new_stock.get(\"symbol\", None)\n stock_market = new_stock.get(\"stockMarket\", None)\n is_valid_stock = name and quote and stock_market\n if not is_valid_stock:\n response = jsonify({ \"error\": \"Please provide a valid stock. 
It should have a name, a symbol and a stock market\" }), 400\n return response\n # This validation (stockExistInDB) should be performed in the domain level, not in the API level.\n stock_exist_in_db = domain.stock_exists(quote)\n if stock_exist_in_db:\n response = jsonify({ \"error\": \"The given stock already exists\" }), 409\n return response\n # Add stock async\n time_series.save_async(\"API\", {}, { \"method\": \"add_stock\", \"stock\": quote })\n thread = threading.Thread(target=job.add_stock_to_stockreader, args=(new_stock,)) # Why args should be a tuple?\n thread.start()\n response = jsonify({ \"success\": \"The stock \" + quote + \" is being added\" }), 202\n return response\n\n return stocks_blueprint\n","sub_path":"src/stocks/stocks_api.py","file_name":"stocks_api.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"74725919","text":"# coding:utf-8\n\nimport socket\n\nsk = socket.socket()\nconn, addr = sk.accept()\n\nsk.connect(('192.168.0.186', 8001))\n\nwhile True:\n # name = input('请输入姓名:')\n # sk.send(name.encode('utf-8'))\n # if name == 'exit':\n # break\n # response = sk.recv(1024)\n # print(response.decode('utf-8'))\n data = conn.recv(1024)\n","sub_path":"day27/case/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"71615408","text":"class Solution:\n def shortestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n len_s = len(s)\n i_sign = 0\n if s == s[::-1]:\n return(s)\n for i in range(len_s):\n if s[0:-i-1] == s[0:-i-1][::-1]:\n i_sign = len_s-i-1\n #print(i_sign)\n break\n delta_str = s[i_sign:][::-1]\n #print(delta_str)\n new_s = delta_str+s\n return(new_s)\n","sub_path":"214/Shortest_Palindrome_FShang.py","file_name":"Shortest_Palindrome_FShang.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"202553352","text":"from django.urls import path\nfrom django.conf.urls import url\n\nfrom offers.views import *\n\nurlpatterns = [\n url(r'^index/', index, name=\"index\"),\n url(r'^populate/', populate, name=\"populate\"),\n url(r'^extractcsv/', extract_csv, name=\"extract_csv\"),\n url(r'^popularoffers/', popular_offers , name=\"popular_offers\"),\n url(r'^myoffers/', my_offers, name=\"my_offers\"),\n url(r'^search/', search_offers, name=\"search_offers\"),\n url(r'^recom/', recom, name=\"recom_offers\"),\n # url(r'^recom/', recom_books, name=\"recom\"),\n]","sub_path":"Dedalo/offers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"319213479","text":"import pandas as pd\nimport numpy as np\n\nimport ultimate\nfrom ultimate import Team, Game, utils, tournament\n\nmens_adjustments = {\"Atlantic Coast\": 0, \"New England\": 0}\nmens_bid_path = \"./Rankings/2019CollegeMensDivisionBidAllocation.htm\"\nwomens_bid_path = \"./Rankings/2019CollegeWomensDivisionBidAllocation.htm\"\nwomens_ranking_html = r\"Rankings/USAU_team_rankings.women.2020-03-11.html\"\nmens_ranking_html = r\"Rankings/USAU_team_rankings.men.2020-03-11.html\"\ncustom_ranks_path = \"./Rankings/2020_05_03_USAU Human Ratings.xlsx\"\n\nfour_pools_of_three_seeds = {\n \"pool_a\": [1, 8, 9],\n \"pool_b\": [2, 7, 10],\n \"pool_c\": [3, 6, 11],\n \"pool_d\": [4, 5, 
12],\n}\n\n# Table 16.1.2\nfour_pools_of_four_seeds = {\n \"pool_a\": [1, 8, 12, 16],\n \"pool_b\": [2, 7, 11, 15],\n \"pool_c\": [3, 6, 10, 14],\n \"pool_d\": [4, 5, 9, 13],\n}\n\ntwo_pools_of_four_seeds = {\"pool_a\": [1, 4, 6, 7], \"pool_b\": [2, 3, 5, 8]}\n\ntwo_pools_of_five_seeds = {\"pool_a\": [1, 3, 6, 8, 9], \"pool_b\": [2, 4, 5, 7, 10]}\ntwo_pools_of_six_seeds = {\"pool_a\": [1, 4, 5, 7, 10, 12], \"pool_b\": [2, 3, 6, 8, 9, 11]}\n\npool_seeding = {\n 8: two_pools_of_four_seeds,\n 10: two_pools_of_five_seeds,\n 12: four_pools_of_three_seeds,\n 16: four_pools_of_four_seeds,\n}\n\n\ndef get_d1_teams(\n teams: pd.DataFrame, custom_ranks: pd.DataFrame, custom_teams: dict\n) -> pd.DataFrame:\n \"\"\"\n Takes dataframe of D1 and D3 teams along with custom ratings\n for this season and returns a DataFrame of single rating\n with only D1 teams\n\n Parameters\n ----------\n teams : DataFrame \n Table representing all D1 and D3 teams who have played a game this year\n\n custom_ranks : DataFrame\n Table representing a list of team names with custom rankings if desired\n\n Returns\n -------\n DataFrame\n Table of teams eligible to compete for nationals with updated rankings.\n \"\"\"\n return (\n teams.merge(\n custom_ranks[[\"Team\", \"Adjusted Rating\", \"USAU Power Rating\"]],\n left_on=\"team\",\n right_on=\"Team\",\n how=\"right\",\n )\n .pipe(\n lambda df: df[\n (df.competition_division == \"Division I\")\n | (df.competition_division.isnull())\n ]\n )\n .assign(\n custom_rating=lambda x: x[\"Adjusted Rating\"]\n .combine_first(x.power_rating)\n .combine_first(x[\"USAU Power Rating\"])\n .astype(int)\n )\n .dropna(subset=[\"Team\"])\n .assign(\n college_region=lambda x: x.college_region.combine_first(\n x.Team.map(custom_teams)\n )\n )\n )\n\n\n# bids\ndef calculate_size_and_bids(tables, d1_teams, rankings_col=\"custom_rating\"):\n \"\"\"\n Iterates through eligible teams by specified ranking type and returns\n a table of size and number of bids for the region.\n\n Parameters\n ----------\n tables : List[DataFrame]\n Set of tables read from USAU regional size allocation website\n d1_teams : DataFrame\n Return value from get_d1_teams()\n rankings_col : str, optional\n Which column to use from d1_teams to order bids, \n by default 'custom_rating'\n\n Returns\n -------\n DataFrame\n One row per region with number of bids and number of teams competing\n \"\"\"\n total_bids = 20\n # TODO: make sure this actually works as expected\n # store boolean for whether autobid has been reached while iterating through rankings\n _bids = {region: [1, False] for region in d1_teams.college_region.unique()}\n for i, team in d1_teams.sort_values(rankings_col, ascending=False).iterrows():\n if sum(v[0] for k, v in _bids.items()) <= total_bids:\n if _bids[team.college_region][1]:\n _bids[team.college_region][0] += 1\n else:\n _bids[team.college_region][1] = True\n\n # now just a dict of region name: number of bids\n bids = {k: v[0] for k, v in _bids.items()}\n\n conference_lookup = (\n d1_teams.drop_duplicates(subset=[\"college_conference\", \"college_region\"])\n .set_index(\"college_conference\")\n .college_region.to_dict()\n )\n\n combined = (\n pd.concat(\n [\n table\n for table in tables\n if table.columns.isin(\n [\"Conference\", \"Division\", \"Auto bids\", \"Strength bids\", \"Total\"]\n ).min()\n and (table.Division == \"D-I\").max()\n ]\n )\n .reset_index(drop=True)\n .assign(region=lambda x: x[\"Conference\"].map(conference_lookup))\n )\n region_details = (\n combined.groupby(\"region\")\n 
.Total.sum()\n .rename(\"size\")\n .to_frame()\n .assign(bids=lambda x: x.index.map(bids))\n )\n return region_details\n\n\ndef determine_regional_qualifiers(\n division_teams, region, n_teams, rankings_var=\"custom_rating\"\n):\n return (\n division_teams[division_teams.college_region == region]\n .sort_values(rankings_var, ascending=False)\n .head(n_teams)\n .apply(\n lambda r: Team(\n name=r.Team, rating=int(r[rankings_var]), region=r.college_region\n ),\n axis=1,\n )\n )\n\n\ndef play_pools(teams, pool_seeds):\n results = {}\n for name, seeds in pool_seeds.items():\n pool = tournament.RoundRobinTournament(\n teams=teams.iloc[[i - 1 for i in seeds]].tolist(), name=f\"Pool {name}\"\n )\n pool.play_games()\n results[name] = pool\n return results\n\n\ndef play_region(n_bids, teams):\n n_teams = len(teams)\n if n_teams == 8:\n pools = play_pools(teams, two_pools_of_four_seeds)\n if n_bids == 1:\n bracket = tournament.BracketEightOne(**pools)\n elif n_bids == 2:\n bracket = tournament.BracketEightTwoOne(**pools)\n elif n_bids == 3:\n bracket = tournament.BracketEightThree(**pools)\n elif n_teams == 10:\n pools = play_pools(teams, two_pools_of_five_seeds)\n if n_bids == 1:\n bracket = tournament.BracketEightOne(**pools)\n elif n_bids == 2:\n bracket = tournament.BracketEightTwoOne(**pools)\n elif n_bids == 3:\n bracket = tournament.BracketEightThree(**pools)\n elif n_teams == 12:\n if n_bids == 1:\n pools = play_pools(teams, four_pools_of_three_seeds)\n bracket = tournament.BracketTwelveFourPools(**pools)\n elif n_bids == 2:\n pools = play_pools(teams, two_pools_of_six_seeds)\n winners_bracket = tournament.BracketSixTwo(**pools)\n #we need a second bracket for bottom of pools\n consolation_bracket = tournament.BracketSixTwo(pools['pool_a'][3:], pools['pool_b'][3:])\n bracket = tournament.CombinationBracket(winners_bracket, consolation_bracket)\n elif n_bids == 3:\n pools = play_pools(teams, two_pools_of_six_seeds)\n winners_bracket = tournament.BracketEightTwoOne(**pools)\n #Should be playing 4.2.2, but this is the same as 4.2.1 if teams are seeded correctly out of pools\n consolation_bracket = tournament.BracketFourTwoOne([*pools['pool_a'][3:], *pools['pool_b'][3:]])\n bracket = tournament.CombinationBracket(winners_bracket, consolation_bracket)\n elif n_bids == 4:\n pools = play_pools(teams, two_pools_of_six_seeds)\n bracket = tournament.BracketEightFour(**pools)\n elif n_teams == 16:\n if n_bids == 1:\n pools = play_pools(teams, four_pools_of_four_seeds)\n bracket = tournament.BracketSixteenOne(**pools)\n if n_bids == 2:\n pools = play_pools(teams, four_pools_of_four_seeds)\n bracket = tournament.BracketSixteenTwoTwo(**pools)\n if n_bids == 3:\n # this won't use pools like all other formats, but we need the object\n pools = {'na': tournament.EmptyTournament()}\n bracket = tournament.BracketSixteenThreeOne(teams.tolist())\n if n_bids == 4:\n pools = play_pools(teams, four_pools_of_four_seeds)\n bracket = tournament.BracketSixteenFourTwo(**pools)\n else:\n return\n try:\n bracket.play_games()\n except:\n print(n_teams, n_bids)\n raise\n return pools, bracket\n\n\ndef play_all_regions(d1_teams, region_details, writer, game_log_writer, division):\n summary = {}\n overall_pools = {}\n overall_placements = pd.DataFrame()\n for region, (n_teams, n_bids) in region_details.iterrows():\n if n_teams == 15:\n n_teams = 16\n teams = determine_regional_qualifiers(\n d1_teams, region, n_teams, rankings_var=\"custom_rating\"\n )\n pool_results, bracket = play_region(n_bids, teams)\n pool_finishes = {\n 
name: pool.determine_placement() for name, pool in pool_results.items()\n }\n overall_pools[region] = pd.DataFrame(pool_finishes).astype(str)\n\n region_summary = {\n \"Tournament\": f\"{division}'s {region} - {n_teams} Teams with {n_bids} Bids\",\n \"Format\": f\"{len(pool_results.keys())} pools of {len(pool_finishes.get('pool_a', 'NA'))} -> {bracket.name}\",\n \"Qualifiers\": \", \".join(\n [str(team) for team in bracket.determine_placement()[:n_bids]]\n ),\n }\n summary[region] = region_summary\n print()\n for k, v in region_summary.items():\n print(k.upper(), \"-\", v)\n overall_placements[f\"{region} ({n_bids} bids)\"] = pd.Series(\n bracket.determine_placement()\n )\n\n pool_games = []\n for name, pool in pool_results.items():\n try:\n for i, game in enumerate(pool.games):\n pool_games.append(game.results_dict)\n except AttributeError:\n print(\"no pools to add, carry on\")\n\n pd.DataFrame(pool_games).to_excel(\n writer, sheet_name=f\"3. {division} {region} Pools\"\n )\n pd.DataFrame([game.results_dict for game in bracket.games]).to_excel(\n writer, sheet_name=f\"2. {division} {region} Bracket\"\n )\n for i, game in enumerate(bracket.games):\n game_log = pd.Series(game.score.point_log).apply(\n lambda x: pd.Series({game.team_a.name: x[0], game.team_b.name: x[1]})\n )\n sheet_name = f\"{division}{region[:8]}{game.level}{i}\"\n if len(sheet_name) > 30 or game.level is None:\n continue\n game_log.to_excel(game_log_writer, sheet_name=sheet_name)\n\n pd.DataFrame(summary).T.to_excel(writer, sheet_name=f\"1. {division} Overall Summary\")\n pd.concat(\n [\n pd.DataFrame(index=[region], columns=v.columns, data=\"--\").append(\n v.rename(lambda x: region)\n )\n for region, v in overall_pools.items()\n ]\n ).to_excel(writer, sheet_name=f\"1. {division} Overall Pools\")\n overall_placements.to_excel(writer, sheet_name=f\"1. 
{division} Overall Placement\")\n game_log_writer.save()\n\n\ndef run_all_regionals():\n # Added Ottawa (ME), Delaware (AC) to rankings sheet and adjusted others accordingly.\n # Chicago (GL)\n # Grand Valley State (GL), Loyola-Chicago (GL), Marquette (NC)\n custom_teams = {\n \"Women\": {\n \"Ottawa\": \"Metro East\",\n \"Delaware\": \"Atlantic Coast\",\n \"Chicago\": \"Great Lakes\",\n \"Rutgers\": \"Metro East\"\n },\n \"Men\": {\n \"Rutgers\": \"Metro East\",\n \"Grand Valley State\": \"Great Lakes\",\n \"Loyola-Chicago\": \"Great Lakes\",\n \"Marquette\": \"North Central\",\n },\n }\n # Use pandas html reader to extract dataframe\n result = pd.read_html(womens_ranking_html)\n df_women = (\n result[0]\n .rename(columns=lambda x: x.replace(\" \", \"_\").lower())\n .iloc[0:-1]\n .query('competition_division != \"Developmental\"')\n )\n\n result = pd.read_html(mens_ranking_html)\n df_men = (\n result[0]\n .rename(columns=lambda x: x.replace(\" \", \"_\").lower())\n .iloc[0:-1]\n .query('competition_division != \"Developmental\"')\n )\n\n custom_ranks_men = pd.read_excel(\n custom_ranks_path, sheet_name=\"Copy of Keith Men\"\n ).dropna(subset=[\"USAU Rank\"])\n custom_ranks_women = pd.read_excel(\n custom_ranks_path, sheet_name=\"Copy of Keith Women\"\n ).dropna(subset=[\"USAU Rank\"])\n d1_men = get_d1_teams(df_men, custom_ranks_men, custom_teams[\"Men\"])\n d1_women = get_d1_teams(df_women, custom_ranks_women, custom_teams[\"Women\"])\n\n mens_region_details = calculate_size_and_bids(\n pd.read_html(mens_bid_path, header=0), d1_men,\n )\n mens_region_details.loc[\"North Central\", \"size\"] = 12\n mens_region_details.loc[\"Northwest\", \"size\"] = 12\n mens_region_details.loc[\"New England\", \"size\"] = 12\n\n for region, val in mens_adjustments.items():\n mens_region_details.loc[region, \"bids\"] += val\n\n womens_region_details = calculate_size_and_bids(\n pd.read_html(womens_bid_path, header=0), d1_women,\n )\n womens_region_details.loc[\"New England\", \"size\"] = 12\n\n # replace bids with manual counts:\n womens_bid_numbers = {\n \"Atlantic Coast\": 2,\n \"Great Lakes\": 1,\n \"Metro East\": 1,\n \"New England\": 3,\n \"North Central\": 1,\n \"Northwest\": 3,\n \"Ohio Valley\": 2,\n \"South Central\": 1,\n \"Southeast\": 2,\n \"Southwest\": 4,\n }\n mens_bid_numbers = {\n \"Atlantic Coast\": 2,\n \"Great Lakes\": 1,\n \"Metro East\": 1,\n \"New England\": 2,\n \"North Central\": 2,\n \"Northwest\": 4,\n \"Ohio Valley\": 2,\n \"South Central\": 2,\n \"Southeast\": 1,\n \"Southwest\": 3,\n }\n womens_region_details[\"bids\"] = pd.Series(womens_bid_numbers)\n mens_region_details[\"bids\"] = pd.Series(mens_bid_numbers)\n # print(womens_region_details)\n writer = pd.ExcelWriter(\"regionals_results.xlsx\")\n\n pd.concat(\n [\n womens_region_details.rename(columns=lambda x: f\"womens_{x}\"),\n mens_region_details.rename(columns=lambda x: f\"mens_{x}\"),\n ],\n axis=1,\n ).to_excel(writer, \"0. 
Bid Allocation\")\n\n mens_game_log_writer = pd.ExcelWriter(\"mens_regionals_game_logs.xlsx\")\n womens_game_log_writer = pd.ExcelWriter(\"womens_regionals_game_logs.xlsx\")\n play_all_regions(\n d1_men, mens_region_details, writer, mens_game_log_writer, division=\"M\"\n )\n play_all_regions(\n d1_women, womens_region_details, writer, womens_game_log_writer, division=\"W\"\n )\n writer.book.worksheets_objs.sort(key=lambda x: x.name)\n writer.save()\n\n\nif __name__ == \"__main__\":\n np.random.seed(29)\n run_all_regionals()\n","sub_path":"run_regionals.py","file_name":"run_regionals.py","file_ext":"py","file_size_in_byte":14826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"3990239","text":"import pytest\n\nproducts = [\n (2, 3, 6), # postive integers\n (1, 99, 99), # identity\n (0, 99, 0), # zero\n (3, -4, -12), # positive by negative\n (-5, -5, 25), \t # negative by negative\n (2.5, 6.7, 16.75) # floats\n] \n\n\n@pytest.mark.parametrize('a,b,product',products)\ndef test_multiplication (a,b,product):\n assert a*b==product","sub_path":"testnew.py","file_name":"testnew.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"148927099","text":"import joblib\nfrom keras.models import load_model\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom queue import Queue\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras_gcn import *\n\n# 需要预测的站点\nStation = 47\n# 需要预测的时间\nPredictTime = \"2019-01-08 18:17:44\"\n# 时间间隔\nTimeInterval = 15\n# 邻近站点阈值\nAdjacentThreshold = 7\n# 单个lstm的步长\nTimeStep = 3\n# 特征长度\nfeatures = 4\n\n# 一天有多少个时间段\nDayInterval = int((60 / TimeInterval) * 24)\n# 一周有多少个时间段\nWeekInterval = int((60 / TimeInterval) * 24 * 7)\n# 地铁运行时间6:00-23:30 [TimeStart,TimeEnd)\nTimeStart = int((60 / TimeInterval) * 6)\nTimeEnd = DayInterval - int((30 / TimeInterval))\n\n\nMap = pd.read_csv(\"Metro_roadMap.csv\",index_col=0)\n# 宽搜,将距离Station在AdjacentThreshold内的站点提取出来\ndef bfs(Station,Map,AdjacentThreshold):\n q = Queue(maxsize=0)\n q.put(Station)\n Flag = np.zeros(Map.shape[0])\n AdjacentStations = [Station]\n Flag[Station] = 1\n Flag[54] = 1\n\n while AdjacentThreshold > 0:\n AdjacentThreshold -= 1\n size = q.qsize()\n while size > 0:\n size -= 1\n st = q.get()\n for k in range(Map.shape[1]):\n if (Map[st,k] == 1) & (Flag[k] == 0):\n Flag[k] = 1\n AdjacentStations.append(k)\n q.put(k)\n return AdjacentStations\n\nAdjacentStations = bfs(Station,Map.to_numpy(),AdjacentThreshold)\n# 站点按照编号从小到大排序\nAdjacentStations.sort()\n# 邻近站点\nprint(\"邻近站点\")\nprint(AdjacentStations)\nLocalMap = Map.iloc[AdjacentStations,AdjacentStations].values\n\n# 保留参数:Num,Minute,DateType,temperature,weather\nMainStationData = pd.read_csv(\"by{}minutes/sta{}.csv\".format(TimeInterval,Station))\n\n\nPredictTime = datetime.datetime.strptime(PredictTime,\"%Y-%m-%d %H:%M:%S\")\n# day,Time\nday = PredictTime.strftime(\"%Y-%m-%d\")\nTime = str(PredictTime.hour) + \":\" + str(int(PredictTime.minute / TimeInterval) * TimeInterval)\n# 真实值\nprint(MainStationData.loc[(MainStationData[\"day\"] == day) & (MainStationData[\"Time\"] == Time)])\n\ncol_days,col_Times = [],[]\n\nfor i in range(TimeStep):\n PredictTime = PredictTime + datetime.timedelta(minutes=-TimeInterval)\n col_days.append(PredictTime.strftime(\"%Y-%m-%d\"))\n col_Times.append(str(PredictTime.hour) + \":\" + str(int(PredictTime.minute / TimeInterval) * TimeInterval))\n\n# 
时间随着下标增大\ncol_days.reverse()\ncol_Times.reverse()\n\n# (3,41)\nRidership = np.zeros(shape=(TimeStep,len(AdjacentStations)))\n\ncol = 0\nfor AdjacentStation in AdjacentStations:\n AdjacentData = pd.read_csv(\"by{}minutes/sta{}.csv\".format(TimeInterval,AdjacentStation))\n for i in range(TimeStep):\n Ridership[i,col] = AdjacentData.loc[(AdjacentData[\"day\"] == col_days[i]) & (AdjacentData[\"Time\"] == col_Times[i]),\"Num\"].values[0]\n col += 1\n\nprint(Ridership)\nprint(Ridership.shape)\n\nRidershipScaler = joblib.load('{}-minute forecast/RidershipScaler/Station{}_RidershipScaler'.format(TimeInterval,Station))\nRidership = RidershipScaler.transform(Ridership)\n\nInputData = []\n\nfor i in range(TimeStep):\n InputData.append(Ridership[i,:])\n InputData[i] = InputData[i].reshape(1,InputData[i].shape[0],1)\n print(InputData[i].shape)\n\nLocalMap = LocalMap.reshape(1,LocalMap.shape[0],LocalMap.shape[1])\nInputData += [LocalMap,LocalMap,LocalMap]\n\nfor i in range(len(InputData)):\n print(InputData[i].shape)\n\nmodel = load_model(\"{}-minute forecast/models/Station{}.h5\".format(TimeInterval,Station),custom_objects={\"GraphConv\":GraphConv},compile=False)\n\nOutput = model.predict(InputData)\n\nTrueValueScaler = joblib.load('{}-minute forecast/TrueValueScaler/Station{}_TrueValueScaler'.format(TimeInterval,Station))\nOutput = TrueValueScaler.inverse_transform(Output)\nprint(Output)","sub_path":"code/2019数据短时出行人数/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201184471","text":"# coding=UTF-8\n#!/usr/bin/env python\nimport optparse\nimport nmap\n\n'''\n执行 python nmap_scan.py -H 61.135.169.121 -p 22 80:(百度)\nnmScan.scan(tgtHost, tgtPort)结果:\n{\n 'nmap':\n {\n 'scanstats':\n {\n 'uphosts': '1', 'timestr': 'Fri Sep 30 19:56:53 2016', 'downhosts': '0', 'totalhosts': '1', 'elapsed': '11.61'\n },\n 'scaninfo':\n {\n 'tcp': {'services': '80', 'method': 'connect'\n }\n },\n\n 'command_line': 'nmap -oX - -p 80 -sV 61.135.169.121'\n},\n 'scan':\n {\n '61.135.169.121':\n {\n 'status':{'state': 'up', 'reason': 'syn-ack'},\n 'hostnames': [{'type': '', 'name': ''}],\n 'vendor': {},\n 'addresses': {'ipv4': '61.135.169.121'},\n 'tcp': {\n 80: {'product': 'Apache httpd', 'state': 'open', 'version': '', 'name': 'http', 'conf': '10', 'extrainfo': '', 'reason': 'syn-ack', 'cpe': 'cpe:/a:apache:http_server'}\n }\n }\n }\n}\n [*] 61.135.169.121 tcp/80 open\n [*] 61.135.169.121 tcp/22 filtered\n\n\n'''\ndef nmapScan(tgtHost, tgtPort):\n nmScan = nmap.PortScanner()\n results = nmScan.scan(tgtHost, tgtPort)\n # print (results)\n if results['scan']:\n state = results['scan'][tgtHost]['tcp'][int(tgtPort)]['state']\n print(\" [*] \" + tgtHost + \" tcp/\" + tgtPort + \" \" + state)\n else:\n print(\"scan has nothing!\\n\")\n\n\ndef main():\n parser = optparse.OptionParser('usage %prog –H -p ')\n parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')\n parser.add_option('-p', dest='tgtPort', type='string', help='specify target port')\n (options, args) = parser.parse_args()\n tgtHost = options.tgtHost\n tgtPort = options.tgtPort\n args.append(tgtPort)\n if (tgtHost == None) | (tgtPort == None):\n print('[-] You must specify a target host and port[s]!')\n for tgport in args:\n nmapScan(tgtHost, tgport)\nif __name__ == '__main__':\n 
main()\n\n","sub_path":"nmap_scan.py","file_name":"nmap_scan.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67536931","text":"#!/usr/bin/env python\n\nimport RPi.GPIO as GPIO # Import GPIO Module\nfrom time import sleep # Import sleep Module for timing\n\nGPIO.setmode(GPIO.BCM) # Configures pin numbering to Broadcom reference\nGPIO.setwarnings(False) # Disable Warnings\nGPIO.setup(21, GPIO.OUT) #Set our GPIO pin to output \nGPIO.output(21, False) #Set output to off\nGPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set GPIO to input with a pull-down resistor\nGPIO.add_event_detect(18, GPIO.RISING, bouncetime=200) # Monitor GPIO pin for a rising edge and debounce for 200mS\n\nwhile (True):\n if GPIO.event_detected(18): # Check to see if button has been pushed\n activate = True\n while (activate is True): # Execute this code until the button is pushed again\n GPIO.output(21, True) # Turn LED on\n sleep(0.01)\n GPIO.output(21, False) # Turn LED off\n sleep(0.01)\n if GPIO.event_detected(18): # Check for a 2nd button push\n activate = False\n else:\n GPIO.output(21, False) # Turn LED off","sub_path":"combine/ButtonLedTest2.py","file_name":"ButtonLedTest2.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"499348141","text":"# https://leetcode-cn.com/problems/permutation-sequence/\n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n '''字典序算法,O(kn),O(n)'''\n def nextPermute(s):\n for i in range(len(s)-2, -1, -1):\n if s[i] < s[i+1]:\n break\n else:\n s[:] = s[::-1]\n return\n for j in range(len(s)-1, -1, -1):\n if s[j] > s[i]: \n break\n s[i], s[j] = s[j], s[i]\n s[i+1:] = reversed(s[i+1:])\n return s\n \n res = list(range(1, n+1))\n for i in range(k-1):\n nextPermute(res)\n return \"\".join([str(_) for _ in res])\n '''DFS,超时'''\n # def dfs(n, path, visited, k):\n # if len(res) == k: return\n # if len(path) == n:\n # res.append(path)\n # return\n # for i in range(1, n+1):\n # if i not in visited:\n # dfs(n, path + str(i), visited.union({i}), k)\n # res = []\n # dfs(n, \"\", set(), k)\n # return res[-1]","sub_path":"Week_04/60. 第k个排列.py","file_name":"60. 
第k个排列.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"92685492","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n# Helping class for linked list\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def append(self, value):\n if self.head is None:\n self.head = Node(value)\n return\n\n node = self.head\n while node.next:\n node = node.next\n\n node.next = Node(value)\n\n def __iter__(self):\n node = self.head\n while node:\n yield node.value\n node = node.next\n\n def __repr__(self):\n return str([v for v in self])\n\n\n\n# Inserting at head of the linked list\ndef insert_at_head(linked_list, value):\n position_tail_continuity = linked_list.head\n linked_list.head = Node(value)\n linked_list.head.next = position_tail_continuity\n\n# To reverse a linked list\ndef reverse(linked_list):\n tail = linked_list.head\n reversed_linked_list = LinkedList()\n while tail is not None:\n value = tail.value\n reversed_linked_list = insert_at_head(reversed_linked_list, value)\n tail = tail.next\n\n# Checking if it works or not\nllist = LinkedList()\nfor value in [4, 2, 5, 1, -3, 0]:\n llist.append(value)\n\nflipped = reverse(llist)\nis_correct = list(flipped) == list(\n [0, -3, 1, 5, 2, 4]) and list(llist) == list(reverse(flipped))\nprint(\"Pass\" if is_correct else \"Fail\")\n","sub_path":"Another 3/reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"639387049","text":"def exponential_decay(input_data, alpha):\n '''\n Exponential Decay Smoothing Filter\n Increase alpha for more filtering: uses more past data to compute an average\n - input_data is a vector (list of values)\n '''\n\n output_data = input_data.copy() \n for i in range(0,input_data.num-1):\n output_data[i+1] = (output_data[i,0]*alpha) + (output_data[i+1,0] * (1-alpha))\n \n return output_data","sub_path":"Motif Analysis Augmented (Source,Target and Distance)/utils/library/BIAPT-NeuroAlgo-0c6969d/Python/filters/exponential_decay.py","file_name":"exponential_decay.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"56210956","text":"from __future__ import division\nfrom setproctitle import setproctitle as ptitle\nimport numpy as np\nimport torch\nfrom environments import create_env\nfrom utils import ensure_shared_grads\nfrom model import ActorCritic\nfrom player_util import Agent\nfrom torch.autograd import Variable\nfrom optimizers import create_optm\n\ndef train(rank, args, shared_model,reward_queue, global_episode_counter):\n ptitle('Training Agent: {}'.format(rank))\n gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]\n torch.manual_seed(args.seed + rank)\n if gpu_id >= 0:\n torch.cuda.manual_seed(args.seed + rank)\n\n env = create_env(args, 2000 + 10 * rank, False)\n optimizer = create_optm(args, shared_model)\n\n player = Agent(None, env, args, None)\n player.gpu_id = gpu_id\n player.model = ActorCritic(args.stack_frames, args.control_output)\n\n player.state = player.env.reset()\n player.state = torch.from_numpy(player.state).float()\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.state = player.state.cuda()\n player.model = player.model.cuda()\n player.model.train()\n total_reward = 0\n\n while True:\n if gpu_id >= 0:\n with 
torch.cuda.device(gpu_id):\n player.model.load_state_dict(shared_model.state_dict())\n else:\n player.model.load_state_dict(shared_model.state_dict())\n\n if player.done:\n\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.cx = Variable(torch.zeros(1, 128).cuda())\n player.hx = Variable(torch.zeros(1, 128).cuda())\n else:\n player.cx = Variable(torch.zeros(1, 128))\n player.hx = Variable(torch.zeros(1, 128))\n else:\n player.cx = Variable(player.cx.data)\n player.hx = Variable(player.hx.data)\n \n for step in range(args.num_steps):\n\n total_reward += player.action_train(rank, args.control_output)\n if player.done:\n if args.env == \"Torcs\" or args.env == \"Carla\":\n print(\"Training Episode {0}, Worker {1}, Reward {2}, Length {3}\".format(episode, rank, total_reward, player.eps_len))\n reward_queue.put(total_reward)\n total_reward = 0\n break\n\n with global_episode_counter.get_lock():\n episode = global_episode_counter.value\n global_episode_counter.value += 1\n if global_episode_counter.value > args.max_episodes:\n if args.env == \"Torcs\":\n player.env.end()\n break\n\n if player.done:\n\n player.eps_len = 0\n state = player.env.reset()\n player.state = torch.from_numpy(state).float()\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.state = player.state.cuda()\n\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n R = torch.zeros(1, 1).cuda()\n else:\n R = torch.zeros(1, 1)\n if not player.done:\n state = player.state\n state = state.unsqueeze(0)\n value, _, _, _ = player.model(\n (Variable(state), (player.hx, player.cx)))\n R = value.data\n\n player.values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n R = Variable(R)\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n gae = torch.zeros(1, 1).cuda()\n else:\n gae = torch.zeros(1, 1)\n for i in reversed(range(len(player.rewards))):\n R = args.gamma * R + player.rewards[i]\n advantage = R - player.values[i]\n value_loss = value_loss + 0.5 * advantage.pow(2)\n\n # Generalized Advantage Estimataion\n delta_t = player.rewards[i] + args.gamma * \\\n player.values[i + 1].data - player.values[i].data\n\n gae = gae * args.gamma * args.tau + delta_t\n\n policy_loss = policy_loss - \\\n (player.log_probs[i].sum() * Variable(gae)) - \\\n (0.01 * player.entropies[i].sum())\n\n player.model.zero_grad()\n (policy_loss + 0.5 * value_loss).backward()\n ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)\n optimizer.step()\n player.clear_actions()\n\n if args.env == \"Carla\":\n player.env.end()\n\n reward_queue.put(None)\n print(\"*************** Worker {0} Stops *************** \".format(rank))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"421577786","text":"#coding=utf-8\n\nimport unittest\n\nfrom util.tree_node import TreeNode\n\"\"\"\n\n653. 
Two Sum IV - Input is a BST\nDescriptionHintsSubmissionsDiscussSolution\nDiscuss Pick One\nGiven a Binary Search Tree and a target number, return true if there exist two elements in the BST such that their sum \nis equal to the given target.\n\nExample 1:\nInput: \n 5\n / \\\n 3 6\n / \\ \\\n2 4 7\n\nTarget = 9\n\nOutput: True\nExample 2:\nInput: \n 5\n / \\\n 3 6\n / \\ \\\n2 4 7\n\nTarget = 28\n\nOutput: False\n\n\nDifficulty:Easy\nTotal Accepted:21.9K\nTotal Submissions:43.4K\nContributor: aghanta\nSubscribe to see which companies asked this question.\n\nRelated Topics \nTree \nSimilar Questions \nTwo Sum Two Sum II - Input array is sorted Two Sum III - Data structure design \n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nfrom collections import deque\n\nclass Solution:\n def findTarget(self, root, k):\n \"\"\"\n :type root: TreeNode\n :type k: int\n :rtype: bool\n \"\"\"\n if not root:\n return False\n left_gen = self.get_bigger(root)\n right_gen = self.get_smaller(root)\n left = next(left_gen)\n right = next(right_gen)\n while left != right:\n tmp = left.val + right.val\n if tmp == k:\n return True\n elif tmp < k:\n left = next(left_gen)\n else:\n right = next(right_gen)\n return False\n\n def get_bigger(self, root):\n \"\"\"Generator method to return next bigger node in the tree. Traverse inorder.\n Assumptions: root is not None\n\n \"\"\"\n stack = deque()\n node = root\n while node or stack:\n while node:\n stack.append(node)\n node = node.left\n if stack:\n node = stack.pop()\n yield node\n node = node.right\n return None\n\n def get_smaller(self, root):\n \"\"\"Generator method to return next smaller node in the tree. Reverse traverse inorder.\n\n \"\"\"\n stack = deque()\n node = root\n while node or stack:\n while node:\n stack.append(node)\n node = node.right\n if stack:\n node = stack.pop()\n yield node\n node = node.left\n return None\n\n\nclass Solution1(object):\n def findTarget(self, root, n):\n if not root:\n return None\n vals = {}\n return self.helper(root, vals, n)\n\n def helper(self, root, vals, n):\n if not root:\n return False\n if n - root.val in vals:\n return True\n vals[root.val] = 1\n return self.helper(root.left, vals, n) or self.helper(root.right, vals, n)\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self):\n nums = [5, 3, 6, 2, 4, None, 7]\n target = 9\n root = TreeNode.generate_bt_from_list(nums)\n answer = True\n result = self.sol.findTarget(root, target)\n self.assertEqual(answer, result)\n\n def test_case2(self):\n nums = [5, 3, 6, 2, 4, None, 7]\n target = 28\n root = TreeNode.generate_bt_from_list(nums)\n answer = False\n result = self.sol.findTarget(root, target)\n self.assertEqual(answer, result)\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n#-*- coding:utf-8 -*-\n\n\"\"\"\nmany ways (3) to do it.\ncheck out if have time\n\nhttps://leetcode.com/problems/two-sum-iv-input-is-a-bst/discuss/\n\n\"\"\"","sub_path":"freq/2sum_group/two_sumIV_input_is_BST.py","file_name":"two_sumIV_input_is_BST.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"182432541","text":"# coding:utf-8\nimport raven\nimport settings\nimport gevent\nfrom gevent.queue 
import Queue\nfrom raven.transport import HTTPTransport\n\n\nclass SentryClient(object):\n\n def __init__(self):\n if settings.SERVER_DSN:\n self.__client = raven.Client(dsn=settings.SERVER_DSN, transport=HTTPTransport)\n self.queue = Queue()\n self.loop = None\n\n def send(self, exc_info, **kwargs):\n if settings.SERVER_DSN:\n data = self.__client.build_msg('raven.events.Exception', exc_info=exc_info, **kwargs)\n self.queue.put(data)\n if not self.loop:\n self.__start()\n\n def __run(self):\n while True:\n data = self.queue.get()\n self.__client.send(**data)\n\n def __start(self):\n self.loop = gevent.spawn(self.__run)\n self.loop.start()\n\n def __stop(self):\n self.loop.stop()\n self.loop = None\n","sub_path":"server/rainbow_server/world/sentry.py","file_name":"sentry.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"10731941","text":"#!/usr/bin/env python\n'''\nCreated January, 2012\n\n@author: Dr. Rainer Hessmer\n\n velocityLogger.py - This ROS node subscribes to the /odom topic and logs\n the linear and angular velocities with associated time stamps. This faciliates\n measuring the acceleration limits of the robot.\n\n Copyright (c) 2012 Dr. Rainer Hessmer. All right reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of the Vanadium Labs LLC nor the names of its \n contributors may be used to endorse or promote products derived \n from this software without specific prior written permission.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,\n INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport roslib; roslib.load_manifest('ardros')\nimport rospy\nimport sys\nimport time\n\nfrom nav_msgs.msg import Odometry\n\nstartTime = time.time()\nlastTimestamp = startTime\n\nclass VelocityLogger(object):\n\t\n\tdef __init__(self, outputFilePath, odomTopic = '/odom'):\n\t\tself._OutputFilePath = outputFilePath\n\t\tself._OdomTopic = odomTopic\n\n\tdef start(self):\n\t\tself._OutputFile = open(self._OutputFilePath, \"w\")\n\t\t\n\t\tself._StartTime = time.time()\n\t\tself._LastTimestamp = startTime\n\t\trospy.Subscriber(self._OdomTopic, Odometry, self._onOdomMessageReceived)\n\n\tdef _onOdomMessageReceived(self, data):\n\t\ttwist = data.twist.twist\n\t\tcurrentTime = time.time()\n\t\tsecondsSinceStart = currentTime - self._StartTime\n\t\tdeltaT = currentTime - self._LastTimestamp\n\t\tself._LastTimestamp = currentTime\n\t\t#rospy.loginfo(str(deltaT) + \", \" + str(twist.linear.x) + ', ' + str(twist.angular.z))\n\t\tif not self._OutputFile.closed:\n\t\t\tself._OutputFile.write(str(secondsSinceStart) + \"\\t\" + str(twist.linear.x) + '\\t' + str(twist.angular.z) + '\\n')\n\n\tdef close(self):\n\t\tprint(\"Closing\")\n\t\tself._OutputFile.close()\n\nif __name__ == '__main__':\n\trospy.init_node('velocityLogger')\n\tvelocityLogger = VelocityLogger('./OdomOutput.txt')\n\ttry:\n\t\tvelocityLogger.start()\n\t\trospy.spin()\n\n\tfinally:\n\t\tvelocityLogger.close()\n","sub_path":"Chapter09/chefbot_code/chefbot_bringup/scripts/bkup_working/velocityLogger.py","file_name":"velocityLogger.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330549991","text":"import click\nimport logging\nfrom pprint import pformat\nimport json\nimport configparser\nimport os\n\nfrom ..common_utils.common_util import project_dir_path\nfrom ..jira_scraper import jira_worker as scraper\nfrom ..related_jira_util import similar_ticket_finder as related_tickets_finder_module\nfrom ..related_jira_util import util as related_tickets_pkg_util\n\nlog = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n# constant\nTHIS_MODULE_DIRECTORY = os.path.join(project_dir_path, 'related_jira_util')\nCONFIG_DIRECTORY = os.path.join(THIS_MODULE_DIRECTORY, 'settings')\n\n# reading jql filters\njql_filters = configparser.ConfigParser()\njql_filters_config_file = os.path.join(CONFIG_DIRECTORY, 'jql_filters_to_scrape.config')\njql_filters.read(jql_filters_config_file)\nmodel_config = configparser.ConfigParser()\nmodel_config_file = os.path.join(CONFIG_DIRECTORY, 'current_model_in_use.config')\nmodel_config.read(model_config_file)\n\n\ndef train_related_tickets_model():\n \"\"\"This runs a script to train model on all closed tickets\"\"\"\n try:\n jira_obj = scraper.connect_to_jira()\n\n # read jql filters config\n filter_to_get_completed_tickets_for_training = dict(jql_filters['FILTER_COMPLETED_TICKETS_FOR_TRAINING'])\n\n # for each filter, train and save a model\n for filter_name, filter_query in 
filter_to_get_completed_tickets_for_training.items():\n log.info(\"Will crawl \" + filter_name)\n jira_tickets_corpus = scraper.filter_crawler(jira_obj, filter_query)\n log.info(\"Crawling of \" + filter_name + \" done.\")\n related_tickets_finder_module.train_and_save_tfidf_model(jira_tickets_corpus, \"model_\" + filter_name)\n\n except Exception as e:\n log.exception(e)\n\n\ndef find_related_tickets(test_model, open_tickets_filter, num_of_related_tickets_to_return=5):\n \"\"\"This runs a script to comment related tickets on each new ticket for configured filters\"\"\"\n try:\n jira_obj = scraper.connect_to_jira()\n related_tickets_data_list = []\n log.info(\"after connect : \")\n # read jql filters config\n filters_to_get_new_tickets = dict(jql_filters['FILTER_OPEN_NEW_TICKETS'])\n\n # finalize filter to be used right now\n default_filters = filters_to_get_new_tickets\n if open_tickets_filter is not None:\n filter_to_use = {'custom': open_tickets_filter}\n else:\n filter_to_use = default_filters\n log.info(\"Filter I'm using : \" + pformat(filter_to_use))\n\n # for each filter, find its tickets and comment related tickets using the model\n for filter_name, filter_query in filter_to_use.items():\n # crawl and get tickets info for this filter\n new_tickets_corpus = scraper.filter_crawler(jira_obj, filter_query, filter_name, True, True, True, True)\n\n if not new_tickets_corpus:\n log.info(\"No tickets in filter \" + filter_name + \". Moving on to next filter.\")\n continue\n # find the model pickle file to be used\n model_file_path = related_tickets_pkg_util.get_model_file_path(filter_name, test_model)\n # find top N related tickets\n related_tickets_data = related_tickets_finder_module \\\n .find_top_n_related_jira_tickets(num_of_related_tickets_to_return,\n new_tickets_corpus, model_file_path)\n related_tickets_data_list.extend(related_tickets_data)\n log.info(\"Done for \" + str(filter_name))\n\n log.info(\"Execution completed.\")\n return related_tickets_data_list\n except Exception as e:\n log.exception(e)\n\n\ndef get_data(type_of_data):\n \"\"\" This command returns data that is being used in the script when it gets run by cron.\"\"\"\n if type_of_data == 'default-filters-for-training':\n log.info(dict(jql_filters['FILTER_COMPLETED_TICKETS_FOR_TRAINING']))\n elif type_of_data == 'default-filters-for-commenting':\n log.info(dict(jql_filters['FILTER_OPEN_NEW_TICKETS']))\n elif type_of_data == 'current-models':\n log.info(dict(model_config['FILTER_MODEL_MAP']))\n elif type_of_data == 'tickets-alread-commented':\n for filter_name in dict(jql_filters['FILTER_OPEN_NEW_TICKETS']).keys():\n already_commented_tickets_file = os.path.join(os.path.join(THIS_MODULE_DIRECTORY, 'data'),\n filter_name + '_already_commented_tickets.json')\n with open(already_commented_tickets_file, 'r') as json_file:\n tickets_already_commented = json.load(json_file)\n log.info(\"Tickets already commented for \" + filter_name + \": \" + str(tickets_already_commented))\n else:\n log.error(\"Couldn't understand.\")\n pass\n","sub_path":"app/modules/related_jira_util/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"223450245","text":"#!/usr/bin/env python\n'''\nAuthor:Shivam Singh\nmail:shivam043@gmail.com\ncopyright@2017\n'''\n\nimport requests\nimport argparse\nimport webbrowser\nimport sys\nimport 
os\n\nAPI_URL=\"https://newsapi.org/register\"\nBASE_URL=\"https://newsapi.org/v1/articles\"\nSOURCE_URL=\"https://newsapi.org/v1/sources\"\nvalid=['y','n']\nnews_code=['1','2']\ncategory_news=['business','entertainment','gaming','general','music','politics','science-and-nature','sport','technology']\n\n''' Function to test network connection '''\ndef test_network_connection():\n try:\n r=requests.get(\"https://newsapi.org/\")\n r.raise_for_status()\n except requests.exceptions.ConnectionError:\n print(\"There was issue connecting to the server. Please check your network connection.\")\n sys.exit(1)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n\n\n''' Fetching all the news code '''\ndef fetch_all_news_code():\n r=requests.get(SOURCE_URL)\n t=r.json()\n for i in t['sources']:\n news_code.append(i['id'])\n \n\n''' Load api key on a global apikey and validate it '''\n\ndef load_config_key():\n \n try:\n global apiKey\n apiKey=os.environ['IN_API_KEY']\n if len(apiKey)==32:\n try:\n int(apiKey,16)\n except ValueError:\n print(\"Invalid API key\")\n \n except KeyError:\n print('No API Token detected. '\n 'Please visit {0} and get an API Token, '\n 'which will be used by instantnews '\n 'to get access to the data.'\n .format(API_URL))\n sys.exit(1)\n\n''' Function to validate choice for yes or no'''\n\ndef check_choice(choice):\n \n if len(choice)>1:\n return True\n \n if choice==\"y\" or choice==\"n\":\n return False\n'''Function for displaying news code category wise'''\n\ndef show_sources_category(l):\n \n flag=0\n if l in category_news:\n flag=1\n if flag==0:\n print(\"Enter valid category\")\n sys.exit(1)\n\n url=\"?category={category_type}\"\n r=requests.get((SOURCE_URL+url).format(category_type=l))\n t=r.json()\n for i in t['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\",i['id'],i['name']))\n\n'''Function for displaying all news code'''\n\ndef show_sources_all():\n r=requests.get(SOURCE_URL)\n t=r.json()\n for i in t['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\",i['id'],i['name']))\n\n'''Function for displaying news w.r.t news id'''\n\ndef show_news(l,BASE_URL):\n \n url=\"?source={news_id}&apiKey=\"\n \n r=requests.get((BASE_URL+url+apiKey).format(news_id=l))\n t=r.json()\n list_news=[]\n c=0\n \n for i in t['articles']:\n print('[{0}] {1}: {2}'.format(c,\"Title\",i['title']))\n print('{0}: {1}'.format(\"Author\",i['author']))\n print('{0}: {1}'.format(\"Summary\",i['description']))\n print('--------------------------------------------')\n list_news.append(i['url'])\n c=c+1\n \n print(\"Want to see the news that interests you/open in a webpage? Enter Y/N \") \n \n choice=(input()).lower()\n while check_choice(choice):\n print(\"Ooops that was wrong,Try again!\")\n choice=input(\"Enter (y/n): \").lower()\n\n \n if choice=='y':\n \n while choice=='y':\n \n news_code=input(\"Enter news code: \")\n while not(news_code.isdigit()) or not (0<=int(news_code)<=c):\n print(\"Ooops that was wrong,Try again!\")\n news_code=input(\"Pick One: \")\n \n webbrowser.open(list_news[int(news_code)])\n print(\"Want to exit? 
Enter Y/N \")\n choice=(input()).lower()\n if choice not in valid:\n sys.exit()\n\n if choice=='n':\n choice='y'\n continue\n else:\n break\n'''Arguments building fun d: '''\n\ndef parser():\n \n fetch_all_news_code()\n load_config_key()\n if not sys.argv[1:]:\n print(\"Arguments need Type in --help/-h for help\")\n else:\n \n parser=argparse.ArgumentParser()\n parser.add_argument(\"--show\",\"-s\",action=\"store\",help=\"Shows all the news channel codes category wise\")\n parser.add_argument(\"--show_all\",\"-sa\",action=\"store_true\",help=\"Shows all the news channel codes\")\n parser.add_argument(\"--news\",\"-n\",type=str,help=\"Shows news\")\n \n \n args=parser.parse_args()\n \n \n if args.show_all:\n show_sources_all()\n elif args.show:\n show_sources_category(args.show)\n elif args.news:\n flag=0\n l=args.news\n if l in news_code:\n flag=1 \n if flag==0:\n print(\"Enter valid newscode\")\n sys.exit(1)\n show_news(args.news,BASE_URL) \n \ndef main():\n test_network_connection()\n parser()\n \nif __name__ == \"__main__\":\n main() \n","sub_path":"instantnews.py","file_name":"instantnews.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"35332696","text":"# encoding: utf-8\n\n\"\"\"\n.. codeauthor:: Tsuyoshi Hombashi \n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom collections import namedtuple\nimport re\n\nimport typepy\n\nfrom ._error import (\n InvalidParameterError,\n UnitNotFoundError,\n)\nfrom ._logger import logger\n\nByteUnit = namedtuple(\"ByteUnit\", \"regexp factor\")\n\n\nclass Humanreadable(object):\n __UNIT_LIST = [\n ByteUnit(regexp=re.compile(\"^b$\", re.IGNORECASE), factor=0),\n ByteUnit(regexp=re.compile(\"^bps$\", re.IGNORECASE), factor=0),\n ByteUnit(regexp=re.compile(\"^k$\", re.IGNORECASE), factor=1),\n ByteUnit(regexp=re.compile(\"^kbps$\", re.IGNORECASE), factor=1),\n ByteUnit(regexp=re.compile(\"^m$\", re.IGNORECASE), factor=2),\n ByteUnit(regexp=re.compile(\"^mbps$\", re.IGNORECASE), factor=2),\n ByteUnit(regexp=re.compile(\"^g$\", re.IGNORECASE), factor=3),\n ByteUnit(regexp=re.compile(\"^gbps$\", re.IGNORECASE), factor=3),\n ByteUnit(regexp=re.compile(\"^t$\", re.IGNORECASE), factor=4),\n ByteUnit(regexp=re.compile(\"^gbps$\", re.IGNORECASE), factor=3),\n ByteUnit(regexp=re.compile(\"^p$\", re.IGNORECASE), factor=5),\n ]\n __RE_NUMBER = re.compile(\"^[\\-\\+]?[0-9\\.]+\")\n\n def __init__(self, readable_size, kilo_size=1024):\n \"\"\"\n String converter that humanreadable byte size to a number.\n\n :param str readable_size: human readable size (bytes). e.g. 256 M\n :param int kilo_size: size of ``kilo``. 
1024 or 1000\n \"\"\"\n\n self.__readable_size = readable_size\n self.kilo_size = kilo_size\n\n self.__validate_kilo_size()\n\n def to_bit(self):\n \"\"\"\n :raises ValueError:\n \"\"\"\n\n logger.debug(\"readable_size: {}\".format(self.__readable_size))\n\n if not typepy.is_not_null_string(self.__readable_size):\n raise TypeError(\n \"readable_size must be a string: actual={}\".format(\n self.__readable_size))\n\n self.__readable_size = self.__readable_size.strip()\n\n try:\n size = self.__RE_NUMBER.search(self.__readable_size).group()\n except AttributeError:\n raise InvalidParameterError(\n \"invalid value: {}\".format(self.__readable_size))\n size = float(size)\n if size < 0:\n raise InvalidParameterError(\n \"size must be greater than or equal to zero\")\n\n unit = self.__RE_NUMBER.sub(\"\", self.__readable_size).strip().lower()\n\n return size * self.__get_coefficient(unit)\n\n def to_kilo_bit(self):\n \"\"\"\n :raises ValueError:\n \"\"\"\n\n return self.to_bit() / self.kilo_size\n\n def __validate_kilo_size(self):\n if self.kilo_size not in [1000, 1024]:\n raise ValueError(\"invalid kilo size: {}\".format(self.kilo_size))\n\n def __get_coefficient(self, unit_str):\n self.__validate_kilo_size()\n\n for unit in self.__UNIT_LIST:\n if unit.regexp.search(unit_str):\n return self.kilo_size ** unit.factor\n\n raise UnitNotFoundError(\"unit not found: value={}\".format(unit_str))\n","sub_path":"tcconfig/_converter.py","file_name":"_converter.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"89914253","text":"import sys\nfrom converter import Converter\nconv = Converter()\n\ndef convert_to_mp4(video, folder_src):\n # get information about the video file (optional)\n # info = conv.probe(folder_src + video)\n\n new_video = folder_src + video + \".mp4\"\n\n convert = conv.convert(folder_src + video, new_video, {\n 'format': 'mp4',\n 'audio': {\n 'codec': 'aac',\n 'samplerate': 11025,\n 'channels': 2\n },\n 'video': {\n 'codec': 'hevc',\n 'width': 720,\n 'height': 400,\n 'fps': 25\n }})\n \n # overwrite the same terminal line for each progress update\n for timecode in convert:\n print(f'\\rConverting {video}: {timecode}', end='', flush=True)\n\ndef main(args):\n video = args[1]\n folder_src = args[2]\n convert_to_mp4(video, folder_src)\n\nif __name__ == \"__main__\":\n main(sys.argv)","sub_path":"conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"652831784","text":"import GUI as gui\nimport RLearning as rl\nimport graphics\n\nNUM_OBSTACLES = 0\nWALLS_THICKNESS = 1\n\n# grid WxH\nGRID_WIDTH = 6\nGRID_HEIGHT = 6\n\n# BIG SIDE (WIDTH OR HEIGHT) in pixels\nBIG_SIDE = 400\n\n# compute parameters to set in the objects rl and gui below\nPIXEL_SIZE, WIDTH, HEIGHT = graphics.computePixelSize(GRID_WIDTH+2*WALLS_THICKNESS, GRID_HEIGHT+2*WALLS_THICKNESS, BIG_SIDE)\n\n# objects rl (AI algorithm) and gui (graphical window object)\nrl = rl.RLearning(GRID_WIDTH, GRID_HEIGHT, NUM_OBSTACLES)\ngui = gui.GUI(rl, WIDTH, HEIGHT, GRID_WIDTH + 2*WALLS_THICKNESS, GRID_HEIGHT + 2*WALLS_THICKNESS, PIXEL_SIZE)\n\n# start loop of GUI events\ngui.mainloop()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"606776648","text":"\"\"\"tcp_sock.\n\nUsage:\ntcp_sock echo_client 
--host IP_ADDR --port TCP_PORT --size MSG_SIZE [--br|--hc]\ntcp_sock echo_server --port TCP_PORT [--br|--hc]\ntcp_sock tp_client (--up|--down) --host IP_ADDR --port TCP_PORT --size MSG_SIZE\ntcp_sock tp_server --port TCP_PORT\ntcp_sock push --client --host IP_ADDR --port TCP_PORT --size MSG_SIZE\n [(--count COUNT --snd_delay MS)]\n [--con THREADS]\ntcp_sock push --server --port TCP_PORT\n [--rcv_delay SECONDS]\n [--win_size BYTES]\n [-S]\n\nArguments:\necho_server Provides a simple TCP 'echo' mechanism. In this mode,\n tcp_sock will listen on the specified TCP_PORT. Any\n data received will be 'echoed' back to the sender.\n Use options '-br' or '-hc' to control the send() and\n recv() mechanism. If neither option is specified, then\n a two-phase recv() mechanism will be used and any\n message sent or received will include a header\n (ECHO_HEADER).\n\n See '-br' and '-hc' options below for more\n information on controlling the send() and recv()\n mechanism.\n\n The same option should be used for both 'echo_server'\n and 'echo_client'.\n\n\necho_client Provides a simple TCP 'echo' mechanism. In this mode,\n tcp_sock will attempt to send data of MSG_SIZE to a\n receiver at IP_ADDR and TCP_PORT. Also, tcp_sock will\n expect the data sent to be 'echoed' back. Use options\n '-br' or '-hc' to control the send() and recv()\n mechanism. If neither option is specified, then\n a two-phase recv() mechanism will be used and any\n message sent or received will include a header\n (ECHO_HEADER).\n\n See '-br' and '-hc' options below for more\n information on controlling the send() and recv()\n mechanism.\n\n The same option should be used for both 'echo_server'\n and 'echo_client'.\n\n\nOptions:\n--port TCP_PORT Specifies the TCP port to be used by client or server.\n\n--host IP_ADDR Specifies the destination IP ADDR to be used by client.\n\n--size MSG_SIZE Specifies the size of the message the client should\n send.\n\n--rcv_delay SECONDS Specifies that the server should wait after accepting\n the connection for X SECONDS before receiving the data.\n This option is used to trigger a zero window condition.\n\n--win_size BYTES Specifies the size of the TCP receive window.\n\n--count COUNT Specifies the number of messages to send.\n\n--snd_delay SECONDS Specifies the delay between each send().\n\n--br This option can be used with both 'echo_server' and\n 'echo_client'. When this option is specified, no\n ECHO_HEADER is included when sending data or expected\n when receiving data.\n\n Also, the recv() mechanism is intentionally faulty.\n This is done to help illustrate that there is no\n mechanism built into TCP that allows the sending\n application to tell the receiving application how much\n data is being sent.\n\n--hc This option can be used with both 'echo_server' and\n 'echo_client'. 
When this option is specified, no\n ECHO_HEADER is included when sending data or expected\n when receiving data.\n\n Also, the recv() mechanism expects the sender to\n 'shutdown' the connection for writing, once it has\n completed sending the data.\n\n--up Test upload throughput\n\n--down Test download throughput\n\n--con THREADS Concurrent threads\n\n-S\n\n\n\"\"\"\nimport sys\nimport time\nfrom docopt import docopt\nfrom threading import Thread\nfrom modules import config as cfg\nfrom modules import printmgr\nfrom echo.br_server import br_server\nfrom echo.br_client import br_client\nfrom echo.hc_server import hc_server\nfrom echo.hc_client import hc_client\nfrom echo.hdr_server import hdr_server\nfrom echo.hdr_client import hdr_client\nfrom push.push import Push\nfrom tp.tp_server import tp_server\nfrom tp.tp_client import tp_client\n\n\ndef main():\n \"\"\"Entry point.\"\"\"\n args = docopt(__doc__)\n\n # Create daemon thread for printing to terminal\n thrd = Thread(target=printmgr)\n thrd.daemon = True\n thrd.start()\n\n if args['echo_server'] is True:\n if args['--br'] is True:\n br_server(args['--port'])\n elif args['--hc'] is True:\n hc_server(args['--port'])\n else:\n try:\n hdr_server(args['--port'])\n except RuntimeError as err:\n sys.exit(err)\n\n elif args['echo_client'] is True:\n if args['--br'] is True:\n br_client(args['--host'], args['--port'], args['--size'])\n elif args['--hc'] is True:\n hc_client(args['--host'], args['--port'], args['--size'])\n else:\n try:\n hdr_client(args['--host'], args['--port'], args['--size'])\n except RuntimeError as err:\n sys.exit(err)\n\n elif args['push'] is True:\n push = Push(args)\n if args['--server'] is True:\n push.server()\n elif args['--client'] is True:\n push.client()\n\n elif args['tp_client'] is True:\n try:\n tp_client(\n args['--host'], args['--port'], args['--size'], args['--up'])\n except RuntimeError as err:\n sys.exit(err)\n\n elif args['tp_server'] is True:\n tp_server(args['--port'])\n\n else:\n # Future options\n pass\n\n while True:\n if cfg.print_queue.empty():\n print(\"Print queue is empty. 
Exiting...\")\n break\n else:\n time.sleep(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tcp_sock.py","file_name":"tcp_sock.py","file_ext":"py","file_size_in_byte":6616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"300079382","text":"import sys\nfrom urllib import urlopen\nfrom bs4 import BeautifulSoup\nfrom collections import Counter\nimport pandas as pd\nimport re\n\n\ndef init():\n\t#opening html file passing as parameter to BeautifulSoup\n\turl = sys.argv[1]\n\thtml = urlopen(url).read()\n\t\n\t#parsing as html file\n\tsoup = BeautifulSoup(html, \"lxml\")\n\t\n\t#Extracting out the HTML and converting to plain text\n\tfor script in soup([\"script\", \"style\"]):\n\t\tscript.extract() # rip it out\n\t\n\t# get text\n\ttext = soup.get_text()\n\t\n\t# break into lines and remove leading and trailing space on each\n\tlines = (line.strip()\n\t\tfor line in text.splitlines()\n\t\t)\n\t\t\n\t# break multi-headlines into a line each\n\tchunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n\t\n\t# drop blank lines\n\ttext = '\\n'.join(chunk for chunk in chunks if chunk)\n\t\n\t#converting unicode result to String\n\ttext = text.encode(\"ascii\", \"ignore\")\n\t\n\treturn text\n\n#tokenizing the string into words definition\ndef tokenize() :\n\tif text is not None:\n\t\t#replace special characters as spaces and split and put in a list (words)\n\t\twords = text.lower().replace(\"-\",\" \").replace(\"@\",\" \").replace(\"(\",\" \").replace(\")\",\" \").replace(\"!\",\" \").replace(\"/\",\" \").replace(\"'\",\"\").split()\n\t\treturn words\n\telse :\n\t\treturn None\n\t\t\n\n#printing content in an output file\ndef printList() :\n\t#removing special characters at the start and end of each string in the list\n\tfor idx, word in enumerate(words):\n\t\tword = re.sub(r'(? 
1:\n try:\n application = Application.objects.get(pk=self.args[1])\n if application.processing_status == 'renewal':\n kwargs['is_renewal'] = True\n except Exception:\n pass\n\n return super(ApplicationEntryBaseView, self).get_context_data(**kwargs)\n\n\nclass NewApplicationView(OfficerOrCustomerRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n try:\n utils.delete_app_session_data(request.session)\n except Exception as e:\n messages.warning(request, 'There was a problem deleting session data: %s' % e)\n\n utils.set_app_session_data(request.session, 'temp_files_dir', tempfile.mkdtemp(dir=settings.MEDIA_ROOT))\n\n if is_customer(request.user):\n utils.set_app_session_data(request.session, 'customer_pk', request.user.pk)\n\n return redirect('wl_applications:select_licence_type', *args, **kwargs)\n else:\n return redirect('wl_applications:create_select_customer')\n\n\nclass EditApplicationView(UserCanEditApplicationMixin, View):\n def get(self, request, *args, **kwargs):\n try:\n utils.delete_app_session_data(request.session)\n except Exception as e:\n messages.warning(request, 'There was a problem deleting session data: %s' % e)\n\n temp_files_dir = tempfile.mkdtemp(dir=settings.MEDIA_ROOT)\n utils.set_app_session_data(request.session, 'temp_files_dir', temp_files_dir)\n\n application = get_object_or_404(Application, pk=args[1]) if len(args) > 1 else None\n if application is not None:\n utils.set_app_session_data(request.session, 'customer_pk', application.applicant_profile.user.pk)\n utils.set_app_session_data(request.session, 'profile_pk', application.applicant_profile.pk)\n utils.set_app_session_data(request.session, 'data', application.data)\n\n # copy document files into temp_files_dir\n for document in application.documents.all():\n shutil.copyfile(document.file.path, os.path.join(temp_files_dir, document.name))\n\n if application.hard_copy is not None:\n shutil.copyfile(application.hard_copy.file.path, os.path.join(temp_files_dir, application.hard_copy.name))\n utils.set_app_session_data(request.session, 'application_document', application.hard_copy.name)\n\n return redirect('wl_applications:enter_details', *args, **kwargs)\n\n\nclass CreateSelectCustomer(OfficerRequiredMixin, TemplateView):\n template_name = 'wl/entry/create_select_customer.html'\n login_url = '/'\n\n def get_context_data(self, **kwargs):\n kwargs['create_customer_form'] = EmailUserForm(email_required=False)\n\n return super(CreateSelectCustomer, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n if 'select' in request.POST:\n utils.set_app_session_data(request.session, 'customer_pk', request.POST.get('customer'))\n elif 'create' in request.POST:\n create_customer_form = EmailUserForm(request.POST, email_required=False)\n if create_customer_form.is_valid():\n customer = create_customer_form.save()\n utils.set_app_session_data(request.session, 'customer_pk', customer.id)\n else:\n context = {'create_customer_form': create_customer_form}\n return render(request, self.template_name, context)\n\n return redirect('wl_applications:select_licence_type', *args, **kwargs)\n\n\nclass SelectLicenceTypeView(LoginRequiredMixin, TemplateView):\n template_name = 'wl/entry/select_licence_type.html'\n login_url = '/'\n\n def get_context_data(self, **kwargs):\n kwargs['licence_type_dicts'] = WildlifeLicenceType.objects.all().values('code_slug', 'name', 'code')\n\n return super(SelectLicenceTypeView, self).get_context_data(**kwargs)\n\n\nclass 
CheckIdentificationRequiredView(LoginRequiredMixin, ApplicationEntryBaseView, FormView):\n template_name = 'wl/entry/upload_identification.html'\n form_class = IdentificationForm\n\n def get(self, *args, **kwargs):\n licence_type = get_object_or_404(WildlifeLicenceType, code_slug=args[1])\n\n try:\n applicant = utils.determine_applicant(self.request)\n except utils.SessionDataMissingException as e:\n messages.error(self.request, six.text_type(e))\n return redirect('wl_applications:create_select_customer')\n\n if licence_type.identification_required and applicant.identification is None:\n return super(CheckIdentificationRequiredView, self).get(*args, **kwargs)\n else:\n return redirect('wl_applications:create_select_profile', args[1], **kwargs)\n\n def get_context_data(self, **kwargs):\n kwargs['file_types'] = ', '.join(['.' + file_ext for file_ext in IdentificationForm.VALID_FILE_TYPES])\n\n return super(CheckIdentificationRequiredView, self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n try:\n applicant = utils.determine_applicant(self.request)\n except utils.SessionDataMissingException as e:\n messages.error(self.request, six.text_type(e))\n return redirect('wl_applications:create_select_customer')\n\n if applicant.identification is not None:\n applicant.identification.delete()\n\n applicant.identification = Document.objects.create(file=self.request.FILES['identification_file'])\n applicant.save()\n\n # update any other applications for this user that are awaiting ID upload\n for application in Application.objects.filter(applicant_profile__user=applicant):\n if application.id_check_status == 'awaiting_update':\n application.id_check_status = 'updated'\n application.save()\n\n return redirect('wl_applications:create_select_profile', *self.args)\n\n\nclass CreateSelectProfileView(LoginRequiredMixin, ApplicationEntryBaseView):\n template_name = 'wl/entry/create_select_profile.html'\n\n def get_context_data(self, **kwargs):\n if len(self.args) > 1:\n kwargs['application_pk'] = self.args[1]\n\n try:\n applicant = utils.determine_applicant(self.request)\n except utils.SessionDataMissingException as e:\n messages.error(self.request, six.text_type(e))\n return redirect('wl_applications:create_select_customer')\n\n profile_exists = applicant.profile_set.count() > 0\n\n if utils.is_app_session_data_set(self.request.session, 'profile_pk'):\n selected_profile = Profile.objects.get(id=utils.get_app_session_data(self.request.session, 'profile_pk'))\n kwargs['profile_selection_form'] = ProfileSelectionForm(user=applicant, selected_profile=selected_profile)\n else:\n if profile_exists:\n kwargs['profile_selection_form'] = ProfileSelectionForm(user=applicant)\n\n if profile_exists:\n kwargs['profile_creation_form'] = ProfileForm(user=utils.get_app_session_data(self.request.session, 'customer_pk'))\n else:\n kwargs['profile_creation_form'] = ProfileForm(initial_display_name='Default', initial_email=applicant.email,\n user=utils.get_app_session_data(self.request.session, 'customer_pk'))\n\n kwargs['address_form'] = AddressForm()\n kwargs['licence_type'] = get_object_or_404(WildlifeLicenceType, code_slug=self.args[0])\n\n return super(CreateSelectProfileView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n applicant = utils.determine_applicant(request)\n except utils.SessionDataMissingException as e:\n messages.error(request, six.text_type(e))\n return redirect('wl_applications:create_select_customer')\n\n licence_type = 
WildlifeLicenceType.objects.get(code_slug=args[0])\n\n if 'select' in request.POST:\n profile_selection_form = ProfileSelectionForm(request.POST, user=applicant)\n\n if profile_selection_form.is_valid():\n utils.set_app_session_data(request.session, 'profile_pk', profile_selection_form.cleaned_data.get('profile').id)\n else:\n return render(request, self.template_name, {'licence_type': licence_type,\n 'profile_selection_form': profile_selection_form,\n 'profile_creation_form': ProfileForm(),\n 'address_form': AddressForm()})\n elif 'create' in request.POST:\n profile_form = ProfileForm(request.POST)\n address_form = AddressForm(request.POST)\n\n if profile_form.is_valid() and address_form.is_valid():\n profile = profile_form.save(commit=False)\n profile.postal_address = address_form.save()\n profile.save()\n\n utils.set_app_session_data(request.session, 'profile_pk', profile.id)\n else:\n return render(request, self.template_name,\n {'licence_type': licence_type,\n 'profile_selection_form': ProfileSelectionForm(user=request.user),\n 'profile_creation_form': profile_form, 'address_form': address_form})\n\n return redirect('wl_applications:enter_details', *args)\n\n\nclass EnterDetailsView(UserCanEditApplicationMixin, ApplicationEntryBaseView):\n template_name = 'wl/entry/enter_details.html'\n\n def get_context_data(self, **kwargs):\n application = get_object_or_404(Application, pk=self.args[1]) if len(self.args) > 1 else None\n\n licence_type = WildlifeLicenceType.objects.get(code_slug=self.args[0])\n if utils.is_app_session_data_set(self.request.session, 'profile_pk'):\n profile = get_object_or_404(Profile, pk=utils.get_app_session_data(self.request.session, 'profile_pk'))\n else:\n profile = application.applicant_profile\n\n kwargs['licence_type'] = licence_type\n kwargs['profile'] = profile\n kwargs['structure'] = licence_type.application_schema\n\n kwargs['is_proxy_applicant'] = is_officer(self.request.user)\n\n if application is not None:\n kwargs['application_pk'] = application.pk\n if application.review_status == 'awaiting_amendments':\n amendments = AmendmentRequest.objects.filter(application=application).filter(status='requested')\n kwargs['amendments'] = amendments\n\n temp_files_dir = utils.get_app_session_data(self.request.session, 'temp_files_dir')\n if temp_files_dir is not None:\n temp_files_url = settings.MEDIA_URL + os.path.basename(os.path.normpath(temp_files_dir))\n\n if utils.is_app_session_data_set(self.request.session, 'data'):\n data = utils.get_app_session_data(self.request.session, 'data')\n\n if temp_files_dir is not None:\n utils.prepend_url_to_files(licence_type.application_schema, data, temp_files_url)\n\n kwargs['data'] = data\n\n if utils.is_app_session_data_set(self.request.session, 'application_document'):\n application_document = utils.get_app_session_data(self.request.session, 'application_document')\n\n if temp_files_dir is not None:\n application_document = os.path.join(temp_files_url, application_document)\n\n kwargs['application_document'] = application_document\n\n return super(EnterDetailsView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n licence_type = WildlifeLicenceType.objects.get(code_slug=self.args[0])\n\n utils.rename_filename_doubleups(request.POST, request.FILES)\n\n utils.set_app_session_data(request.session, 'data', utils.create_data_from_form(licence_type.application_schema,\n request.POST, request.FILES))\n\n temp_files_dir = utils.get_app_session_data(request.session, 'temp_files_dir')\n\n if 'draft' 
in request.POST or 'draft_continue' in request.POST:\n if len(args) > 1:\n application = get_object_or_404(Application, pk=args[1])\n else:\n application = Application()\n\n if is_officer(request.user):\n application.proxy_applicant = request.user\n\n application.data = utils.get_app_session_data(request.session, 'data')\n application.licence_type = WildlifeLicenceType.objects.get(code_slug=args[0])\n application.applicant_profile = get_object_or_404(Profile,\n pk=utils.get_app_session_data(request.session, 'profile_pk'))\n application.customer_status = 'draft'\n\n if application.processing_status != 'renewal':\n application.processing_status = 'draft'\n\n application.save(version_user=application.applicant_profile.user)\n\n application.documents.clear()\n\n # need to create documents from all the existing files that haven't been replaced\n # (saved in temp_files_dir) as well as any new ones\n try:\n for filename in utils.get_all_filenames_from_application_data(licence_type.application_schema,\n utils.get_app_session_data(request.session, 'data')):\n\n # need to be sure file is in tmp directory (as it could be a freshly attached file)\n if os.path.exists(os.path.join(temp_files_dir, filename)):\n document = Document.objects.create(name=filename)\n with open(os.path.join(temp_files_dir, filename), 'rb') as doc_file:\n document.file.save(filename, File(doc_file), save=True)\n application.documents.add(document)\n except Exception as e:\n messages.error(request, 'There was a problem appending applications files: %s' % e)\n\n for f in request.FILES:\n if f == 'application_document':\n application.hard_copy = Document.objects.create(name=str(request.FILES[f]), file=request.FILES[f])\n else:\n application.documents.add(Document.objects.create(name=str(request.FILES[f]), file=request.FILES[f]))\n\n application.save(no_revision=True)\n\n messages.warning(request, 'The application was saved to draft.')\n\n if 'draft' in request.POST:\n try:\n utils.delete_app_session_data(request.session)\n except Exception as e:\n messages.warning(request, 'There was a problem deleting session data: %s' % e)\n\n return redirect('wl_dashboard:home')\n else:\n # if continuing, need to save new files in temp so they can be previewed on enter details screen\n if len(request.FILES) > 0:\n temp_files_dir = utils.get_app_session_data(request.session, 'temp_files_dir')\n\n for f in request.FILES:\n if f == 'application_document':\n utils.set_app_session_data(request.session, 'application_document', str(request.FILES[f]))\n\n with open(os.path.join(temp_files_dir, str(request.FILES[f])), 'wb+') as destination:\n for chunk in request.FILES[f].chunks():\n destination.write(chunk)\n\n return redirect('wl_applications:enter_details', args[0], application.pk)\n else:\n if len(request.FILES) > 0:\n temp_files_dir = utils.get_app_session_data(request.session, 'temp_files_dir')\n\n for f in request.FILES:\n if f == 'application_document':\n utils.set_app_session_data(request.session, 'application_document', str(request.FILES[f]))\n\n with open(os.path.join(temp_files_dir, str(request.FILES[f])), 'wb+') as destination:\n for chunk in request.FILES[f].chunks():\n destination.write(chunk)\n\n return redirect('wl_applications:preview', *args)\n\n\nclass PreviewView(UserCanEditApplicationMixin, ApplicationEntryBaseView):\n template_name = 'wl/entry/preview.html'\n\n def get_context_data(self, **kwargs):\n licence_type = WildlifeLicenceType.objects.get(code_slug=self.args[0])\n\n application = get_object_or_404(Application, 
pk=self.args[1]) if len(self.args) > 1 else None\n\n if utils.is_app_session_data_set(self.request.session, 'profile_pk'):\n profile = get_object_or_404(Profile, pk=utils.get_app_session_data(self.request.session, 'profile_pk'))\n else:\n profile = application.applicant_profile\n\n kwargs['licence_type'] = licence_type\n kwargs['profile'] = profile\n kwargs['structure'] = licence_type.application_schema\n\n kwargs['is_proxy_applicant'] = is_officer(self.request.user)\n\n if len(self.args) > 1:\n kwargs['application_pk'] = self.args[1]\n\n temp_files_dir = utils.get_app_session_data(self.request.session, 'temp_files_dir')\n if temp_files_dir is not None:\n temp_files_url = settings.MEDIA_URL + os.path.basename(os.path.normpath(temp_files_dir))\n\n if utils.is_app_session_data_set(self.request.session, 'data'):\n data = utils.get_app_session_data(self.request.session, 'data')\n\n if temp_files_dir is not None:\n utils.prepend_url_to_files(licence_type.application_schema, data, temp_files_url)\n\n kwargs['data'] = data\n\n if utils.is_app_session_data_set(self.request.session, 'application_document'):\n application_document = utils.get_app_session_data(self.request.session, 'application_document')\n\n if temp_files_dir is not None:\n application_document = os.path.join(temp_files_url, application_document)\n\n kwargs['structure'], kwargs['data'] = utils.append_app_document_to_schema_data(kwargs['structure'],\n kwargs['data'],\n application_document)\n\n return super(PreviewView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n if len(args) > 1:\n application = get_object_or_404(Application, pk=args[1])\n else:\n application = Application()\n\n if is_officer(request.user):\n application.proxy_applicant = request.user\n\n application.data = utils.get_app_session_data(self.request.session, 'data')\n application.licence_type = get_object_or_404(WildlifeLicenceType, code_slug=args[0])\n application.correctness_disclaimer = request.POST.get('correctnessDisclaimer', '') == 'on'\n application.further_information_disclaimer = request.POST.get('furtherInfoDisclaimer', '') == 'on'\n application.applicant_profile = get_object_or_404(Profile, pk=utils.get_app_session_data(request.session,\n 'profile_pk'))\n application.lodgement_sequence += 1\n application.lodgement_date = datetime.now().date()\n\n if application.customer_status == 'amendment_required':\n # this is a 're-lodged' application after some amendment were required.\n # from this point we assume that all the amendments have been amended.\n AmendmentRequest.objects.filter(application=application).filter(status='requested').update(status='amended')\n application.review_status = 'amended'\n application.processing_status = 'ready_for_action'\n else:\n if application.processing_status != 'renewal':\n application.processing_status = 'new'\n\n application.customer_status = 'under_review'\n\n # need to save application in order to get its pk\n if not application.lodgement_number:\n application.save(no_revision=True)\n application.lodgement_number = '%s-%s' % (str(application.licence_type.pk).zfill(LICENCE_TYPE_NUM_CHARS),\n str(application.pk).zfill(LODGEMENT_NUMBER_NUM_CHARS))\n\n application.documents.clear()\n\n # if attached files were saved temporarily, add each to application as part of a Document\n temp_files_dir = utils.get_app_session_data(request.session, 'temp_files_dir')\n try:\n for filename in utils.get_all_filenames_from_application_data(application.licence_type.application_schema,\n 
utils.get_app_session_data(request.session, 'data')):\n document = Document.objects.create(name=filename)\n with open(os.path.join(temp_files_dir, filename), 'rb') as doc_file:\n document.file.save(filename, File(doc_file), save=True)\n\n application.documents.add(document)\n\n if utils.is_app_session_data_set(request.session, 'application_document'):\n filename = utils.get_app_session_data(request.session, 'application_document')\n document = Document.objects.create(name=filename)\n with open(os.path.join(utils.get_app_session_data(request.session, 'temp_files_dir'), filename), 'rb') as doc_file:\n document.file.save(filename, File(doc_file), save=True)\n\n application.hard_copy = document\n\n messages.success(request, 'The application was successfully lodged.')\n except Exception as e:\n messages.error(request, 'There was a problem creating the application: %s' % e)\n\n application.save(version_user=application.applicant_profile.user, version_comment='Details Modified')\n\n try:\n utils.delete_app_session_data(request.session)\n except Exception as e:\n messages.warning(request, 'There was a problem deleting session data: %s' % e)\n\n return redirect('wl_dashboard:home')\n\n\nclass RenewLicenceView(View): # NOTE: need a UserCanRenewLicence type mixin\n def get(self, request, *args, **kwargs):\n try:\n utils.delete_app_session_data(request.session)\n except Exception as e:\n messages.warning(request, 'There was a problem deleting session data: %s' % e)\n\n previous_application = get_object_or_404(Application, licence=args[0])\n\n # check if there is already a renewal, otherwise create one\n try:\n application = Application.objects.get(previous_application=previous_application)\n if application.customer_status == 'under_review':\n messages.warning(request, 'A renewal for this licence has already been lodged and is awaiting review.')\n return redirect('wl_dashboard:home')\n except Application.DoesNotExist:\n application = utils.clone_application_for_renewal(previous_application)\n\n utils.set_app_session_data(request.session, 'customer_pk', application.applicant_profile.user.pk)\n utils.set_app_session_data(request.session, 'profile_pk', application.applicant_profile.pk)\n utils.set_app_session_data(request.session, 'data', application.data)\n utils.set_app_session_data(request.session, 'temp_files_dir', tempfile.mkdtemp(dir=settings.MEDIA_ROOT))\n\n return redirect('wl_applications:enter_details', application.licence_type.code_slug, application.pk, **kwargs)\n","sub_path":"wildlifelicensing/apps/applications/views/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":25841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166373850","text":"def calculateCompountInt(principal, rate, years, compoundedRate):\n total = principal * ((( 1 + (rate / (100 * compoundedRate))) ** (compoundedRate*years)))\n\n return print(principal,\"invested at\", rate, \"for\",years,\"years compounded\",compoundedRate,\"time per year is\",'${:,.2f}'.format(total))\n\nx = int(input(\"What is the principal amount? :\"))\ny = int(input(\"What is the rate? :\"))\nz = int(input(\"What is the number of years? :\"))\na = int(input(\"What is the number of times per year interest is compounded? 
:\"))\n\ncalculateCompountInt(x, y, z, a)","sub_path":"13-CompoundInterest.py","file_name":"13-CompoundInterest.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"497492255","text":"\"\"\"Unit tests for the module for interacting with Octave.\"\"\"\n\nimport os\nfrom unittest import mock\n\nfrom matl_online.octave import OctaveSession\n\n\nclass TestOctaveSession:\n \"\"\"Series of tests for the OctaveSession class.\"\"\"\n\n def test_no_inputs(self, mocker):\n \"\"\"Ensure the proper default parameters.\"\"\"\n # Make sure that eval wasn't called\n mock = mocker.patch('matl_online.octave.OctaveSession.eval')\n\n session = OctaveSession()\n\n assert session.octaverc is None\n assert session.paths == []\n\n mock.assert_not_called()\n\n def test_octaverc(self, mocker):\n \"\"\"Ensure that the octaverc file is sourced.\"\"\"\n octaverc = os.path.join('path', 'to', 'my', '.octaverc')\n\n mock = mocker.patch('matl_online.octave.OctaveSession.eval')\n\n session = OctaveSession(octaverc=octaverc)\n\n assert session.octaverc == octaverc\n assert session.paths == []\n\n mock.assert_called_once_with('source(\"''%s''\")' % octaverc)\n\n def test_paths(self, mocker):\n \"\"\"Ensure that the specified paths are added to the path.\"\"\"\n paths = ['path1', 'path2', 'path3']\n\n eval_mock = mocker.patch('matl_online.octave.OctaveSession.eval')\n\n session = OctaveSession(paths=paths)\n\n assert session.octaverc is None\n assert session.paths == paths\n\n expected_calls = [mock.call('addpath(\"''%s''\")' % path) for path in paths]\n eval_mock.assert_has_calls(expected_calls)\n\n def test_eval_without_handler(self, mocker):\n \"\"\"Ensure that code is sent to octave for evaluation.\"\"\"\n mock = mocker.patch('matl_online.octave.OctaveEngine.eval')\n\n session = OctaveSession()\n code = '1 + 1'\n session.eval(code)\n\n mock.assert_called_with(code)\n\n def test_eval_with_handler(self, mocker):\n \"\"\"Ensure that the stream handler is used.\"\"\"\n mock = mocker.patch('matl_online.octave.OctaveEngine.eval')\n\n session = OctaveSession()\n code = '1 + 1'\n\n output_list = list()\n handler = output_list.append\n\n session.eval(code, line_handler=handler)\n\n assert session._engine.line_handler == handler\n mock.assert_called_with(code)\n\n def test_terminate(self, mocker):\n \"\"\"Ensure that we stop the Octave instance.\"\"\"\n session = OctaveSession()\n\n assert session._engine is not None\n\n session.terminate()\n\n assert session._engine is None\n\n def test_restart(self, mocker):\n \"\"\"Make sure we stop and restart the octave instance.\"\"\"\n session = OctaveSession()\n\n engine1 = session._engine\n\n session.restart()\n\n assert session._engine is not None\n assert session._engine != engine1\n","sub_path":"tests/test_octave.py","file_name":"test_octave.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173394845","text":"import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\nimport pandas as pd\n\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n features = [x for x in request.form.values()]\n print(features)\n int_features = [float(x) for x in features[:6]]\n onehotvalues = []\n cat_features = [str(x) for x in 
features[6:]]\n\n if cat_features[0] == \"cng\":\n onehotvalues.extend([1,0,0,0])\n elif cat_features[0] == \"diesel\":\n onehotvalues.extend([0,1,0,0])\n elif cat_features[0] == \"lpg\":\n onehotvalues.extend([0,0,1,0])\n else :\n onehotvalues.extend([0,0,0,1])\n\n if cat_features[1] == \"dealer\":\n onehotvalues.extend([1,0,0])\n elif cat_features[1] == \"individual\":\n onehotvalues.extend([0,1,0])\n else :\n onehotvalues.extend([0,0,1])\n\n if cat_features[2] == \"automatic\":\n onehotvalues.extend([1,0])\n else:\n onehotvalues.extend([0,1])\n\n # owner one-hot order: first, fourth and above, second, test drive, third\n if cat_features[3] == \"first\":\n onehotvalues.extend([1,0,0,0,0])\n elif cat_features[3] == \"fourth and above\":\n onehotvalues.extend([0,1,0,0,0])\n elif cat_features[3] == \"second\":\n onehotvalues.extend([0,0,1,0,0])\n elif cat_features[3] == \"test drive car\":\n onehotvalues.extend([0,0,0,1,0])\n else :\n onehotvalues.extend([0,0,0,0,1])\n int_features.extend(onehotvalues)\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n\n output = round(prediction[0], 2)\n\n return render_template('index.html', prediction_text='Predicted Selling Price is Rs. {}'.format(output))\n\n@app.route('/results',methods=['POST'])\ndef results():\n\n data = request.get_json(force=True)\n prediction = model.predict([np.array(list(data.values()))])\n\n output = prediction[0]\n return jsonify(output)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574842186","text":"\"\"\"\n# Definition for Employee.\nclass Employee:\n def __init__(self, id: int, importance: int, subordinates: List[int]):\n self.id = id\n self.importance = importance\n self.subordinates = subordinates\n\"\"\"\n\n\nfrom collections import deque\nfrom typing import List\n\nclass Solution:\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n \n dic = {}\n for e in employees:\n dic[e.id] = e\n \n queue = deque()\n queue.append(dic[id])\n result = 0\n while queue:\n temp = queue.popleft()\n result += temp.importance\n for i in temp.subordinates:\n queue.append(dic[i])\n \n return result","sub_path":"EmployeeImportance.py","file_name":"EmployeeImportance.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"44379300","text":"import json\nimport os\nimport subprocess\nimport platform\n\ndef system_call(command):\n p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)\n return p.stdout.read()\n\n# Read state file for info\nwith open(\"/usr/local/ltechagent/state\",\"r\") as read_file:\n\tdata = json.load(read_file)\n\n# Get last contact date\nlc = data[\"last_contact\"]\nlast_contact = \"{0}/{1}/{2} {3}:{4:02d}:{5:02d}\".format(lc[\"month\"],lc[\"day_of_month\"],lc[\"year\"],lc[\"hour\"],lc[\"min\"],lc[\"sec\"])\n\n# Check services\nif platform.system() == 'Darwin':\n\tif system_call(\"launchctl list | grep com.labtechsoftware.LTSvc\") != \"\":\n\t\tstatusname = \"Running\"\n\telse:\n\t\tos.system(\"launchctl stop com.labtechsoftware.LTSvc\")\n\t\tos.system(\"launchctl start com.labtechsoftware.LTSvc\")\n\t\tif system_call(\"launchctl list | grep com.labtechsoftware.LTSvc\") != \"\":\n\t\t\tstatusname = \"Running\"\n\t\telse:\n\t\t\tstatusname = \"Stopped\"\n\tsvc_ltsvc = { \"Status\": statusname, \"User\": \"com.labtechsoftware.LTSvc\", \"Start Mode\": 
\"Auto\"}\nelif platform.system() == 'Linux':\n\tstatus = os.system('service ltechagent status')\n\tif status == 0:\n\t\tstatusname = \"Running\"\n\telif status == 3:\n\t\tos.system('service ltechagent stop')\n\t\tos.system('service ltechagent start')\n\t\tstatus = os.system('service ltechagent status')\n\t\tif status == 0:\n\t\t\tstatusname = \"Running\"\n\t\telse:\n\t\t\tstatusname = \"Stopped\"\n\telse:\n\t\tstatusname = \"Stopped\"\n\tsvc_ltsvc = { \"Status\": statusname, \"User\": \"ltechagent\", \"Start Mode\": \"Auto\"}\n\ndiag_result = { \n\t'server_addr': data[\"last_good_server_url\"], \n\t'lastcontact': last_contact,\n\t'update': data[\"version\"],\n\t'version': data[\"version\"],\n\t'id': data['computer_id'],\n\t'online': data[\"is_signed_in\"],\n\t'svc_ltservice': svc_ltsvc\n}\n\nprint(\"!---BEGIN JSON---!\")\nprint(json.dumps(diag_result))\n","sub_path":"AutomateDiagnostics.py","file_name":"AutomateDiagnostics.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"265282778","text":"from bs4 import BeautifulSoup\nfrom requests import get\nimport csv\n\nMAX_NUM_PAGES = 10\n\n\n# add products\ndef scraper_products():\n url_site = 'https://www.pichshop.ru'\n response = get(url_site + '/catalog/komu/')\n html_soup = BeautifulSoup(response.text, 'html.parser')\n\n categories = html_soup.find('div', class_='catalog-categories').findAll('a')\n\n category_links = []\n category_texts = []\n\n user_id = 0\n\n for i in range(len(categories)):\n category_links.append(categories[i].get('href'))\n category_texts.append(categories[i].text)\n\n section = 1\n\n with open('products.csv', 'w') as csv_file:\n\n fieldnames = ['id', 'name', 'description', 'section', 'price', 'images', 'link']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n\n for i in range(len(categories)):\n\n for page_num in range(1, MAX_NUM_PAGES):\n response_category = get(url_site + category_links[i] + '?PAGEN_3={page_num}')\n html_code = BeautifulSoup(response_category.text, 'html.parser')\n\n products = html_code.find('div', class_='catalog-products-list').findAll('div', class_='product-card')\n\n for j in range(len(products)):\n images = 'https:' + products[j].get('data-image')\n name = products[j].get('name')\n price = products[j].get('price')\n link = 'https://www.pichshop.ru' + products[j].get('data-href')\n description = products[j].findAll('meta')[2].get('content')\n csv_row = {'id': user_id, 'name': name, 'description': description, 'section': section,\n 'price': price, 'images': images, 'link': link}\n\n writer.writerow(csv_row)\n\n user_id += 1\n\n section += 1\n\n\nscraper_products()\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"269489748","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ndf = pd.read_csv('input_4.csv')\r\nn = df.shape[0]\r\ntrain = df.iloc[:int(n*0.8), :]\r\ntest = df.iloc[int(n*0.8):, :]\r\nclass_list = df['class'].unique()\r\nclass_list.sort()\r\n\r\nprior_list = train['class'].value_counts(normalize=True)\r\nfor c in class_list:\r\n print('Prior Probability of class ' + str(c) + '\\nP(C' + str(c) + ') =', prior_list[c])\r\n\r\nmean_list = pd.Series()\r\nstd_list = pd.Series()\r\np_list = pd.Series()\r\ntable = pd.crosstab(train.feature_value_1, train['class'], margins=True)\r\nfor c in class_list:\r\n 
mean_list.at[c] = train.loc[train['class'] == c, 'feature_value_2'].mean()\r\n std_list.at[c] = train.loc[train['class'] == c, 'feature_value_2'].std(ddof=0)\r\n p_list.at[c] = table.loc[1, c] / table.loc['All', c]\r\n\r\nfor c in class_list:\r\n print('Estimated mean for feature_value_2 of class ' + str(c) + '\\nm' + str(c) + ' =', mean_list[c])\r\n print('Estimated variance for feature_value_2 of class' + str(c) + '\\nsigma square ' + str(c) + ' =',\r\n std_list[c]**2)\r\n print('Estimated probability for feature_value_1 having outcome 1 of class ' + str(c) + '\\np' + str(c) + ' =',\r\n p_list[c])\r\n\r\n\r\ndef normal_pdf(mean, std, x):\r\n return 1 / (np.sqrt(2 * np.pi) * std) * np.exp(-(x - mean)**2 / (2 * std**2))\r\n\r\n\r\ndef discriminant(cl, x, d):\r\n return (p_list[cl]**d * (1-p_list[cl])**(1-d)) * normal_pdf(mean_list[cl], std_list[cl], x) * prior_list[cl]\r\n\r\n\r\nfor c in class_list:\r\n test['g' + str(c) + '(x)'] = discriminant(c, test.feature_value_2, test.feature_value_1)\r\ntest['prediction'] = test.iloc[:, 3:].idxmax(axis=1).map(lambda x: int(x[1]))\r\n\r\n\r\ndef confusion_matrix(actual, prediction):\r\n confusion = pd.crosstab(actual, prediction)\r\n confusion.index.name = 'Actual'\r\n confusion.columns.name = 'Prediction'\r\n if set(confusion.columns.values) != set(class_list):\r\n miss_class = set(class_list).difference(set(confusion.columns.values))\r\n for cl in miss_class:\r\n confusion[cl] = 0\r\n else:\r\n pass\r\n confusion.sort_index(axis=1, inplace=True)\r\n return confusion\r\n\r\n\r\ndef accuracy_score(actual, prediction):\r\n confusion = confusion_matrix(actual, prediction)\r\n confusion = np.matrix(confusion)\r\n return float(confusion.trace() / confusion.sum())\r\n\r\n\r\ndef precision_score(actual, prediction):\r\n confusion = confusion_matrix(actual, prediction)\r\n temp = np.diag(confusion) / confusion.sum(axis=0)\r\n return pd.Series(temp, index=confusion.index.values)\r\n\r\n\r\ndef recall_score(actual, prediction):\r\n confusion = confusion_matrix(actual, prediction)\r\n temp = np.diag(confusion) / confusion.sum(axis=1)\r\n return pd.Series(temp, index=confusion.index.values)\r\n\r\n\r\ndef f1_score(actual, prediction, average=False):\r\n p = precision_score(actual, prediction)\r\n r = recall_score(actual, prediction)\r\n f = 2 * (p * r) / (p + r)\r\n if average:\r\n return f.mean()\r\n else:\r\n return f\r\n\r\n\r\nconfusion_m = confusion_matrix(test['class'], test.prediction)\r\nprint(confusion_m)\r\n\r\naccuracy = accuracy_score(test['class'], test.prediction)\r\nprecision = precision_score(test['class'], test.prediction)\r\nrecall = recall_score(test['class'], test.prediction)\r\nf1 = f1_score(test['class'], test.prediction, average=False)\r\nf1_average = f1_score(test['class'], test.prediction, average=True)\r\n\r\nprint('Accuracy for all classes =', accuracy)\r\nfor c in class_list:\r\n print('Precision for class ' + str(c) + ' =', precision[c])\r\n print('Recall for class ' + str(c) + ' =', recall[c])\r\n print('f1 score for class ' + str(c) + ' =', f1[c])\r\n\r\nprint('Average f1 score =', f1_average)\r\n","sub_path":"CUHK CSCI3320 Machine Learning Assignment 1/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354949610","text":"from bs4 import BeautifulSoup\nimport urllib.request\n\nurl = input(\"Enter a url: \")\nhtml = urllib.request.urlopen(url).read()\nsoup = BeautifulSoup(html, \"html.parser\")\n\nanchors = 
soup('a')\nfor a in anchors:\n\tprint(a.get(\"href\", None))","sub_path":"chapter-13/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"514471622","text":"import os\nimport numpy as np\nfrom heft import heft, gantt\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport os.path\nfrom os import path\nimport pathlib\n\ndef computeMSDict(root_dir):\n\n os.chdir(root_dir)\n folder_names = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n\n# Max and ave dictionaries: These contain the absolute best makespan and the average makespan for the experiments. Each key is an experiment id which is unique\n\n# We'll generate charts from these two dictionaries\n best_res = {}\n ave_res = {}\n\n\n for fn in folder_names:\n\n # Get the experiment parameters from file_name\n params = fn.split(\"__\")\n\n rc = (params[2].split(\"_\"))[1]\n gh = (params[3].split(\"_\"))[1]\n ccr = (params[4].split(\"_\"))[1]\n hf = (params[5].split(\"_\"))[1]\n mpr = (params[6].split(\"_\"))[1]\n\n #create unique experiment tag\n unq_tag = rc + \"_\" + gh + \"_\" + ccr + \"_\" + hf + \"_\" + mpr\n os.chdir(fn)\n reps = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n\n best_res[unq_tag] = math.inf\n ave_res[unq_tag] = 0.0\n ave_ms = 0.0\n num_reps = len(reps)\n\n #print(\"Processing experiment \", unq_tag)\n for rep in reps:\n #print(\"Processing replica \", str(rep))\n os.chdir(rep)\n #print(pathlib.Path().absolute())\n df_max = pd.read_csv('max.csv')\n df_ave = pd.read_csv('pop.csv')\n\n # update best makespan\n if (df_max['SL_AVE'].min() < best_res[unq_tag]):\n best_res[unq_tag] = df_max['SL_AVE'].min()\n\n # calculate the average makespan\n ave_ms = ave_ms + df_ave['SL_AVE'][len(df_ave['SL_AVE'])-1]\n os.chdir(\"..\")\n\n\n ave_ms = ave_ms / num_reps\n ave_res[unq_tag] = ave_ms\n os.chdir(\"..\")\n\n #print(\"Finished processing, printing the best makespans...\")\n\n return best_res, ave_res\n\ndef pickBestMPR(best_dict, ave_dict):\n best_res = {}\n ave_res = {}\n best_mprs_best = {}\n best_mprs_ave = {}\n for key,val in best_dict.items():\n params = key.split(\"_\")\n unq_tag = params[0] + \"_\" + params[1] + \"_\" + params[2] + \"_\" + params[3]\n if unq_tag not in best_res:\n best_res[unq_tag] = val\n best_mprs_best[unq_tag] = params[4]\n else:\n if val < best_res[unq_tag]:\n best_res[unq_tag] = val\n best_mprs_best[unq_tag] = params[4]\n\n for key,val in ave_dict.items():\n params = key.split(\"_\")\n unq_tag = params[0] + \"_\" + params[1] + \"_\" + params[2] + \"_\" + params[3]\n if unq_tag not in ave_res:\n ave_res[unq_tag] = val\n best_mprs_ave[unq_tag] = params[4]\n else:\n if val < ave_res[unq_tag]:\n ave_res[unq_tag] = val\n best_mprs_ave[unq_tag] = params[4]\n\n return best_res, ave_res, best_mprs_best, best_mprs_ave\n\n# generates bar charts to compare makespan between different selection methods\ndef generateBars(lexiLP_dict, simple_dict, lexiLIB_dict,nsgaLP_dict, heft_dict):\n\n simple_data = {}\n lexiLIB_data = {}\n lexiLP_data = {}\n nsgalp_data = {}\n heft_data = {}\n\n #for key,val in heft_dict.items():\n # params = key.split(\"_\")\n # unq_tag = params[1] + \"_\" + params[2] + \"_\" + params[3]\n # if unq_tag not in heft_data:\n # heft_data[unq_tag] = [0,0,0,0]\n # idx = int(math.log2(int(params[0]))) - 2\n # (heft_data[unq_tag])[idx] = val\n # else:\n # idx = int(math.log2(int(params[0]))) - 2\n # (heft_data[unq_tag])[idx] = val\n\n for 
key,val in simple_dict.items():\n params = key.split(\"_\")\n unq_tag = params[1] + \"_\" + params[2] + \"_\" + params[3]\n if unq_tag not in simple_data:\n simple_data[unq_tag] = [0,0,0,0]\n idx = int(math.log2(int(params[0]))) - 2\n (simple_data[unq_tag])[idx] = val\n else:\n idx = int(math.log2(int(params[0]))) - 2\n (simple_data[unq_tag])[idx] = val\n\n for key,val in lexiLIB_dict.items():\n params = key.split(\"_\")\n unq_tag = params[1] + \"_\" + params[2] + \"_\" + params[3]\n if unq_tag not in lexiLIB_data:\n lexiLIB_data[unq_tag] = [0,0,0,0]\n idx = int(math.log2(int(params[0]))) - 2\n (lexiLIB_data[unq_tag])[idx] = val\n else:\n idx = int(math.log2(int(params[0]))) - 2\n (lexiLIB_data[unq_tag])[idx] = val\n\n for key,val in lexiLP_dict.items():\n params = key.split(\"_\")\n unq_tag = params[1] + \"_\" + params[2] + \"_\" + params[3]\n if unq_tag not in lexiLP_data:\n lexiLP_data[unq_tag] = [0,0,0,0]\n idx = int(math.log2(int(params[0]))) - 2\n (lexiLP_data[unq_tag])[idx] = val\n else:\n idx = int(math.log2(int(params[0]))) - 2\n (lexiLP_data[unq_tag])[idx] = val\n\n for key,val in nsgaLP_dict.items():\n params = key.split(\"_\")\n unq_tag = params[1] + \"_\" + params[2] + \"_\" + params[3]\n if unq_tag not in nsgalp_data:\n nsgalp_data[unq_tag] = [0,0,0,0]\n idx = int(math.log2(int(params[0]))) - 2\n (nsgalp_data[unq_tag])[idx] = val\n else:\n idx = int(math.log2(int(params[0]))) - 2\n (nsgalp_data[unq_tag])[idx] = val\n\n\n\n for key,val in lexiLIB_data.items():\n barWidth = 0.1\n bars_lexiLIB = val\n bars_lexiLP = lexiLP_data[key]\n bars_simple = simple_data[key]\n bars_nsgalp = nsgalp_data[key]\n bars_heft = heft_dict[key]\n #print(key)\n #print(bars_nsgalp)\n fig = plt.figure()\n # Set position of bar on X axis\n r1 = np.arange(len(bars_lexiLIB))\n r2 = [x + barWidth for x in r1]\n r3 = [x + barWidth for x in r2]\n r4 = [x + barWidth for x in r3]\n r5 = [x + barWidth for x in r4]\n plt.bar(r1, bars_simple, color='#7f6d5f', width=barWidth, edgecolor='white', label='Simple')\n plt.bar(r2, bars_lexiLIB, color='#557f2d', width=barWidth, edgecolor='white', label='Lexi-LIB')\n plt.bar(r3, bars_lexiLP, color='red', width=barWidth, edgecolor='white', label='Lexi-LP')\n plt.bar(r4, bars_nsgalp, color='blue', width=barWidth, edgecolor='white', label='NSGA-LP')\n plt.bar(r5, bars_heft, color='black', width=barWidth, edgecolor='white', label='HEFT')\n\n plt.xlabel('#procs' , fontweight='bold')\n plt.xticks([r + barWidth for r in range(len(bars_simple))], ['4', '8', '16', '32'])\n\n # Create legend & Show graphic\n plt.legend()\n\n file_name = key + \".png\"\n fig.savefig(file_name)\n\ndef computeMSDictHEFT(root_dir):\n res_heft = {}\n if path.exists(root_dir):\n os.chdir(root_dir)\n file_names = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n for fn in file_names:\n params = fn.split(\"_\")\n rc = params[1]\n gh = params[2]\n ccr = params[3]\n hf = params[4]\n\n unq_tag = gh + \"_\" + ccr + \"_\" + hf\n\n cm_path = fn + \"/task_exe_time.csv\"\n comp_matrix = heft.readCsvToNumpyMatrix(cm_path)\n cmm_path = fn + \"/resource_BW.csv\"\n comm_matrix = heft.readCsvToNumpyMatrix(cmm_path)\n dag_path = fn + \"/task_connectivity.csv\"\n dag = heft.readDagMatrix(dag_path)\n sched, _,_,_ = heft.schedule_dag(dag,communication_matrix=comm_matrix, computation_matrix=comp_matrix)\n\n if unq_tag not in res_heft:\n res_heft[unq_tag] = [0,0,0,0]\n\n if int(rc) > 2:\n (res_heft[unq_tag])[int(math.log2(int(rc)))-2] = sched\n\n return res_heft\n else:\n print(\"PATH DOESNT 
EXIST\")\n\n\nheft_res = computeMSDictHEFT(\"/Users/dogadikbayir/200nodeLexiLIB/200nodeLexiLIB/200data/\")\n\nprint(heft_res)\nlexiLP_best, lexiLP_ave = computeMSDict(\"/Users/dogadikbayir/200nodeLexiLIB/200nodeLexiLIB/lexiLP\")\nlexiLP_best, lexiLP_ave, bestMPRsLP, _ = pickBestMPR(lexiLP_best,lexiLP_ave)\n\nprint(\"Best MPRs for LP Lexi: \")\nprint(bestMPRsLP)\n\nnsgaLP_best, nsgaLP_ave = computeMSDict(\"/Users/dogadikbayir/200nodeLexiLIB/200nodeLexiLIB/nsgalp\")\nnsgaLP_best, nsgaLP_ave,_,_ = pickBestMPR(nsgaLP_best, nsgaLP_ave)\n\nprint(nsgaLP_best)\nlexiLIB_best, lexiLIB_ave = computeMSDict(\"/Users/dogadikbayir/200nodeLexiLIB/200nodeLexiLIB/lexiLIB\")\nsimple_best, simple_ave = computeMSDict(\"/Users/dogadikbayir/200nodeLexiLIB/200nodeLexiLIB/simple200\")\n\nlexiLIB_best, lexiLIB_ave,_,_ = pickBestMPR(lexiLIB_best,lexiLIB_ave)\nsimple_best, simple_ave,_,_ = pickBestMPR(simple_best,simple_ave)\n\nos.chdir(\"..\")\ntry:\n os.mkdir(\"./charts\")\nexcept OSError:\n print (\"Cannot create folder ''charts'\")\nelse:\n print (\"Successfully created the charts directory\")\n\nos.chdir(\"charts\")\ngenerateBars(lexiLP_best, simple_best, lexiLIB_best, nsgaLP_best, heft_res)\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":8980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"347916783","text":"#!/usr/bin/env python2\nimport json\nimport sys\n\nimport numpy\n\n\ndef get_data(data_path):\n data = numpy.loadtxt(data_path)\n azimuth = tuple(numpy.unique(data[:,2]))\n elevation = tuple(numpy.unique(data[:,1]))\n flux = numpy.zeros((len(elevation), len(azimuth)))\n for row in data:\n j = azimuth.index(row[2])\n i = elevation.index(row[1])\n flux[i,j] = row[3]\n return azimuth, elevation, flux\n\n\ndef get_rate(data_path, mode, density):\n\tpath = \"{:}/flux-{:}_1_{:}_forward.dat\".format(data_path, mode, density)\n\tazimuth, elevation, forward = get_data(path)\n\tpath = \"{:}/flux-{:}_1_{:}_backward.dat\".format(data_path, mode, density)\n\t_, _, backward = get_data(path)\n\tforward[10:21,:] += backward[10::-1,:]\n\treturn azimuth, elevation[10:], forward[10:,:]\n\n\ndef build(data_path, mode, save_path):\n\tdensity = numpy.linspace(0.5, 3.75, 14).tolist()\n\trate = []\n\tfor rhoi in density:\n\t\tazimuth, elevation, ri = get_rate(data_path, mode, rhoi)\n\t\trate.append(ri.tolist())\n\n\tpath = \"{:}/model-{:}.json\".format(save_path, mode)\n\twith open(path, \"w\") as f:\n\t\tjson.dump({\"x\": (min(azimuth), max(azimuth), len(azimuth)),\n \"y\": (min(elevation), max(elevation), len(elevation)),\n \"parameter\": density,\n \"rate\": rate}, f)\n\t\n\nif __name__ == \"__main__\":\n\tbuild(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"wudalianchi/bin/build-model.py","file_name":"build-model.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"56676933","text":"#--------------------------------------------------------------------------\n# File and Version Information:\n# $Id$\n#\n# Description:\n# GUIMetrology...\n#------------------------------------------------------------------------\n\n\"\"\"GUI for CalibManager.\n\nThis software was developed for the SIT project. If you use all or \npart of it, please give an appropriate acknowledgment.\n\n@version $Id$\n\n@author Mikhail S. 
Dubrovin\n\"\"\"\nfrom __future__ import absolute_import\n\n#--------------------------------\n__version__ = \"$Revision$\"\n#--------------------------------\n\nimport os\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nfrom CalibManager.Frame import Frame\nfrom .ConfigParametersForApp import cp\n\nfrom CalibManager.Logger import logger\nfrom .FileNameManager import fnm\nfrom .GUIFileBrowser import *\nfrom .GUIRange import *\nfrom .FileDeployer import fd\nfrom . import GlobalUtils as gu\n\nfrom .xlsx_parser import convert_xlsx_to_text\nfrom .OpticAlignmentCspadV1 import OpticAlignmentCspadV1\nfrom .OpticAlignmentCspadV2 import OpticAlignmentCspadV2\nfrom .OpticAlignmentCspad2x2V1 import OpticAlignmentCspad2x2V1\n\n#------------------------------\n\nclass GUIMetrology(Frame) :\n \"\"\"GUI for metrology processing.\n \"\"\"\n def __init__ (self, parent=None, app=None) :\n\n self.name = 'GUIMetrology'\n self.myapp = app\n #QtGui.QWidget.__init__(self, parent)\n Frame.__init__(self, parent, mlw=1)\n\n self.instr_name = cp.instr_name # for comments in geometry file\n self.fname_prefix = cp.fname_prefix\n self.fname_metrology_xlsx = cp.fname_metrology_xlsx\n self.fname_metrology_text = cp.fname_metrology_text\n self.img_arr = None\n self.list_of_calib_types = ['center', 'tilt', 'geometry']\n \n cp.setIcons()\n\n self.setGeometry(10, 25, 725, 200)\n self.setWindowTitle('Metrology')\n #self.setWindowIcon(cp.icon_monitor)\n self.palette = QtGui.QPalette()\n self.resetColorIsSet = False\n\n self.setParams()\n \n #self.titFileXlsx = QtGui.QLabel('File xlsx:')\n\n self.ediFileXlsx = QtWidgets.QLineEdit ( fnm.path_metrology_xlsx() )\n self.ediFileXlsx.setReadOnly(True)\n\n self.ediFileText = QtWidgets.QLineEdit ( fnm.path_metrology_text() ) # cp.fname_metrology_text.value_def() )\n self.ediFileText.setReadOnly(True)\n\n self.butFileXlsx = QtWidgets.QPushButton(' 1. Select xlsx file:')\n self.butConvert = QtWidgets.QPushButton(' 2. Convert xlsx to text file(s)')\n self.butFileText = QtWidgets.QPushButton(' 3. Select text file:')\n self.butEvaluate = QtWidgets.QPushButton(' 4. Evaluate')\n self.butDeploy = QtWidgets.QPushButton(' 5. 
Deploy')\n self.butList = QtWidgets.QPushButton('List')\n self.butRemove = QtWidgets.QPushButton('Remove')\n self.butViewOffice= QtWidgets.QPushButton('View xlsx')\n self.butViewText = QtWidgets.QPushButton('View text')\n self.butScript = QtWidgets.QPushButton(self.script + cp.char_expand )\n self.butSrc = QtWidgets.QPushButton(self.source_name + cp.char_expand )\n self.labSrc = QtWidgets.QLabel('for detector')\n self.labScript = QtWidgets.QLabel('using script')\n self.guirange = GUIRange()\n\n self.butViewOffice .setIcon(cp.icon_monitor)\n self.butViewText .setIcon(cp.icon_monitor)\n #self.butConvert .setIcon(cp.icon_convert)\n\n self.grid = QtWidgets.QGridLayout()\n self.grid_row = 0\n self.grid.addWidget(self.butFileXlsx, self.grid_row, 0)\n self.grid.addWidget(self.ediFileXlsx, self.grid_row, 1, 1, 8)\n self.grid.addWidget(self.butViewOffice, self.grid_row, 8)\n\n self.grid.addWidget(self.butConvert, self.grid_row+1, 0)\n self.grid.addWidget(self.butList, self.grid_row+1, 1, 1, 1)\n self.grid.addWidget(self.butRemove, self.grid_row+1, 2, 1, 1)\n\n self.grid.addWidget(self.butFileText, self.grid_row+2, 0)\n self.grid.addWidget(self.ediFileText, self.grid_row+2, 1, 1, 8)\n self.grid.addWidget(self.butViewText, self.grid_row+2, 8)\n\n self.grid.addWidget(self.butEvaluate, self.grid_row+3, 0)\n self.grid.addWidget(self.labScript, self.grid_row+3, 1)\n self.grid.addWidget(self.butScript, self.grid_row+3, 2)\n\n self.grid.addWidget(self.butDeploy, self.grid_row+4, 0)\n self.grid.addWidget(self.labSrc, self.grid_row+4, 1)\n self.grid.addWidget(self.butSrc, self.grid_row+4, 2)\n self.grid.addWidget(self.guirange, self.grid_row+4, 3, 1, 5)\n #self.setLayout(self.grid)\n \n self.vbox = QtWidgets.QVBoxLayout() \n self.vbox.addLayout(self.grid)\n self.vbox.addStretch(1)\n self.setLayout(self.vbox)\n\n self.butFileXlsx.clicked.connect(self.onButFileXlsx)\n self.butFileText.clicked.connect(self.onButFileText)\n self.butViewOffice.clicked.connect(self.onButViewOffice)\n self.butViewText.clicked.connect(self.onButViewText)\n self.butConvert.clicked.connect(self.onButConvert)\n self.butRemove.clicked.connect(self.onButRemove)\n self.butList.clicked.connect(self.onButList)\n self.butEvaluate.clicked.connect(self.onButEvaluate)\n self.butDeploy.clicked.connect(self.onButDeploy)\n self.butScript.clicked.connect(self.onButScript)\n self.butSrc.clicked.connect(self.onButSrc)\n \n self.showToolTips()\n self.setStyle()\n\n cp.guimetrology = self\n #self.move(10,25)\n \n #print 'End of init'\n \n\n def showToolTips(self):\n #pass\n self.ediFileXlsx .setToolTip('Persistent path to xlsx file') \n self.butFileXlsx .setToolTip('Open file browser dialog window\\nand select xlsx file. 
This file is\\nusually e-mailed from the detector group.') \n self.butViewOffice.setToolTip('Open openoffice.org window')\n self.butViewText .setToolTip('Open file viewer window')\n self.butFileText .setToolTip('Open file browser dialog window\\nand select metrology text file') \n self.ediFileText .setToolTip('Path to the text metrology file which\\nis used to evaluate calibration constants.') \n self.butConvert .setToolTip('Convert xlsx to text metrology file(s)')\n self.butList .setToolTip('List temporary metrology text file(s)')\n self.butRemove .setToolTip('Remove temporary metrology text file(s)')\n self.butEvaluate .setToolTip('Run quality check script and\\nevaluate geometry alignment parameters')\n self.butDeploy .setToolTip('Deploy geometry alignment parameters')\n self.butScript .setToolTip('Select the script to process optic metrology file')\n self.butSrc .setToolTip('Select name of the detector')\n \n\n def setParams(self) :\n #if self.path_fm_selected != '' :\n # self.path_fm_selected = os.path.dirname(self.path_fm_selected)\n self.str_run_from = '0'\n self.str_run_to = 'end'\n self.source_name = 'Select'\n self.script = 'Select'\n self.calib_type = 'Select'\n\n\n def setStyle(self):\n self.setMinimumSize(725,200)\n self.setMaximumSize(800,200)\n \n self. setStyleSheet(cp.styleBkgd)\n self.butViewOffice.setStyleSheet(cp.styleButton)\n self.butViewText .setStyleSheet(cp.styleButton)\n #self.butViewOffice.setFixedWidth(200)\n #self.butViewOffice.setMinimumHeight(60)\n #self.butViewOffice.setMinimumSize(180,60)\n\n self.butFileXlsx .setStyleSheet(cp.styleButtonLeft)\n self.butConvert .setStyleSheet(cp.styleButtonLeft) \n self.butFileText .setStyleSheet(cp.styleButtonLeft) \n self.butEvaluate .setStyleSheet(cp.styleButtonLeft) \n self.butDeploy .setStyleSheet(cp.styleButtonLeft) \n\n self.ediFileXlsx.setFixedWidth(400)\n self.ediFileXlsx.setStyleSheet(cp.styleEditInfo) \n self.ediFileXlsx.setEnabled(False) \n\n self.ediFileText.setFixedWidth(400)\n self.ediFileText.setStyleSheet(cp.styleEditInfo) \n self.ediFileText.setEnabled(False) \n\n self.labSrc .setStyleSheet(cp.styleLabel)\n self.labScript .setStyleSheet(cp.styleLabel)\n\n #self.butFBrowser.setVisible(False)\n #self.butSave.setText('')\n #self.butExit.setText('')\n #self.butExit.setFlat(True)\n\n self.setStyleButtons()\n\n\n def setStyleButtons(self):\n if self.source_name == 'Select' : self.butSrc.setStyleSheet(cp.stylePink)\n else : self.butSrc.setStyleSheet(cp.styleButton)\n\n if self.script == 'Select' : self.butScript.setStyleSheet(cp.stylePink)\n else : self.butScript.setStyleSheet(cp.styleButton)\n\n \n #def resizeEvent(self, e):\n #logger.debug('resizeEvent', self.name) \n #print 'GUIMetrology.resizeEvent: %s' % str(self.size())\n #pass\n\n\n #def moveEvent(self, e):\n #logger.debug('moveEvent', self.name) \n #self.position = self.mapToGlobal(self.pos())\n #self.position = self.pos()\n #logger.debug('moveEvent - pos:' + str(self.position), __name__) \n #pass\n\n\n def closeEvent(self, event):\n logger.debug('closeEvent', self.name)\n\n try : cp.guifilebrowser.close()\n except : pass\n\n\n def onExit(self):\n logger.debug('onExit', self.name)\n self.close()\n\n\n def onButFileXlsx(self):\n logger.debug('onButFileXlsx', __name__)\n but = self.butFileXlsx\n edi = self.ediFileXlsx\n par = self.fname_metrology_xlsx\n #prefix = self.fname_prefix.value()\n filter = 'Text files (*.xlsx )\\nAll files (*)'\n\n self.onButFile(but, edi, par, filter, set_path=True)\n\n\n def onButFileText(self):\n logger.debug('onButFileText', __name__)\n but = self.butFileText\n edi = self.ediFileText\n par = self.fname_metrology_text \n basename = os.path.basename( fnm.path_metrology_text() )\n fname, ext = os.path.splitext(basename)\n filter = 'Text files (*' + ext + ')\\nAll files (*)'\n self.onButFile(but, edi, par, filter, set_path=True)\n\n\n def onButFile(self, but, edi, par, filter, set_path=True):\n logger.debug('onButFile', __name__)\n path = str( edi.displayText() )\n dname, fname = os.path.split(path)\n msg = 'dir : %s file : %s' % (dname, fname)\n logger.info(msg, __name__)\n path = str( QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', dname, filter=filter)[0] ) # getOpenFileName returns a (path, filter) tuple\n dname, fname = os.path.split(path)\n\n if dname == '' or fname == '' :\n logger.info('Input directory name or file name is empty... use default values', __name__)\n return\n else :\n edi.setText(path)\n if set_path : par.setValue(path)\n else : par.setValue(fname)\n logger.info('Selected file: %s' % path, __name__)\n\n\n def onButViewOffice(self): \n logger.debug('onButViewOffice', self.name)\n try :\n #cp.viewoffice.close()\n #del cp.viewoffice\n self.butViewOffice.setStyleSheet(cp.styleButton)\n #self.butViewOffice.setText('Open openoffice')\n\n cmd = 'openoffice.org %s &' % fnm.path_metrology_xlsx()\n msg = 'Confirm command: %s' % cmd\n\n resp = gu.confirm_or_cancel_dialog_box(parent=self.butViewOffice, text=msg, title='Please confirm or cancel!')\n if resp :\n logger.info('Approved command:\\n' + cmd, __name__)\n self.commandInSubproc(cmd)\n else :\n logger.info('Command is cancelled', __name__)\n\n except :\n self.butViewOffice.setStyleSheet(cp.styleButtonGood)\n #self.butViewOffice.setText('Close openoffice')\n\n #cp.viewoffice = MaskEditor(**pars)\n #cp.viewoffice.move(self.pos().__add__(QtCore.QPoint(820,-7))) # open window with offset w.r.t. parent\n #cp.viewoffice.show()\n\n\n def onButViewText(self):\n logger.debug('onButViewText', __name__)\n try :\n cp.guifilebrowser.close()\n #self.but_view.setStyleSheet(cp.styleButtonBad)\n except :\n #self.but_view.setStyleSheet(cp.styleButtonGood)\n\n list_of_files = fnm.get_list_of_metrology_text_files()\n if self.script != 'Select' :\n list_of_files += self.list_metrology_alignment_const_fnames()\n\n cp.guifilebrowser = GUIFileBrowser(None, list_of_files, fnm.path_metrology_text())\n cp.guifilebrowser.move(self.pos().__add__(QtCore.QPoint(880,40))) # open window with offset w.r.t. parent\n cp.guifilebrowser.show()\n\n\n def checkTextFileName(self):\n\n edi = self.ediFileText\n par = self.fname_metrology_text \n\n if fnm.path_metrology_text() != fnm.path_metrology_text_def() :\n\n msg = 'TEXT FILE WILL BE OVERWRITTEN!\\nUse default name %s\\n for output file' % fnm.path_metrology_text_def()\n resp = gu.confirm_or_cancel_dialog_box(parent=self.butConvert, text=msg, title='Please confirm or cancel!')\n if resp :\n logger.info('Approved:\\n' + msg.replace('\\n',' '), __name__)\n par.setDefault()\n edi.setText(fnm.path_metrology_text_def())\n else :\n logger.info('Selected current file name: %s' % fnm.path_metrology_text(), __name__)\n \n\n def onButConvert(self):\n logger.debug('onButConvert', __name__)\n \n if not os.path.exists(fnm.path_metrology_xlsx()) :\n msg = 'Input file %s DOES NOT exist!' % fnm.path_metrology_xlsx() \n logger.warning(msg, __name__)\n return\n\n self.checkTextFileName()\n\n #ifname = fnm.path_metrology_xlsx()\n #ofname = fnm.path_metrology_text()\n list_ofnames = convert_xlsx_to_text(fnm.path_metrology_xlsx(), fnm.path_metrology_text(), print_bits=0)\n\n msg = 'File %s is converted to the temporary metrology text file(s):\\n' % fnm.path_metrology_xlsx()\n for name in list_ofnames : msg += ' %s\\n' % name\n logger.info(msg, __name__)\n\n\n def onButRemove(self):\n #logger.debug('onButRemove', __name__) \n cmd = 'rm'\n for fname in fnm.get_list_of_metrology_text_files() : cmd += ' %s' % fname\n msg = 'Confirm command: %s' % cmd\n\n resp = gu.confirm_or_cancel_dialog_box(parent=self.butViewOffice, text=msg, title='Please confirm or cancel!')\n if resp :\n logger.info('Approved command:\\n' + cmd, __name__)\n self.commandInSubproc(cmd)\n else :\n logger.info('Command is cancelled', __name__)\n\n\n def onButList(self):\n msg = 'List of metrology text files in %s\\n' % fnm.path_dir_work()\n for fname in fnm.get_list_of_metrology_text_files() : msg += ' %s\\n' % fname\n logger.info(msg, __name__) \n\n\n def get_detector_selected(self):\n lst = cp.list_of_dets_selected()\n len_lst = len(lst)\n msg = '%d detector(s) selected: %s' % (len_lst, str(lst))\n #logger.info(msg, __name__ )\n\n if len_lst !=1 :\n msg += ' Select THE ONE!'\n logger.warning(msg, __name__ )\n return None\n\n return lst[0]\n\n\n def onButScript(self):\n logger.debug('onButScript', __name__ )\n\n det = self.get_detector_selected()\n if det is None : return\n\n if det != cp.list_of_dets[0] :\n logger.warning('Scripts are implemented for CSPAD ONLY !!!: ', __name__)\n \n lst = cp.dict_of_metrology_scripts[det]\n\n selected = gu.selectFromListInPopupMenu(lst)\n\n if selected is None : return # selection is cancelled\n if selected is self.script : return # the same\n \n txt = str(selected)\n\n self.setScript(txt)\n self.setSrc()\n self.setStyleButtons()\n\n\n def onButSrc(self):\n logger.debug('onButSrc', __name__ )\n\n det = self.get_detector_selected()\n if det is None : return\n\n try :\n lst = ru.list_of_sources_for_det(det)\n except :\n lst = cp.dict_of_det_sources[det]\n\n selected = gu.selectFromListInPopupMenu(lst)\n\n if selected is None : return # selection is cancelled\n if selected is self.source_name : return # the same\n\n txt = str(selected)\n self.setSrc(txt)\n self.setStyleButtons()\n\n\n def setScript(self,txt='Select'):\n self.script = txt\n self.butScript.setText( txt + cp.char_expand )\n logger.info('Script is selected: ' + txt, __name__)\n\n \n def setSrc(self,txt='Select'):\n self.source_name = txt\n self.butSrc.setText( txt + cp.char_expand )\n logger.info('Source selected: ' + txt, __name__)\n\n \n def onButEvaluate(self):\n logger.debug('onButEvaluate', __name__)\n det = self.get_detector_selected()\n if det is None : return\n\n if not os.path.exists(fnm.path_metrology_text()) :\n msg = 'Input file %s DOES NOT exist!' % fnm.path_metrology_text() \n logger.warning(msg, __name__)\n return\n\n list_of_metrology_scripts = cp.dict_of_metrology_scripts[det]\n\n if self.script == 'Select' :\n msg = 'Script for processing metrology file is not selected. 
Select it first...'\n logger.warning(msg, __name__)\n return\n\n fname_metrology = fnm.path_metrology_text()\n\n #print 'list_of_metrology_scripts', list_of_metrology_scripts\n #for CSPAD script CSPADV1 CXI-like\n if det == cp.list_of_dets[0] and self.script == list_of_metrology_scripts[0] : \n msg = 'Evaluate parameters for %s using script %s' % (det, self.script)\n logger.info(msg, __name__)\n optal = OpticAlignmentCspadV1(fname_metrology, print_bits=0, plot_bits=0, \\\n exp=self.instr_name.value(), det=det)\n self.procCspad(optal)\n\n #for CSPAD script CSPADV2 XPP-like\n elif det == cp.list_of_dets[0] and self.script == list_of_metrology_scripts[1] : \n msg = 'Evaluate parameters for %s using script %s' % (det, self.script)\n logger.info(msg, __name__)\n optal = OpticAlignmentCspadV2(fname_metrology, print_bits=0, plot_bits=0, \\\n exp=self.instr_name.value(), det=det)\n self.procCspad(optal)\n\n #for CSPAD2x2 script CSPAD2X2V1\n elif det == cp.list_of_dets[1] and self.script == list_of_metrology_scripts[0] : \n msg = 'Evaluate parameters for %s using script %s' % (det, self.script)\n logger.info(msg, __name__)\n optal = OpticAlignmentCspad2x2V1(fname_metrology, print_bits=0, plot_bits=0, \\\n exp=self.instr_name.value(), det=det)\n self.procCspad(optal)\n\n # for other detectors and scripts for now...\n else : \n msg = 'Script %s is not yet implemented for detector %s...' % (self.script, det)\n logger.warning(msg, __name__)\n return\n \n \n def procCspad(self, optal):\n \"\"\"Create and save interim files for calibration types\"\"\"\n self.list_of_calib_types = ['center', 'tilt', 'geometry']\n\n fname_metrology = fnm.path_metrology_text()\n msg = 'procCspad(V1,V2,2x2V1) for metrology data in file %s' % fname_metrology\n logger.info(msg, __name__) \n\n txt_qc_table_xy = optal.txt_qc_table_xy()\n txt_qc_table_z = optal.txt_qc_table_z()\n\n txt_center = optal.txt_center_pix_formatted_array()\n txt_tilt = optal.txt_tilt_formatted_array()\n txt_geometry = optal.txt_geometry()\n\n logger.info('Quality check in X-Y plane:\\n'+txt_qc_table_xy, __name__) \n logger.info('Quality check in Z:\\n'+txt_qc_table_z, __name__) \n logger.info('parameters of type \"center\":\\n'+txt_center, __name__) \n logger.info('parameters of type \"tilt\":\\n'+txt_tilt, __name__) \n logger.info('parameters of type \"geometry\":\\n'+txt_geometry, __name__) \n \n # Save calibration files in work directory\n\n dic_type_fname = self.dict_metrology_alignment_const_fname_for_type()\n\n gu.save_textfile(txt_center, dic_type_fname['center']) \n gu.save_textfile(txt_tilt, dic_type_fname['tilt']) \n gu.save_textfile(txt_geometry, dic_type_fname['geometry']) \n\n msg = 'Save interim metrology alignment files:'\n for type in self.list_of_calib_types :\n msg += '\\n %s %s' % (type.ljust(16), dic_type_fname[type])\n\n logger.info(msg, __name__) \n\n\n def dict_metrology_alignment_const_fname_for_type(self) : \n #lst_const_types = cp.const_types_cspad # ex. ['center', 'tilt',...]\n lst_const_types = self.list_of_calib_types\n lst_of_insets = ['%s-%s' % (self.script,type) for type in lst_const_types] # ex. 
['CSPADV1-tilt', ...]\n lst_of_const_fnames = gu.get_list_of_files_for_list_of_insets(fnm.path_metrology_alignment_const(), lst_of_insets)\n return dict(zip(lst_const_types, lst_of_const_fnames))\n\n\n def list_metrology_alignment_const_fnames(self) : \n return list(self.dict_metrology_alignment_const_fname_for_type().values())\n\n\n def onButDeploy(self):\n logger.debug('onButDeploy', __name__) \n\n if self.script == 'Select' :\n msg = 'Script for processing metrology file is not selected.... Select it first and evaluate constants (Item 4)'\n logger.warning(msg, __name__)\n return\n\n if self.source_name == 'Select' :\n msg = 'Detector is not selected. Select it first...'\n logger.warning(msg, __name__)\n return\n\n list_of_cmds = self.list_of_copy_cmds()\n\n\n txt = '\\nList of commands for tentative file deployment:'\n for cmd in list_of_cmds :\n txt += '\\n' + cmd\n logger.info(txt, __name__)\n\n\n msg = 'Approve commands \\njust printed in the logger'\n if self.approveCommand(self.butDeploy, msg) :\n\n for cmd in list_of_cmds :\n fd.procDeployCommand(cmd, 'metrology-alignment')\n #print 'Command for deployer: ', cmd\n\n if cp.guistatus is not None : cp.guistatus.updateStatusInfo()\n\n\n def approveCommand(self, but, msg):\n resp = gu.confirm_or_cancel_dialog_box(parent=but, text=msg, title='Please confirm or cancel!')\n if resp : logger.info('Commands approved', __name__)\n else : logger.info('Command is cancelled', __name__)\n return resp\n\n\n def list_of_copy_cmds(self):\n\n det = self.get_detector_selected()\n if det is None : return\n\n dst_calib_dir = fnm.path_to_calib_dir()\n dst_calib_type = cp.dict_of_det_calib_types[det]\n dst_source = self.source_name\n dst_fname = '%s.data' % self.guirange.getRange()\n\n #print 'dst_calib_dir ', dst_calib_dir\n #print 'dst_calib_type ', dst_calib_type\n #print 'dst_source ', dst_source\n #print 'dst_fname ', dst_fname\n\n list_of_cmds = []\n for type, fname in self.dict_metrology_alignment_const_fname_for_type().items() :\n dst_path = os.path.join(dst_calib_dir, dst_calib_type, dst_source, type, dst_fname)\n cmd = 'cp %s %s' % (fname, dst_path)\n list_of_cmds.append(cmd)\n\n return list_of_cmds\n\n \n def commandInSubproc(self, cmd):\n \n cmd_seq = cmd.split()\n msg = 'Command: ' + cmd\n\n #out, err = gu.subproc(cmd_seq)\n #if err != '' : msg += '\\nERROR: ' + err\n #if out != '' : msg += '\\nRESPONSE: ' + out\n\n os.system(cmd)\n logger.info(msg, __name__)\n\n #os.system('chmod 670 %s' % path)\n\n#------------------------------\n\nif __name__ == \"__main__\" :\n import sys\n app = QtWidgets.QApplication(sys.argv)\n ex = GUIMetrology()\n ex.show()\n app.exec_()\n\n#------------------------------\n","sub_path":"src/GUIMetrology.py","file_name":"GUIMetrology.py","file_ext":"py","file_size_in_byte":24464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271059903","text":"import os\nfrom setuptools import setup\n\n# Utility function to read the README file.\n# Used for the long_description. 
It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name = \"tornado_api_mixins\",\n version = \"0.0.1\",\n author = \"Didip Kerabat\",\n author_email = \"didipk@gmail.com\",\n description = (\"Packaged version of tornado_api \"\n \"(https://github.com/didip/tornado_api) \"\n \"including only FacebookGraphMixin and FoursquareMixin.\"),\n license = \"Apache License, Version 2.0\",\n keywords = \"tornado mixin facebook foursquare api client\",\n url = \"https://github.com/fjsj/tornado_api\",\n packages=['tornado_api_mixins'],\n long_description=read('README.md'),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"337012962","text":"# Author: Simon Blanke\n# Email: simon.blanke@yahoo.com\n# License: MIT License\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom hyperactive import Hyperactive\n\ndata = load_iris()\nX = data.data\ny = data.target\nmemory = False\n\n\ndef model(para, X, y):\n dtc = DecisionTreeClassifier(\n max_depth=para[\"max_depth\"],\n min_samples_split=para[\"min_samples_split\"],\n min_samples_leaf=para[\"min_samples_leaf\"],\n )\n scores = cross_val_score(dtc, X, y, cv=2)\n\n return scores.mean()\n\n\nsearch_config = {\n model: {\n \"max_depth\": range(1, 21),\n \"min_samples_split\": range(2, 21),\n \"min_samples_leaf\": range(1, 21),\n }\n}\n\n\ndef test_results():\n opt = Hyperactive(X, y, memory=memory)\n opt.search(search_config)\n\n assert len(list(opt.results[model].keys())) == 3\n\n\ndef test_best_scores():\n opt = Hyperactive(X, y, memory=memory)\n opt.search(search_config)\n\n assert 0 < opt.best_scores[model] < 1\n","sub_path":"tests/test_attributes.py","file_name":"test_attributes.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"168474393","text":"import unittest\nfrom Score import Score\n\nclass TestScore(unittest.TestCase):\n \"\"\"test class of kanzen.py\n \"\"\"\n\n def test_Score1(self):\n\n actual = Score(0, 0, 'strike!').get_call()\n expected = 'strike!'\n\n self.assertEqual(expected, actual)\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"testScore.py","file_name":"testScore.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328521258","text":"import ctypes, schedule, time, ephem, geocoder\nfrom datetime import datetime\nimport shelve, os\n\n#Get the Appdata path where files are stored\npath = os.getenv('APPDATA').rsplit(\"\\\\\", 1)[0] + \"\\\\Local\\\\Wallpaper Changer\"\nDAY = path + \"/daytime.jpg\" #the wallpaper will change to this after sunrise\nNIGHT = path + \"/nighttime.jpg\" #the wallpaper will change to this after sunset\n\nme = ephem.Observer()\ndef store(key, entry):\n\tglobal path\n\tshelfFile = shelve.open(path + \"/data\")\n\tshelfFile[key] = entry\n\tshelfFile.close()\ndef retrieve(key):\n\ttry:\n\t\tshelfFile = shelve.open(path + 
\"/data\")\n\t\treturn shelfFile[key]\n\texcept:\n\t\tprint(\"Invalid Key or File not found\")\n\t\treturn -1\n\ndef updateLocation():\n\tglobal me\n\ttry:\n\t\tg = geocoder.ip(\"me\") \n\t\tcurrentLocation = g.latlng\n\t\tme.lat = repr(currentLocation[0])\n\t\tme.lon = repr(currentLocation[1]) #set current location based on geocoder api\n\texcept: \n\t\t\"\"\"\n\t\tIf there's no Internet, me.lat and me.lon will very likely still hold the last known value. \n\t\tIf they don't, check cache\n\t\tIf that fails too (super unlikely), give up\n\t\t\"\"\"\n\t\t\n\t\tprint(\"Dude, where's my Internet\")\n\t\tif(me.lat == 0.0 or me.lon == 0):\n\t\t\tme.lat = retrieve(\"lat\")\n\t\t\tme.lon = retrieve(\"lon\")\n\t\t\tif(me.lat == -1 or me.lon == -1):\n\t\t\t\tprint(\"Now we're really screwed\")\n\t\t\t\t#last-resort hardcoded coordinates in case the Internet is down\n\t\t\t\tme.lat = \"1.0\"\n\t\t\t\tme.lon = \"-1.0\"\n\tprint(\"Updating location!\")\n\n\t\n#calculate when the next sunrise and sunset are\ndef calcTimes():\n\tglobal me\n\trising = me.next_rising(ephem.Sun())\n\tr = ephem.localtime(rising)\n\t\n\ttimerise = r - datetime.now()\n\tsunrise = r.strftime(\"%H:%M\")\n\t\n\tsetting = me.next_setting(ephem.Sun())\n\ts = ephem.localtime(setting)\n\tsunset = s.strftime(\"%H:%M\")\n\ttimeset = s - datetime.now()\n\t\n\tif(timeset < timerise): #compare full timedeltas; .seconds alone ignores the days component\n\t\tchangeToDay()\n\telse:\n\t\tchangeToNight()\n\tprint(sunrise, sunset)\n\t\ndef changeToDay():\n\tdaytime = retrieve(\"daytime\")\n\tif(daytime == 0 or daytime == -1 or not daytime.endswith((\".jpg\", \".JPG\", \".jpeg\", \".JPEG\", \".png\", \".PNG\", \".gif\", \".GIF\"))):\n\t\treturn\n\tctypes.windll.user32.SystemParametersInfoW(20, 0, DAY, 0)\n\t# return schedule.CancelJob\n\t\ndef changeToNight():\n\tnighttime = retrieve(\"nighttime\")\n\tif(nighttime == 0 or nighttime == -1 or not nighttime.endswith((\".jpg\", \".JPG\", \".jpeg\", \".JPEG\", \".png\", \".PNG\", \".gif\", \".GIF\"))):\n\t\treturn\n\tctypes.windll.user32.SystemParametersInfoW(20, 0, NIGHT, 0)\n\t# return schedule.CancelJob\n\t\n#store the location of user in case the Internet goes down\ndef cacheLoc():\n\tprint(\"Storing\")\n\ttry:\n\t\tg = geocoder.ip(\"me\") \n\t\tcurrentLocation = g.latlng\n\t\tstore(\"lat\", repr(currentLocation[0]))\n\t\tstore(\"lon\", repr(currentLocation[1])) #set current location based on geocoder api\n\texcept: \n\t\t\"\"\"\n\t\tIf there's no Internet, me.lat and me.lon will very likely still hold the last known value. 
\n\t\tIf they don't, set to Fremont location\n\t\t\"\"\"\n\t\tprint(\"Dude, where's my Internet\")\ndef comb():\n\n\tstate = retrieve(\"on\")\n\tpaths_set1 = retrieve(\"daytime\")\n\tpaths_set2 = retrieve(\"nighttime\")\n\tif(state == -1):\n\t\tstore(\"on\", False)\n\tprint(paths_set1)\n\tif(state == True and paths_set1 != -1 and paths_set2 != -1):\n\t\tupdateLocation()\n\t\tcalcTimes()\n\telse:\n\t\tprint(\"off or error\")\n\t# print(\"updated program running\")\n#every 1 minute check \nif __name__ == \"__main__\":\n\tschedule.every(1).minutes.do(comb)\n\tschedule.every(1).minutes.do(cacheLoc)\n\n\tupdateLocation()\n\tcalcTimes()\n\t\n\twhile(True):\n\t\tschedule.run_pending()\n\t\ttime.sleep(1)","sub_path":"wallpaper.py","file_name":"wallpaper.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"438260340","text":"from random import choice, randint\nfrom typing import Optional\n\nimport arcade\nfrom arcade_curtains import BaseScene, Curtains, KeyFrame, Sequence\n\nfrom aaaaAAAA import _sprites, constants\n\n\nPOINTS_HINT = [(0.004, 0.681), (0.042, 0.77), (0.084, 0.827), (0.118, 0.878),\n (0.189, 0.858), (0.245, 0.841), (0.296, 0.887), (0.37, 0.717),\n (0.397, 0.673), (0.401, 0.709), (0.425, 0.72), (0.455, 0.85),\n (0.481, 0.852), (0.583, 0.72), (0.597, 0.728), (0.61, 0.714)]\n\n\nDUCKS = 36\n\n\nclass DuckScene(BaseScene):\n \"\"\"Scene showing Ducks moving down the river to the pondhouse.\"\"\"\n\n def __init__(self, debug: Optional[bool] = False):\n self.debug = debug\n super().__init__()\n\n def setup(self) -> None:\n \"\"\"Setup the scene assets.\"\"\"\n self.background = arcade.load_texture(\"assets/overworld/overworld placeholder.png\")\n self.pond = arcade.load_texture(\"assets/overworld/ponds/png/Blue pond.png\")\n self.pondhouse = arcade.load_texture(\"assets/overworld/pondhouse.png\")\n self.ducks = arcade.SpriteList()\n self.leader = _sprites.Ducky(choice(constants.DUCKY_LIST), 0.075)\n self.ducks.append(self.leader)\n self.seq = self.sequence_gen()\n\n def add_a_ducky(self, dt: Optional[float] = None) -> None:\n \"\"\"Add a ducky to the scene, register some events and start animating.\"\"\"\n if not POINTS_HINT:\n return\n ducky = _sprites.Ducky(choice(constants.DUCKY_LIST), 0.05)\n self.events.hover(ducky, ducky.expand)\n self.events.out(ducky, ducky.shrink)\n self.ducks.append(ducky)\n seq = self.sequence_gen(random=True)\n self.animations.fire(ducky, seq)\n if len(self.ducks) >= DUCKS:\n arcade.unschedule(self.add_a_ducky)\n\n def enter_scene(self, previous_scene: BaseScene) -> None:\n \"\"\"Start adding duckies on entering the scene.\"\"\"\n if not self.debug:\n self.animations.fire(self.leader, self.seq)\n arcade.schedule(self.add_a_ducky, len(POINTS_HINT)*10/DUCKS)\n\n def draw(self) -> None:\n \"\"\"Draw the background environment.\"\"\"\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(\n 0, 0, constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT, self.background\n )\n arcade.draw_scaled_texture_rectangle(constants.SCREEN_WIDTH/2,\n constants.SCREEN_HEIGHT * .8,\n self.pond,\n constants.SCREEN_WIDTH/self.pond.image.width)\n arcade.draw_scaled_texture_rectangle(constants.SCREEN_WIDTH * .67,\n constants.SCREEN_HEIGHT * .78,\n self.pondhouse)\n super().draw()\n\n @staticmethod\n def sequence_gen(random: Optional[bool] = False) -> Sequence:\n \"\"\"Generate a Sequence for the ducky to follow.\"\"\"\n seq = Sequence()\n current = 0\n for ((x1, y1), (x2, y2)) in 
zip(POINTS_HINT[:-1], POINTS_HINT[1:]):\n p1 = x1 * constants.SCREEN_WIDTH, y1 * constants.SCREEN_HEIGHT\n p2 = x2 * constants.SCREEN_WIDTH, y2 * constants.SCREEN_HEIGHT\n frames = 1\n if random:\n frames = randint(1, 5)\n seq.add_keyframes((current, KeyFrame(position=p1)), (current+frames, KeyFrame(position=p2)))\n current += frames\n return seq\n\n\nclass GameView(arcade.View):\n \"\"\"Main application class.\"\"\"\n\n def __init__(self, debug: Optional[bool] = False):\n super().__init__()\n self.debug = debug\n if self.debug:\n POINTS_HINT.clear()\n self.curtains = Curtains(self)\n self.curtains.add_scene('swimming_scene', DuckScene(self.debug))\n self.curtains.set_scene('swimming_scene')\n arcade.set_background_color(arcade.color.WARM_BLACK)\n\n def on_key_release(self, symbol: int, modifiers: int) -> None:\n \"\"\"\n For use only when debug=True.\n\n 'a' to add a duck\n 'p' to print the generated points_hint list\n 'x' to clear the points\n \"\"\"\n if not self.debug:\n return\n if symbol == ord('a'):\n if self.curtains.current_scene == self.curtains.scenes['swimming_scene']:\n self.curtains.current_scene.add_a_ducky()\n elif symbol == ord('p'):\n print(POINTS_HINT)\n elif symbol == ord('x'):\n POINTS_HINT.clear()\n\n def on_mouse_release(self, x: float, y: float, button: int, modifiers: int) -> None:\n \"\"\"Add clicked point to points_hint as % of width/height.\"\"\"\n POINTS_HINT.append((round(x/self.window.width, 3), round(y/self.window.height, 3)))\n\n\ndef main() -> None:\n \"\"\"\n Main function.\n\n Can be run for a GameView in debug mode\n \"\"\"\n window = arcade.Window(title=constants.SCREEN_TITLE, width=constants.SCREEN_WIDTH, height=constants.SCREEN_HEIGHT)\n arcade.set_window(window)\n game = GameView(debug=True)\n window.show_view(game)\n arcade.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"aaaaAAAA/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"138608085","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 19 19:04:50 2020\n\n@author: ASUS\nMain game logic\n\"\"\"\n\nimport os,sys,pygame\nfrom pygame import locals\nimport config,objects\n\nclass State:\n def handle(self,event):\n #default event handling: only react to quit events\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type== pygame.KEYDOWN and event.key==pygame.K_ESCAPE:\n sys.exit(0)\n \n def first_display(self,screen):\n screen.fill(config.background_color)\n pygame.display.flip()\n \n def display(self,screen):\n #used for subsequent displays of the state\n pass\n\nclass Level(State):\n def __init__(self,number=1):\n self.number=number\n #how many more weights must be dodged\n self.remaining=config.weights_per_level\n \n speed=config.drop_speed\n \n speed+=(self.number-1)*config.speed_increase\n self.weight=objects.Weight(speed)\n self.banana=objects.Banana()\n both=self.weight,self.banana#could contain more sprites\n self.sprites=pygame.sprite.RenderUpdates(both)\n \n def update(self,game):\n #update the game state\n self.sprites.update()\n if self.banana.touches(self.weight):\n game.next_state=GameOver()\n elif self.weight.landed:\n self.weight.reset()\n self.remaining-=1\n if self.remaining==0:\n game.next_state=LevelCleared(self.number)\n \n def display(self,screen):\n #display the state after the first screen clear\n screen.fill(config.background_color)\n updates=self.sprites.draw(screen)\n pygame.display.update(updates)\n \nclass Paused(State):\n #paused game state\n finished=0#end?\n image=None#set to a file name if an image should be displayed\n text=''#explanatory text\n def handle(self,event):\n State.handle(self,event)\n if event.type in [pygame.MOUSEBUTTONDOWN,pygame.KEYDOWN]:\n self.finished=1\n \n def update(self,game):\n if self.finished:\n game.next_state=self.next_state()\n \n def first_display(self,screen):\n screen.fill(config.background_color)\n #create a Font object with the specified typeface and size\n font=pygame.font.Font(None,config.font_size)\n #get the lines of text\n lines=self.text.strip().splitlines()\n height = len(lines)*font.get_linesize()\n center,top=screen.get_rect().center\n top-=height//2\n \n #if there is an image to display\n if self.image:\n image=pygame.image.load(self.image).convert()\n r=image.get_rect()\n top+=r.height//2\n r.midbottom=center,top-20\n screen.blit(image,r)\n antialias=1\n black=0,0,0\n \n for line in lines:\n text=font.render(line.strip(),antialias,black)\n r=text.get_rect()\n r.midtop=center,top\n screen.blit(text,r)\n top+=font.get_linesize()\n \n #show the changes that were made\n pygame.display.flip()\n \nclass Info(Paused):\n next_state=Level\n text='''\n In this game, fuck you'''\n \nclass Startup(Paused):\n #paused state showing the splash image and a welcome message\n next_state=Info\n image=config.splash_image\n text='''welcome'''\n \nclass LevelCleared(Paused):\n #level cleared\n def __init__(self,number):\n self.number=number\n self.text=''' Level {} cleared\n Click to start next level'''.format(self.number)\n \n def next_state(self):\n return Level(self.number+1)\n \nclass GameOver(Paused):\n next_state=Level\n text=''' Game Over\n Click to Restart,Esc to Quit'''\n \nclass Game:\n #main event loop\n def __init__(self,*args):\n #get the directory where the game and its images live\n path = os.path.abspath(args[0])\n dir=os.path.split(path)[0]\n #change to that directory so image files can be opened later\n os.chdir(dir)\n #initially not in any state\n self.state=None\n #switch to Startup in the first iteration of the event loop\n self.next_state =Startup()\n \n def run(self):\n pygame.init()\n flag=0#by default, show the game in a window\n if config.full_screen:\n flag=pygame.FULLSCREEN\n screen_size=config.screen_size\n screen =pygame.display.set_mode(screen_size,flag)\n \n pygame.display.set_caption('Fruit Self Defense')\n pygame.mouse.set_visible(False)\n \n while True:\n #if next_state has changed, switch state and display it\n if self.state!=self.next_state:\n self.state=self.next_state\n self.state.first_display(screen)\n \n #delegate event handling to the current state\n for event in pygame.event.get():\n self.state.handle(event)\n \n #update the current state\n self.state.update(self)\n \n #display\n self.state.display(screen)\n \nif __name__=='__main__':\n game=Game(*sys.argv)\n game.run()\n \n\n","sub_path":"squish.py","file_name":"squish.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"286199738","text":"Daftar = [2, 3, 5, 6, 6, 6, 8, 9, 9, 10, 11, 12, 13, 14]\r\n\r\ndef binSe(kumpulan, target):\r\n a = []\r\n low = 0\r\n high = len(kumpulan) - 1\r\n while low <= high:\r\n mid = (high + low) // 2\r\n if kumpulan[mid] == target:\r\n a.append(mid)\r\n savepoint = mid\r\n while savepoint > 0 and kumpulan[savepoint-1] == target: #walk left to collect duplicates without underflowing index 0\r\n savepoint -= 1\r\n a.append(savepoint)\r\n savepoint1 = mid\r\n while savepoint1 < len(kumpulan) - 1 and kumpulan[savepoint1+1] == target: #walk right, guarding the upper bound\r\n savepoint1 += 1\r\n a.append(savepoint1)\r\n a.sort()\r\n return a\r\n elif target < kumpulan[mid]:\r\n high = mid - 1\r\n else:\r\n low = mid + 1\r\n return False\r\n","sub_path":"Praktikum 4/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192042188","text":"\"\"\"ROSARIO VALERO MIRANDA - 1º DAW - PRACTICA5 - EJERCICIO 3\r\nWrite a program that asks for grades and stores them in a list.\r\nTo finish entering grades, enter a grade that is not between 0 and 10.\r\nThe program ends by printing the list of 
grades.\"\"\"\r\n\r\nprint(\"Enter a grade\")\r\nnota=float(input())\r\n\r\nlista=[]\r\n\r\nwhile (nota>=0 and nota<=10): #grades 0-10 inclusive are valid; anything outside ends the input\r\n lista.append(nota)\r\n print(\"Enter another grade\")\r\n nota=float(input())\r\n\r\nprint(lista)\r\n","sub_path":"Ejercicios-Pr5/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"492811472","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom deepnet.model.learner import Model\n\nclass ModBasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n \"\"\"\n Creates the basic block of RESNET-18\n \n Arguments:\n in_planes : Number of input channels\n planes : Number of output channels\n stride : Value of stride in the model (By default = 1)\n \"\"\"\n super(ModBasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\nclass ResModNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=200):\n \"\"\"\n Creates RESNET-18\n Arguments:\n block : Basic block of resnet\n num_blocks : List of number of convolutions in each block\n num_classes : Number of labels in dataset\n \"\"\"\n super(ResModNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.gap = nn.Sequential(\n nn.AdaptiveAvgPool2d(1)\n )\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n \"\"\"\n Arguments:\n block : The basic block for the corresponding layer\n planes : Number of output channels\n num_blocks : Number of convolutions for this block\n stride : Value of stride\n \"\"\"\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.gap(out)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n def learner(self, model, tensorboard, dataset_train, train_loader, test_loader, device, optimizer, criterion, epochs, metrics, callbacks):\n \"\"\"Trains the model\n Arguments:\n model: Model to be trained and validated\n tensorboard: Tensorboard instance for visualization\n dataset_train: Dataset training instance\n train_loader: Dataloader containing train data on 
the GPU/ CPU\n test_loader: Dataloader containing test data on the GPU/ CPU \n device: Device on which model will be trained (GPU/CPU)\n optimizer: optimizer for the model\n criterion: Loss function\n epochs: Number of epochs to train the model\n metrics(bool): If metrics is to be displayed or not\n (default: False)\n callbacks: Scheduler to be applied on the model\n (default : None)\n \"\"\"\n\n learn = Model(model, tensorboard, dataset_train, train_loader, test_loader, device, optimizer, criterion, epochs, metrics, callbacks)\n self.result = learn.fit()\n\n @property\n def results(self):\n \"\"\"Returns model results\"\"\"\n return self.result","sub_path":"model/models/resmodnet.py","file_name":"resmodnet.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"377401144","text":"import pytest\nfrom lib.html_parser import HtmlParser\nimport pdb\n\npage =\"\"\"\n<html>\n<head><title>A fantastic title!</title></head>\n<body>\n<header>\nThe header\n<a href=\"/defra\">DEFRA</a>\n</header>\nA fantastic body!\n<div>\n<a href=\"www.links1.com\">Link 1</a>\n<a href=\"www.links2.com\">Link 2</a>\n<a href=\"www.links3.com\">Link 3</a>\n<a href=\"http://www.gov.uk/stats.pdf\">stats.pdf</a>\n</div>\n<footer>The footer</footer>\n</body>\n</html>
\n\n\"\"\"\n\ndef test_title():\n p = HtmlParser(page)\n assert \"A fantastic title!\" in p.title()\n\ndef test_clean_body():\n p = HtmlParser(page)\n clean_body = p.clean_body()\n\n assert \"A fantastic body!\" in clean_body\n assert \"The header\" not in clean_body\n assert \"The footer\" not in clean_body\n\ndef test_links():\n p = HtmlParser(page)\n\n assert p.links() == [\n '/defra',\n 'www.links1.com',\n 'www.links2.com',\n 'www.links3.com',\n 'http://www.gov.uk/stats.pdf'\n ]\n\ndef test_download_links():\n p = HtmlParser(page)\n\n assert p.download_links() == [\n 'http://www.gov.uk/stats.pdf'\n ]\n\ndef test_organisations():\n p = HtmlParser(page)\n\n assert p.organisations() == [\n 'DEFRA'\n ]\n\ndef test_to_json():\n p = HtmlParser(page)\n jsoned_document = p.to_json()\n\n assert \"A fantastic title!\" in jsoned_document['title']\n\n clean_body = jsoned_document['body']\n\n assert \"A fantastic body!\" in clean_body\n assert \"The header\" not in clean_body\n assert \"The footer\" not in clean_body\n\n assert jsoned_document[\"links\"] == [\n '/defra',\n 'www.links1.com',\n 'www.links2.com',\n 'www.links3.com',\n 'http://www.gov.uk/stats.pdf'\n ]\n\n assert jsoned_document[\"download_links\"] == [\n 'http://www.gov.uk/stats.pdf'\n ]\n\n assert jsoned_document[\"organisations\"] == [\n 'DEFRA'\n ]\n","sub_path":"tests/lib/test_html_parser.py","file_name":"test_html_parser.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"513010886","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n Copyright 2020 DR ROBIN RAFFE PRYCE JONES\r\n Licensed under the Apache License, Version 2.0 (the \"License\");\r\n you may not use this file except in compliance with the License.\r\n You may obtain a copy of the License at\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n Unless required by applicable law or agreed to in writing, software\r\n distributed under the License is distributed on an \"AS IS\" BASIS,\r\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n See the License for the specific language governing permissions and\r\n limitations under the License.\r\n\"\"\"\r\n\r\n\r\nimport wx\r\nimport wx.xrc\r\n\r\nfrom CustomLib.Funcs import(LengthUnitArray, \r\n AxisLabel_Array, \r\n CbarLabel_Array, \r\n CbarManualMag_Array,\r\n ExportFormat_Array,\r\n Cbar_ColourMapArray,\r\n Cbar_ReverseArray,\r\n ExportDPI_Array)\r\n\r\n###########################################################################\r\n## Class MainFrame\r\n###########################################################################\r\n\r\nclass MainFrame ( wx.Frame ):\r\n \"\"\"Define the design of the gui\"\"\"\r\n \r\n def __init__( self, parent ):\r\n \r\n wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, \r\n title = u\"Spectrum Analyser\",\r\n pos = wx.Point(10,10),#wx.DefaultPosition, \r\n size = wx.Size( 1000,650 ), \r\n style = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE|wx.MINIMIZE_BOX|wx.TAB_TRAVERSAL )\r\n\r\n################################################################\r\n # Menu bar stuff\r\n menuBar = wx.MenuBar()\r\n fileButton = wx.Menu()\r\n \r\n exitItem = fileButton.Append(wx.ID_EXIT, 'Exit', 'status msg....')\r\n \r\n menuBar.Append(fileButton, 'File')\r\n \r\n self.SetMenuBar(menuBar)\r\n################################################################\r\n # panel stuff\r\n self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )\r\n \r\n bSizer1 = wx.BoxSizer( wx.VERTICAL )\r\n \r\n bSizer10 = 
wx.BoxSizer( wx.VERTICAL )\r\n gSizer10 = wx.GridSizer( 0, 5, 0, 0 )\r\n \r\n bSizer11 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer12 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer13 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer14 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer15 = wx.BoxSizer( wx.VERTICAL )\r\n \r\n bSizer20 = wx.BoxSizer( wx.VERTICAL )\r\n gSizer20 = wx.GridSizer( 0, 4, 0, 0 )\r\n \r\n bSizer21 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer22 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer23 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer24 = wx.BoxSizer( wx.VERTICAL )\r\n \r\n bSizer30 = wx.BoxSizer( wx.VERTICAL )\r\n gSizer30 = wx.GridSizer( 0, 5, 0, 0 )\r\n \r\n bSizer31 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer32 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer33 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer34 = wx.BoxSizer( wx.VERTICAL )\r\n bSizer35 = wx.BoxSizer( wx.VERTICAL )\r\n\r\n\r\n \r\n################################################################\r\n \r\n \r\n self.m_staticText_FILE = wx.StaticText( self, wx.ID_ANY, u\"Spectra File\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_FILE.Wrap( -1 )\r\n \r\n self.m_textCtrl_FILE = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, (350, -1), 0 )\r\n \r\n self.m_button_select_file = wx.Button( self, wx.ID_ANY, u\"Select File\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n \r\n\r\n\r\n self.m_staticText_ExportFormat = wx.StaticText( self, wx.ID_ANY, u\"Export File Format\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_ExportFormat.Wrap( -1 )\r\n \r\n ExportFormat_List = ExportFormat_Array()\r\n \r\n self.m_ComboBox_ExportFormat = wx.ComboBox(self, wx.ID_ANY, \"Export File Format\", wx.DefaultPosition,\r\n (150,-1), ExportFormat_List , 0)\r\n \r\n self.m_staticText_ExportDPI = wx.StaticText( self, wx.ID_ANY, u\"Export Resolution\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_ExportDPI.Wrap( -1 )\r\n \r\n ExportDPI_List = ExportDPI_Array()\r\n \r\n self.m_ComboBox_ExportDPI = wx.ComboBox(self, wx.ID_ANY, \"Export Res\", wx.DefaultPosition,\r\n (150,-1), ExportDPI_List , 0)\r\n \r\n self.m_button_ExportFig = wx.Button( self, wx.ID_ANY, u\"Export Figure\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n \r\n self.m_button_SavePrefs = wx.Button( self, wx.ID_ANY, u\"Save Prefs\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n\r\n self.m_button_LoadPrefs = wx.Button( self, wx.ID_ANY, u\"Load Prefs\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n\r\n\r\n self.m_button_RestorePrefs = wx.Button( self, wx.ID_ANY, u\"Restore Default Prefs\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n \r\n\r\n\r\n bSizer11.Add( self.m_staticText_FILE, 0, wx.ALL, 5 )\r\n bSizer11.Add( self.m_textCtrl_FILE, 0, wx.ALL, 5 )\r\n bSizer11.Add( self.m_button_select_file, 0, wx.ALL, 5 )\r\n gSizer10.Add( bSizer11, 0, wx.ALL,5 )\r\n \r\n bSizer12.AddSpacer( 0)\r\n \r\n gSizer10.Add( bSizer12, 0, wx.ALL,5 )\r\n\r\n bSizer13.Add( self.m_staticText_ExportFormat, 0, wx.ALL, 5 )\r\n bSizer13.Add( self.m_ComboBox_ExportFormat, 0, wx.ALL, 5 )\r\n bSizer13.Add( self.m_button_ExportFig, 0, wx.ALL, 5 )\r\n \r\n gSizer10.Add( bSizer13, 0, wx.ALL,5 )\r\n \r\n bSizer14.Add( self.m_staticText_ExportDPI, 0, wx.ALL, 5 )\r\n bSizer14.Add( self.m_ComboBox_ExportDPI, 0, wx.ALL, 5 )\r\n \r\n gSizer10.Add( bSizer14, 0, wx.ALL,5 )\r\n \r\n bSizer15.Add( self.m_button_SavePrefs, 0, wx.ALL, 5 )\r\n bSizer15.Add( self.m_button_LoadPrefs, 0, wx.ALL, 5 )\r\n bSizer15.Add( self.m_button_RestorePrefs, 0, wx.ALL, 5 ) \r\n \r\n \r\n gSizer10.Add( bSizer15, 0, wx.ALL,5 )\r\n\r\n \r\n 
bSizer10.Add( gSizer10, 1, wx.EXPAND, 5 )\r\n bSizer1.Add( bSizer10, 1, wx.EXPAND, 5 )\r\n\r\n\r\n\r\n \r\n################################################################\r\n \r\n self.m_staticline1 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )\r\n bSizer1.Add( self.m_staticline1, 0, wx.EXPAND |wx.ALL, 5 ) \r\n\r\n################################################################\r\n self.m_staticText_XLowLimit = wx.StaticText( self, wx.ID_ANY, u\"x axis lower limit (nm)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_XLowLimit.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_XLowLimit = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-2000, max=2000, initial=0, name=\"x axis lower limit\")\r\n\r\n self.m_staticText_XHighLimit = wx.StaticText( self, wx.ID_ANY, u\"x axis upper limit (nm)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_XHighLimit.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_XHighLimit = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-2000, max=2000, initial=0, name=\"x axis upper limit\")\r\n \r\n self.m_staticText_Num_X_Ticks = wx.StaticText( self, wx.ID_ANY, u\"x axis ticks (num)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Num_X_Ticks.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_Num_X_Ticks = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=2, max=20, initial=5, name=\"x axis ticks\")\r\n \r\n self.m_staticText_YLowLimit = wx.StaticText( self, wx.ID_ANY, u\"y axis lower limit (nm)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_YLowLimit.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_YLowLimit = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-2000, max=2000, initial=0, name=\"y axis lower limit\")\r\n\r\n self.m_staticText_YHighLimit = wx.StaticText( self, wx.ID_ANY, u\"y axis upper limit (nm)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_YHighLimit.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_YHighLimit = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-2000, max=2000, initial=0, name=\"y axis upper limit\")\r\n \r\n self.m_staticText_Num_Y_Ticks = wx.StaticText( self, wx.ID_ANY, u\"y axis ticks (num)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Num_Y_Ticks.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_Num_Y_Ticks = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=2, max=20, initial=5, name=\"y axis ticks\")\r\n \r\n \r\n self.m_staticText_Cbar_Lowlim = wx.StaticText( self, wx.ID_ANY, u\"Colour bar lower limit\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_Lowlim.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_CbarLowLimit = wx.SpinCtrlDouble(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-1000000, max=1000000, initial=0, name=\"Colour bar lower limit\")\r\n\r\n self.m_staticText_Cbar_Highlim = wx.StaticText( self, wx.ID_ANY, u\"Colour bar upper limit\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_Highlim.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_CbarHighLimit = 
wx.SpinCtrlDouble(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-1000000, max=1000000, initial=0, name=\"Colour bar upper limit\")\r\n \r\n self.m_staticText_Num_Cbar_Ticks = wx.StaticText( self, wx.ID_ANY, u\"Colour bar ticks (num)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Num_Cbar_Ticks.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_Num_Cbar_Ticks = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=2, max=20, initial=5, name=\"c bar ticks\")\r\n \r\n self.m_staticText_Cbar_SkewHi = wx.StaticText( self, wx.ID_ANY, u\"Colour bar High Offset (%)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_SkewHi.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_Cbar_SkewHi = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-200, max=200, initial=100, name=\"Colour bar High Offset (%)\")\r\n \r\n self.m_staticText_Cbar_SkewLo = wx.StaticText( self, wx.ID_ANY, u\"Colour bar Low Offset (%)\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_SkewLo.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_Cbar_SkewLo = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=-200, max=200, initial=0, name=\"Colour bar Low Offset (%)\")\r\n\r\n\r\n self.m_staticText_Rotate = wx.StaticText( self, wx.ID_ANY, u\"Rotate\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Rotate.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_RotateAngle = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition, (100,-1), \r\n wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER |wx.SP_WRAP, \r\n min=-180, max=180, initial=0, name=\"Rotate\")\r\n\r\n self.m_CHKBox_FlipX = wx.CheckBox(self, id=wx.ID_ANY, label=\"Flip x\", pos=wx.DefaultPosition,\r\n size=wx.DefaultSize, style=0, validator=wx.DefaultValidator,\r\n name='Flip x') \r\n \r\n self.m_CHKBox_FlipY = wx.CheckBox(self, id=wx.ID_ANY, label=\"Flip y\", pos=wx.DefaultPosition,\r\n size=wx.DefaultSize, style=0, validator=wx.DefaultValidator,\r\n name='Flip y') \r\n \r\n bSizer21.Add( self.m_staticText_XLowLimit, 0, wx.ALL, 5 )\r\n bSizer21.Add( self.m_SpinCtrl_XLowLimit, 0, wx.ALL, 5 )\r\n\r\n bSizer21.Add( self.m_staticText_YLowLimit, 0, wx.ALL, 5 )\r\n bSizer21.Add( self.m_SpinCtrl_YLowLimit, 0, wx.ALL, 5 )\r\n \r\n bSizer21.Add( self.m_staticText_Cbar_Lowlim, 0, wx.ALL, 5 )\r\n bSizer21.Add( self.m_SpinCtrl_CbarLowLimit, 0, wx.ALL, 5 ) \r\n \r\n bSizer21.Add( self.m_staticText_Cbar_SkewLo, 0, wx.ALL, 5 )\r\n bSizer21.Add( self.m_SpinCtrl_Cbar_SkewLo, 0, wx.ALL, 5 ) \r\n \r\n gSizer20.Add( bSizer21, 0, wx.ALL,5 )\r\n\r\n\r\n bSizer22.Add( self.m_staticText_XHighLimit, 0, wx.ALL, 5 )\r\n bSizer22.Add( self.m_SpinCtrl_XHighLimit, 0, wx.ALL, 5 )\r\n\r\n bSizer22.Add( self.m_staticText_YHighLimit, 0, wx.ALL, 5 )\r\n bSizer22.Add( self.m_SpinCtrl_YHighLimit, 0, wx.ALL, 5 )\r\n \r\n bSizer22.Add( self.m_staticText_Cbar_Highlim, 0, wx.ALL, 5 )\r\n bSizer22.Add( self.m_SpinCtrl_CbarHighLimit, 0, wx.ALL, 5 )\r\n\r\n bSizer22.Add( self.m_staticText_Cbar_SkewHi, 0, wx.ALL, 5 )\r\n bSizer22.Add( self.m_SpinCtrl_Cbar_SkewHi, 0, wx.ALL, 5 ) \r\n \r\n gSizer20.Add( bSizer22, 0, wx.ALL,5 )\r\n\r\n bSizer23.Add( self.m_staticText_Num_X_Ticks, 0, wx.ALL, 5 )\r\n bSizer23.Add( self.m_SpinCtrl_Num_X_Ticks, 0, wx.ALL, 5 )\r\n \r\n bSizer23.Add( 
self.m_staticText_Num_Y_Ticks, 0, wx.ALL, 5 )\r\n bSizer23.Add( self.m_SpinCtrl_Num_Y_Ticks, 0, wx.ALL, 5 )\r\n \r\n bSizer23.Add( self.m_staticText_Num_Cbar_Ticks, 0, wx.ALL, 5 )\r\n bSizer23.Add( self.m_SpinCtrl_Num_Cbar_Ticks, 0, wx.ALL, 5 )\r\n \r\n gSizer20.Add( bSizer23, 0, wx.ALL,5 )\r\n \r\n bSizer24.Add( self.m_staticText_Rotate, 0, wx.ALL, 5 )\r\n bSizer24.Add( self.m_SpinCtrl_RotateAngle , 0, wx.ALL, 5 )\r\n bSizer24.Add( self.m_CHKBox_FlipX , 0, wx.ALL, 5 )\r\n bSizer24.Add( self.m_CHKBox_FlipY , 0, wx.ALL, 5 )\r\n\r\n \r\n gSizer20.Add( bSizer24, 0, wx.ALL,5 )\r\n \r\n bSizer20.Add( gSizer20, 1, wx.EXPAND, 5 )\r\n bSizer1.Add( bSizer20, 1, wx.EXPAND, 5 )\r\n################################################################\r\n \r\n self.m_staticText_Cbar_ColourMap = wx.StaticText( self, wx.ID_ANY, u\"Colour bar map\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_ColourMap.Wrap( -1 )\r\n \r\n Cbar_ColourMap_List = Cbar_ColourMapArray()\r\n\r\n self.m_ComboBox_Cbar_ColourMap = wx.ComboBox(self, wx.ID_ANY, \"Colour bar map\", wx.DefaultPosition,\r\n (150,-1), Cbar_ColourMap_List, 0)\r\n \r\n self.m_staticText_Cbar_Reverse = wx.StaticText( self, wx.ID_ANY, u\"Colour bar Direction\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_Reverse.Wrap( -1 )\r\n \r\n Cbar_Reverse_List = Cbar_ReverseArray()\r\n\r\n self.m_ComboBox_Cbar_Reverse = wx.ComboBox(self, wx.ID_ANY, \"Colour bar Direction\", wx.DefaultPosition,\r\n (150,-1), Cbar_Reverse_List, 0)\r\n \r\n self.m_staticText_Axis_Units = wx.StaticText( self, wx.ID_ANY, u\"x and y axis units\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Axis_Units.Wrap( -1 )\r\n \r\n AxisUnits_List = LengthUnitArray()\r\n\r\n self.m_ComboBox_AxisUnits = wx.ComboBox(self, wx.ID_ANY, \"Axis Units\", wx.DefaultPosition,\r\n (150,-1), AxisUnits_List, 0)\r\n \r\n \r\n AxisLabel_List = AxisLabel_Array()\r\n \r\n self.m_staticText_XAxis_Label = wx.StaticText( self, wx.ID_ANY, u\"x axis label\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_XAxis_Label.Wrap( -1 )\r\n\r\n self.m_ComboBox_x_label = wx.ComboBox(self, wx.ID_ANY, \"x axis Label\", wx.DefaultPosition,\r\n (150,-1), AxisLabel_List, 0)\r\n \r\n self.m_staticText_YAxis_Label = wx.StaticText( self, wx.ID_ANY, u\"y axis label\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_YAxis_Label.Wrap( -1 )\r\n \r\n self.m_ComboBox_y_label = wx.ComboBox(self, wx.ID_ANY, \"y axis Label\", wx.DefaultPosition,\r\n (150,-1), AxisLabel_List, 0)\r\n \r\n self.m_staticText_Cbar_Label = wx.StaticText( self, wx.ID_ANY, u\"Colour bar label\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_Cbar_Label.Wrap( -1 )\r\n \r\n CbarLabel_List = CbarLabel_Array()\r\n \r\n self.m_ComboBox_Cbar_Label = wx.ComboBox(self, wx.ID_ANY, \"Colour bar Label\", wx.DefaultPosition,\r\n (150,-1), CbarLabel_List , 0)\r\n \r\n self.m_StaticText_ManualCbar_label = wx.StaticText( self, wx.ID_ANY, u\"Manual Cbar Label\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n \r\n self.m_textCtrl_ManualCbar_label = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString , wx.DefaultPosition, (150,-1), 0 | wx.TE_PROCESS_ENTER )\r\n\r\n self.m_staticText_CbarManualMag = wx.StaticText( self, wx.ID_ANY, u\"Colour Bar Re-scale magnitude\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_CbarManualMag.Wrap( -1 )\r\n \r\n CbarManualMag_List = CbarManualMag_Array()\r\n \r\n self.m_ComboBox_CbarManualMag = wx.ComboBox(self, wx.ID_ANY, \"Colour Bar Re-scale 
magnitude\", wx.DefaultPosition,\r\n (150,-1), CbarManualMag_List , 0)\r\n \r\n self.m_staticText_SmallText = wx.StaticText( self, wx.ID_ANY, u\"Small Text Size\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_SmallText.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_SMALL_SIZE = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=1, max=100, initial=22, name=\"Small Text Size\")\r\n \r\n self.m_staticText_MidText = wx.StaticText( self, wx.ID_ANY, u\"Medium Text Size\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_MidText.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_MEDIUM_SIZE = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=1, max=100, initial=25, name=\"Medium Text Size\")\r\n \r\n self.m_staticText_LargeText = wx.StaticText( self, wx.ID_ANY, u\"Large Text Size\", wx.DefaultPosition, wx.DefaultSize, 0 )\r\n self.m_staticText_LargeText.Wrap( -1 )\r\n \r\n self.m_SpinCtrl_BIGGER_SIZE = wx.SpinCtrl(self, wx.ID_ANY, \"\", wx.DefaultPosition,\r\n (100,-1), wx.SP_ARROW_KEYS | wx.ALIGN_LEFT | wx.TE_PROCESS_ENTER, \r\n min=1, max=100, initial=30, name=\"Large Text Size\")\r\n \r\n bSizer31.Add( self.m_staticText_Cbar_ColourMap, 0, wx.ALL, 5 )\r\n bSizer31.Add( self.m_ComboBox_Cbar_ColourMap, 0, wx.ALL, 5 )\r\n bSizer31.Add( self.m_staticText_Cbar_Reverse, 0, wx.ALL, 5 )\r\n bSizer31.Add( self.m_ComboBox_Cbar_Reverse, 0, wx.ALL, 5 )\r\n bSizer31.Add( self.m_staticText_Axis_Units, 0, wx.ALL, 5 )\r\n bSizer31.Add( self.m_ComboBox_AxisUnits, 0, wx.ALL, 5 )\r\n \r\n gSizer30.Add( bSizer31, 0, wx.ALL,5 )\r\n \r\n bSizer32.Add( self.m_staticText_XAxis_Label, 0, wx.ALL, 5 )\r\n bSizer32.Add( self.m_ComboBox_x_label, 0, wx.ALL, 5 )\r\n \r\n gSizer30.Add( bSizer32, 0, wx.ALL,5 )\r\n \r\n bSizer33.Add( self.m_staticText_YAxis_Label, 0, wx.ALL, 5 )\r\n bSizer33.Add( self.m_ComboBox_y_label, 0, wx.ALL, 5 )\r\n \r\n gSizer30.Add( bSizer33, 0, wx.ALL,5 )\r\n \r\n bSizer34.Add( self.m_staticText_Cbar_Label, 0, wx.ALL, 5 )\r\n bSizer34.Add( self.m_ComboBox_Cbar_Label, 0, wx.ALL, 5 )\r\n bSizer34.Add( self.m_StaticText_ManualCbar_label, 0, wx.ALL, 5 )\r\n bSizer34.Add( self.m_textCtrl_ManualCbar_label, 0, wx.ALL, 5 )\r\n bSizer34.Add( self.m_staticText_CbarManualMag, 0, wx.ALL, 5 )\r\n bSizer34.Add( self.m_ComboBox_CbarManualMag, 0, wx.ALL, 5 )\r\n \r\n gSizer30.Add( bSizer34, 0, wx.ALL,5 )\r\n \r\n bSizer35.Add( self.m_staticText_SmallText, 0, wx.ALL, 5 )\r\n bSizer35.Add( self.m_SpinCtrl_SMALL_SIZE, 0, wx.ALL, 5 )\r\n bSizer35.Add( self.m_staticText_MidText, 0, wx.ALL, 5 )\r\n bSizer35.Add( self.m_SpinCtrl_MEDIUM_SIZE, 0, wx.ALL, 5 )\r\n bSizer35.Add( self.m_staticText_LargeText, 0, wx.ALL, 5 )\r\n bSizer35.Add( self.m_SpinCtrl_BIGGER_SIZE, 0, wx.ALL, 5 )\r\n \r\n gSizer30.Add( bSizer35, 0, wx.ALL,5 )\r\n \r\n bSizer30.Add( gSizer30, 1, wx.EXPAND, 5 )\r\n \r\n bSizer1.Add( bSizer30, 1, wx.EXPAND, 5 )\r\n\r\n################################################################\r\n self.SetSizer( bSizer1 )\r\n self.Layout()\r\n self.Centre( wx.BOTH )\r\n################################################################\r\n # Connect Events:\r\n self.Bind(wx.EVT_MENU, self.Quit, exitItem)\r\n self.Bind( wx.EVT_CLOSE, self.Quit, exitItem)\r\n self.m_button_select_file.Bind( wx.EVT_BUTTON, self.OnButtonClick_select_file )\r\n \r\n self.m_SpinCtrl_XLowLimit.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_XLowLimit )\r\n 
self.m_SpinCtrl_XLowLimit.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_XLowLimit )\r\n self.m_SpinCtrl_XHighLimit.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_XHighLimit )\r\n self.m_SpinCtrl_XHighLimit.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_XHighLimit )\r\n self.m_SpinCtrl_Num_X_Ticks.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Num_X_Ticks )\r\n self.m_SpinCtrl_Num_X_Ticks.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Num_X_Ticks )\r\n \r\n self.m_SpinCtrl_YLowLimit.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_YLowLimit )\r\n self.m_SpinCtrl_YLowLimit.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_YLowLimit )\r\n self.m_SpinCtrl_YHighLimit.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_YHighLimit )\r\n self.m_SpinCtrl_YHighLimit.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_YHighLimit )\r\n self.m_SpinCtrl_Num_Y_Ticks.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Num_Y_Ticks )\r\n self.m_SpinCtrl_Num_Y_Ticks.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Num_Y_Ticks )\r\n \r\n self.m_SpinCtrl_CbarLowLimit.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Cbar_Lowlim )\r\n self.m_SpinCtrl_CbarLowLimit.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Cbar_Lowlim )\r\n self.m_SpinCtrl_CbarHighLimit.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Cbar_Highlim )\r\n self.m_SpinCtrl_CbarHighLimit.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Cbar_Highlim )\r\n self.m_SpinCtrl_Num_Cbar_Ticks.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Num_Cbar_Ticks )\r\n self.m_SpinCtrl_Num_Cbar_Ticks.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Num_Cbar_Ticks )\r\n \r\n self.m_SpinCtrl_RotateAngle.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Rotate)\r\n self.m_SpinCtrl_RotateAngle.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Rotate )\r\n self.m_CHKBox_FlipX.Bind(wx.EVT_CHECKBOX, self.On_CHKBox_FlipX )\r\n self.m_CHKBox_FlipY.Bind(wx.EVT_CHECKBOX, self.On_CHKBox_FlipY )\r\n \r\n self.m_SpinCtrl_Cbar_SkewHi.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Cbar_SkewHi )\r\n self.m_SpinCtrl_Cbar_SkewHi.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Cbar_SkewHi )\r\n self.m_SpinCtrl_Cbar_SkewLo.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_Cbar_SkewLo )\r\n self.m_SpinCtrl_Cbar_SkewLo.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_Cbar_SkewLo )\r\n \r\n self.m_ComboBox_Cbar_ColourMap.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_Cbar_ColourMap)\r\n self.m_ComboBox_Cbar_Reverse.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_Reverse)\r\n self.m_ComboBox_AxisUnits.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_AxisUnits)\r\n self.m_ComboBox_x_label.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_XAxis_Label)\r\n self.m_ComboBox_y_label.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_YAxis_Label)\r\n self.m_ComboBox_Cbar_Label.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_Cbar_Label)\r\n \r\n self.m_SpinCtrl_SMALL_SIZE.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_SmallTextSize)\r\n self.m_SpinCtrl_SMALL_SIZE.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_SmallTextSize )\r\n self.m_SpinCtrl_MEDIUM_SIZE.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_MidTextSize)\r\n self.m_SpinCtrl_MEDIUM_SIZE.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_MidTextSize )\r\n self.m_SpinCtrl_BIGGER_SIZE.Bind( wx.EVT_SPINCTRL, self.OnSpinCtrl_LargeTextSize)\r\n self.m_SpinCtrl_BIGGER_SIZE.Bind(wx.EVT_TEXT_ENTER, self.OnSpinCtrl_LargeTextSize )\r\n \r\n self.m_textCtrl_ManualCbar_label.Bind(wx.EVT_TEXT_ENTER, self.OnTextCtrl_ManualCbar)\r\n self.m_ComboBox_CbarManualMag.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_CbarManualMag)\r\n \r\n self.m_ComboBox_ExportFormat.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_ExportFormat)\r\n self.m_ComboBox_ExportDPI.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect_ExportDPI)\r\n 
self.m_button_ExportFig.Bind( wx.EVT_BUTTON, self.OnButtonClick_ExportFig )\r\n        self.m_button_SavePrefs.Bind( wx.EVT_BUTTON, self.OnButtonClick_SavePrefs )\r\n        self.m_button_LoadPrefs.Bind( wx.EVT_BUTTON, self.OnButtonClick_LoadPrefs )\r\n        self.m_button_RestorePrefs.Bind( wx.EVT_BUTTON, self.OnButtonClick_RestorePrefs )\r\n################################################################\r\n    # Standard event handlers\r\n    def Quit(self, event):\r\n        self.Close()\r\n\r\n################################################################\r\n    # Virtual event handlers, override them in your derived class\r\n\r\n    def OnComboBoxSelect_ExportDPI(self, event):\r\n        event.Skip()\r\n\r\n    def On_CHKBox_FlipX(self, event):\r\n        event.Skip()\r\n\r\n    def On_CHKBox_FlipY(self, event):\r\n        event.Skip()\r\n\r\n    def OnButtonClick_LoadPrefs(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Rotate(self, event):\r\n        event.Skip()\r\n\r\n    def OnButtonClick_SavePrefs(self, event):\r\n        event.Skip()\r\n\r\n    def OnButtonClick_RestorePrefs(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_LargeTextSize(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_MidTextSize(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_SmallTextSize(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_Reverse(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_Cbar_ColourMap(self, event):\r\n        event.Skip()\r\n\r\n    def OnButtonClick_ExportFig(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_ExportFormat(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_CbarManualMag(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_Cbar_Label(self, event):\r\n        event.Skip()\r\n\r\n    def OnTextCtrl_ManualCbar(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_XAxis_Label(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_YAxis_Label(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Cbar_SkewHi(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Cbar_SkewLo(self, event):\r\n        event.Skip()\r\n\r\n    def OnButtonClick_select_file(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_XLowLimit(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_XHighLimit(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Num_X_Ticks(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_YLowLimit(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_YHighLimit(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Num_Y_Ticks(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Cbar_Lowlim(self, event):\r\n        event.Skip()\r\n\r\n    # Renamed from OnSpinCtrl_Cbar_LHighlim: the name must match the handler bound above,\r\n    # otherwise the Bind() calls in __init__ raise AttributeError\r\n    def OnSpinCtrl_Cbar_Highlim(self, event):\r\n        event.Skip()\r\n\r\n    def OnSpinCtrl_Num_Cbar_Ticks(self, event):\r\n        event.Skip()\r\n\r\n    def OnComboBoxSelect_AxisUnits(self, event):\r\n        event.Skip()","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":29736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"623801123","text":"import sys, re, json, argparse\n\nparser = argparse.ArgumentParser(description=\"Log reader for nishanchi v9 json files.\")\nparser.add_argument(\"-i\", help=\"Input file name\", metavar=\"--input\", required=True)\n\nargs = parser.parse_args()\ninputFile = open(args.i, 'r')\n\na = inputFile.readline()\nwhile a != '':\n    b = json.loads(a)\n    print(b)\n    a = 
inputFile.readline()\n\n\n\n","sub_path":"logRead.py","file_name":"logRead.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"304669665","text":"userInput = input('Enter some words separated by spaces: ')\ninputArray = userInput.split(' ')\n\nprint('There are {} words in the list!'.format(len(inputArray)))\nif len(inputArray) >= 3:\n    print('The first 3 words in the list are: \\\"{} {} {}\\\".' .format(inputArray[0], inputArray[1], inputArray[2]))\nelse:\n    # join the list first; concatenating str + list raises TypeError\n    print('There are not three words in this list. The words in the list are: ' + ' '.join(inputArray))\n\nnoFirstOrLast = inputArray[1:(len(inputArray)-1)]\nprint('Here\\'s the list without the first or last words: ')\nprint(noFirstOrLast)\n\nlongerThanFour = []\nshortestWord = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nlongestWord = ''\nfor word in inputArray:\n    if len(str(word)) > 4:\n        longerThanFour.append(word)\n    if len(word) < len(shortestWord):\n        shortestWord = word\n    if len(word) > len(longestWord):\n        longestWord = word\n\nprint('The shortest word is: \\\"{}\\\", the longest word is: \\\"{}\\\".' .format(shortestWord, longestWord))\nprint('Words longer than four letters: ')\nprint(longerThanFour)\n\nsortedList = inputArray\nsortedList.sort()\nprint('Here the words are in alphabetical order: ')\nprint(sortedList)\nsortedList.sort(key = len)\nprint('And here they are in order of their length: ')\nprint(sortedList)\n","sub_path":"Extracting Words.py","file_name":"Extracting Words.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"311140575","text":"from collections import namedtuple\n\nimport pytest\nfrom django.http import Http404\nfrom model_mommy import mommy\nfrom rest_framework.test import APIClient\n\nfrom reqs.models import Agency, AgencyGroup, Policy, Requirement, Topic\nfrom reqs.views import policies as policies_views\n\nPolicySetup = namedtuple('PolicySetup', ('policies', 'reqs'))\n\n\n@pytest.fixture\ndef policy_setup():\n    policies = [mommy.make(Policy, policy_number='0',\n                           workflow_phase='published'),\n                mommy.make(Policy, policy_number='1',\n                           workflow_phase='published')]\n    reqs = [mommy.make(Requirement, policy=policies[0], _quantity=3),\n            mommy.make(Requirement, policy=policies[1], _quantity=4)]\n    yield PolicySetup(policies, reqs)\n\n\n@pytest.fixture\ndef policy_topic_setup(policy_setup):\n    topics = mommy.make(Topic, _quantity=3)\n    reqs = policy_setup.reqs\n    reqs[0][0].topics.add(topics[0])\n    reqs[0][1].topics.add(topics[1])\n    reqs[0][2].topics.add(topics[0], topics[1])\n    reqs[1][0].topics.add(topics[1])\n    reqs[1][1].topics.add(topics[1], topics[2])\n    yield policy_setup, topics\n\n\n@pytest.mark.django_db\ndef test_topics_counts_no_params(policy_topic_setup):\n    \"\"\"The API endpoint should include all requirements when no params are\n    given\"\"\"\n    (_, reqs), _ = policy_topic_setup\n    client = APIClient()\n\n    response = client.get(\"/policies/\").json()\n    assert response['count'] == 2\n    assert response['results'][0]['total_reqs'] == len(reqs[0])\n    assert response['results'][0]['relevant_reqs'] == len(reqs[0])\n    assert response['results'][1]['total_reqs'] == len(reqs[1])\n    assert response['results'][1]['relevant_reqs'] == len(reqs[1])\n\n\n@pytest.mark.django_db\ndef test_topics_counts_filter_req(policy_topic_setup):\n    \"\"\"The API endpoint should include only relevant policies when we filter\n    by an attribute of a requirement\"\"\"\n    (_, reqs), _ = 
policy_topic_setup\n client = APIClient()\n\n path = \"/policies/?requirements__req_id=\" + reqs[1][1].req_id\n response = client.get(path).json()\n assert response['count'] == 1\n assert response['results'][0]['total_reqs'] == len(reqs[1])\n assert response['results'][0]['relevant_reqs'] == 1\n\n\n@pytest.mark.django_db\ndef test_topics_counts_filter_by_one_topic(policy_topic_setup):\n \"\"\"The API endpoint should include only relevant policies when we filter\n by a single topic\"\"\"\n (_, reqs), topics = policy_topic_setup\n client = APIClient()\n\n path = \"/policies/?requirements__topics__id__in={0}\".format(topics[0].pk)\n response = client.get(path).json()\n assert response['count'] == 1\n assert response['results'][0]['total_reqs'] == len(reqs[0])\n # reqs[0][0] and reqs[0][2]\n assert response['results'][0]['relevant_reqs'] == 2\n\n\n@pytest.mark.django_db\ndef test_topics_counts_filter_by_multiple_topics(policy_topic_setup):\n \"\"\"The API endpoint should include only relevant policies when we filter\n by multiple topics\"\"\"\n (_, reqs), topics = policy_topic_setup\n client = APIClient()\n\n path = \"/policies/?requirements__topics__id__in={0},{1}\".format(\n topics[0].pk, topics[2].pk)\n response = client.get(path).json()\n assert response['count'] == 2\n assert response['results'][0]['total_reqs'] == len(reqs[0])\n # reqs[0][0] and reqs[0][2]\n assert response['results'][0]['relevant_reqs'] == 2\n assert response['results'][1]['total_reqs'] == len(reqs[1])\n # reqs[1][1]\n assert response['results'][1]['relevant_reqs'] == 1\n\n\n@pytest.mark.django_db\ndef test_agencies_direct(policy_setup):\n _, reqs = policy_setup\n agencies = mommy.make(Agency, _quantity=3)\n reqs[0][1].agencies.add(agencies[0], agencies[1])\n reqs[1][0].agencies.add(agencies[2])\n client = APIClient()\n\n path = \"/policies/?requirements__all_agencies__id__in={0}\".format(\n agencies[0].pk)\n response = client.get(path).json()\n assert response['count'] == 1\n assert response['results'][0]['relevant_reqs'] == 1\n\n path = \"/policies/?requirements__agencies__id__in={0}\".format(\n agencies[0].pk)\n response = client.get(path).json()\n assert response['count'] == 1\n assert response['results'][0]['relevant_reqs'] == 1\n\n\n@pytest.mark.django_db\ndef test_agencies_indirect(policy_setup):\n _, reqs = policy_setup\n group = mommy.make(AgencyGroup)\n agency_in_group, agency_no_group = mommy.make(Agency, _quantity=2)\n group.agencies.add(agency_in_group)\n reqs[0][0].agencies.add(agency_no_group)\n reqs[1][0].agency_groups.add(group)\n client = APIClient()\n\n path = \"/policies/?requirements__all_agencies__id__in={0}\".format(\n agency_in_group.pk)\n response = client.get(path).json()\n assert response['count'] == 1\n assert response['results'][0]['relevant_reqs'] == 1\n\n path = \"/policies/?requirements__agencies__id__in={0}\".format(\n agency_in_group.pk)\n response = client.get(path).json()\n assert response['count'] == 0\n\n path = \"/policies/?requirements__agency_groups__id__in={0}\".format(\n group.pk)\n response = client.get(path).json()\n assert response['count'] == 1\n assert response['results'][0]['relevant_reqs'] == 1\n\n\n@pytest.mark.django_db\ndef test_nonpublic_reqs():\n client = APIClient()\n policy = mommy.make(Policy, workflow_phase='published')\n mommy.make(Requirement, policy=policy, public=False)\n\n assert client.get('/policies/').json()['count'] == 0\n\n mommy.make(Requirement, _quantity=4, policy=policy)\n response = client.get('/policies/').json()\n assert response['count'] == 1\n 
assert policy.requirements.count() == 5\n    assert response['results'][0]['relevant_reqs'] == 4\n    assert response['results'][0]['total_reqs'] == 4\n\n\n@pytest.mark.django_db\ndef test_omb_policy_id():\n    client = APIClient()\n    omb_policy_id = \"M-123-4\"\n    path = \"/policies/{0}\".format(omb_policy_id)\n    response = client.get(path)\n    assert response.status_code == 301\n    mommy.make(Policy, omb_policy_id=omb_policy_id, workflow_phase='published')\n    response = client.get(path + '.json').json()\n    assert response['omb_policy_id'] == omb_policy_id\n\n\n@pytest.mark.django_db\ndef test_pk_id():\n    client = APIClient()\n    pk_id = 123\n    path = \"/policies/{0}\".format(pk_id)\n    response = client.get(path)\n    assert response.status_code == 301\n    mommy.make(Policy, pk=pk_id, workflow_phase='published')\n    response = client.get(path + '.json').json()\n    assert response['id'] == pk_id\n\n\n@pytest.mark.django_db\ndef test_slug():\n    client = APIClient()\n    slug = \"hello-there\"\n    path = f\"/policies/{slug}.json\"\n    response = client.get(path)\n    assert response.status_code == 404\n    mommy.make(Policy, slug=slug, pk=456, workflow_phase='published')\n    response = client.get(path).json()\n    assert response['id'] == 456\n\n\n@pytest.mark.django_db\ndef test_policy_or_404():\n    policy = mommy.make(Policy, omb_policy_id='AAA-BBB-CCC',\n                        workflow_phase='published')\n    assert policies_views.policy_or_404(f\"{policy.pk}\") == policy\n    assert policies_views.policy_or_404(\"AAA-BBB-CCC\") == policy\n    with pytest.raises(Http404):\n        policies_views.policy_or_404('does-not-exist')\n","sub_path":"api/reqs/tests/views_policies_tests.py","file_name":"views_policies_tests.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"388886895","text":"import requests\n\nBASE_URL = 'http://betaboss.speedpos.snsshop.net'\nHEADERS = {\n    'Accept': 'application/json, text/javascript, */*; q=0.01',\n    'Origin': 'http://betaboss.speedpos.snsshop.net',\n    'X-Requested-With': 'XMLHttpRequest',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4549.400 QQBrowser/9.7.12900.400',\n    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n    'Referer': 'http://betaboss.speedpos.snsshop.net/',\n    'Accept-Encoding': 'gzip, deflate',\n    'Accept-Language': 'zh-CN,zh;q=0.8'\n\n}\nlogin_data = {'login_name': 'admin', 'login_pwd': '666999'}\n\ndef url(url_path):\n    return \"%s%s\" % (BASE_URL, url_path)\n\ns = requests.Session()\nr = s.post(url('/index/index'), data=login_data,headers=HEADERS)\nprint(\"Resp:%s\" % r.text)\nrb = s.get(url('/bm/selectBank/112233'))\n\nif __name__ == '__main__':\n    print(\"SelectBank:%s\" % rb.text)\n    rc = s.get(url('/channel/index'))\n    print(\"Channel:%s\" % rc.text)\n","sub_path":"python/Test/Unittest/speedpos/lib/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"289511119","text":"\"\"\"\nCrawler basics 2\n\"\"\"\nfrom env import Env\nfrom lxml import etree\nimport re\n\n# Use the wrapped login environment\nenv = Env()\nsession = env.login()\n\n# Define the running total\ntotal = 0\n# This time we need to loop over every page\ncurr_page_url = \"http://www.glidedsky.com/level/web/crawler-basic-2?page=1\"\nwhile True:\n    response = session.get(curr_page_url, headers=env.headers)\n    # Grab every number box\n    html = etree.HTML(response.text)\n    div_list = 
html.xpath('//div[@class=\"card-body\"]//div[@class=\"col-md-1\"]')\n\n    for div in div_list:\n        total += int(div.xpath('normalize-space(./text())'))\n\n    cur_page_num = re.search('page=(\\\\d+)', curr_page_url).group(1)\n    print(f\"Sum of the first {cur_page_num} pages: {total}\")\n\n    # Is there another page?\n    next_page_btn = html.xpath('//ul[@class=\"pagination\"]/li[last()]')[0]\n    # If the next-page button has the disabled class, there is no next page\n    if next_page_btn.xpath('contains(@class, \"disabled\")'):\n        break\n    else:\n        # There is a next page, so update the current page url\n        curr_page_url = next_page_btn.xpath('./a/@href')[0]\n\nprint(f\"Result: {total}\")\n","sub_path":"crawler-basic-2.py","file_name":"crawler-basic-2.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283741600","text":"\n\nfrom xai.brain.wordbase.nouns._caretaker import _CARETAKER\n\n#class header\nclass _CARETAKERS(_CARETAKER, ):\n\tdef __init__(self,): \n\t\t_CARETAKER.__init__(self)\n\t\tself.name = \"CARETAKERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"caretaker\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_caretakers.py","file_name":"_caretakers.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"384234976","text":"from inpynamodb.constants import CAMEL_COUNT, ITEMS, LAST_EVALUATED_KEY\n\n\nclass ResultIterator(object):\n    \"\"\"\n    ResultIterator handles Query and Scan result pagination.\n\n    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination\n    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination\n    \"\"\"\n    def __init__(self, operation, args, kwargs, map_fn=None, limit=None):\n        self._operation = operation\n        self._args = args\n        self._kwargs = kwargs\n        self._map_fn = map_fn\n        self._limit = limit\n        self._needs_execute = True\n        self._total_count = 0\n\n    async def _execute(self):\n        data = await self._operation(*self._args, **self._kwargs)\n        self._count = data[CAMEL_COUNT]\n        self._items = data.get(ITEMS)  # not returned if 'Select' is set to 'COUNT'\n        self._last_evaluated_key = data.get(LAST_EVALUATED_KEY)\n        self._index = 0 if self._items else self._count\n        self._total_count += self._count\n\n    def __aiter__(self):\n        return self\n\n    async def __anext__(self):\n        try:\n            if self._limit == 0:\n                raise StopIteration\n\n            if self._needs_execute:\n                self._needs_execute = False\n                await self._execute()\n\n            while self._index == self._count and self._last_evaluated_key:\n                self._kwargs['exclusive_start_key'] = self._last_evaluated_key\n                await self._execute()\n\n            if self._index == self._count:\n                raise StopIteration\n\n            item = self._items[self._index]\n            self._index += 1\n            if self._limit is not None:\n                self._limit -= 1\n            if self._map_fn:\n                item = await self._map_fn(item)\n            return item\n\n        except StopIteration:\n            raise StopAsyncIteration\n\n    @property\n    def last_evaluated_key(self):\n        return self._last_evaluated_key\n\n    @property\n    def total_count(self):\n        return self._total_count\n","sub_path":"inpynamodb/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"587569384","text":"# Adrian Pena\n# CTEC 121 / Winter 2019\n# Module 4 / Problem Set 5\n# Problem 1 (25 points)\n\n\"\"\"\nUsing the graphics library, draw a picture of the side view of a car.\nMake sure to include:\n- two tires *\n- an area for windows *\n- and 
the roof. *\n- Include a front and rear bumper as well.\nUse all of the following types of objects and functions listed below:\n\n- Line *\n- Circle *\n- Rectangle *\n- Polygon *\n- setOutline *\n- setFill *\n\"\"\"\nfrom graphics import *\n\n\ndef main():\n win = GraphWin('Lines', 800, 800)\n win.setBackground('white')\n win.setCoords(-5, -5, 5, 5)\n\n car_body = Polygon(Point(-3.9, 0), Point(-3.9, .9),\n Point(-2.5, 1), Point(-1, 2), Point(1, 1.9), Point(3.5, .8), Point(3.5, 0), Point(-3.9, 0))\n car_body.setOutline('black')\n car_body.setWidth(3)\n car_body.setFill('red')\n car_body.draw(win)\n\n c_windshield = Polygon(Point(-2.2, 1), Point(-1.2, 1.7), Point(-1.2, 1))\n c_windshield.setFill('cyan')\n c_windshield.setOutline('blue')\n c_windshield.draw(win)\n c_window = Polygon(Point(-1.1, 1.8), Point(1, 1.7),\n Point(2.8, 1), Point(-1.1, 1))\n c_window.setFill('cyan')\n c_window.setOutline('blue')\n c_window.draw(win)\n v_door1 = Line(Point(-1.15, 1.9), Point(-1.15, 0))\n v_door1.setWidth(3)\n v_door2 = v_door1.clone()\n v_door2.move(2, 0)\n v_door1.draw(win)\n v_door2.draw(win)\n\n front_tire = Circle(Point(-2.75, 0), .6)\n front_tire.setFill('black')\n front_tire.setOutline('gray')\n rear_tire = front_tire.clone()\n rear_tire.move(4.75, 0)\n\n front_hub = Circle(Point(-2.75, 0), .4)\n front_hub.setFill('gray')\n rear_hub = front_hub.clone()\n rear_hub.move(4.75, 0)\n\n front_tire.draw(win)\n rear_tire.draw(win)\n front_hub.draw(win)\n rear_hub.draw(win)\n\n front_bump = Rectangle(Point(-4.2, .5), Point(-3.9, 0))\n front_bump.setFill('gray')\n front_bump.setWidth(3)\n front_bump.draw(win)\n rear_bump = front_bump.clone()\n rear_bump.move(7.7, 0)\n rear_bump.draw(win)\n\n # I made this to better see where I am placing points. I will comment it out when the drawing is done.\n # I probably should have cloned one X and one Y line and then moved the clones.\n # maybe there was an even better solution. 
Probably a for loop.\n # y1 = Line(Point(-4, 5), Point(-4, -5))\n # y1.draw(win)\n # y1 = Line(Point(-4, 5), Point(-4, -5))\n # y1.draw(win)\n # y2 = Line(Point(-3, 5), Point(-3, -5))\n # y2.draw(win)\n # y3 = Line(Point(-2, 5), Point(-2, -5))\n # y3.draw(win)\n # y4 = Line(Point(-1, 5), Point(-1, -5))\n # y4.draw(win)\n # y5 = Line(Point(0, 5), Point(0, -5))\n # y5.setFill('white')\n # y5.draw(win)\n # y6 = Line(Point(1, 5), Point(1, -5))\n # y6.draw(win)\n # y7 = Line(Point(2, 5), Point(2, -5))\n # y7.draw(win)\n # y8 = Line(Point(3, 5), Point(3, -5))\n # y8.draw(win)\n # y9 = Line(Point(4, 5), Point(4, -5))\n # y9.draw(win)\n\n # x1 = Line(Point(-5, -4), Point(5, -4))\n # x1.draw(win)\n # x2 = Line(Point(-5, -3), Point(5, -3))\n # x2.draw(win)\n # x3 = Line(Point(-5, -2), Point(5, -2))\n # x3.draw(win)\n # x4 = Line(Point(-5, -1), Point(5, -1))\n # x4.draw(win)\n # x5 = Line(Point(-5, 0), Point(5, 0))\n # x5.setFill('white')\n # x5.draw(win)\n # x6 = Line(Point(-5, 1), Point(5, 1))\n # x6.draw(win)\n # x7 = Line(Point(-5, 2), Point(5, 2))\n # x7.draw(win)\n # x8 = Line(Point(-5, 3), Point(5, 3))\n # x8.draw(win)\n # x9 = Line(Point(-5, 4), Point(5, 4))\n # x9.draw(win)\n\n win.getMouse()\n\n\nmain()\n","sub_path":"problem-set-5-problem-1.py","file_name":"problem-set-5-problem-1.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"410640149","text":"import os\nfrom tkinter import filedialog, IntVar, messagebox\nimport tkinter as tk\nfrom tkinter import ttk\nfrom year3.plantLab.Rijstproef.renamer_3 import rice_rename\nfrom year3.plantLab.Sojaproef.renamer import soy_rename\nfrom tkinter.ttk import *\nfrom PIL import ImageTk, Image\nimport time\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef main():\n\n HEIGHT = 700\n WIDTH = 800\n\n def browse_button1(): # check: not empty!\n infile = filedialog.askdirectory()\n pathlabel1.config(text=infile)\n return infile\n\n def browse_button2(): # check: empty!\n filename = filedialog.askdirectory()\n pathlabel2.config(text=filename)\n return filename\n\n def job(infile, outfile, rice, soy):\n if rice == soy:\n messagebox.showinfo(\"Error message\", \"Choose (either) Rice or Soy!\")\n else:\n # progress\n size = len(os.listdir(infile)) + 1\n barVar = tk.DoubleVar()\n barVar.set(0)\n progress = Progressbar(lowest_frame, orient='horizontal', length=100, style='black.Horizontal.TProgressbar',\n variable=barVar, mode='determinate')\n progress['maximum'] = size\n progress.pack()\n start_time = time.time()\n\n # renamer\n if rice:\n rice_rename(progress, infile, outfile)\n else:\n soy_rename(progress, infile, outfile)\n end_time = time.time()\n\n end = tk.Label(end_frame)\n end.config(text=\"Renaming Completed. 
(took %i seconds)\" % (end_time-start_time), font=(\"calibri\", 12))\n end.place(relwidth=1, relheight=1)\n\n root = tk.Tk()\n canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)\n canvas.pack()\n\n # image logo\n upper_img = tk.Frame(root)\n upper_img.place(relx=0.5, rely=0.1, relwidth=0.75, relheight=0.1, anchor='n')\n img = Image.open(\"C:/Users/tmdad/Documents/Master/year3/plantLab/lab.png\")\n img = img.resize((60, 60), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n logo = tk.Label(upper_img, image=img)\n logo.pack()\n\n # input\n upper_frame1 = tk.Frame(root)\n upper_frame1.place(relx=0.5, rely=0.25, relwidth=0.75, relheight=0.1, anchor='n')\n label1 = tk.Label(upper_frame1, text=\"Select Input Folder: \", font=(\"calibri\", 12, \"bold\"))\n label1.place(relwidth=1, relheight=1)\n mid1_frame = tk.Frame(root)\n mid1_frame.place(relx=0.5, rely=0.35, relwidth=0.75, relheight=0.1, anchor='n')\n button1 = tk.Button(mid1_frame, text=\"Browse for folder...\", font=(\"calibri\", 10), bg='lightgray', command=lambda: browse_button1())\n button1.config(borderwidth=0)\n button1.pack()\n\n # show path----\n mid2_frame = tk.Frame(root)\n mid2_frame.place(relx=0.5, rely=0.4, relwidth=0.75, relheight=0.025, anchor='n')\n pathlabel1 = tk.Label(mid2_frame, font=(\"calibri\", 10))\n pathlabel1.place(relwidth=1, relheight=1)\n\n # output\n upper_frame2 = tk.Frame(root)\n upper_frame2.place(relx=0.5, rely=0.45, relwidth=0.75, relheight=0.1, anchor='n')\n label2 = tk.Label(upper_frame2, text=\"Select Output Folder: \", font=(\"calibri\", 12, \"bold\"))\n label2.place(relwidth=1, relheight=1)\n mid2_frame = tk.Frame(root)\n mid2_frame.place(relx=0.5, rely=0.55, relwidth=0.75, relheight=0.1, anchor='n')\n button2 = tk.Button(mid2_frame, text=\"Browse for folder...\", font=(\"calibri\", 10), bg='lightgray', command=lambda: browse_button2())\n button2.config(borderwidth=0)\n button2.pack()\n\n # show path----\n mid2_frame = tk.Frame(root)\n mid2_frame.place(relx=0.5, rely=0.6, relwidth=0.75, relheight=0.025, anchor='n')\n pathlabel2 = tk.Label(mid2_frame, font=(\"calibri\", 10))\n pathlabel2.place(relwidth=1, relheight=1)\n\n # checkboxes\n check = tk.Frame(root)\n check.place(relx=0.5, rely=0.7, relwidth=0.2, relheight=0.025, anchor='n')\n rice = IntVar()\n soy = IntVar()\n c1 = Checkbutton(check, text=\"Rice\", variable=rice).pack(side=tk.LEFT)\n c2 = Checkbutton(check, text=\"Soy\", variable=soy).pack(side=tk.RIGHT)\n\n # rename: take text from labels\n lower_frame = tk.Frame(root)\n lower_frame.place(relx=0.5, rely=0.8, relwidth=0.75, relheight=0.1, anchor='n')\n button = tk.Button(lower_frame, text=\"Rename\", bg='lightgray', command=lambda: job(pathlabel1.cget(\"text\"), pathlabel2.cget(\"text\"), rice.get(), soy.get()))\n button.config(borderwidth=0)\n button.pack()\n\n # progress and completed\n lowest_frame = tk.Frame(root)\n lowest_frame.place(relx=0.5, rely=0.9, relwidth=0.75, relheight=0.1, anchor='n')\n end_frame = tk.Frame(root)\n end_frame.place(relx=0.5, rely=0.91, relwidth=0.75, relheight=0.1, anchor='n')\n\n root.mainloop()\n\n\n# Driver Code\nif __name__ == '__main__':\n\n # Calling main() function\n main()\n\n\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"494947719","text":"from olympia.amo.tests import TestCase\nfrom olympia.blocklist import forms\nfrom olympia.blocklist.models import BlocklistItem, BlocklistPlugin\n\n\nclass 
BlocklistFormTest(TestCase):\n\n def setUp(self):\n super(BlocklistFormTest, self).setUp()\n self.blitem = BlocklistItem.objects.create()\n self.blplugin = BlocklistPlugin.objects.create()\n\n def test_app_form_only_blitem(self):\n data = {'blitem': self.blitem.pk, 'blplugin': None}\n form = forms.BlocklistAppForm(data)\n assert form.is_valid()\n\n def test_app_form_only_blplugin(self):\n data = {'blplugin': self.blplugin.pk, 'blitem': None}\n form = forms.BlocklistAppForm(data)\n assert form.is_valid()\n\n def test_app_form_neither_blplugin_and_blitem(self):\n data = {'blitem': None, 'blplugin': None}\n form = forms.BlocklistAppForm(data)\n assert not form.is_valid()\n assert 'One and only one' in str(form.errors)\n\n def test_app_form_both_blplugin_and_blitem(self):\n data = {'blitem': self.blitem.pk, 'blplugin': self.blplugin.pk}\n form = forms.BlocklistAppForm(data)\n assert not form.is_valid()\n assert 'One and only one' in str(form.errors)\n","sub_path":"src/olympia/blocklist/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15500268","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 12 12:18:44 2021\r\n\r\n@author: victo\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ntrain = pd.read_csv('train.csv')\r\ntest = pd.read_csv('test.csv')\r\n\r\nhouse_data = pd.concat([train, test], \r\n keys = ['train', 'test'], axis=0)\r\n\r\nhouse_data.head()\r\n\r\nhouse_data.info()\r\nhouse_data.columns\r\n\r\n## DEALING WITH MISSING VALUES \r\n\r\nmissing = house_data.isna().sum().reset_index()\r\nmissing = missing[missing[0] > 1] \r\n\r\nhouse_data.loc[(house_data.MSZoning.isnull()) & (house_data.Neighborhood == \"IDOTRR\"), \"MSZoning\"] = \"C (all)\"\r\nhouse_data.loc[(house_data.MSZoning.isnull()) & (house_data.Neighborhood == \"Mitchel\"), \"MSZoning\"] = \"RL\"\r\n\r\nhouse_data.groupby('Street')['LotFrontage'].mean()\r\n\r\ndef Fill_with_mean(row):\r\n if row['Street'] == 'Grvl':\r\n return 88.2\r\n if row['Street'] == 'Pave':\r\n return 69.227817\r\n\r\nhouse_data['LotFrontage'] = house_data.apply(lambda x: Fill_with_mean(x), axis = 1)\r\n\r\nhouse_data.groupby('MasVnrType')['MasVnrArea'].mean()\r\n\r\ndef Ms_filler(row):\r\n if np.isnan(row['MasVnrArea']):\r\n if row['MasVnrType']== 'BrkCmn':\r\n return 195.48\r\n if row['MasVnrType'] == 'BrkFace':\r\n return 261.67\r\n if row['MasVnrType'] == 'None':\r\n return 0.8\r\n if row['MasVnrType'] == 'Stone':\r\n return 239.55\r\n else:\r\n return row['MasVnrArea']\r\n\r\nhouse_data['MasVnrArea'] = house_data.apply(lambda x: Ms_filler(x), axis = 1)\r\n\r\nhouse_data['BsmtUnfSF'] = house_data['BsmtUnfSF'].fillna(0)\r\nhouse_data['BsmtFullBath'] = house_data['BsmtFullBath'].fillna(0)\r\nhouse_data['BsmtHalfBath'] = house_data['BsmtHalfBath'].fillna(0)\r\n\r\nhouse_data[['BsmtFinType1', 'BsmtFinType2']] = house_data[['BsmtFinType1', 'BsmtFinType2']].fillna('Unf')\r\n\r\nhouse_data.groupby('BsmtFinType2')['BsmtFinSF2'].mean()\r\nhouse_data.groupby('BsmtFinType1')['BsmtFinSF1'].mean()\r\ndef BstmFinSF1_NA(row):\r\n if np.isnan(row['BsmtFinSF1']):\r\n if row['BsmtFinType1']== 'ALQ':\r\n return 621.051282\r\n if row['BsmtFinType1'] == 'BLQ':\r\n return 527.732342\r\n if row['BsmtFinType1'] == 'GLQ':\r\n return 808.012956\r\n if row['BsmtFinType1'] == 'LwQ':\r\n return 387.064935\r\n if row['BsmtFinType1'] == 'Rec':\r\n return 465.524306\r\n if row['BsmtFinType1'] == 'Unf':\r\n 
return 0.000000\r\n else:\r\n return row['BsmtFinSF1']\r\n\r\nhouse_data['BsmtFinSF1'] = house_data.apply(lambda x: BstmFinSF1_NA(x), axis = 1)\r\n\r\ndef BstmFinSF2_NA(row):\r\n if np.isnan(row['BsmtFinSF2']):\r\n if row['BsmtFinType2']== 'ALQ':\r\n return 621.051282\r\n if row['BsmtFinType2'] == 'BLQ':\r\n return 527.732342\r\n if row['BsmtFinType2'] == 'GLQ':\r\n return 808.012956\r\n if row['BsmtFinType2'] == 'LwQ':\r\n return 387.064935\r\n if row['BsmtFinType2'] == 'Rec':\r\n return 465.524306\r\n if row['BsmtFinType2'] == 'Unf':\r\n return 0.000000\r\n else:\r\n return row['BsmtFinSF2']\r\n\r\nhouse_data['BsmtFinSF2'] = house_data.apply(lambda x: BstmFinSF2_NA(x), axis = 1)\r\n\r\ntt = house_data[house_data['GarageCars'].isna() == True]\r\n\r\nhouse_data.groupby('GarageType')['GarageYrBlt'].count()\r\nhouse_data['GarageType'] = house_data['GarageType'].fillna('Unbuilt')\r\n\r\nhouse_data.groupby('GarageType')['GarageYrBlt'].mean()\r\ndef Garage_fill(row):\r\n if row['GarageType'] == 'Unbuilt':\r\n return 0\r\n if row['GarageType'] == 'Detchd':\r\n return 1961\r\n else:\r\n return row['GarageYrBlt']\r\n \r\nhouse_data['GarageYrBlt'] = house_data.apply(lambda x: Garage_fill(x), axis = 1 )\r\n\r\nhouse_data[['GarageCars','GarageArea']] = house_data[['GarageCars','GarageArea']].fillna(0)\r\n\r\n\r\nhouse_data[['Utilities','Alley', 'MasVnrType', 'BsmtQual', 'Fence', 'FireplaceQu', 'MiscFeature', 'PoolQC', 'BsmtCond', 'BsmtExposure','Electrical' ,'GarageQual', 'GarageCond','KitchenQual']] = house_data[['Utilities','Alley', 'MasVnrType', 'BsmtQual', 'Fence', 'FireplaceQu', 'MiscFeature', 'PoolQC', 'BsmtCond', 'BsmtExposure','Electrical', 'GarageQual', 'GarageCond','KitchenQual']].fillna('None') \r\nhouse_data['Utilities'] = house_data['Utilities'].fillna('AllPub')\r\nhouse_data['Functional'] = house_data['Functional'].fillna('Typ')\r\n\r\ncorrs = house_data.corr()\r\n\r\nhouse_data[['GarageFinish', 'GarageQual']] = house_data[['GarageFinish','GarageQual']].fillna('Unf')\r\n\r\nhouse_data['SaleType'] = house_data['SaleType'].fillna('Oth')\r\nhouse_data[['Exterior1st','Exterior2nd']] = house_data[['Exterior1st','Exterior2nd']] .fillna('None')\r\n\r\n\r\nhouse_data[(house_data.Neighborhood == \"IDOTRR\") & (house_data.OverallQual < 5) & (house_data.YearRemodAdd < 1960) & (house_data.ExterQual == \"Fa\")].Functional.value_counts()\r\n\r\nmissing = house_data.isna().sum().reset_index()\r\nmissing = missing[missing[0] > 1] \r\n\r\n\"\"\"\r\nTHE MISSING VALUES ARE DONE, THE ONLY ONE IS FUNCTIONAL. CHECK THIS OUT BEFORE MOVING ON. 
\r\n\r\nNEXT STEP WOULD BE FEATURE ENGINEERING\r\n\r\n\"\"\"\r\n\r\nhouse_data['House_age'] = house_data['YrSold'] - house_data['YearBuilt'] \r\n\r\nhouse_data['Remodeled'] = house_data['YrSold'] - house_data['YearRemodAdd']\r\n\r\nhouse_data['TotalBsmtSF'] = house_data['BsmtFinSF1'] + house_data['BsmtFinSF2'] + house_data['BsmtUnfSF']\r\n\r\nhouse_data['TotalSF'] = house_data['1stFlrSF'] + house_data['2ndFlrSF']\r\n\r\nhouse_data['TotalPorch'] = house_data['OpenPorchSF'] + house_data['EnclosedPorch'] + house_data['3SsnPorch'] + house_data['ScreenPorch']\r\n\r\ndef exterior(row): #check if more than one exterior material covering\r\n    if row['Exterior1st'] == row['Exterior2nd']:\r\n        return 1\r\n    else:\r\n        return 2 \r\n    \r\nhouse_data['number_of_exteriors'] = 0\r\nhouse_data['number_of_exteriors'] = house_data.apply(lambda row: exterior(row), axis = 1)\r\n\r\ndef redundant_exterior(row): # set second exterior column to 'None' if same as the first\r\n    if row['Exterior1st'] == row['Exterior2nd']:\r\n        row['Exterior2nd'] = 'None'\r\n    else:\r\n        row['Exterior2nd'] = row['Exterior2nd']\r\n    return row['Exterior2nd']\r\n\r\nhouse_data['Exterior2nd'] = house_data.apply(lambda row: redundant_exterior(row), axis = 1)\r\n\r\nhouse_data.drop(['GrLivArea'], axis = 1, inplace=True)\r\n\r\nhouse_data.to_csv('data_clean.csv')\r\n\r\nhouse_data.columns\r\n","sub_path":"Housing_regression/Data_cleaning.py","file_name":"Data_cleaning.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"425702853","text":"import pandas as pd\n\ncourse_sales = { 'course': ['Python', 'Ruby', 'Excel', 'C++'],\n                'day':['Mon', 'Tue', 'Wed', 'Fri' ],\n                'price': [5, 10, 15, 20],\n                'sale': [2,3,4,5]\n                }\n\n#print(course_sales)\n\n# df_sales = pd.DataFrame(course_sales)\n# print(df_sales)\n\n# Create individual lists from sales data\ncourse = ['Python','Ruby','Excel','C++']\n\nday = ['Mon', 'Tue', 'Tue', 'Wed']\n\nprice = [5,10,15,20]\n\nsale = [2,3,5,7]\n\nlabels = ['Course', 'Day', 'Price', 'No. of Sales']\n\ncols = [ course, day, price, sale ]\n\nmaster_list = list(zip(labels, cols))\n#print(master_list)\n\ndata = dict(master_list)\n\n#data dictionary to dataframe\nnew_sales = pd.DataFrame(data)\nprint(new_sales)\nprint(type(new_sales))","sub_path":"dataframe_from_dictionary.py","file_name":"dataframe_from_dictionary.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"477602778","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport networkx as nx;\nfrom random import random;\nimport numpy as np;\n\n# Read data from the dataset, and create graph G_fb\nG_fb = nx.read_edgelist(\"facebook_combined.txt\", create_using = nx.Graph(), nodetype=int);\n\n# Show the number of edges in G_fb\n\nprint(\"edges = \" + str(G_fb.number_of_edges()))\n\n# Show number of nodes in G_fb\nnodes = G_fb.number_of_nodes()\nprint(\"nodes = \" + str(G_fb.number_of_nodes()))\n\n# TASK1. Now your task is to compute the probability whether there is an edge between two vertices.\n## edge_probab = ...\nmaximal_possible_number_of_edges = nodes*(nodes -1)/2\nprint(\"maximal_possible_number_of_edges =\"+str(maximal_possible_number_of_edges))\nedge_probability = G_fb.number_of_edges() / maximal_possible_number_of_edges\nprint(\"edge_probability:\"+ str(edge_probability))\n\n# Now we have to generate a random graph. 
First we initialize it\nG_rand = nx.Graph();\n\n# TASK3. generate edges in G_rand at random:\nk = nodes\nfor i in range(0,k) :\n for j in range(0,i) :\n if np.random.binomial(1, edge_probability):\n G_rand.add_edge(i,j)\n # Add an edge between vertices i and j, with probability edge_probab (as in G_fb)\n # ...\n \n# Now we print out the number of edges and the ACC of the new graph\nprint(\"rgraph_edges = \" + str(G_rand.number_of_edges()))\n\nav_clust_coeff = nx.average_clustering(G_rand)\n\nprint(\"rgraph_acc = \" + str(av_clust_coeff))\n\n# The results which should be submitted to the grader include the ACC of G_fb and of G_rand. Good luck!\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"clustering/Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"455251350","text":"import yfinance as yf\nimport pandas as pd\nfrom .IntradayTrading import IntradayTrading\nimport json\n\n\nclass Stock:\n\n def __init__(self, stock_code):\n self.stock_data = pd.DataFrame()\n self.stock_history = pd.DataFrame()\n if not self.getStockData(stock_code):\n raise ValueError\n\n def getStockData(self, stock_code):\n self.stock_data = yf.Ticker(stock_code)\n self.stock_history = self.stock_data.history(period='max')\n if self.stock_history.empty:\n return False\n else:\n return True\n\n def stock_history_to_json(self):\n temp = self.stock_history.reset_index()\n temp = temp.set_index(['Date'], drop=False)\n temp = temp.sort_index()\n temp = temp.tail(365)\n jstr = ''\n for index, row in temp.iterrows():\n intra = IntradayTrading(str(row['Date']), row['Open'],\n row['High'], row['Low'],\n row['Close'], row['Dividends'],\n row['Stock Splits'])\n jstr += json.dumps(intra, default=lambda obj: obj.__dict__, sort_keys=True, indent=4)\n jstr += ','\n\n jstr = jstr.strip(',')\n return '{\\\"data\\\":['+jstr+']}'","sub_path":"echarts/stock/Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595551783","text":"from PIL import Image as PilImage, ImageChops\nimport imghdr\nfrom manga_py.libs.base.file import File\nfrom pathlib import PurePath\n\n\nclass Image:\n _image = None\n _format = None\n\n def __init__(self, image):\n if isinstance(image, str):\n self._image = PilImage.open(image)\n elif isinstance(image, PilImage.Image):\n self._image = image\n\n def __add__(self, other):\n margin = 5\n size = self._image.size\n other_size = other.size\n if (\n size[0] < (other_size[0] + 2 * margin) or\n size[1] < (other_size[1] + 2 * margin)\n ):\n other_size = (size[0] - margin*2, size[1] - margin*2)\n other.resize(other_size, PilImage.ANTIALIAS)\n self._image.paste(other, (margin, margin))\n\n def set_out_format(self, format: str):\n \"\"\"\n https://pillow.readthedocs.io/en/latest/handbook/image-file-formats.html\n :param format:\n :return:\n \"\"\"\n self._format = format\n\n def save(self, fp, **kwargs):\n if self._format is not None and isinstance(fp, (str, PurePath)):\n fp = PurePath(str(fp)).with_suffix('.' 
+ self._format)\r\n        self._image.save(fp, format=self._format, **kwargs)\r\n\r\n    def calc_sides(self, need, inside=True) -> PilImage:\r\n        image = self._image.copy()\r\n        orig = image.size\r\n        if inside:\r\n            scale = max(*orig) / max(*need)\r\n        else:\r\n            scale = min(*orig) / min(*need)\r\n        return image.resize((scale * need[0], scale * need[1]), PilImage.ANTIALIAS)\r\n\r\n    def auto_crop(self):\r\n        bg = PilImage.new(\r\n            self._image.mode,\r\n            self._image.size,\r\n            self._image.getpixel((1, 1))\r\n        )\r\n        diff = ImageChops.difference(self._image, bg)\r\n        diff = ImageChops.add(diff, diff, 2.0, -100)\r\n        bbox = diff.getbbox()\r\n        diff.close()\r\n        self._image = self._image.crop(bbox)\r\n        return self\r\n\r\n    def auto_split(self):  # TODO\r\n        pass\r\n\r\n    def manual_crop(self, sides=(0, 0, 0, 0)):  # (top, right, bottom, left)\r\n        \"\"\"\r\n        Relative sizes!\r\n        :param sides:\r\n        :return:\r\n        \"\"\"\r\n        self._image = self._image.crop(sides)\r\n        return self\r\n\r\n    def grayscale(self):\r\n        return self._image.convert('LA')\r\n\r\n    @classmethod\r\n    def process(cls, file, args: dict):\r\n        \"\"\"\r\n        :param file:\r\n        :param args:\r\n        :type file File\r\n        :return:\r\n        \"\"\"\r\n        _orig = True\r\n        img = cls(file.path_location)\r\n\r\n        # 'crop-blank'\r\n        if args['crop_blank']:\r\n            _orig = False\r\n            img.auto_crop()\r\n\r\n        if args['Xt'] > 0 or args['Xr'] > 0 or args['Xb'] > 0 or args['Xl'] > 0:\r\n            _orig = False\r\n            param = (args['Xt'], args['Xr'], args['Xb'], args['Xl'])\r\n            img.manual_crop(param)\r\n\r\n        if args['jpg']:\r\n            _orig = False\r\n            img.set_out_format('jpg')  # save() reads self._format, so use the setter\r\n        elif args['png']:\r\n            _orig = False\r\n            img.set_out_format('png')\r\n\r\n        if _orig:\r\n            return None\r\n        return img\r\n\r\n    @classmethod\r\n    def real_extension(cls, path):\r\n        ext = imghdr.what(path)\r\n        if ext:\r\n            ext = '.' + ext\r\n        return ext\r\n","sub_path":"manga_py/libs/modules/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640107988","text":"revenue = int(input(\"enter revenue here\"))\r\ncogs = int(input(\"enter cost of goods sold here\"))\r\ngrossprofit = revenue - cogs\r\nprint(\"gross profit is\", grossprofit)\r\nsellingexpenses = int(input(\"enter selling expenses here\"))\r\nadministrativeexpenses = int(input(\"enter administrative expenses here\"))\r\ntotalopexpenses = sellingexpenses + administrativeexpenses\r\nprint(\"total operating expenses are\", totalopexpenses)\r\noperatingincome = grossprofit - totalopexpenses\r\nprint(\"operating income is\", operatingincome)\r\ninterestexpense = int(input(\"enter interest expense here\"))\r\nincomebeforetaxes = operatingincome - interestexpense\r\nprint(\"income before taxes is\", incomebeforetaxes)\r\ntax = int(input(\"enter tax expense here\"))\r\nnetincomeaftertaxes = incomebeforetaxes - tax\r\nprint(\"net income after taxes is\", netincomeaftertaxes)\r\nshares = int(input(\"enter shares outstanding here\"))\r\neps = netincomeaftertaxes / shares\r\nprint(\"EPS is\", eps)","sub_path":"income statement.py","file_name":"income statement.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"508300193","text":"# UnemploymentOffice - Brian Lee and Max Millar\n# SoftDev1 pd6\n# K#10 -- Jinja Tuning\n# 2018-09-22\nfrom flask import Flask, render_template\nfrom util import random_job\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main_page():\n    return '''

<h1>Welcome!</h1>\n<a href=\"/occupations/\">Click here for Occupations Table.</a>
'''\n\n@app.route('/occupations/')\ndef random_occupation():\n job_dict = random_job.job_dict()\n job_rand = random_job.weighted_probability(job_dict)\n return render_template(\"occupations.html\",\n jobs = job_dict,\n random_job = job_rand) \n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","sub_path":"10_occupy_flask_st/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"589038706","text":"#!/usr/bin/python -tt\n# -*- coding: utf-8 -*-\n\n\nimport sys\nfrom PyQt4.QtGui import *\n\n\nif __name__ == '__main__':\n application = QApplication(sys.argv)\n\n widget = QWidget()\n\n widget.resize(320, 240)\n widget.setWindowTitle(\"Hello, world!\")\n widget.show()\n\n sys.exit(application.exec_())","sub_path":"lesson16/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"497769315","text":"\"\"\"\r\nDemonstrates RoboSumo with pre-trained policies.\r\n\"\"\"\r\nimport click\r\nimport gym\r\nimport os\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport robosumo.envs\r\n\r\nfrom robosumo.policy_zoo import LSTMPolicy, MLPPolicy\r\nfrom robosumo.policy_zoo.utils import load_params, set_from_flat,save_params_to_npy\r\n\r\n\r\nimport imageio\r\nimageio.plugins.ffmpeg.download()\r\n\r\nPOLICY_FUNC = {\r\n \"mlp\": MLPPolicy,\r\n \"lstm\": LSTMPolicy,\r\n}\r\n\r\n@click.command()\r\n@click.option(\"--env\", type=str,\r\n default=\"RoboSumo-Ant-vs-Ant-v0\", show_default=True,\r\n help=\"Name of the environment.\")\r\n@click.option(\"--policy-names\", nargs=2, type=click.Choice([\"mlp\", \"lstm\"]),\r\n default=(\"mlp\", \"mlp\"), show_default=True,\r\n help=\"Policy names.\")\r\n@click.option(\"--param-versions\", nargs=2, type=int,\r\n default=(1, 1), show_default=True,\r\n help=\"Policy parameter versions.\")\r\n@click.option(\"--max_episodes\", type=int,\r\n default=20000, show_default=True,\r\n help=\"Number of episodes.\")\r\n@click.option(\"--traj_length\", type=int,\r\n default=1, show_default=True,\r\n help=\"How many steps in a trajectory\")\r\n\r\n\r\ndef main(env, policy_names, param_versions, max_episodes, traj_length):\r\n # Construct paths to parameters\r\n curr_dir = os.path.dirname(os.path.realpath(__file__))\r\n params_dir = os.path.join(curr_dir, \"../robosumo/policy_zoo/assets_selfplay_robosumo/\")\r\n agent_names = [env.split('-')[1].lower(), env.split('-')[3].lower()]\r\n param_paths = []\r\n for a, p, v in zip(agent_names, policy_names, param_versions):\r\n param_paths.append(\r\n os.path.join(params_dir, a, p, \"agent-params-v%d.npy\" % v)\r\n )\r\n\r\n # Create environment\r\n env = gym.make(env)\r\n\r\n for agent in env.agents:\r\n agent._adjust_z = -0.5\r\n\r\n tf_config = tf.ConfigProto(\r\n inter_op_parallelism_threads=1,\r\n intra_op_parallelism_threads=1)\r\n sess = tf.Session(config=tf_config)\r\n sess.__enter__()\r\n\r\n # Initialize policies\r\n policy = []\r\n for i, name in enumerate(policy_names):\r\n scope = \"policy\" + str(i)\r\n print (\"agent \",i)\r\n policy.append(\r\n POLICY_FUNC[name](scope=scope, reuse=False,\r\n ob_space=env.observation_space.spaces[i],\r\n ac_space=env.action_space.spaces[i],\r\n hiddens=[64, 64], normalize=True)\r\n )\r\n sess.run(tf.variables_initializer(tf.global_variables()))\r\n\r\n # Load policy parameters\r\n for i in range(len(policy)):\r\n try:\r\n param = 
load_params(param_paths[i])\r\n set_from_flat(policy[i].get_variables(), param)\r\n print(\"successful loaded policy %s for agent %d\" %(param_paths[i], i))\r\n except:\r\n print (\"cannot load file\", param_paths[i], \"using random initialized\")\r\n\r\n set_from_flat(policy[1].get_variables(), load_params(param_paths[1][:-4]+\"-pretrained.npy\"))\r\n # Play matches between the agents\r\n num_episodes, nstep ,nstep_traj, n_avg_rew = 0, 0, 0, 0\r\n #reward of one episode\r\n total_reward = [0.0 for _ in range(len(policy))]\r\n # reward across #show_anime_ever episodes\r\n reward_across_episodes = [[] for _ in range(len(policy))]\r\n\r\n total_scores = [0 for _ in range(len(policy))]\r\n observation = env.reset()\r\n tr_observations = [None for _ in range(len(policy))]\r\n tr_actions =[ None for _ in range(len(policy)) ]\r\n tr_vpred = [ [] for _ in range(len(policy))]\r\n tr_rewards=[ [] for _ in range(len(policy))]\r\n show_anime_per = 20\r\n update_opponent_per = 100\r\n gamma=0.995\r\n avg_rew = [0.0,0.0]\r\n\r\n print(\"-\" * 5 + \"Episode %d \" % (num_episodes + 1) + \"-\" * 5)\r\n\r\n # cache all trajectory losses over #show_anime_per episodes\r\n vpred_losses=[]\r\n max_episodes=float('inf')\r\n avg_action=0.0\r\n action_taken =0.0\r\n if policy_names[0]=='lstm':\r\n traj_length=1\r\n\r\n while num_episodes < max_episodes:\r\n if (num_episodes+1)%show_anime_per==0 or num_episodes==0:\r\n env.render()\r\n\r\n action_value_logstd = tuple([\r\n pi.act(stochastic=True, observation=observation[i])\r\n for i, pi in enumerate(policy)\r\n ])\r\n action=[]\r\n value=[]\r\n logstd=[]\r\n\r\n for x in action_value_logstd:\r\n action.append(x[0])\r\n value.append(x[1]['vpred'])\r\n logstd.append(x[2])\r\n action=tuple(action)\r\n value=tuple(value)\r\n logstd=tuple(logstd)\r\n \r\n # Disable opponent movement for better training agent 0\r\n action[1].fill(0.0)\r\n\r\n for i in range(len(policy)):\r\n if i ==0:\r\n try:\r\n tr_observations[i] = np.vstack((tr_observations[i],observation[i]))\r\n except:\r\n tr_observations[i] = observation[i]\r\n try:\r\n tr_actions[i] = np.vstack((tr_actions[i], action[i]))\r\n except:\r\n tr_actions[i] = action[i]\r\n tr_vpred[i].append(value[i])\r\n\r\n\r\n observation, reward, done, infos = env.step(action)\r\n\r\n draw = True\r\n nstep += 1\r\n nstep_traj += 1\r\n\r\n for i in range(len(policy)):\r\n if i ==0:\r\n total_reward[i] += reward[i]\r\n tr_rewards[i].append(reward[i])\r\n if nstep_traj==traj_length or done[0]:\r\n for i in range(len(tr_rewards)):\r\n if i ==0:\r\n reward_onwards = 0\r\n for j in range(len(tr_rewards[i])-1,-1,-1):\r\n tr_rewards[i][j] += gamma * reward_onwards\r\n reward_onwards = tr_rewards[i][j]\r\n tr_rewards[i]=np.array(tr_rewards[i])\r\n tr_vpred = [np.array(x) for x in tr_vpred]\r\n tr_vpred_target = tr_rewards\r\n\r\n #advantage function: A = r(s+1)+ gamma * v(s+1) - v(s)\r\n tr_adv = [ tr_rewards[i] - tr_vpred[i] for i in range(len(policy))]\r\n\r\n '''\r\n print ('observation shape ', tr_observations[0].shape)\r\n print ('action shape', tr_actions[0].shape)\r\n print ('vpred shape ',tr_vpred[0].shape)\r\n print ('reward shape ',tr_rewards[0].shape)\r\n print ('vpred target shape ',tr_vpred_target[0].shape)\r\n print ('adv shape', tr_adv[0].shape)\r\n '''\r\n if nstep_traj>50 or traj_length<=50:\r\n vpred_loss = policy[0].update_policy(tr_observations[0],tr_actions[0],reward=tr_adv[0], target=tr_vpred_target[0])\r\n vpred_losses.append(vpred_loss)\r\n\r\n nstep_traj=0\r\n avg_action=np.mean(tr_actions[0],axis=0)\r\n 
tr_observations = [None for _ in range(len(policy))]\r\n tr_actions =[ None for _ in range(len(policy)) ]\r\n tr_vpred = [ [] for _ in range(len(policy))]\r\n tr_rewards=[ [] for _ in range(len(policy))]\r\n\r\n\r\n\r\n if done[0]:\r\n for i in range(len(policy)):\r\n if 'winner' in infos[i]:\r\n draw = False\r\n total_scores[i] += 1\r\n '''\r\n print(\"Winner: Agent {}, Scores: {}, Total Episodes: {}\"\r\n .format(i, total_scores, num_episodes))\r\n '''\r\n if draw:\r\n pass\r\n '''\r\n print(\"Match tied: Agent {}, Scores: {}, Total Episodes: {}\"\r\n .format(i, total_scores, num_episodes))\r\n '''\r\n for i in range(len(policy)):\r\n reward_across_episodes[i].append(total_reward[i])\r\n \r\n if ((num_episodes+1)%show_anime_per==0):\r\n print(\"-\" * 5 + \"Episode %d \" % (num_episodes + 1) + \"-\" * 5)\r\n print (\"reward: \",total_reward[0], \" win rate %d vs %d = %f%%\" % (total_scores[0],total_scores[1], float(total_scores[0])/(num_episodes+1) *100))\r\n print (\"average_vpred_loss: \", np.mean(vpred_losses))\r\n print (\"average_reward: \", np.mean(reward_across_episodes[0]))\r\n print (\"reward this animation\", total_reward[0])\r\n\r\n reward_across_episodes=[[] for _ in range(len(policy))]\r\n\r\n \r\n if ((num_episodes+1)%update_opponent_per==0):\r\n save_params_to_npy(policy[0].get_variables(),param_paths[0][:-4]+'%08d.npy'%(num_episodes+1))\r\n save_params_to_npy(policy[0].get_variables(),param_paths[0])\r\n '''\r\n for i in range(len(policy)):\r\n param = load_params(param_paths[i])\r\n set_from_flat(policy[i].get_variables(), param)\r\n '''\r\n\r\n num_episodes += 1\r\n draw = True\r\n\r\n observation = env.reset()\r\n nstep = 0\r\n total_reward = [0.0 for _ in range(len(policy))]\r\n\r\n for i in range(len(policy)):\r\n policy[i].reset()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"demos/self_train.py","file_name":"self_train.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7611339","text":"import curses\n\nstdscr=curses.initscr()\nkey=''\n\ncurses.noecho()\ncurses.cbreak()\n\nstdscr.addstr(1,1,'press esc to quit')\n\nwhile key != 27: # esc key\n key=stdscr.getch()\n stdscr.addstr(3,3, 'key pressed')\n stdscr.addch(5,5, key)\n stdscr.refresh()\n\ncurses.echo()\ncurses.nocbreak()\ncurses.endwin()\n","sub_path":"Program03/py_curses_ex.py","file_name":"py_curses_ex.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"437875764","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport SimpleInterpreter as SI\n\n\nclass TestExpressions(unittest.TestCase):\n\n def test_divide_by_0(self):\n env = {}\n expression = SI.Div(SI.Const(5), SI.Const(0))\n self.assertRaises(ZeroDivisionError, expression.eval, env)\n\n def test_simple_expression(self):\n expression = SI.Mult(SI.Add(SI.Const(5), SI.Var(\"x\")), SI.Var(\"y\"))\n env = {\"x\": 5, \"y\": 7}\n self.assertEqual(expression.eval(env), 70)\n\n\nclass TestPrograms(unittest.TestCase):\n\n def test_simple_assignment(self):\n program = SI.Assign(SI.Var(\"x\"), SI.Const(10))\n env = program.execute({})\n self.assertEqual(env, {\"x\": 10})\n\n def test_second_assignment(self):\n env = SI.Assign(SI.Var(\"x\"), SI.Const(5)).execute({})\n env = SI.Assign(SI.Var(\"x\"), SI.Const(10)).execute(env)\n self.assertEqual(env, {\"x\": 10})\n\n def test_subprogram(self):\n env = {}\n program = 
SI.Assign(SI.Var(\"x\"), SI.Const(10))\n program1 = SI.Assign(SI.Var(\"y\"), SI.Const(20))\n program2 = SI.Proc(SI.Assign(SI.Var(\"x\"), SI.Const(15)), program1)\n program3 = SI.Proc(program, program2)\n self.assertEqual(program3.execute(env), {\"x\": 15, \"y\": 20})\n\n @staticmethod\n def factorial(n):\n program = SI.Assign(SI.Var(\"x\"), SI.Mult(SI.Var(\"x\"), SI.Var(\"y\")))\n program = SI.Proc(program, SI.Assign(SI.Var(\"y\"), SI.Sub(SI.Var(\"y\"), SI.Const(1))))\n program = SI.Loop(SI.GreaterThan(SI.Var(\"y\"), SI.Const(0)), program)\n program = SI.Condition(SI.Equals(SI.Var(\"y\"), SI.Const(0)), SI.Assign(SI.Var(\"x\"), (SI.Const(1))), program)\n program = SI.Proc(SI.Assign(SI.Var(\"y\"), SI.Const(n)), program)\n program = SI.Proc(SI.Assign(SI.Var(\"x\"), SI.Const(1)), program)\n return program\n\n def test_factorial_10(self):\n self.assertEqual(self.factorial(10).execute({}), {\"x\": 3628800, \"y\": 0})\n\n def test_factorial_8(self):\n self.assertEqual(self.factorial(8).execute({}), {\"x\": 40320, \"y\": 0})\n\n def test_pass(self):\n program = SI.Assign(SI.Var(\"x\"), SI.Const(10))\n program = SI.Proc(program, SI.Pass())\n env = program.execute({})\n self.assertEqual(env, {\"x\": 10})\n\n def test_print(self):\n from io import StringIO\n from unittest.mock import patch\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n program = SI.Print(SI.Const(1))\n program.execute({}) # value is irrelevant, testing side effects\n self.assertEqual(fakeOutput.getvalue().strip(), \"1\")\n \n def test_uninitialized(self):\n env = {}\n program = SI.Print(SI.Var(\"x\"))\n self.assertRaises(SI.UninitializedVariableException, program.execute, env)\n\n\nclass TestStringFormatting(unittest.TestCase):\n\n def test_factorial_formatting(self):\n code = \"((set x 1) ((set y 5) (if (== y 0) (set x 1) (while (> y 0) ((set x (* x y)) (set y (- y 1)))))))\"\n self.assertEqual(str(TestPrograms.factorial(5)), code)\n\n def test_simple_expression_formatting(self):\n expression = SI.Mult(SI.Add(SI.Const(5), SI.Var(\"x\")), SI.Var(\"y\"))\n code = \"(* (+ 5 x) y)\"\n self.assertEqual(str(expression), code)\n\n def test_assign_formatting(self):\n expression = SI.Assign(SI.Var(\"x\"), SI.Var(\"y\"))\n code = \"(set x y)\"\n self.assertEqual(str(expression), code)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"L2/Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"248016280","text":"from discord.ext import commands\n\n\ndef is_in_guild(guild_id):\n async def predicate(ctx):\n return ctx.guild and ctx.guild.id == guild_id\n\n return commands.check(predicate)\n\n\nclass OwnerCog(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n # Hidden means it won't show up on the default help.\n @commands.command(name='load', hidden=True)\n @is_in_guild(584593409860173873)\n async def ext_load(self, ctx, *, cog: str):\n \"\"\"Command which Loads a Module.\n Remember to use dot path. e.g: cogs.owner\"\"\"\n\n try:\n self.bot.load_extension(cog)\n except Exception as e:\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\n else:\n await ctx.send('**`SUCCESS`**')\n\n @commands.command(name='unload', hidden=True)\n @is_in_guild(584593409860173873)\n async def ext_unload(self, ctx, *, cog: str):\n \"\"\"Command which Unloads a Module.\n Remember to use dot path. 
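The test suite above drives an AST-style interpreter through composite `eval`/`execute` objects. A minimal sketch of the pattern the tests imply; the classes here are hypothetical stand-ins, not the actual `SimpleInterpreter` module (which, per the tests, raises its own `UninitializedVariableException` rather than `KeyError`):

```python
class Const:
    def __init__(self, value):
        self.value = value
    def eval(self, env):
        return self.value

class Var:
    def __init__(self, name):
        self.name = name
    def eval(self, env):
        return env[self.name]  # KeyError stands in for an uninitialized-variable error

class Add:
    def __init__(self, left, right):
        self.left, self.right = left, right
    def eval(self, env):
        return self.left.eval(env) + self.right.eval(env)

class Assign:
    def __init__(self, var, expr):
        self.var, self.expr = var, expr
    def execute(self, env):
        # Statements mutate and return the environment, as in the tests above
        env[self.var.name] = self.expr.eval(env)
        return env

env = Assign(Var("y"), Add(Const(5), Var("x"))).execute({"x": 2})
print(env)  # {'x': 2, 'y': 7}
```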
e.g: cogs.owner\"\"\"\r\n if cog == \"cogs.owner\":\r\n await ctx.send(\"Sorry, I am unable to do that :P\")\r\n else:\r\n try:\r\n self.bot.unload_extension(cog)\r\n except Exception as e:\r\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\r\n else:\r\n await ctx.send('**`SUCCESS`**')\r\n\r\n @commands.command(name='reload', hidden=True)\r\n @is_in_guild(584593409860173873)\r\n async def ext_reload(self, ctx, *, cog: str):\r\n \"\"\"Command which Reloads a Module.\r\n Remember to use dot path. e.g: cogs.owner\"\"\"\r\n\r\n try:\r\n self.bot.unload_extension(cog)\r\n self.bot.load_extension(cog)\r\n except Exception as e:\r\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\r\n else:\r\n await ctx.send('**`SUCCESS`**')\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(OwnerCog(bot))\r\n","sub_path":"cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344349254","text":"from django.shortcuts import render\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom customer.models import Customer_Profile\nfrom employee.models import Employee_Profile\nfrom business.models import Business_Profile\nfrom django.contrib.auth.decorators import login_required\n#pdf libraries\nfrom io import BytesIO\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter, landscape, A4\n\n\n\n\n@login_required(login_url='/customer/login')\ndef cust_search(request):\n return render(request, 'card/customer_search.html')\n\n\n\n@login_required(login_url='/employee/login')\ndef emp_search(request):\n return render(request, 'card/employee_search.html')\n\n@login_required(login_url='/business/login')\ndef busi_search(request):\n return render(request, 'card/business_search.html')\n\n\n@login_required(login_url='/customer/login')\ndef results(request):\n\n if 'q' in request.GET and request.GET['q']:\n q=request.GET['q']\n all_username = User.objects.filter(username=q)\n for i in all_username:\n id=i.id\n\n all_details = Customer_Profile.objects.filter(user_id=id) or Employee_Profile.objects.filter(user_id=id) or Business_Profile.objects.filter(user_id=id)\n for j in all_details:\n if j.registered_as == \"CUS\":\n return render(request, 'card/customercard.html',{'all_username':all_username, 'all_details':all_details})\n elif j.registered_as == \"EMP\":\n return render(request, 'card/employeecard.html',{'all_username':all_username, 'all_details':all_details})\n elif j.registered_as == \"BUSI\":\n return render(request, 'card/businesscard.html',{'all_username':all_username, 'all_details':all_details})\n else:\n s=\"NO CARD FOUND\"\n return HttpResponse(s)\n\n#pdf Function\ndef generate_pdf(request):\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\\\"mypdf.pdf\\\"'\n\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=A4)\n\n #data to Print\n p.drawString(250, 800, \"VISUDH AJIVAM\")\n p.showPage()\n p.save()\n\n pdf = buffer.getvalue()\n buffer.close()\n response.write(pdf)\n\n return response\n","sub_path":"card/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"230661873","text":"# IMPORT\nimport sys\n\n# MAIN\n# Reads a *.cc all_cv command file [1] and a list of 
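The owner cog above implements reload as unload-then-load with uniform error reporting. A compact sketch of the same pattern outside a cog, assuming the synchronous discord.py 1.x extension API that the file uses (`commands.is_owner()` is swapped in here for the guild-ID check, purely for the sketch):

```python
from discord.ext import commands

bot = commands.Bot(command_prefix="!")

@bot.command(name="reload", hidden=True)
@commands.is_owner()  # simpler guard than is_in_guild() above
async def reload_ext(ctx, *, cog: str):
    """Reload a module by dotted path, e.g. cogs.owner."""
    try:
        bot.unload_extension(cog)  # discord.py 1.x: both calls are synchronous
        bot.load_extension(cog)
    except Exception as e:
        await ctx.send(f"**`ERROR:`** {type(e).__name__} - {e}")
    else:
        await ctx.send("**`SUCCESS`**")

# bot.run("TOKEN")  # a real token is needed to actually run the bot
```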
finished *.pred files [2]\n# (i.e completed_files.txt) and filters the commands for the ones you need to rerun\n\ncommand_lines = open(sys.argv[1],\"r\").readlines()\n#print command_lines[0:9]\n\nfinished_files = [l.strip() for l in open(sys.argv[2],\"r\").readlines()]\nprint (finished_files[0:9])\n\nkeep_commands = []\nfor c in command_lines:\n x= c.strip().split(\" \")[2]\n #y= x.strip().split(\"/\")[8]\n print (x)\n if x not in finished_files:\n keep_commands.append(c)\n \noutput = open(sys.argv[1] + \".rerun\",\"w\")\noutput.write(\"\".join(keep_commands))\noutput.close()\n","sub_path":"2_GetRerunJobs_clustenrich.py","file_name":"2_GetRerunJobs_clustenrich.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638444886","text":"\"\"\"\nGeneric file renaming\nWorks from CLI with args:\n - file dir\n - file extension\n - current string to replace\n - new string\n\"\"\"\n\nimport os\nimport sys\n\ndef fixExt(ext):\n \"\"\" (str) -> str\n Prepend \".\" to extension if it doesn't start with one\n \"\"\"\n if not ext.startswith(\".\"):\n return \".{}\".format(ext)\n return ext\n\ndef renameFiles(inputDir, inputExt, currentString, newString):\n \"\"\" (str, str, str, str) -> NoneType\n Renames files in inputDir with inputExt,\n replacing currentString with newString\n \"\"\"\n os.chdir(inputDir)\n for f in os.listdir(os.getcwd()):\n if f != \".DS_Store\":\n fileName, fileExt = os.path.splitext(f)\n\n if inputExt == \"all\" or fileExt == fixExt(inputExt):\n fileName = fileName.replace(currentString, newString)\n\n newName = \"{}{}\".format(fileName, fileExt)\n os.rename(f, newName)\n\n\n\ntry:\n if sys.argv[1].lower() == \"help\" or sys.argv[1].lower() == \"h\":\n print(\"Signature: {} directory extension currentString newString\".format(sys.argv[0]))\n else:\n renameFiles(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n print(\"Script running\")\nexcept IndexError:\n print(\"Nope! 
Did you type in the right number of args?\")\n print(\"Type in '{} help' to see this script's signature\".format(sys.argv[0]))\n","sub_path":"fileManagement/renameFiles.py","file_name":"renameFiles.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"579395027","text":"# coding: utf-8\nimport os\nimport socketserver\n\n# Copyright 2013 Abram Hindle, Eddie Antonio Santos\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n# Furthermore it is derived from the Python documentation examples thus\n# some of the code is Copyright © 2001-2013 Python Software\n# Foundation; All Rights Reserved\n#\n# http://docs.python.org/2/library/socketserver.html\n#\n# run: python freetests.py\n\n# try: curl -v -X GET http://127.0.0.1:8080/\n\n\n# Copyright 2020 Xiaole Zeng\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# current os path\nINDEX = 'index.html'\nPATH = os.getcwd() + '/www/'\n\n\nclass MyWebServer(socketserver.BaseRequestHandler):\n\n def handle(self):\n self.data = self.request.recv(1024).strip()\n print(\"Got a request of: %s\\n\" % self.data)\n\n if self.data != b'':\n request_method = str(self.data).split(' ')[0]\n request_path = str(self.data).split(' ')[1]\n print(request_method)\n if request_method == \"b'GET\":\n print(request_path)\n self.response(request_path)\n else:\n self.status_405()\n\n def response(self, path):\n \"\"\"\"Take the response when the method is GET\"\"\"\n try:\n path_for_dir = os.path.join(PATH, path.strip('/'))\n\n if os.path.join(PATH, path).endswith('.html') or (\n os.path.isdir(path_for_dir)\n and path.endswith('/')\n ):\n\n if os.path.isdir(os.path.join(PATH, path.strip('/'))):\n path = path + INDEX\n f = open(os.path.join(PATH, path[1:]), 'r')\n data = f.read()\n mimetypes = 'html'\n f.close()\n self.status_200(mimetypes, data)\n elif os.path.join(PATH, path).endswith('.css'):\n try:\n f = open(os.path.join(PATH, path[1:]), 'r')\n data = f.read()\n mimetypes = 'css'\n f.close()\n self.status_200(mimetypes, data)\n except Exception:\n self.status_404()\n elif os.path.isdir(path_for_dir) and not path.endswith('/'):\n self.status_301(path)\n else:\n self.status_404()\n except Exception as e:\n print(e)\n\n def status_200(self, mimetypes, data):\n \"\"\"Send the 200 ok Message\"\"\"\n self.request.sendall(bytearray('HTTP/1.1 200 OK\\r\\n' +\n 'Content-Type: text/' + mimetypes +\n '\\r\\n\\r\\n' +\n data, 'utf-8'))\n\n def status_301(self, path):\n \"\"\"Send 301 
Moved Permanently Message\"\"\"\n self.request.sendall(bytearray('HTTP/1.1 301 Moved Permanently\\r\\n' +\n 'Location: ' + path + '/\\r\\n' +\n 'Content-Type: text/html\\r\\n\\r\\n',\n 'utf-8'))\n\n def status_404(self):\n \"\"\"Send 404 Page Not Found Message\"\"\"\n self.request.sendall(bytearray('HTTP/1.1 404 Not Found\\r\\n'\n 'Content-Type: text/html\\r\\n\\r\\n' +\n '<html>'\n '<head><title>Error response</title></head>'\n '<body><h1>Error code 404.</h1>'\n '<p>Message: File not found.</p>'\n '</body></html>', 'utf-8'))\n\n def status_405(self):\n \"\"\"Send 405 Method Not Allowed Message\"\"\"\n self.request.sendall(bytearray('HTTP/1.1 405 Method Not Allowed\\r\\n'\n 'Content-Type: text/html\\r\\n\\r\\n' +\n '<html>'\n '<head><title>Error response</title></head>'\n '<body><h1>Error code 405.</h1>'\n '<p>Message: Method Not Allowed.</p>'\n '</body></html>
'\n ' ', 'utf-8'))\n\n\nif __name__ == \"__main__\":\n HOST, PORT = \"localhost\", 8080\n\n socketserver.TCPServer.allow_reuse_address = True\n # Create the server, binding to localhost on port 8080\n server = socketserver.TCPServer((HOST, PORT), MyWebServer)\n\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl-C\n server.serve_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"579463868","text":"#!/usr/bin/env python2.7\ndef main():\n\timport io\n\ndef checkAS(longTid, asDict, asCoordDict, tidDict, domainsDict, geneDomains, exonDict, gene):\n\tfrom collections import defaultdict\n\tevents = defaultdict(dict)\n\t\t\n\tfor tid in tidDict:\n\t\tevents[tid][\"gains\"] = []\n\t\tif \"gains\" in tidDict[tid]:\n\t\t\tgains = tidDict[tid][\"gains\"]\n\t\t\tgCoords = []\n\t\t\tfor g in gains:\n\t\t\t\tfor gLoc in domainsDict[tid][g]:\n\t\t\t\t\t#print(str(domainsDict[tid][g]))\n\t\t\t\t\tgCoords = gLoc.split(':')\n\t\t\t\t\tgL, gR = int(gCoords[0]), int(gCoords[1])\n\t\t\t\t\tfor e in asDict[tid]:\n\t\t\t\t\t\tfor loc in asDict[tid][e]:\n\t\t\t\t\t\t\teCoords = loc.split(':')\n\t\t\t\t\t\t\teL, eR = int(eCoords[1]), int(eCoords[2])\n\t\t\t\t\t\t\tlCheck, rCheck = 0,0\n\t\t\t\t\t\t\tif eL <= gR:\n\t\t\t\t\t\t\t\tlCheck = 1\n\t\t\t\t\t\t\tif eR >= gL:\n\t\t\t\t\t\t\t\trCheck = 1\n\t\t\t\t\t\t\tif lCheck == 1 and rCheck == 1:\n\t\t\t\t\t\t\t\t\tdesc = e + ':' + loc\n\t\t\t\t\t\t\t\t\tif desc not in events[tid][\"gains\"]:\n\t\t\t\t\t\t\t\t\t\tevents[tid][\"gains\"].append(desc)\n\t\t\t\t\tif len(events[tid][\"gains\"]) > 0:\n\t\t\t\t\t\tprint(gene + '\\t' + tid +'\\tgain\\t' + g +'\\t' + gLoc + '\\t' + ','.join(events[tid][\"gains\"]))\n\t\tevents[tid][\"losses\"] = []\n\t\tif \"losses\" in tidDict[tid]:\n\t\t\tlosses = tidDict[tid][\"losses\"]\n\t\t\tlCoords = []\n\t\t\tif longTid in domainsDict:\n\t\t\t\tlongDoms = domainsDict[longTid]\n\t\t\tfor l in losses:\n\t\t\t\tfor lLoc in domainsDict[longTid][l]:\n\t\t\t\t\tlCoords = lLoc.split(':')\n\t\t\t\t\tlL, lR = int(lCoords[0]), int(lCoords[1])\n\t\t\t\t\tfor e in asDict[tid]:\n\t\t\t\t\t\tfor loc in asDict[tid][e]:\n\t\t\t\t\t\t\teCoords = loc.split(':')\n\t\t\t\t\t\t\teL, eR = int(eCoords[1]), int(eCoords[2])\n\t\t\t\t\t\t\tlCheck, rCheck = 0,0\n\t\t\t\t\t\t\tif eL <= lR:\n\t\t\t\t\t\t\t\tlCheck = 1\n\t\t\t\t\t\t\tif eR >= lL:\n\t\t\t\t\t\t\t\trCheck = 1\n\t\t\t\t\t\t\tif lCheck == 1 and rCheck == 1:\n\t\t\t\t\t\t\t\tdesc = e + ':' + loc\n\t\t\t\t\t\t\t\tif desc not in events[tid][\"losses\"]:\n\t\t\t\t\t\t\t\t\tevents[tid][\"losses\"].append(desc)\n\t\t\t\t\tif len(events[tid][\"losses\"]) > 0:\n\t\t\t\t\t\tprint(gene + '\\t' + tid +'\\tloss\\t' + l +'\\t' + lLoc + '\\t' + ','.join(events[tid][\"losses\"]))\n\t\t\t\t\t\t\t\t\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"mirna_binding/check_AS.py","file_name":"check_AS.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"47205153","text":"import discord\r\nimport datetime\r\nfrom discord.utils import get\r\nfrom discord.ext import commands\r\n\r\ncolour = discord.Colour.blue()\r\n\r\nclass 관리자(commands.Cog):\r\n \"\"\"관리자 기능들을 보여줍니다\"\"\"\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.command(name=\"청소\")\r\n @commands.has_permissions(administrator=True)\r\n async 
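check_AS.py above re-derives the same interval test twice (the `lCheck`/`rCheck` flag pairs for gains and for losses). Two closed intervals `[aL, aR]` and `[bL, bR]` overlap exactly when `aL <= bR` and `aR >= bL`; a small sketch of that predicate factored out:

```python
def intervals_overlap(a_left, a_right, b_left, b_right):
    """True iff closed intervals [a_left, a_right] and [b_left, b_right] intersect."""
    return a_left <= b_right and a_right >= b_left

# The nested lCheck/rCheck flags above reduce to a single call:
assert intervals_overlap(5, 10, 8, 12)       # partial overlap
assert intervals_overlap(5, 10, 10, 12)      # shared endpoint counts
assert not intervals_overlap(5, 10, 11, 12)  # disjoint
```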
def _clear(self, ctx, number):\r\n \"\"\"메시지를 청소합니다(관리자)\"\"\"\r\n number = int(number) # Converting the amount of messages to delete to an integer\r\n if number >= 100 or number <= 0:\r\n embed = discord.Embed(title=\"1개부터 99개가지만 해주세요.\", colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n else:\r\n await ctx.channel.purge(limit=number + 1)\r\n embed = discord.Embed(title=\"{}개를 삭제하였습니다.\".format(number), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @_clear.error\r\n async def _clear_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(name=\"킥\")\r\n @commands.has_permissions(kick_members=True)\r\n async def _kick(self, ctx, member: discord.Member, *, reason=None):\r\n \"\"\"맨션한 사람을 추방시킵니다. (관리자)\"\"\"\r\n await member.kick(reason=reason)\r\n embed = discord.Embed(title=str(member) + \"을(를) 킥하였습니다.\", colour=colour)\r\n embed.add_field(name=\"사유\", value=reason)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @_kick.error\r\n async def _kick_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(name=\"밴\")\r\n @commands.has_permissions(ban_members=True)\r\n async def _ban(self, ctx, member: discord.Member, *, reason=None):\r\n \"\"\"맨션한 사람을 밴시킵니다. (관리자)\"\"\"\r\n await member.ban(reason=reason)\r\n embed = discord.Embed(title=str(member) + \"을(를) 밴시켰습니다.\", colour=colour)\r\n embed.add_field(name=\"사유\", value=reason)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @_ban.error\r\n async def _ban_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(name=\"언밴\", pass_context=True)\r\n @commands.has_permissions(ban_members=True)\r\n async def _unban(self, ctx, *, user_name):\r\n \"\"\"이름#아이디를 하시면 언밴 시킵니다. (관리자)\"\"\"\r\n banned_users = await ctx.guild.bans()\r\n member_name, member_discriminator = user_name.split('#')\r\n for ban_entry in banned_users:\r\n user = ban_entry.user\r\n if (user.name, user.discriminator) == (member_name, member_discriminator):\r\n await ctx.guild.unban(user)\r\n embed = discord.Embed(title=f\"{user.mention}을(를) 언밴시켰습니다.\", colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n return\r\n\r\n @_unban.error\r\n async def _unban_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(name=\"뮤트\", pass_context=True)\r\n @commands.has_permissions(administrator=True)\r\n async def _mute(self, ctx, member: discord.Member, *, reason=None):\r\n \"\"\"유저를 뮤트시킵니다. Muted라는 역할이 있서야 작동합니다. 
\\n Muted역할은 뮤트의 기능을 추가해주세요 (관리자)\"\"\"\r\n member = member or ctx.message.author\r\n embed = discord.Embed(title=str(member) + \"을(를) 뮤트 시켰습니다\")\r\n embed.add_field(name=\"사유\", value=reason)\r\n await member.add_roles(get(ctx.guild.roles, name=\"Muted\"))\r\n await ctx.send(member.mention + \"를 뮤트 했습니다\")\r\n\r\n @_mute.error\r\n async def _mute_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(name=\"언뮤트\", pass_context=True)\r\n @commands.has_permissions(administrator=True)\r\n async def _unmute(self, ctx, member: discord.Member = None):\r\n \"\"\"유저를 언뮤트 시킵니다. (관리자)\"\"\"\r\n member = member or ctx.message.author\r\n await member.remove_roles(get(ctx.guild.roles, name='Muted'))\r\n await ctx.send(member.mention + \"를 언뮤트 했습니다.\")\r\n\r\n @_unmute.error\r\n async def _unmute_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command()\r\n @commands.has_permissions(administrator=True)\r\n async def dm(self, ctx, *, args=None):\r\n \"\"\"모든사람에게 dm 공지를 합니다\"\"\"\r\n if args != None:\r\n members = ctx.guild.members\r\n for member in members:\r\n try:\r\n await member.send(args)\r\n print(\"'\" + args + \"' sent to: \" + member.name)\r\n await ctx.send(f\"{member.name}에게 dm을 보냈습니다\")\r\n except:\r\n print(\"Couldn't send '\" + args + \"' to \" + member.name)\r\n else:\r\n await ctx.channel.send(\"You didn't provide arguments.\")\r\n\r\n @dm.error\r\n async def dm_error(self, ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n embed = discord.Embed(title=\"{}님, 당신은 이 명령을 실행하실 권한이 없습니다.\".format(ctx.message.author), colour=colour)\r\n embed.timestamp = datetime.datetime.utcnow()\r\n await ctx.send(embed=embed)\r\n\r\n\r\ndef setup(client):\r\n client.add_cog(관리자(client))\r\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"561053923","text":"import os, shutil, time, glob\nimport tkinter as tk\nfrom tkinter import ttk, messagebox\nfrom ttkthemes import ThemedTk\nimport configparser\n\nimport python as py\n\n# global settings\nc = configparser.ConfigParser()\nfilename = 'config\\\\TourApp.ini'\nc.read(filename)\n\nclass Form(tk.Frame):\n \"\"\" A class for all my widgets, for the factory reports \"\"\"\n def __init__(self, parent, *args, **kwargs):\n super().__init__(parent, *args, **kwargs)\n\n # 1st Section, moving files from folders to temp folders\n self.section_1 = tk.LabelFrame(self, text=\"Tour Mode\",\n font=(\"bold\", 16))\n\n self.description_label_1 = tk.Label(self.section_1, text=\n \"Click this button to show\"\n \" tour slides on TV's\",\n )\n\n # Buttons\n self.tour_button = ttk.Button(self.section_1,\n text=\"Tour Mode\",\n state=\"normal\",\n width=40,\n command=self.tour_mode)\n\n # Section 2\n self.section_2 = tk.LabelFrame(self, text=\"Factory Mode\",\n font=(\"bold\", 16))\n\n self.description_label_2 = tk.Label(self.section_2, text=\n \"Click this button to show\"\n \" reports on TV screens\"\n )\n\n self.factory_button = 
ttk.Button(self.section_2,\n text=\"Factory Mode\",\n state=\"normal\",\n width=40,\n command=self.factory_mode)\n\n\n # state checks\n if c.get('button_settings', 'currently_pressed') == 'factory':\n self.factory_button.config(state = 'disabled')\n elif c.get('button_settings', 'currently_pressed') == 'tour':\n self.tour_button.config(state = 'disabled')\n\n # placement section_1\n self.tour_button.grid(row=1, column=1)\n\n self.section_1.grid(row=0, column=0, columnspan=5, sticky=tk.W + tk.E)\n self.description_label_1.grid(row=0, columnspan=5)\n\n # placement section_2\n self.factory_button.grid(row=3, column=1)\n\n self.section_2.grid(row=2, column=0, columnspan=5,sticky=tk.W + tk.E)\n self.description_label_2.grid(row=2, columnspan=5)\n\n # Functions\n def tour_mode(self):\n \"\"\"Changes Factory into Tour mode\"\"\"\n\n py.move_files()\n\n # Move Files From slides to Pi's\n py.move_slides()\n\n os.chdir(r\"\\\\twitched\\TourApp\")\n c['button_settings']['currently_pressed'] = 'tour'\n with open(filename, 'w') as cf:\n c.write(cf)\n c.read('config\\TourApp.ini')\n self.tour_button.config(state = 'disabled')\n self.factory_button.config(state = 'normal')\n\n # Success\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"Success!\", \"Tour Mode Activated\")\n\n def factory_mode(self):\n \"\"\"Changes the slideshow back to factory mode\"\"\"\n\n # Removes the Files from all pi's so there is no conflict\n py.clean_directory()\n\n time.sleep(2)\n\n py.move_files_back()\n\n os.chdir(r\"\\\\twitched\\TourApp\")\n\n c['button_settings']['currently_pressed'] = 'factory'\n with open(filename, 'w') as cf:\n c.write(cf)\n c.read(filename)\n self.factory_button.config(state = 'disabled')\n self.tour_button.config(state = 'normal')\n\n # Success\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"Success!\", \"Factory Mode Activated\")\n\n\nclass Application(ThemedTk):\n\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self.get_themes()\n self.set_theme(\"radiance\")\n\n try:\n tk.Tk.iconbitmap(self,default='icons\\py_115518.ico')\n except:\n pass\n\n self.title(\"John Smedley Tours v 1.0\")\n\n self.resizable(False, False)\n geometry_string = (f\"{c.get('settings', 'width')}x\"\n f\"{c.get('settings', 'height')}+\"\n f\"{c.get('settings', 'x_pos')}+\"\n f\"{c.get('settings', 'y_pos')}\")\n self.geometry(geometry_string)\n\n ttk.Label(\n self,\n text=\"John Smedley Tours\",\n font=(\"bold\", 16)\n ).grid(row=0, columnspan=5, padx=5, pady=5)\n\n ttk.Label(\n self,\n text=\"Developed by Kyle Taylor\"\n ).grid(row=2)\n\n self.widgets = Form(self)\n self.widgets.grid(row=1, padx=5, pady=5)\n\n def on_closing(self):\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n self.quit()\n self.destroy()\n\n\ndef main():\n app = Application()\n app.protocol(\"WM_DELETE_WINDOW\", app.on_closing)\n app.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Tour_Slideshow/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"442974249","text":"# coding: utf8\n\nfrom flask import Flask, abort, request, render_template, session, redirect, g, url_for, jsonify\nfrom flask_login import LoginManager, login_user, logout_user, login_required\n\nfrom jinja2 import Template\nimport requests\n# from social_flask.routes import social_auth\n# from social_flask_sqlalchemy.models import init_social\nfrom db import User, db_session, 
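gui.py above round-trips the `currently_pressed` state through an .ini file on every mode switch (`c.read`, mutate, `c.write`). A self-contained sketch of that read-modify-write cycle with `configparser`; the file name here is hypothetical, not the app's actual config path:

```python
import configparser

FILENAME = "TourApp_demo.ini"  # hypothetical path, just for the sketch

# Seed the file with an initial state
config = configparser.ConfigParser()
config["button_settings"] = {"currently_pressed": "factory"}
with open(FILENAME, "w") as f:
    config.write(f)

# Flip the persisted state, as tour_mode()/factory_mode() do above
config.read(FILENAME)
config["button_settings"]["currently_pressed"] = "tour"
with open(FILENAME, "w") as f:
    config.write(f)

config.read(FILENAME)
print(config.get("button_settings", "currently_pressed"))  # tour
```

Persisting the state on disk is what lets the app restore the correct disabled/enabled button pair after a restart, as the `state checks` block in `Form.__init__` does.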
Cafe\nimport json\nfrom fullfill_db import get_nearest_cafe, get_cafe_json\n\napp = Flask(__name__)\napp.config.from_object('config')\n# app.config['SOCIAL_AUTH_USER_MODEL'] = 'db.User'\n# app.register_blueprint(social_auth)\n\n# init_social(app, session)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n# login_manager.login_view = \"login\"\n\nCURRENT_USER = User()\n\n@app.before_request\ndef global_user(): \n g.user = CURRENT_USER\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return User.query.get(userid)\n except:\n pass\n\n\n@app.context_processor\ndef inject_user():\n try:\n return {'user': g.user}\n except AttributeError:\n return {'user': None}\n\n\n@app.route('/')\ndef index_page():\n return render_template('index_page.html', user=g.user)\n\n\n@app.route('/coffee/')\ndef cofee_info_page():\n return render_template('coffee_info_page.html', user=g.user)\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n email = request.form.get('email')\n pwd = request.form.get('password')\n found_user = User.query.filter(User.email == email and User.password == pwd).first()\n if found_user: \n login_user(found_user)\n g.user = found_user \n return redirect('profile/{}'.format(found_user.id))\n else:\n return redirect('registration')\n\n\n@app.route('/registrate', methods=['POST'])\ndef registrate():\n email = request.form.get('email') \n password = request.form.get('password') \n first_name = request.form.get('first_name') \n last_name = request.form.get('last_name') \n new_user = User(email=email, password=password, first_name=first_name, last_name=last_name) \n print(new_user)\n db_session.add(new_user)\n db_session.commit() \n login_user(new_user)\n g.user = new_user\n print(g.user)\n return redirect('profile/{}'.format(g.user.id))\n\n\n@app.route('/registration')\ndef registration_page():\n return render_template('registration.html', user=g.user)\n\n\n@app.route('/logout')\ndef logout_page():\n print(g.user)\n logout_user()\n return 'Logged out'\n\n\n@app.route('/profile/')\n@login_required\ndef profile_page(user_id):\n user = User.query.get(user_id)\n if user is None:\n print('User with id as {} not found'.format(user_id))\n return render_template('404.html', text=\"Это не тот пользователь, которого ты ищешь\", img_link=\"http://cdn2.s.kolorado.ru/products/1/14/143/143736/101_1_14_design.png\")\n return render_template('profile.html', user=user)\n\n\n@app.route('/points/', methods=['GET'])\ndef get_cafes():\n try:\n lat = float(request.args.get('lat'))\n lng = float(request.args.get('lng'))\n except:\n lat = 55.740750\n lng = 37.608874\n \n cafes = get_nearest_cafe(lat,lng) \n return render_template('cafes.html', cafes=cafes)\n\n\n@app.route('/points-result-json/', methods=['GET'])\ndef get_maps_json():\n try:\n lat = float(request.args.get('lat'))\n lng = float(request.args.get('lng'))\n except:\n lat = 55.740750\n lng = 37.608874\n \n cafes = get_nearest_cafe(lat,lng)\n cafes_json = []\n for cafe in cafes:\n cafes_json.append((cafe.name, cafe.lat, cafe.lng))\n return jsonify(cafes=cafes_json)\n\n@app.route('/cafes/')\ndef cafe_info_page(cafe_id): \n cafe = Cafe.query.get(cafe_id)\n if not cafe:\n return render_template('404.html', text=\"\", img_link=\"https://cdn.dribbble.com/users/469578/screenshots/2597126/404-drib23.gif\")\n return render_template('cafe_info_page.html', cafe=cafe)\n\n\n@app.errorhandler(404)\ndef not_found_page(e):\n return render_template('404.html', text=\"\", 
img_link=\"https://cdn.dribbble.com/users/469578/screenshots/2597126/404-drib23.gif\"), 404\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"407737437","text":"from django.contrib import admin\nfrom cv.models import CV\nfrom cv.models import Competences, Categorie_competences\n\nadmin.site.register(CV)\n\n\nclass CompetencesInline(admin.StackedInline):\n model = Competences\n extra = 1\n\n\nclass Categorie_competencesAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['categorie']}),\n ]\n inlines = [CompetencesInline]\n\nadmin.site.register(Categorie_competences, Categorie_competencesAdmin)","sub_path":"cv/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165593328","text":"import unittest\nfrom flask_testing import TestCase\nfrom app import app, db\nfrom app.models import User\n\n\nclass UnitTests(unittest.TestCase):\n SQLALCHEMY_DATABASE_URI = \"sqlite://\"\n TESTING = True\n\n def setUp(self):\n self.app_context = app.app_context()\n self.app_context.push()\n db.create_all()\n\n def test_connect_manager_page(self):\n response = app.test_client(self).get('/app/manager')\n self.assertEqual(response.status_code, 404)\n\n def test_wrong_login_without_id(self):\n login = app.test_client(self).post('login', data=dict(first_name='tomer', last_name='admon'))\n self.assertEqual(login.status_code, 400)\n\n def test_login_with_user_not_exist(self):\n login = app.test_client(self).post('login', data=dict(first_name='aba', last_name='aca', id=900))\n loginString = login.data.decode('utf-8')\n assert 'User not found!' 
in loginString\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\nif (__name__ == '__main__'):\n unittest.main()\n","sub_path":"app/tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"496285690","text":"import json\nimport os\n\n\ndef get_room(id):\n ret = None\n with open(os.path.join(\n os.path.dirname(__file__), 'rooms/' + str(id) + '.json'),\n 'r') as f:\n jsontext = f.read()\n d = json.loads(jsontext)\n d['id'] = id\n ret = Room(**d)\n return ret\n\n\ndef grab_object(objects):\n objects = None\n if objects in Room(objects):\n return objects\n else:\n return 0\n\n\nclass Room():\n def __init__(self, id=0, name='A room', objects={},\n description='An empty room', neighbors={}):\n self.id = id\n self.name = name\n self.objects = objects\n self.description = description\n self.neighbors = neighbors\n\n def _neighbor(self, direction):\n if direction in self.neighbors:\n return self.neighbors[direction]\n else:\n return None\n\n def _objects(self, arg):\n if arg in self.objects:\n return self.objects[arg]\n else:\n return None\n","sub_path":"game/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"207781609","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\n\ndef add_css_class(widget, css_class):\n css_classes = widget.attrs.get('class', '').split()\n if css_class not in css_classes:\n css_classes.append(css_class)\n widget.attrs.update({'class': ' '.join(css_classes)})\n return widget\n","sub_path":"thecut/forms/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601857434","text":"from flask import Flask, url_for, request\nfrom flask_script import Manager\n\napp = Flask(__name__)\nmanager = Manager(app)\n\n\n@app.route('/')\ndef api():\n return 'Welcome'\n\n\n@app.route('/data')\ndef api_data():\n if 'da' in request.args:\n print(request.args['da'])\n return request.args['da']\n else:\n return 'none'\n\n\nif __name__ == '__main__':\n # debug=True\n app.run()\n","sub_path":"blog/app_1.py","file_name":"app_1.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"138032087","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 08:11:09 2017\n\n@author: Lafungo\n\"\"\"\n\nimport datetime\nfrom sympy.ntheory import prevprime\n\nstart = datetime.datetime.now()\n\ndef is_pan(n):\n s_n = str(n)\n \n if set(s_n) == set(['1', '2', '3', '4', '5', '6', '7']):\n return True\n else:\n return False\n \n# if len(s_n) != 9:\n# return False\n \n# digits = []\n# \n# for digit in s_n:\n# if digit == '0' or digit in digits:\n# return False\n# \n# digits.append(digit)\n# \n# return True\n\nfound = False\np = prevprime(10**7)\n\nwhile not found:\n if is_pan(p):\n pan_prime = p\n found = True\n \n p = prevprime(p)\n\nprint(pan_prime)\n\nend = datetime.datetime.now()\nprint(end - start)\n","sub_path":"p41.py","file_name":"p41.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"123619187","text":"from odoo import models, api, exceptions, _\n\n\nclass 
AccountMove(models.Model):\n _inherit = 'account.move'\n _description = \"module to delete duplicate attachments when sent by email\"\n\n # @api.multi\n def action_invoice_sent(self):\n \"\"\" Open a window to compose an email, with the edi invoice template\n message loaded by default\n \"\"\"\n\n # self.ensure_one()\n template = self.env.ref('account.email_template_edi_invoice', False)\n compose_form = self.env.ref(\n 'mail.email_compose_message_wizard_form', False)\n ctx = dict(\n default_model='account.move',\n default_res_id=self.id,\n default_use_template=bool(template),\n default_template_id=template and template.id or False,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n custom_layout=\"account.mail_template_data_notification_email_account_invoice\",\n force_email=True\n )\n\n # Begin modification\n name = ''\n band = False\n attachments = self.env['ir.attachment']\n attachs = attachments.search(\n [('res_model', '=', self._name), ('res_id', '=', self.id)])\n for attach in attachs:\n name = attach.name\n if name.upper()[:3] == 'FAC':\n if name[-3:] == 'xml':\n if band == False:\n band = True\n else:\n attachments.search([('id', '=', attach.id)]).unlink()\n else:\n attachments.search([('id', '=', attach.id)]).unlink()\n elif name[-3:] == 'xml':\n if band == False:\n band = True\n else:\n attachments.search([('id', '=', attach.id)]).unlink()\n\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }\n","sub_path":"remove_attachemnts_invoice/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"516207271","text":"import os\nimport sys\n\ndef in_filter_list():\n\tpath = r'filtered_words.txt'\n\tword_file = open(path, encoding = 'utf-8')\n\tword_list = list()\n\tfor l in word_file.readlines():\n\t\tprint(l.strip())\n\t\tword_list.append(l.strip())\n\treturn word_list\n\n\nif __name__ == '__main__':\n\tw_l = in_filter_list()\n\tword = input(\"输入词语:\")\n\tif word in w_l:\n\t\tprint(\"Freedom\")\n\telse:\n\t\tprint(\"Human Rights\")","sub_path":"0011_filter.py","file_name":"0011_filter.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"559190232","text":"#coding: utf-8\nfrom flask import Flask, render_template, redirect, request, url_for, jsonify\nfrom time import sleep\nfrom random import random\nimport markoff\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template(\"index.html\")\n\n@app.route('/chat/')\ndef chat():\n\tconnection = markoff.markoff(\"markoff.db\")\n\tsleep(random()+random())\n\ttry:\n\t\tconnection.add(request.args['text'])\n\texcept UnicodeEncodeError:\n\t\tanswer = \"I don't understand Unicode :(\" # Or do I?\n\t\treturn jsonify(text=answer)\n\n\ttry:\n\t\tanswer = connection.create()\n\texcept:\n\t\tanswer = \"\"\n\n\treturn jsonify(text=answer)\n\nif __name__ == \"__main__\":\n\tapp.run(host=\"0.0.0.0\", debug=True)\n","sub_path":"webmarkoff/webmarkoff.py","file_name":"webmarkoff.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120671932","text":"#Accuracy(正解率) : 
(TP + TN) / (TP + FP + FN + TN)\r\n#Recall(再現率) : TP / (TP + FN)\r\n#Precision(適合率) : TP / (TP + FP)\r\n#F-value : 2 * Recall * Precision / (Recall + Precision)\r\n#    実際\r\n#    O X\r\n#予 O TP FP\r\n#測 X FN TN\r\n\r\nimport numpy as np\r\nfrom excel import *\r\n\r\ndef evaluate(predict_path, answer_path):\r\n predict = load_excel_np(predict_path)\r\n answer = load_excel_np(answer_path)\r\n test_sample = predict.shape[0]\r\n split_sample = predict.shape[1]\r\n TP = np.zeros(test_sample)\r\n FP = np.zeros(test_sample)\r\n FN = np.zeros(test_sample)\r\n TN = np.zeros(test_sample)\r\n for i in range(test_sample):\r\n for j in range(split_sample):\r\n if predict[i][j] == 1 and answer[i][j] == 1:\r\n TP[i]+=1\r\n elif predict[i][j] == 1 and answer[i][j] == 0:\r\n FP[i]+=1\r\n elif predict[i][j] == 0 and answer[i][j] == 1:\r\n FN[i]+=1\r\n else:\r\n TN[i]+=1\r\n Accuracy = (TP + TN) / (TP + FP + FN + TN)\r\n Accuracy_mean = np.mean(Accuracy)\r\n Recall = TP / (TP + FN)\r\n Recall_mean = np.mean(Recall)\r\n Precision = TP / (TP + FP)\r\n Precision_mean = np.mean(Precision)\r\n F_value = 2 * Recall * Precision / (Recall + Precision)\r\n F_value_mean = np.mean(F_value)\r\n return Accuracy_mean, Recall_mean, Precision_mean, F_value_mean\r\n\r\ndef frog_evaluation(section, section_name, nfft, hoplength, para1, para2, way):\r\n predict_path = 'mel_filter/frog_data/predict.xlsx'\r\n Accuracy, Recall, Precision, F_value = evaluate(predict_path, 'evaluation/frog_sheet_'+str(section)+'s.xlsx')\r\n result = np.empty(4)\r\n result[0] = Accuracy\r\n result[1] = Recall\r\n result[2] = Precision\r\n result[3] = F_value\r\n np_excel(result, 'mel_filter/frog_data/predict/'+section_name+'_'+str(nfft)+'_'+str(hoplength)+'_'+para1+'_'+para2+'_'+way+'.xlsx')\r\n\r\nif __name__ == '__main__':\r\n #path = 'mfcc/frog_data/predict.xlsx'\r\n path = 'mel_filter/frog_data/predict.xlsx'\r\n #path = 'haar_like/frog_data/predict.xlsx'\r\n frog_evaluation(path, 0.5)\r\n\r\n\r\n\r\n","sub_path":"select_filters/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"551412355","text":"dna_1 = \"ACCGTT\"\r\ndna_2 = \"CCAGCA\"\r\n\r\ndef longest_common_subsequence(string_1, string_2):\r\n print(\"Finding longest common subsequence of {0} and {1}\".format(string_1, string_2))\r\n grid = [[0 for col in range(len(string_1) + 1)] for row in range(len(string_2) + 1)]\r\n for row in range(1, len(string_2) + 1):\r\n print(\"Comparing: {0}\".format(string_2[row - 1]))\r\n for col in range(1, len(string_1) + 1):\r\n print(\"Against: {0}\".format(string_1[col - 1]))\r\n if string_1[col - 1] == string_2[row - 1]:\r\n grid[row][col] = grid[row - 1][col - 1] + 1\r\n else:\r\n grid[row][col] = max(grid[row - 1][col], grid[row][col - 1])\r\n for row_line in grid:\r\n print(row_line)\r\n\r\nlongest_common_subsequence(dna_1, dna_2)","sub_path":"Misc_Functions/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67136331","text":"from django.conf.urls import url\n\nfrom . 
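evaluation.py above counts TP/FP/FN/TN with an explicit double loop over samples and segments; boolean masks give the same per-sample counts in a few vectorized lines. A sketch, assuming 0/1 arrays shaped (samples, segments) like the sheets `load_excel_np` returns:

```python
import numpy as np

def confusion_counts(predict, answer):
    """Per-sample TP/FP/FN/TN for 0/1 arrays of shape (samples, segments)."""
    predict = np.asarray(predict, dtype=bool)
    answer = np.asarray(answer, dtype=bool)
    tp = np.sum(predict & answer, axis=1)
    fp = np.sum(predict & ~answer, axis=1)
    fn = np.sum(~predict & answer, axis=1)
    tn = np.sum(~predict & ~answer, axis=1)
    return tp, fp, fn, tn

tp, fp, fn, tn = confusion_counts([[1, 0, 1]], [[1, 1, 0]])
recall = tp / (tp + fn)      # matches Recall = TP / (TP + FN) above
precision = tp / (tp + fp)   # matches Precision = TP / (TP + FP) above
print(recall, precision)
```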
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^([0-9]+)/$', views.detail, name='detail'),\n url(r'^([0-9]+)/comment/$', views.comment, name='comment'),\n url(r'^regist/$', views.regist, name='regist'),\n url(r'^login/$', views.login, name='login'),\n url(r'^nearly/$', views.nearly, name='nearly'),\n url(r'^textall/$', views.textall, name='textall'),\n url(r'^pictureall/$', views.pictureall, name='pictureall'),\n url(r'^graph/$', views.graph, name='graph'),\n url(r'^mytopic/$', views.mytopic, name='mytopic'),\n]","sub_path":"shenpinglun/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"210663166","text":"import json\n\nfrom aitoolkit.models import Project\nfrom aitoolkit.models import Image\nfrom aitoolkit.models import ObjectAnnotation, StoryAnnotation\nfrom aitoolkit.models import ImagePrediction\n\nfrom aitoolkit.enum import DataType\n\nfrom aitoolkit import db\n\nclass DBQuery(object):\n # ************************************************** #\n # Add data into database #\n # ************************************************** #\n def add_project(self, created_user, title, description):\n project = Project(created_user, title, description)\n db.session.add(project)\n db.session.commit()\n\n return project.id\n\n def add_image(self, project_id, image_url, image_key):\n image = Image(project_id, image_url, image_key)\n db.session.add(image)\n db.session.commit()\n\n return image.id\n\n def add_object_annotation(self, created_user, image_id, story_id, label, x, y, w, h):\n object_annotation = ObjectAnnotation(created_user, image_id, story_id, label, x, y, w, h)\n db.session.add(object_annotation)\n db.session.commit()\n\n return object_annotation.id\n \n def add_story_annotation(self, created_user, image_id, description):\n story_annotation = StoryAnnotation(created_user, image_id, description)\n db.session.add(story_annotation)\n db.session.commit()\n\n return story_annotation.id\n\n def add_machine_predictions(self, model_name, image_key, image_url, predictions):\n image_prediction = ImagePrediction(model_name, image_key, image_url, predictions)\n db.session.add(image_prediction)\n db.session.commit()\n\n return image_prediction.id\n\n # task\n def add_task(self, created_user, task_type, problem, answer, duration_time, verified_string, status, worker_id, hit_id, assignment_id):\n task = Task(created_user, task_type, problem, answer, duration_time, verified_string, status, worker_id, hit_id, assignment_id)\n db.session.add(task)\n db.session.commit()\n\n return task.id\n\n\n # ************************************************** #\n # Get data from database #\n # ************************************************** #\n def get_all_projects(self):\n projects = Project.query.all()\n return projects\n\n def get_project_by_id(self, project_id):\n project = Project.query.filter_by(id=project_id).first()\n return project\n\n def get_image_by_id(self, image_id):\n image = Image.query.filter_by(id=image_id).first()\n return image\n\n def get_image_by_key(self, image_key):\n image = Image.query.filter_by(image_key=image_key).first()\n return image\n\n def get_images_by_project_id(self, project_id):\n project = Project.query.filter_by(id=project_id).first()\n all_images = Image.query.filter_by(project_id=project.id).all()\n image_list = []\n for image in all_images:\n image_list.append({\n \"id\": image.id,\n \"key\": image.image_key,\n 
\"image_url\": image.image_url\n })\n\n return image_list\n \n def get_image_list_by_project_id(self, project_id):\n project = Project.query.filter_by(id=project_id).first()\n return project.image_list\n\n def get_image_data_by_image_ids(self, image_ids):\n image_data = []\n for img_id in image_ids:\n image = self.get_image_by_id(img_id)\n image_data.append(image)\n\n return image_data\n \n def get_objects_by_story_id(self, story_id):\n objects = ObjectAnnotation.query.filter_by(story_id=story_id).all()\n object_list = []\n for obj in objects:\n object_list.append({\n \"label\": obj.label,\n \"x\": obj.x,\n \"y\": obj.y,\n \"w\": obj.w,\n \"h\": obj.h\n })\n return object_list\n\n def get_stories_by_image_id(self, image_id):\n stories = StoryAnnotation.query.filter_by(image_id=image_id).all()\n story_list = []\n for story in stories:\n story_description = story.description\n story_object_list = self.get_objects_by_story_id(story.id)\n data = {\n \"id\": story.id,\n \"created_user\": story.created_user,\n \"story\": story_description,\n \"object_list\": story_object_list\n }\n story_list.append(data)\n \n return story_list\n\n def get_img_predictions_by_key(self, model_name, image_key):\n predictions = ImagePrediction.query.filter_by(image_key=image_key, model_name=model_name).first()\n \n if predictions:\n output = predictions.predictions\n output = {\n \"image_size\": output[\"image_size\"],\n \"predictions\": output[\"predictions\"]\n }\n return output\n else:\n return None\n \n # ************************************************** #\n # Update data from database #\n # ************************************************** #\n def update_image_list_by_project_id(self, project_id, image_list):\n project = Project.query.filter_by(id=project_id).update({\"image_list\": image_list})\n db.session.commit()\n\n return project\n\n def update_story_object_list(self, story_id, object_list):\n story = StoryAnnotation.query.filter_by(id=story_id).update({\"object_list\": object_list})\n db.session.commit()\n\n return story ","sub_path":"aitoolkit/dbquery.py","file_name":"dbquery.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343361672","text":"from prompt_toolkit.shortcuts import PromptSession, clear, input_dialog\nfrom prompt_toolkit.formatted_text import HTML, to_formatted_text\nfrom prompt_toolkit import print_formatted_text\nfrom random import randint, seed\nfrom unidecode import unidecode\n\nfrom consts import PERSONS, NORUEGO, ESPANOL, TENSES, NUM_PERSONS\nfrom read_data import read_data\n\nprompt_session = PromptSession()\n\n\ndef opposite_language(language):\n if language == NORUEGO:\n return ESPANOL\n else:\n return NORUEGO\n\n\nclass Interrogador:\n def __init__(self, file_name, use_seed=None):\n seed(use_seed)\n self.askable = {}\n self.asked = []\n self.failed = []\n self.file_name = file_name\n self.data = read_data(file_name)\n self.max_asked = 2*len(self.data)*len(TENSES)*NUM_PERSONS\n self.answer_form_reminder = {\n NORUEGO: '(hvem + hva ( ofte? 
)) ',\n ESPANOL: ''\n }\n\n def askable_key(self, language, verb_no, tense, person):\n return f'From: {language}, Verb: {verb_no}, Tense: {tense}, Person: {person}'\n\n def pronoun_the_verb(self, pronoun, conjugation, language):\n if language == NORUEGO:\n return f'{pronoun} {conjugation}'\n else:\n return conjugation\n\n def interrogate(self, question, correct_answer, add_on, answer_form_reminder):\n full_question = f'{question} {add_on[0]} {answer_form_reminder}- '\n full_question = full_question.capitalize()\n for i in range(0, 2): # pylint: disable=unused-variable\n response = prompt_session.prompt(full_question)\n if (response == correct_answer):\n print_formatted_text(\n HTML(\"Yes! \"), end='')\n return True\n print_formatted_text(HTML(\"Nope: \"), end='')\n return False\n\n def pick_one(self, from_language):\n while len(self.asked) < self.max_asked:\n verb_no = randint(0, len(self.data)-1)\n tense = randint(0, len(TENSES)-1)\n person = randint(0, NUM_PERSONS - 1)\n key = self.askable_key(from_language, verb_no, tense, person)\n if not key in self.asked:\n self.asked.append(key)\n verb = self.data[verb_no]\n return (verb, tense, person)\n # else:\n # if not key in self.asked:\n # print_formatted_text(HTML('Hey, I haven' 't asked that!'))\n # else:\n # print_formatted_text(HTML('Yep, I already asked'))\n print(\"Can't find any more to ask - you're done!\")\n exit()\n\n def ask_me_one(self, from_language):\n (verb, tense, person) = self.pick_one(from_language)\n from_pronoun = PERSONS[from_language][person]\n from_conjugation = verb.conjugation(from_language, person, tense)\n\n to_language = opposite_language(from_language)\n to_pronoun = PERSONS[to_language][person]\n to_conjugation = verb.conjugation(to_language, person, tense)\n\n question = self.pronoun_the_verb(from_pronoun, from_conjugation, from_language)\n answer = self.pronoun_the_verb(to_pronoun, to_conjugation, to_language)\n answer_form_reminder = self.answer_form_reminder[to_language]\n\n if from_language == ESPANOL and tense == 2 and person in [0, 2]:\n question = f'{from_pronoun} {question}'\n\n add_on = verb.pick_add_on(from_language, person)\n self.interrogate(question, answer, add_on, answer_form_reminder)\n print(f'{question} {add_on[0]} - {answer} {add_on[1]}'.capitalize())\n","sub_path":"interrogador.py","file_name":"interrogador.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165893515","text":"# SO\nimport os\nimport random\nimport shutil\nimport datetime\nimport sys\nfrom shutil import copyfile\n\n# Basicas\nimport numpy as np\nimport pandas as pd\n\n# Gráficos\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\nmatplotlib.use('Agg')\n\n# Audio\nimport librosa\nimport librosa.display\n\n# TensorFlow\nimport tensorflow as tf\ntf.keras.backend.clear_session()\nfrom tensorflow import keras\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, BatchNormalization\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.python.keras.backend import dropout\nfrom keras.preprocessing import image\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\n\n# ML Flow\nfrom mlflow import log_metric, log_param, log_artifacts\nimport mlflow.tensorflow\nimport 
logging\nlogging.basicConfig(level=logging.WARN)\nlogger = logging.getLogger(__name__)\n\n# Metricas\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\n# Definicoes: Classe positiva (1) - Violencia\n# Classe negativa (0) - Não Violência\n\n# Ignorar avisos de atualização, etc\nimport warnings\nwarnings.filterwarnings(\"ignore\")\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%HH\")\n## Fim\nuser = 'root'\npwd = 'Celular135#'\nhostname = '127.0.0.1'\nport = '3306'\ndatabase = 'mlflow'\n#uri = 'mysql://{'+ user + '}:{' + pwd + '}@{' + hostname + '}:3600/{' + database + '}'\n#uri = 'mysql://root:Celular135#@localhost:3306/mlflow'\nuri = 'mysql://root:Celular135#@localhost:3306/mlflow'\n## TAGs do MLFlow\n_MODEL_NAME_ = 'VGG16'\n_MODEL_PATH_ = 'saved_model/' + timestamp +'/'\n_MODEL_FULLPATH_ = _MODEL_PATH_ +_MODEL_NAME_\n\nmlflow.set_tracking_uri('http://0.0.0.0:5000')\n#mlflow.set_tracking_uri(uri)\n\n#mlflow.set_tracking_uri('mysql://tiago:tiago@127.0.0.1:3306/mlflow')\nmlflow.set_experiment(experiment_name='Artigo ENIAC')\n\ntags = {\n \"Projeto\": \"ENIAC Paper\",\n \"team\": \"Tiago B. Lacerda\",\n \"dataset\": \"HEAR DATASET REV2.4\"\n }\n\n\nDESIRED_ACCURACY = 0.995\n# Localizacao das bases\nEVALUATION_DIR = \"/home/tiago/Documentos/__Programacao/repos/projeto-hear/HEAR_DATASET/Evaluation_SCAPER/\"\n#EVALUATION_DIR = \"/home/tiago/Documentos/__Programacao/repos/projeto-hear/HEAR_DATASET/toy_eval/\"\nMODELOS_IMPORTADOS = \"/home/tiago/Documentos/__Programacao/repos/projeto-hear/modelos_importados/\"\nDATASET_HEAR = \"/home/tiago/Documentos/__Programacao/repos/projeto-hear/HEAR_DATASET/\"\n\nsplit_size = .9\nTARGET_SIZE = (120,160)\n\ndef CheckTF():\n \"\"\" Checar se esta em um ambiente com GPU\n \"\"\"\n if tf.test.gpu_device_name():\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n else:\n print(\"Please install GPU version of TF\")\n \n print(tf.version.VERSION)\n print('Diretorio de trabalho: ', os.getcwd())\n\ndef PrintComHora(texto):\n ''' print com a Hora de execucao\n '''\n hora = datetime.datetime.now().strftime('%H:%M:%S')\n print(\"[\", hora, \"]\", texto)\n sys.stdout.flush()\n\ndef metricas(y_true, y_predict):\n acuracia = accuracy_score(y_true, y_predict)\n precision = precision_score(y_true, y_predict)\n recall = recall_score(y_true, y_predict)\n f1 = f1_score(y_true, y_predict)\n return acuracia, precision, recall, f1\n\ndef matriz_confusao(y_true, y_predict):\n matriz_conf = confusion_matrix(y_true, y_predict)\n fig = plt.figure()\n ax = plt.subplot()\n sns.heatmap(matriz_conf, annot=True, cmap='Blues', ax=ax)\n\n ax.set_xlabel('Valor Predito')\n ax.set_ylabel('Valor Real')\n ax.set_title('Matriz de Confusão') \n ax.xaxis.set_ticklabels(['0', '1'])\n ax.yaxis.set_ticklabels(['0', '1'])\n plt.close()\n return fig\n\ndef ConferindoDir():\n\n print(len(os.listdir(DATASET_HEAR + 'Train/NAO_VIOLENCIA_PNG')))\n print(len(os.listdir(DATASET_HEAR + 'Train/VIOLENCIA_PNG')))\n print(len(os.listdir(DATASET_HEAR + 'Test/VIOLENCIA_PNG')))\n print(len(os.listdir(DATASET_HEAR + 'Test/NAO_VIOLENCIA_PNG')))\n print(len(os.listdir(DATASET_HEAR + 'Evaluation_SCAPER/VIOLENCIA_PNG')))\n print(len(os.listdir(DATASET_HEAR + 'Evaluation_SCAPER/NAO_VIOLENCIA_PNG')))\n\n\ndef ModeloKeras_VGG16(BATCH = 10, 
EPOCHs= 10):\n\n    from tensorflow.keras.applications.vgg16 import VGG16\n    _MODEL_FILE_ = 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n    PrintComHora('Inicio do treinamento do modelo...[ ]')\n    # Create an instance of the VGG16 model from the local pre-trained weights\n    local_weights_file = MODELOS_IMPORTADOS + _MODEL_FILE_\n\n    pre_trained_model = VGG16(input_shape=(120, 160, 3),\n                              include_top=False,\n                              weights=None)\n\n    pre_trained_model.load_weights(local_weights_file)\n    #pre_trained_model.summary()\n    # Make all the layers in the pre-trained model non-trainable\n    for layer in pre_trained_model.layers:\n        layer.trainable = False\n\n    last_layer = pre_trained_model.get_layer('block5_pool')\n    last_output = last_layer.output\n\n    # Flatten the output layer to 1 dimension\n    x = layers.Flatten()(last_output)\n    # Add a fully connected layer with 1,024 hidden units and ReLU activation\n    x = layers.Dense(1024, activation='relu')(x)\n    # Add a dropout rate of 0.2\n    x = layers.Dropout(0.2)(x)\n    # Add a final softmax layer over the two classes\n    x = layers.Dense(2, activation='softmax')(x)\n\n    model = Model(pre_trained_model.input, x)\n    #model = Model(pre_trained_model)  # first run\n\n    model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])\n    model_summary = model.summary()  # note: summary() prints the summary and returns None\n\n    train_generator, validation_generator = CallImageDataGenerator(batch=BATCH)\n    meus_callbacks = Callbacks()\n\n    history = model.fit(train_generator,\n                        epochs=EPOCHs,\n                        verbose=1,\n                        validation_data=validation_generator,\n                        callbacks=meus_callbacks)\n\n    PrintComHora('Fim do treinamento do modelo...[OK!]')\n\n    return model, history, model_summary\n\n\ndef CallImageDataGenerator(batch=10):\n    TRAINING_DIR = DATASET_HEAR + \"Train/\"\n    train_datagen = ImageDataGenerator(rescale=1.0/255.)\n    train_generator = train_datagen.flow_from_directory(TRAINING_DIR,\n                                                        batch_size=batch,\n                                                        class_mode='categorical',\n                                                        target_size=TARGET_SIZE)\n    print(train_generator.class_indices)\n    VALIDATION_DIR = DATASET_HEAR + \"Test/\"\n    validation_datagen = ImageDataGenerator(rescale=1.0/255.)\n    validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,\n                                                                  batch_size=batch,\n                                                                  class_mode='categorical',\n                                                                  target_size=TARGET_SIZE)\n    print(validation_generator.class_indices)\n    return train_generator, validation_generator\n\ndef SalvarModelo(model):\n    # Save the entire model as a SavedModel, then convert it to TFLite.\n    # os.mkdir(diretorio)\n    model.save(_MODEL_FULLPATH_)\n    converter_tflite = tf.lite.TFLiteConverter.from_saved_model(_MODEL_FULLPATH_)\n    tflite_model = converter_tflite.convert()\n    with open(_MODEL_NAME_ + '.tflite', 'wb') as f:\n        f.write(tflite_model)\n\nclass myCallback(tf.keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs=None):\n        acc = (logs or {}).get('accuracy')\n        if acc is not None and acc > DESIRED_ACCURACY:\n            print(\"\\nReached 99.5% accuracy so cancelling training!\")\n            self.model.stop_training = True\n\ndef Callbacks():\n\n    checkpoint_path = \"training/\" + timestamp + \"/\" + _MODEL_NAME_ + \".ckpt\"\n    checkpoint_dir = os.path.dirname(checkpoint_path)\n\n    # Create a callback that saves the model's weights; 'monitor' is set\n    # explicitly so that mode='max'/save_best_only track validation accuracy\n    # instead of the default val_loss.\n    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n                                                     monitor='val_accuracy',\n                                                     save_weights_only=True,\n                                                     verbose=1,\n                                                     mode='max',\n                                                     save_best_only=True)\n\n    val_loss_callback = ReduceLROnPlateau(monitor='val_accuracy',\n                                          mode='max',\n                                          min_delta=0.03,\n                                          patience=3,\n                                          factor=.5,\n                                          min_lr=0.00001,\n                                          verbose=1)\n\n    Callback_acc = myCallback()\n\n    return [cp_callback, val_loss_callback, 
Callback_acc]\n\ndef previsao(modelo, DirPositivo, DirNegativo):\n    '''Runs model validation: classifies every PNG in the positive and\n    negative evaluation directories and returns the true/predicted labels.\n    '''\n    ypred = []\n    yreal = []\n    FP, FN, TP, TN = 0, 0, 0, 0\n\n    for filename in os.listdir(DirPositivo):\n        if \"png\" in filename:\n            # append the true label only for files that are actually scored,\n            # so that yreal and ypred stay aligned\n            yreal.append(1)\n            file_path = os.path.join(DirPositivo, filename)\n            img = image.load_img(file_path, target_size=TARGET_SIZE)\n            x = image.img_to_array(img)\n            x = np.expand_dims(x, axis=0)\n            x = x / 255.\n            images = np.vstack([x])\n            classes = modelo.predict(images)\n            if classes[0][1] > 0.5:\n                ypred.append(1)\n                TP += 1\n                print(\"\\rTP: %i; FP: %i; TN: %i; FN: %i\" % (TP, FP, TN, FN), end='')\n            else:\n                ypred.append(0)\n                FN += 1  # a missed positive is a false negative\n                print(\"\\rTP: %i; FP: %i; TN: %i; FN: %i\" % (TP, FP, TN, FN), end='')\n\n    for filename in os.listdir(DirNegativo):\n        if \"png\" in filename:\n            yreal.append(0)\n            file_path = os.path.join(DirNegativo, filename)\n            img = image.load_img(file_path, target_size=TARGET_SIZE)\n            x = image.img_to_array(img)\n            x = np.expand_dims(x, axis=0)\n            x = x / 255.\n            images = np.vstack([x])\n            classes = modelo.predict(images)\n            if classes[0][1] <= 0.5:\n                ypred.append(0)\n                TN += 1\n                print(\"\\rTP: %i; FP: %i; TN: %i; FN: %i\" % (TP, FP, TN, FN), end='')\n            else:\n                ypred.append(1)\n                FP += 1  # a negative scored as positive is a false positive\n                print(\"\\rTP: %i; FP: %i; TN: %i; FN: %i\" % (TP, FP, TN, FN), end='')\n\n    return yreal, ypred\n\ndef main():\n    CheckTF()\n    ConferindoDir()\n    mlflow.tensorflow.autolog(every_n_iter=1)\n    with mlflow.start_run(run_name=_MODEL_NAME_):\n\n        # Log the tags\n        mlflow.set_tags(tags)\n\n        # Build the model\n        model, history, summary = ModeloKeras_VGG16(BATCH = 10, EPOCHs = 30)\n        model_uri = mlflow.get_artifact_uri(\"model\")\n        SalvarModelo(model)\n        #PlotTreino(history)\n        # Predict on the evaluation set\n        y_true, y_pred = previsao(model,\n                                  EVALUATION_DIR + \"VIOLENCIA_PNG\",\n                                  EVALUATION_DIR + \"NAO_VIOLENCIA_PNG\")\n\n        # DataFrame\n        df = pd.DataFrame({'Valor_Real': y_true,\n                           'Valor_Previsto': y_pred\n                           })\n        temp_name = 'DataFrame.csv'\n        df.to_csv(temp_name, index=False)\n        mlflow.log_artifact(temp_name, \"DataFrame\")\n        try:\n            os.remove(temp_name)\n        except FileNotFoundError:\n            print(f\"{temp_name} file not found\")\n\n        # Metrics\n        acuracia, precision, recall, f1 = metricas(y_true, y_pred)\n        print(\"Acurácia: {}\\nPrecision: {}\\nRecall: {}\\nF1-Score: {}\".\n              format(acuracia, precision, recall, f1))\n\n        # Confusion matrix\n        matriz_conf = matriz_confusao(y_true, y_pred)\n        temp_name = \"confusion-matrix.png\"\n        matriz_conf.savefig(temp_name)\n        mlflow.log_artifact(temp_name, \"confusion-matrix-plots\")\n        try:\n            os.remove(temp_name)\n        except FileNotFoundError:\n            print(f\"{temp_name} file not found\")\n\n        # Log parameters and metrics\n        # mlflow.log_param(\"balanced\", balanced)\n        # mlflow.log_param(\"max_depth\", max_depth)\n        mlflow.log_metric(\"Acuracia\", acuracia)\n        mlflow.log_metric(\"Precision\", precision)\n        mlflow.log_metric(\"Recall\", recall)\n        mlflow.log_metric(\"F1-Score\", f1)\n\n        # Log the model\n        # modelo = './saved_model' + timestamp + \"/\"\n        # mlflow.tensorflow.log_model(modelo, \"model_Inception\")\n        mlflow.log_artifact(local_path='./'+os.path.basename(__file__), artifact_path='code_models')\n\n        # Log the model summary\n        from contextlib import redirect_stdout\n        temp_name = 'modelsummary.txt'\n        with open(temp_name, 'w') as f:\n            with redirect_stdout(f):\n                model.summary()\n        mlflow.log_artifact(temp_name, 'Model Summary')\n        try:\n            os.remove(temp_name)\n        except FileNotFoundError:\n            print(f\"{temp_name} file not found\")\n\n    mlflow.end_run()\n\nif 
__name__ == \"__main__\":\n    main()\n\n","sub_path":"treinar_modelo_VGG.py","file_name":"treinar_modelo_VGG.py","file_ext":"py","file_size_in_byte":13690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"96492476","text":"from flask import Flask, render_template, request, jsonify\nfrom lib.classifier.l1classifier import L1Classifier\nfrom lib.classifier.l2classifier import L2Classifier\nfrom lib.examples import Examples\nimport threading\nimport json\nimport os.path\n\nprint(\" - Starting up application\")\nlock = threading.Lock()\napp = Flask(__name__)\n\n\nclass App:\n    __shared_state = {}\n\n    def __init__(self):\n        self.__dict__ = self.__shared_state\n\n    def classifier(self, name=''):\n        with lock:\n            if getattr(self, '_classifier', None) is None:\n                print(\" - Building new classifiers - might take a while.\")\n                l1 = L1Classifier(\"L1.m\").build()\n                print(\" - L1 Classifier was built\")\n                l2 = L2Classifier(\"L2.m\").build()\n                print(\" - L2 Classifier was built\")\n                self._classifier = (l1, l2)\n                print(\" - Done!\")\n\n            if name == 'L1' or name == 'on':\n                return self._classifier[0]\n            else:\n                return self._classifier[1]\n\nt = threading.Thread(target=App().classifier)\nt.daemon = True\nt.start()\n\n\n@app.route('/')\ndef main():\n    return render_template('main.html')\n\n\n@app.route('/predict')\ndef predict():\n    q = request.args.get('q')\n    ann_type = request.args.get('ann_type')\n    label, prediction = App().classifier(ann_type).classify(q)\n    return jsonify(q=q, predicted_class=int(label), prediction=str(prediction))\n\n\n@app.route('/examples')\ndef examples():\n    examples = Examples(App().classifier('L1'), App().classifier('L2')).load(5, 5)\n    return jsonify(items=examples)\n\n\n@app.route('/test')\ndef test():\n    if not os.path.isfile('results.json'):\n        result = Examples(App().classifier('L1'), App().classifier('L2')).test()\n        with open('results.json', 'w') as outfile:\n            json.dump(result, outfile)\n    else:\n        with open('results.json', 'r') as infile:\n            result = json.load(infile)\n    return jsonify(items=result)\n\nif __name__ == '__main__':\n    app.run(port=8080, host='localhost', debug=True)\n","sub_path":"sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503739483","text":"import unittest\n\nfrom com.sbk.converter.converter import signal_to_wave, wave_to_spectrum\nfrom com.sbk.signal.triangle.triangle_signal import TriangleSignal\n\n\nclass TestSpectrumParent(unittest.TestCase):\n\n    def setUp(self):\n        self.test_freq = 200\n        self.triangle_signal = TriangleSignal(self.test_freq)\n        self.triangle_wave = signal_to_wave(self.triangle_signal)\n        self.triangle_spectrum = wave_to_spectrum(self.triangle_wave)\n\n    @unittest.skip(\"define behaviour later\")\n    def test_spectrum_parent_plot(self):\n        self.triangle_spectrum.plot(high=2000)\n","sub_path":"tests/com/sbk/spectrum/test_spectrum_parent.py","file_name":"test_spectrum_parent.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"573443323","text":"# This program reads numbers from the keyboard\n# (until the user types 0) and prints\n# the numbers in the reverse order they were entered\nlista = []\nwhile True:\n    n = int(input('Digite um número: '))\n    if n == 0:\n        break\n    lista = [n] + lista\n\nfor n in lista:\n    
print(n)","sub_path":"07.listas/inverte.py","file_name":"inverte.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330383397","text":"#!/usr/bin/env python\n\nfrom util import *\nimport os as OS\n\ndir_root = ''\ndir_android = ''\ndir_chromium = ''\n\n\ndef handle_option():\n global args\n\n parser = argparse.ArgumentParser(description='Script to sync, build Android',\n formatter_class=argparse.RawTextHelpFormatter,\n epilog='''\nexamples:\n python %(prog)s -s master\n python %(prog)s -s android-4.4.2_r1\n python %(prog)s -f all\n\n python %(prog)s -s android-4.3_r1 -b -f all\n\n''')\n\n #parser.add_argument('-s', '--sync', dest='sync', help='android or chromium, all for both', choices=['all', 'chromium', 'android'])\n parser.add_argument('--sync-android', dest='sync_android', help='sync android code', action='store_true')\n #parser.add_argument('--sync-chromium', dest='sync_chromium', help='sync chromium code', action='store_true')\n\n parser.add_argument('-b', '--build', dest='build', help='build', action='store_true')\n parser.add_argument('--target-arch', dest='target_arch', help='target arch', choices=['x86', 'arm', 'x86_64'], default='x86_64')\n\n parser.add_argument('--init', dest='init', help='init', action='store_true')\n\n args = parser.parse_args()\n\n if len(sys.argv) <= 1:\n parser.print_help()\n\n\ndef setup():\n global dir_root, dir_android, dir_chromium\n\n dir_root = os.path.abspath(os.getcwd())\n dir_android = dir_root + '/android'\n dir_chromium = dir_root + \"/chromium\"\n\n os.chdir(dir_root)\n\n\ndef hack_manifest():\n backup_dir('android/.repo/manifests')\n execute('rm -f default.xml.bk')\n execute('cp -f default.xml default.xml.bk')\n restore_dir()\n\n\ndef init():\n if not args.init:\n return()\n\n if not OS.path.exists('chromium'):\n error('Could not find chromium code, please prepare for it first')\n\n if not OS.path.exists('android'):\n OS.mkdir('android')\n\n backup_dir('android')\n if host_os == 'Linux' and not has_process('privoxy'):\n execute('sudo privoxy /etc/privoxy/config')\n os.putenv('http_proxy', '127.0.0.1:8118')\n os.putenv('https_proxy', '127.0.0.1:8118')\n\n cmd = 'repo init -u https://android.googlesource.com/platform/manifest'\n result = execute(cmd, show_progress=True)\n if result[0]:\n error('Failed to repo init for android')\n\n if host_os == 'Linux':\n execute('sudo killall privoxy')\n os.putenv('http_proxy', '')\n os.putenv('https_proxy', '')\n restore_dir()\n\n hack_manifest()\n\n #sync_android(force=True)\n\n if not OS.path.islink('android/external/chromium_org/src'):\n execute('ln -s ' + dir_chromium + '/src android/external/chromium_org/src')\n\n\ndef sync_android(force=False):\n if not args.sync_android and not force:\n return\n\n backup_dir('android')\n cmd = 'repo sync -c -j16'\n result = execute(cmd, interactive=True)\n if result[0]:\n error('Failed to sync android')\n restore_dir()\n\n\ndef build():\n if not args.build:\n return\n\n backup_dir('chromium/src')\n #execute('./android_webview/tools/gyp_webview -Dtarget_arch=' + args.target_arch, show_progress=True)\n restore_dir()\n\n backup_dir('android')\n execute('. 
build/envsetup.sh && lunch aosp_' + args.target_arch + '-eng')\n # make -j\n execute('mmm frameworks/webview external/chromium_org -j16')\n restore_dir()\n\n\n\n\nif __name__ == \"__main__\":\n handle_option()\n setup()\n init()\n build()\n","sub_path":"python/acr/acr.py","file_name":"acr.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"190149398","text":"from django.shortcuts import render\nfrom django.core import serializers\nfrom datetime import datetime, timedelta\nimport json\nfrom django.http import JsonResponse\nfrom .models import Kospi, Kosdaq, Kospi_by_10second, Kosdaq_by_10second\n\n\ndef chart(request):\n return render(request, 'chart.html')\n\ndef chart_data_kospi(request):\n dataset = Kospi.objects.all().order_by('time')\n kospis =[]\n for stock in dataset:\n time = (stock.time.hour*3600 + stock.time.minute*60 + stock.time.second)*1000\n kospis.append([time, stock.fluctuation*100])\n \n return JsonResponse(kospis, safe=False)\n\ndef chart_data_kosdaq(request):\n dataset = Kosdaq.objects.all().order_by('time')\n kosdaqs = []\n for stock in dataset:\n time = (stock.time.hour*3600 + stock.time.minute*60 + stock.time.second)*1000\n kosdaqs.append([time, stock.fluctuation*100])\n return JsonResponse(kosdaqs, safe=False)\n\n\ndef search(request):\n date = request.GET.get(\"date\")\n index = request.GET.get(\"index\")\n if not date or not index:\n return render(request, 'search.html')\n date = datetime.strptime(date, \"%Y-%m-%d\")\n if index == \"KOSPI\":\n index = Kospi_by_10second\n elif index == \"KOSDAQ\":\n index = Kosdaq_by_10second\n else:\n return render(request, 'search.html')\n return render(request, 'search.html', {'indexes': index.objects.filter(time__gt=date, time__lt=date+timedelta(days=1))})\n\n","sub_path":"Stock/STOCKES/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456558907","text":"import os\r\n\r\nfrom flask import Flask, flash, redirect, render_template, request, session\r\nfrom flask_session import Session\r\nfrom functools import wraps\r\nfrom sqlalchemy import desc\r\nfrom tempfile import mkdtemp\r\nfrom werkzeug.exceptions import default_exceptions\r\nfrom werkzeug.security import check_password_hash, generate_password_hash\r\nfrom models import *\r\n\r\n# Configure application\r\napp = Flask(__name__)\r\n\r\n# Ensure templates are auto-reloaded\r\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\r\n\r\n# Ensure responses aren't cached\r\n@app.after_request\r\ndef after_request(response):\r\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n response.headers[\"Expires\"] = 0\r\n response.headers[\"Pragma\"] = \"no-cache\"\r\n return response\r\n\r\n# Configure session to use filesystem (instead of signed cookies)\r\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\r\napp.config[\"SESSION_PERMANENT\"] = False\r\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\r\nSession(app)\r\n\r\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = 'sqlite:///diarize.db'\r\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\r\ndb.init_app(app)\r\n\r\nweekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\nmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'may', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\r\n\r\n\r\ndef login_required(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n 
if session.get(\"user_id\") is None:\r\n            return redirect(\"/login\")\r\n        return f(*args, **kwargs)\r\n    return decorated_function\r\n\r\n\r\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\r\ndef login():\r\n    \"\"\"Log user in\"\"\"\r\n\r\n    session.clear()\r\n\r\n    if request.method == \"POST\":\r\n\r\n        email_address = request.form.get(\"email_address\")\r\n        password = request.form.get(\"password\")\r\n\r\n        if not (email_address and password):\r\n            error = \"login info missing\"\r\n            print(error)\r\n            return render_template(\"login.html\", error=error)\r\n\r\n        user = User.query.filter_by(email_address=email_address).first()\r\n\r\n        if not user or not check_password_hash(user.password, password):\r\n            error = \"Incorrect user/password combo\"\r\n            print(error)\r\n            return render_template(\"login.html\", error=error)\r\n\r\n        session[\"user_id\"] = user.id\r\n\r\n        return redirect(\"/\")\r\n\r\n    return render_template(\"login.html\", error=\"\")\r\n\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n    \"\"\"Log user out\"\"\"\r\n\r\n    session.clear()\r\n    return redirect(\"/login\")\r\n\r\n\r\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\r\ndef register():\r\n\r\n    if request.method == \"POST\":\r\n\r\n        email_address = request.form.get(\"email_address\")\r\n        password = request.form.get(\"password\")\r\n\r\n        if not (email_address and password):\r\n            error = \"missing info\"\r\n            print(error)\r\n            return render_template(\"register.html\", error=error)\r\n\r\n        if User.query.filter_by(email_address=email_address).all():\r\n            error = \"user already exists with this email address\"\r\n            print(error)\r\n            return render_template(\"register.html\", error=error)\r\n\r\n        password_hash = generate_password_hash(password)\r\n\r\n        new_user = User(email_address=email_address, password=password_hash)\r\n        db.session.add(new_user)\r\n\r\n        try:\r\n            db.session.commit()\r\n        except Exception:\r\n            print(\"database error committing new user to database\")\r\n            return redirect(\"/register\")\r\n\r\n        user = User.query.filter_by(email_address=email_address).first()\r\n        session[\"user_id\"] = user.id\r\n\r\n        return redirect(\"/\")\r\n\r\n    else:\r\n        return render_template(\"register.html\", error=\"\")\r\n\r\n\r\n@app.route(\"/\")\r\n@login_required\r\ndef index():\r\n\r\n    days_data = Day.query.filter_by(user_id=session[\"user_id\"]).all()\r\n\r\n    if not days_data:\r\n        return render_template(\"/intro/intro.html\")\r\n\r\n    if not days_data[0].complete:\r\n        return render_template(\"/intro/intro_overview.html\")\r\n\r\n    if len(days_data) == 1:\r\n        return render_template(\"/intro/intro_complete.html\")\r\n\r\n    days = []\r\n\r\n    for day in days_data:\r\n        weekday = weekdays[day.datetime.weekday()][0:3]\r\n\r\n        date = str(day.datetime.day)\r\n        date += \" \" + months[day.datetime.month - 1]\r\n        date += \" \" + str(day.datetime.year)\r\n\r\n        days.append([day.day_no, weekday, date])\r\n\r\n    if not days_data[len(days) - 1].complete:\r\n        current_day = days[len(days) - 1]\r\n        del days[len(days) - 1]\r\n    else:\r\n        current_day = \"\"\r\n\r\n    del days[0]\r\n\r\n    return render_template(\"overview.html\", current_day=current_day, days=days)\r\n\r\n\r\n@app.route(\"/day/<int:day_no>\")\r\n@login_required\r\ndef day(day_no):\r\n\r\n    day_data = Day.query.filter_by(user_id=session[\"user_id\"]).filter_by(day_no=day_no).first()\r\n\r\n    date_text = weekdays[day_data.datetime.weekday()]\r\n    date_text += \" \" + str(day_data.datetime.day)\r\n    date_text += \" \" + months[day_data.datetime.month - 1]\r\n    date_text += \" \" + str(day_data.datetime.year)\r\n\r\n    tasks_done = 
[]\r\n    tasks_left = []\r\n\r\n    for task in day_data.plan_items:\r\n        if task.done:\r\n            tasks_done.append(task)\r\n        else:\r\n            tasks_left.append(task)\r\n\r\n    if day_no == 0:\r\n        return render_template(\"/intro/intro_day.html\",\r\n                               date_text=date_text,\r\n                               day_data=day_data,\r\n                               tasks_done=tasks_done,\r\n                               tasks_left=tasks_left)\r\n\r\n    return render_template(\"day.html\",\r\n                           date_text=date_text,\r\n                           day_data=day_data,\r\n                           tasks_done=tasks_done,\r\n                           tasks_left=tasks_left)\r\n\r\n\r\n@app.route(\"/plan\", methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef plan():\r\n\r\n    last_day_data = Day.query.filter_by(user_id=session[\"user_id\"]).order_by(desc(Day.day_no)).first()\r\n\r\n    if last_day_data:\r\n        if not last_day_data.complete:\r\n            print(\"user has not completed most recent entry\")\r\n            return redirect(\"/\")\r\n\r\n    if request.method == \"POST\":\r\n\r\n        data = request.form\r\n\r\n        if last_day_data:\r\n            new_day = Day(day_no=last_day_data.day_no + 1, user_id=session['user_id'])\r\n        else:\r\n            new_day = Day(day_no=0, user_id=session['user_id'])\r\n\r\n        db.session.add(new_day)\r\n\r\n        try:\r\n            db.session.commit()\r\n        except Exception:\r\n            print(\"database error committing new day\")\r\n            return redirect(\"plan\")\r\n\r\n        day_id = new_day.id\r\n\r\n        grateful_items = Grateful_items(day_id=day_id,\r\n                                        item_1=data.get('grateful_1'),\r\n                                        item_2=data.get('grateful_2'),\r\n                                        item_3=data.get('grateful_3'))\r\n\r\n        db.session.add(grateful_items)\r\n\r\n        plan_items_what = data.getlist('what')\r\n        plan_items_why = data.getlist('why')\r\n\r\n        for i in range(len(plan_items_what)):\r\n            plan_item = Plan_item(day_id=day_id,\r\n                                  item_no = i + 1,\r\n                                  what = plan_items_what[i],\r\n                                  why = plan_items_why[i])\r\n            db.session.add(plan_item)\r\n\r\n        good_deed = Good_deed(day_id=day_id,\r\n                              deed = data.get('good_deed'))\r\n\r\n        db.session.add(good_deed)\r\n\r\n        adventure = Adventure(day_id=day_id,\r\n                              adventure = data.get('adventure'))\r\n\r\n        db.session.add(adventure)\r\n\r\n        try:\r\n            db.session.commit()\r\n        except Exception:\r\n            print(\"database error committing plan data\")\r\n            return redirect(\"plan\")\r\n\r\n        return redirect(\"/\")\r\n\r\n    else:\r\n        now = datetime.now()\r\n        date = weekdays[now.weekday()]\r\n        date += \" \" + str(now.day)\r\n        date += \" \" + months[now.month - 1]\r\n        date += \" \" + str(now.year)\r\n\r\n        if not last_day_data:\r\n            return render_template(\"/intro/intro_plan.html\", date=date)\r\n\r\n        day_no = last_day_data.day_no + 1\r\n\r\n        return render_template(\"plan2.html\", day_no=day_no, date=date)\r\n\r\n\r\n@app.route(\"/review\", methods=(\"GET\", \"POST\"))\r\n@login_required\r\ndef review():\r\n\r\n    current_day = Day.query.filter_by(user_id=session[\"user_id\"]).order_by(desc(Day.day_no)).first()\r\n\r\n    if current_day.complete:\r\n        print(\"user has already completed day\")\r\n        return redirect(\"/\")\r\n\r\n    if request.method == \"POST\":\r\n        data = request.form\r\n        day_id = current_day.id\r\n\r\n        plan_items = current_day.plan_items\r\n        plan_done = data.getlist(\"plan_done\")\r\n\r\n        for i in range(len(plan_items)):\r\n            plan_items[i].done = bool(plan_done[i])\r\n\r\n        if data.get(\"adventure_done\"):\r\n            current_day.adventure[0].done = True\r\n\r\n        improvement = Improvement(day_id=day_id, improvement=data.get('improvement'))\r\n        db.session.add(improvement)\r\n\r\n        good_deed = current_day.good_deed[0]\r\n\r\n        if data.get('deed'):\r\n            good_deed.deed = data.get('deed')\r\n\r\n        good_deed.impact = data.get('impact')\r\n\r\n        great_things = data.getlist('great_things')\r\n        for i in 
range(len(great_things)):\r\n            event = Event(day_id=day_id, event_no=i+1, event=great_things[i])\r\n            db.session.add(event)\r\n\r\n        current_day.complete = True\r\n\r\n        try:\r\n            db.session.commit()\r\n        except Exception:\r\n            print(\"database error committing review items/changes\")\r\n            return redirect(\"plan\")\r\n\r\n        return redirect(\"/\")\r\n\r\n    else:\r\n        plan_items = []\r\n\r\n        day_no = current_day.day_no\r\n        date = weekdays[current_day.datetime.weekday()]\r\n        date += \" \" + str(current_day.datetime.day)\r\n        date += \" \" + months[current_day.datetime.month - 1]\r\n        date += \" \" + str(current_day.datetime.year)\r\n\r\n        for item in current_day.plan_items:\r\n            plan_items.append(item.what)\r\n\r\n        adventure = current_day.adventure[0].adventure\r\n        good_deed = current_day.good_deed[0].deed\r\n\r\n        if day_no == 0:\r\n            return render_template(\"/intro/intro_review.html\",\r\n                                   day_no=day_no,\r\n                                   date=date,\r\n                                   plan_items=plan_items,\r\n                                   adventure=adventure,\r\n                                   good_deed=good_deed)\r\n\r\n        return render_template(\"review.html\",\r\n                               day_no=day_no,\r\n                               date=date,\r\n                               plan_items=plan_items,\r\n                               adventure=adventure,\r\n                               good_deed=good_deed)\r\n\r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530105583","text":"import os\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\n#matplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom argparse import ArgumentParser\nfrom copy import copy\nimport datetime\nimport yaml\n\nfrom tools.data_helpers import format_offsetable_params\n\n\n# Parse the command-line arguments\nparser = ArgumentParser()\nparser.add_argument(\"--dirs\", nargs=\"+\", help=\"Path to first loss file\")\n\nargs = parser.parse_args()\n\nif args.dirs is not None:\n    dirs = args.dirs\nelse:\n    dirs = [\"../\"]\n\ndir_paths = dirs\ndirs = [dir_name + \"logs/params_mspe.txt\" for dir_name in dirs]\n\ndirs_trainable_params = []\nfor dir_name in dir_paths:\n    with open(dir_name + \"code/config.yaml\", 'r') as f:\n        try:\n            setup_params = yaml.safe_load(f)\n            #print(setup_params)\n        except yaml.YAMLError as exc:\n            print(exc)\n    trainable_params_ = setup_params[\"PARAMS\"][\"TRAINABLE\"]\n    trainable_params_ = format_offsetable_params(trainable_params_)\n    dirs_trainable_params.append(trainable_params_)\n\ntrainable_params = dirs_trainable_params[0]\n\n#kinematic_levels = None\nkinematic_levels = [\n        [0],\n        [3],\n        [1,2,6],\n        [4,5,9],\n        [7,8,12,13,14],\n        [15,16,17],\n        [18,19],\n        [20,21],\n        [22,23]\n        ]\n\n\nSUBSAMPLE_PERIOD = 20\n#SUBSAMPLE_PERIOD = 10\n#SUBSAMPLE_PERIOD = 2\n\ndef add_losses(dir_losses, epoch_array, curr_epoch):\n    epoch_str = \"\".join(epoch_array)\n    #print(epoch_str)\n\n    epoch_losses = epoch_str.split(\" \")\n    epoch_losses = [i for i in epoch_losses if i != \"\"]\n    #print(epoch_losses)\n    epoch_losses = [float(num) for num in epoch_losses]\n    #print(epoch_losses)\n\n    dir_losses[curr_epoch] = epoch_losses\n    epoch_array = []\n\n    return dir_losses, epoch_array\n\n\n# Load the loss files\nfor i, dir_name in enumerate(dirs):\n    dir_losses = {}\n\n    with open(dir_name, 'r') as infile:\n        epoch_array = []\n        curr_epoch = None\n        for line in infile:\n            if \"epoch\" in line.strip():\n                epoch_num = int(line.replace(\"epoch \", \"\"))\n                #print(epoch_num)\n\n                if curr_epoch is not None:\n                    dir_losses, epoch_array = add_losses(dir_losses, epoch_array, curr_epoch)\n                curr_epoch = epoch_num\n            else:\n                line 
= line.replace(\"\\n\", \"\")\n line = line.replace(\"[\", \"\").replace(\"]\", \"\")\n epoch_array.append(line)\n\n if curr_epoch is not None:\n dir_losses, epoch_array = add_losses(dir_losses, epoch_array, curr_epoch)\n\n\n# Group losses, if groups are specified\nlevel_names = None\nif kinematic_levels is None:\n level_names = [\"param_{:02d}\".format(i) for i in range(72)]\n level_names = [name for name in level_names if name in trainable_params]\n kinematic_levels = [[i] for i in range(24)]\nelse:\n level_names = [\"level_{}\".format(i) for i in range(len(kinematic_levels))]\n to_pop = []\n for i, level in enumerate(kinematic_levels):\n level_params = []\n for joint in level:\n j1 = 3*joint\n j2 = j1 + 1\n j3 = j2 + 1\n\n curr_joints = [j1, j2, j3]\n curr_joints = [j for j in curr_joints if \"param_{:02d}\".format(j) in trainable_params]\n\n level_params += curr_joints\n level_params = [\"param_{:02d}\".format(param) for param in level_params]\n #print(level_params)\n intersection = set(level_params).intersection(set(trainable_params))\n if len(intersection) == 0:\n to_pop.append(i)\n\n for index in sorted(to_pop, reverse=True):\n level_names.pop(index)\n print(level_names)\n\n# Gather losses for each group\nlevelled_indices = []\nfor i, level in enumerate(kinematic_levels):\n level_indices = []\n for joint in level:\n j1 = 3*joint\n j2 = j1 + 1\n j3 = j2 + 1\n\n curr_joints = [j1, j2, j3]\n curr_joints = [j for j in curr_joints if \"param_{:02d}\".format(j) in trainable_params]\n\n level_indices += curr_joints\n\n levelled_indices.append(level_indices)\nprint(levelled_indices)\n\nlevelled_losses = {}\nepochs = []\nfor epoch, losses in dir_losses.items():\n losses = np.array(losses)\n epoch_losses = []\n for indices in levelled_indices:\n if len(indices) > 0:\n epoch_losses.append(np.mean(losses[indices]))\n levelled_losses[epoch] = epoch_losses\n epochs.append(epoch)\n\nepochs = sorted(epochs)\n#print(levelled_losses)\n\n# Plot these losses\nloss_values = np.array([levelled_losses[key] for key in sorted(levelled_losses.keys())])\nprint(\"loss_values shape: \" + str(loss_values.shape))\nloss_values = loss_values.T\nprint(\"loss_values shape: \" + str(loss_values.shape))\n\n# Choose colours for plot\nnum_plots = loss_values.shape[0]\ncolormap = plt.cm.gist_ncar\nplt.gca().set_prop_cycle(plt.cycler('color', plt.cm.jet(np.linspace(0, 1, num_plots))))\n\n# Plot the losses for each parameter\nfor i, group_losses in enumerate(loss_values):\n plt.plot(epochs, group_losses, label=level_names[i])\n\nplt.xlabel('Epoch')\nplt.ylabel('Error')\nplt.legend()\nplt.show()\n\n","sub_path":"projects/deep_optimiser/code/plot_params_mspe.py","file_name":"plot_params_mspe.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"156253446","text":"from PyQt4 import QtGui, QtCore\nfrom Reader_Control.Axis_Control.Move_to_mologram_thread import Move_to_mologram_thread\nimport Chip as mologram\n\n\n\n# matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nfrom matplotlib.lines import Line2D\nfrom matplotlib.text import Text\n\nfrom matplotlib.offsetbox import AnnotationBbox, OffsetImage\nfrom matplotlib._png import read_png\n\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\n\n\nclass MologramLineRow(QtGui.QWidget):\n\n\tdef 
__init__(self,main_widget):\n\n\t\tsuper(MologramLineRow,self).__init__()\n\n\t\tself.main_widget = main_widget\n\n\t\tself.in_movement = False\n\n\t\t# initialize thread to do movements\n\t\tself.worker_thread = Move_to_mologram_thread(self.main_widget.setup.chip)\n\t\tself.connect(self.worker_thread, QtCore.SIGNAL(\"done_moving\"), self.main_widget.stop_moving)\n\n\t\tself.initUI()\n\n\tdef initUI(self):\n\n\t\t# layout\t\t\n\t\tvbox = QtGui.QVBoxLayout()\n\t\tself.setLayout(vbox)\n\t\t\n\t\t# set up canvas\n\t\tself.figure = plt.figure(figsize=(10,5),facecolor='None',edgecolor='None')\n\t\tself.canvas = FigureCanvas(self.figure)\n\t\tvbox.addWidget(self.canvas)\n\n\t\t# image parameters, axes refer to setup axes\n\t\tself.x_max = 2\n\t\tself.y_max = 4\n\n\t\tself.mologram = read_png('./images/mologram.png')\n\t\tself.mologram_turned = read_png('./images/mologram_turned.png')\n\n\t\t# initial display\n\t\tself.initDisplay()\t\n\n\t\tcid = self.figure.canvas.mpl_connect('button_press_event', self.onclick)\n\n\tdef initDisplay(self):\n\t\tself.updatePos()\n\n\t\t# clear previous drawings\n\t\tself.ax = self.figure.add_subplot(111)\n\t\tself.ax.set_aspect('equal')\n\n\t\t# draw features\n\t\tself.draw_current_field()\n\t\tself.draw_current_mologram()\n\n\t\t# set up axes\n\t\tself.ax.xaxis.set_ticks_position('none') # tick markers\n\t\tself.ax.yaxis.set_ticks_position('none')\n\t\tplt.xticks([]) \t\t# labels \n\t\tplt.yticks([])\n\t\tplt.axis([0,self.y_max,0,self.x_max]) # axis dimensions\n\n\t\tself.figure.tight_layout()\n\t\t# show image\n\t\tself.canvas.draw()\n\n\tdef updateDisplay(self):\n\t\tself.updatePos()\n\n\t\ttry:\n\t\t\tself.curr_mologram.remove()\n\t\texcept Exception:\n\t\t\tpass\n\t\tself.draw_current_mologram()\n\n\t\t# show image\n\t\tself.canvas.draw()\n\n\tdef updatePos(self):\n\t\tself.x = self.main_widget.setup.motors.x.pos('mm')\n\t\tself.y = self.main_widget.setup.motors.y.pos('mm')\n\t\tposition = str(self.x) + ',' + str(self.y)\n\t\tself.field,self.line,self.row = self.main_widget.setup.chip.update_position(position)\n\n\t\treturn position\n\n\tdef draw_current_mologram(self):\n\n\t\tview_width = 2560*2.2/10/1000\n\t\tview_height = 1920*2.2/10/1000\n\n\t\t# disregard if not in a mologram field\n\t\tif (self.field == -1) or (self.line == -1) or (self.row == -1):\n\t\t\treturn\n\n\t\t# get x,y location of current field\n\t\tx_offset = -(view_height)/2 + 2*mologram.mologram_ver_spacing\n\t\ty_offset = -(view_width)/2\n\n\t\t# y = (self.row-1)*mologram.mologram_hor_spacing + y_offset\n\t\t# x = (mologram.lines_per_field-self.line)*mologram.mologram_ver_spacing + x_offset\n\t\ty = self.main_widget.setup.motors.y.pos('mm') - mologram.side_margin + y_offset\n\t\tx = -1*self.main_widget.setup.motors.x.pos('mm') - mologram.bottom_margin + x_offset\n\n\t\tx -= (5-(ord(self.field) - ord('B')))*mologram.mologram_field_spacing\n\n\t\t#print((x,y))\n\n\t\t#self.ax.add_patch(patches.Rectangle(\n\t\tself.curr_mologram = (patches.Rectangle(\n\t\t\t(y,x),\n\t\t\tview_width,\n\t\t\tview_height,\n\t\t\tfacecolor='#0A0A0A',\t\t\t\n\t\t\talpha=0.2,\n\t\t\tzorder = 2\n\t\t))\n\n\t\tself.ax.add_artist(self.curr_mologram)\n\n\tdef draw_mologram(self,position,turned):\n\t\tif turned:\n\t\t\timagebox = OffsetImage(self.mologram_turned, zoom=.05)\n\t\telse:\n\t\t\timagebox = OffsetImage(self.mologram, zoom=.05)\n\n\t\tab = AnnotationBbox(\n\t\t\timagebox, \n\t\t\tposition,\n\t\t\txybox= None,\n\t\t\txycoords='data',\n\t\t\tboxcoords=\"offset 
points\",\n\t\t\tframeon=False\n\t\t)\n\t\tab.zorder=1\n\t\t\n\t\tself.ax.add_artist(ab)\n\n\tdef draw_current_field(self):\n\n\t\tx_offset = (mologram.mologram_ver_spacing)/2\n\t\ty_offset = (mologram.mologram_hor_spacing)/2\n\n\t\t# draw all molograms in current field\n\t\tfor i in range(mologram.lines_per_field):\n\t\t\tfor j in range(mologram.rows_per_field):\n\n\t\t\t\ty = j*mologram.mologram_hor_spacing + y_offset\n\t\t\t\tx = i*mologram.mologram_ver_spacing + x_offset\n\n\t\t\t\tself.draw_mologram([y,x],turned = (i==2))\n\n\tdef onclick(self,event):\n\n\t\t# if already in movement, don't move\n\t\tif self.in_movement == True:\n\t\t\treturn\n\n\t\t# get position of mouse click and return if it was outside figure\n\t\ttry:\n\t\t\ty = event.xdata + mologram.side_margin\n\t\t\tx = event.ydata + mologram.bottom_margin - mologram.mologram_field_height/2\n\t\texcept Exception:\n\t\t\treturn\n\n\t\t# get corresponding mologram indices\n\t\tposition = str(x) + ',' + str(y)\n\t\tfield,line,row = self.main_widget.setup.chip.corresponding_mologram(position)\n\t\t\n\t\t# in case the current view is not in a valid field, move to the selected mologram in the 'B' field\n\t\tif field == -1:\n\t\t\tfield = 'B'\n\t\telse:\n\t\t\tfield = self.field\n\n\t\t# Call thread to move to position\n\t\tself.main_widget.set_in_movement(True)\n\t\tself.worker_thread.move_to(field,line,row)\n\n\tdef heightForWidth(self, w):\n\t\treturn w*(1/2)\n\n\tdef set_in_movement(self,state):\n\t\tself.in_movement = state","sub_path":"src/GUI/MologramWidget/MologramLineRow.py","file_name":"MologramLineRow.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380984696","text":"from itertools import *\r\n\r\nf = open('C-large.out', 'w')\r\n\r\nN = 32\r\nJ = 500\r\nnumWritten = 0\r\nevenIndices = range(2, N, 2)\r\noddIndices = range(1, N, 2)\r\n\r\nf.write(\"Case #1:\\n\")\r\n\r\nfor numOnes in xrange(0, N/2-1):\r\n for oddSet in combinations(oddIndices, numOnes):\r\n for evenSet in combinations(evenIndices, numOnes):\r\n s = [0] * N\r\n s[0] = 1\r\n s[N-1] = 1\r\n for i in oddSet:\r\n s[i] = 1\r\n for j in evenSet:\r\n s[j] = 1\r\n f.write(''.join(str(el) for el in s) + ' 3 2 5 2 7 2 3 2 11\\n')\r\n numWritten += 1\r\n if numWritten >= J:\r\n break\r\n if numWritten >= J:\r\n break\r\n if numWritten >= J:\r\n break\r\n\r\nf.close()\r\n","sub_path":"solutions_5738606668808192_1/Python/sayeedt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"308470717","text":"# _*_ coding: utf-8 _*_\n\n\"\"\"\napp2实例\n\"\"\"\n\nimport dash\nimport datetime\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Output, Input, State\nfrom Dash.app import app\n\n# 全局变量\nmarkdown_text = \"\"\"\n### Dash and Markdown\n\nDash apps can be written in Markdown.\nDash uses the [CommonMark](http://commonmark.org/)\nspecification of Markdown.\nCheck out their [60 Second Markdown Tutorial](http://commonmark.org/help/)\nif this is your first introduction to Markdown!\n\"\"\"\n\n# 构造components\n# ---------------------------------------------------------------------------------------\ndrop_down_list = [\n dbc.DropdownMenuItem(\"First\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\"Links\", header=True),\n 
dbc.DropdownMenuItem(\"Internal link\", href=\"/l/components/alerts\"),\n dbc.DropdownMenuItem(\"External link\", href=\"https://baidu.com\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\"Disabled\", disabled=True),\n dbc.DropdownMenuItem(\"Active\", active=True)\n]\n\n# ---------------------------------------------------------------------------------------\nemail_input = dbc.FormGroup(children=[\n dbc.Label(\"Email\", html_for=\"example-email\"),\n dbc.Input(type=\"email\", placeholder=\"Enter email\"),\n dbc.FormText(\"Are you on email? You simply have to be these days\", color=\"secondary\"),\n])\n\npassword_input = dbc.FormGroup(children=[\n dbc.Label(\"Password\", html_for=\"example-password\"),\n dbc.Input(type=\"password\", placeholder=\"Enter password\"),\n dbc.FormText(\"A password stops mean people taking your stuff\", color=\"secondary\"),\n])\n\n# ---------------------------------------------------------------------------------------\nemail_input_row = dbc.FormGroup(children=[\n dbc.Label(\"Email\", html_for=\"example-email-row\", width=2),\n dbc.Col(dbc.Input(type=\"email\", placeholder=\"Enter email\"), width=10)\n], row=True)\n\npassword_input_row = dbc.FormGroup(children=[\n dbc.Label(\"Password\", html_for=\"example-password-row\", width=2),\n dbc.Col(dbc.Input(type=\"password\", placeholder=\"Enter password\"), width=10)\n], row=True)\n\n# ---------------------------------------------------------------------------------------\nradioitems = dbc.FormGroup(children=[\n dbc.Label(\"Choose one\"),\n dbc.RadioItems(options=[\n {\"label\": \"Option 1\", \"value\": 1},\n {\"label\": \"Option 2\", \"value\": 2},\n ], value=1)\n], className=\"bg-light\")\n\nchecklist = dbc.FormGroup(children=[\n dbc.Label(\"Choose a bunch\"),\n dbc.Checklist(options=[\n {\"label\": \"Option 1\", \"value\": 1},\n {\"label\": \"Option 2\", \"value\": 2},\n ], values=[1, 2]),\n], className=\"bg-light\")\n\n# ---------------------------------------------------------------------------------------\nradioitems_inline = dbc.FormGroup(children=[\n dbc.Label(\"Choose one\"),\n dbc.RadioItems(options=[\n {\"label\": \"Option 1\", \"value\": 1},\n {\"label\": \"Option 2\", \"value\": 2},\n ], value=1, inline=True),\n], className=\"bg-light\")\n\nchecklist_inline = dbc.FormGroup(children=[\n dbc.Label(\"Choose a bunch\"),\n dbc.Checklist(options=[\n {\"label\": \"Option 1\", \"value\": 1},\n {\"label\": \"Option 2\", \"value\": 2},\n ], values=[1, 2], inline=True),\n], className=\"bg-light\")\n\n\n# 创建layout\nlayout = dbc.Container(children=[\n # 显示文字 ========================================================================================\n html.Div(children=[\n html.H1(children=\"Hello Dash H1\"),\n html.H2(children=\"Hello Dash H2\"),\n html.H3(children=\"Hello Dash H3\"),\n html.H4(children=[\n \"This is a heading with a badge! 
\",\n dbc.Badge(\"New!\", color=\"success\")\n ]),\n html.P(children=html.A(children=\"这是一个百度链接\", href=\"http://baidu.com\")),\n html.Label(children=\"这是一个Lable\", className=\"text-info\"),\n html.Pre(children=\"这是一个Pre,常用来显示计算机代码\"),\n dcc.Markdown(children=markdown_text),\n ], className=\"mt-2\"),\n\n html.Div(children=[\n dbc.Alert(\"primary!\", color=\"primary\", id=\"alert_memory\"),\n dbc.Alert(\"secondary!\", color=\"secondary\", id=\"alert_local\"),\n dbc.Alert(\"success!\", color=\"success\", id=\"alert_session\"),\n dbc.Alert(\"info!\", color=\"info\"),\n dbc.Alert(\"warning!\", color=\"warning\"),\n dbc.Alert(\"danger!\", color=\"danger\"),\n ], className=\"mt-2\"),\n\n # 按钮类 ========================================================================================\n html.Div(children=[\n dbc.Button(\"Primary\", color=\"primary\", className=\"mr-2\", id=\"button_memory\"),\n dbc.Button(\"Secondary\", color=\"secondary\", className=\"mr-2\", id=\"button_local\"),\n dbc.Button(\"Success\", color=\"success\", className=\"mr-2\", id=\"button_session\"),\n dbc.Button(\"Info\", color=\"info\", className=\"mr-2\"),\n dbc.Button(\"Warning\", color=\"warning\", className=\"mr-2\"),\n dbc.Button(\"Danger\", color=\"danger\", className=\"mr-2\"),\n dbc.Button(\"outline\", color=\"primary\", className=\"mr-2\", size=\"sm\", outline=True),\n dbc.Button(\"outline\", color=\"secondary\", className=\"mr-2\", size=\"md\", outline=True),\n dbc.Button(\"outline\", color=\"success\", className=\"mr-2\", size=\"lg\", outline=True),\n dbc.Button(\"outline\", color=\"info\", className=\"mr-2\", size=\"md\", outline=True),\n dbc.Button(\"outline\", color=\"warning\", className=\"mr-2\", size=\"sm\", outline=True),\n ], className=\"mt-2\"),\n\n html.Div(children=dbc.ButtonGroup([\n dbc.Button(\"Primary\", color=\"primary\"),\n dbc.Button(\"Secondary\", color=\"secondary\"),\n dbc.Button(\"Success\", color=\"success\"),\n dbc.Button(\"Warning\", color=\"warning\"),\n dbc.Button(\"Danger\", color=\"danger\"),\n dbc.Button(\"Info\", color=\"info\"),\n ]), className=\"mt-2\"),\n\n # 触发类 ========================================================================================\n html.Div(children=[\n dbc.Button(\"Open collapse\", id=\"collapse-button\"),\n dbc.Collapse(dbc.Card(dbc.CardBody(\"This content is hidden in the collapse\")), id=\"collapse\")\n ], className=\"mt-2\"),\n\n html.Div(children=[\n dbc.Button(\"Toggle fade\", id=\"fade-button\"),\n dbc.Fade(dbc.Card(dbc.CardBody(dbc.CardText(\"This content fades in and out\"))), id=\"fade\", is_in=True, appear=False),\n ], className=\"mt-2\"),\n\n html.Div(children=[\n html.P(children=[\"Click on the word \", html.Span(\"popover\", id=\"popover-target\", className=\"text-info\")]),\n dbc.Popover([\n dbc.PopoverHeader(\"Popover header\"),\n dbc.PopoverBody(\"Popover body\"),\n ], id=\"popover\", is_open=False, target=\"popover-target\"),\n ], className=\"mt-2\"),\n\n html.Div(children=[\n html.P([\n \"I wonder what \",\n html.Span(\"floccinaucinihilipilification\", id=\"tooltip-target\", className=\"text-info\"),\n \" means?\",\n ]),\n dbc.Tooltip(\n \"Noun: rare, the action or habit of estimating something as worthless.\",\n target=\"tooltip-target\", placement=\"auto\", # top, left, bottom, right\n ),\n ], className=\"mt-2\"),\n\n html.Div(children=dcc.ConfirmDialogProvider(\n id=\"confirm\",\n children=dbc.Button(\"ConfirmDialogProvider\", color=\"primary\"),\n message=\"Danger danger! 
Are you sure you want to continue?\"\n ), className=\"mt-2\"),\n\n html.Div(children=dbc.Row(children=[\n dbc.Col(dbc.DropdownMenu(label=\"Menu-sm\", bs_size=\"sm\", children=drop_down_list), className=\"mr-2\"),\n dbc.Col(dbc.DropdownMenu(label=\"Menu-md\", bs_size=\"md\", children=drop_down_list), className=\"mr-2\"),\n dbc.Col(dbc.DropdownMenu(label=\"Menu-md\", bs_size=\"lg\", children=drop_down_list), className=\"mr-2\"),\n dbc.Col(dbc.DropdownMenu(label=\"Menu-down\", direction=\"down\", children=drop_down_list), className=\"mr-2\"),\n dbc.Col(dbc.DropdownMenu(label=\"Menu-left\", direction=\"left\", children=drop_down_list), className=\"mr-2\"),\n dbc.Col(dbc.DropdownMenu(label=\"Menu-right\", direction=\"right\", children=drop_down_list), className=\"mr-2\"),\n ], no_gutters=True), className=\"mt-2\"),\n\n # 输入类 ========================================================================================\n html.Div(children=[\n dbc.Input(placeholder=\"A medium(large, small) input...\", bs_size=\"md\", className=\"mb-2\"),\n dbc.Input(placeholder=\"Valid input...\", valid=True, className=\"mb-2\"),\n dbc.Input(placeholder=\"Invalid input...\", invalid=True, className=\"mb-2\"),\n dbc.Input(placeholder=\"Enter text\", type=\"text\", className=\"mb-2\"),\n dbc.Input(placeholder=\"Enter password\", type=\"password\", className=\"mb-2\"),\n dbc.Input(value=10, type=\"number\", className=\"mb-2\"),\n dbc.Input(value=10, type=\"range\", className=\"mb-2\"),\n dbc.Textarea(placeholder=\"Enter a value...\", className=\"mb-2\"),\n dbc.Textarea(placeholder=\"Enter a value...\", className=\"mb-2\", valid=True, bs_size=\"sm\"),\n ], className=\"mt-2\"),\n\n html.Div(children=[\n dbc.InputGroup([\n dbc.InputGroupAddon(\"@\", addon_type=\"prepend\"),\n dbc.Input(placeholder=\"username, size=lg\"),\n ], size=\"lg\", className=\"mb-2\"),\n dbc.InputGroup([\n dbc.Input(placeholder=\"username, size=md\"),\n dbc.InputGroupAddon(\"@example.com\", addon_type=\"append\"),\n ], className=\"mb-2\"),\n dbc.InputGroup([\n dbc.InputGroupAddon(\"$\", addon_type=\"prepend\"),\n dbc.Input(placeholder=\"Amount, size=sm\", type=\"number\"),\n dbc.InputGroupAddon(\".00\", addon_type=\"append\"),\n ], size=\"sm\", className=\"mb-2\"),\n dbc.InputGroup([\n dbc.InputGroupAddon(dbc.Button(\"Random name\"), addon_type=\"prepend\"),\n dbc.Input(placeholder=\"name\"),\n ], className=\"mb-2\"),\n dbc.InputGroup([\n dbc.DropdownMenu(drop_down_list, label=\"Generate\", addon_type=\"prepend\"),\n dbc.Input(placeholder=\"name\"),\n ]),\n ], className=\"mt-2\"),\n\n # 表单类 ========================================================================================\n dbc.Form(children=[email_input, password_input], className=\"mt-2 p-2 bg-light\"),\n dbc.Form(children=[email_input_row, password_input_row], className=\"mt-2 p-2 bg-light\"),\n dbc.Form(children=[\n dbc.FormGroup([\n dbc.Label(\"Email\", className=\"mr-2\"),\n dbc.Input(type=\"email\", placeholder=\"Enter email\")\n ], className=\"mr-3\"),\n dbc.FormGroup([\n dbc.Label(\"Password\", className=\"mr-2\"),\n dbc.Input(type=\"password\", placeholder=\"Enter password\")\n ], className=\"mr-3\"),\n dbc.FormGroup([\n dbc.Label(\"Date\", className=\"mr-2\"),\n dbc.DatePickerSingle(date=datetime.date(2018, 10, 17))\n ], className=\"mr-3\"),\n dbc.FormGroup([\n dbc.Label(\"Date\", className=\"mr-2\"),\n dcc.DatePickerRange(start_date=datetime.datetime(1997, 5, 3), end_date_placeholder_text=\"Select!\")\n ], className=\"mr-3\"),\n ], inline=True, className=\"mt-2 p-2 bg-light\"),\n\n # 
表单类 ========================================================================================\n html.Div(children=[\n dbc.Label(\"Slider\", html_for=\"slider\"),\n dcc.Slider(min=0, max=9, marks={i: \"Label {}\".format(i) if i == 1 else str(i) for i in range(1, 6)}, value=5),\n html.Br(),\n dbc.Label(\"RangeSlider\", html_for=\"range-slider\"),\n dcc.RangeSlider(count=1, min=-5, max=10, step=0.5, value=[-3, 7])\n ], className=\"mt-2\"),\n html.Div(children=[\n dbc.Label(children=\"Progress: 0\", html_for=\"progress\", id=\"progresstext\"),\n dbc.Progress(id=\"progress\", value=0, max=60, striped=True, animated=True),\n dcc.Interval(id=\"interval\", interval=1000, n_intervals=0),\n ], className=\"mt-2\"),\n html.Div(children=[radioitems, checklist, radioitems_inline, checklist_inline], className=\"mt-2\"),\n\n html.Div(children=dbc.Row(children=[\n dbc.Col(dcc.Dropdown(options=[\n {\"label\": \"New York City\", \"value\": \"NYC\"},\n {\"label\": u\"Montréal\", \"value\": \"MTL\"},\n {\"label\": \"San Francisco\", \"value\": \"SF\"}\n ], value=\"MTL\"), className=\"mr-2\"),\n dbc.Col(dcc.Dropdown(options=[\n {\"label\": \"New York City\", \"value\": \"NYC\"},\n {\"label\": u\"Montréal\", \"value\": \"MTL\"},\n {\"label\": \"San Francisco\", \"value\": \"SF\"}\n ], value=\"MTL\", multi=True), width=8)\n ], no_gutters=True), className=\"mt-2\"),\n\n # 文件上传 ========================================================================================\n dcc.Upload(dbc.Button(\"Upload File\"), className=\"mt-2\"),\n dcc.Upload(children=html.Div([\n \"Drag and Drop or \",\n html.A(\"Select Files\")\n ], className=\"p-2 border border-secondary bg-light rounded text-center\"), multiple=True, className=\"mt-2\"),\n\n # 展示类 ========================================================================================\n dbc.ListGroup(children=[\n dbc.ListGroupItem(\"ListGroupItem\"),\n dbc.ListGroupItem(\"Internal link\", href=\"/l/components/list_group\"),\n dbc.ListGroupItem(\"External link\", href=\"https://google.com\"),\n dbc.ListGroupItem(\"Disabled link\", href=\"https://google.com\", disabled=True),\n dbc.ListGroupItem(\"Button\", n_clicks=0, action=True),\n ], className=\"mt-2\"),\n\n dbc.ListGroup(children=[\n dbc.ListGroupItem(\"The primary item\", color=\"primary\"),\n dbc.ListGroupItem(\"A secondary item\", color=\"secondary\"),\n dbc.ListGroupItem(\"A successful item\", color=\"success\"),\n dbc.ListGroupItem(\"A warning item\", color=\"warning\"),\n dbc.ListGroupItem(\"A dangerous item\", color=\"danger\"),\n dbc.ListGroupItem(\"An informative item\", color=\"info\"),\n ], className=\"mt-2\"),\n\n dbc.ListGroup(children=[\n dbc.ListGroupItem([\n dbc.ListGroupItemHeading(\"This item has a heading\"),\n dbc.ListGroupItemText(\"And some text underneath\"),\n ]),\n dbc.ListGroupItem([\n dbc.ListGroupItemHeading(\"This item also has a heading\"),\n dbc.ListGroupItemText(\"And some more text underneath too\"),\n ]),\n ], className=\"mt-2\"),\n])\n\n\n# 创建回调函数:回调函数中不能出现全局变量\nfor store in (\"memory\", \"local\", \"session\"):\n @app.callback(Output(store, \"data\"), [\n Input(\"button_%s\" % store, \"n_clicks\")\n ], [\n State(store, \"data\")\n ])\n def toggle_store_button(n_clicks, data):\n if n_clicks is None:\n raise dash.exceptions.PreventUpdate\n data = data or {\"clicks\": 0}\n data[\"clicks\"] = data[\"clicks\"] + 1\n return data\n\n @app.callback(Output(\"alert_%s\" % store, \"children\"), [\n Input(store, \"modified_timestamp\")\n ], [\n State(store, \"data\"),\n State(store, \"id\")\n ])\n def 
toggle_store_change(ts, data, _id):\n if ts is None:\n raise dash.exceptions.PreventUpdate\n data = data or {}\n return \"%s: %s\" % (_id, data.get(\"clicks\", 0))\n\n\n@app.callback(Output(\"collapse\", \"is_open\"), [\n Input(\"collapse-button\", \"n_clicks\")\n], [\n State(\"collapse\", \"is_open\")\n])\ndef toggle_collapse(n, is_open):\n if n:\n return not is_open\n return is_open\n\n\n@app.callback(Output(\"fade\", \"is_in\"), [\n Input(\"fade-button\", \"n_clicks\")\n], [\n State(\"fade\", \"is_in\")\n])\ndef toggle_fade(n, is_in):\n if not n:\n return True\n return not is_in\n\n\n@app.callback(Output(\"popover\", \"is_open\"), [\n Input(\"popover-target\", \"n_clicks\")\n], [\n State(\"popover\", \"is_open\")\n])\ndef toggle_popover(n, is_open):\n if n:\n return not is_open\n return is_open\n\n\n@app.callback(Output(\"progress\", \"value\"), [\n Input(\"interval\", \"n_intervals\")\n])\ndef advance_progress(n):\n return n % 61\n\n\n@app.callback(Output(\"progresstext\", \"children\"), [\n Input(\"progress\", \"value\"),\n])\ndef advance_text(value):\n return \"Processtext: %d\" % value\n","sub_path":"Dash/apps/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":16098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"13700042","text":"# -*- coding: utf-8 -*-\n\n# python imports\nimport random\n\n# project imports\nfrom ks.commands import ECommandDirection, ChangeGhostDirection, ChangePacmanDirection\nfrom ks.models import ECell, EDirection\n\n\nai = None\n\nCELL_EMPTY = ECell.Empty\nCELL_FOOD = ECell.Food\nCELL_SUPERFOOD = ECell.SuperFood\nCELL_WALL = ECell.Wall\n\nDIR_UP = EDirection.Up\nDIR_RIGHT = EDirection.Right\nDIR_DOWN = EDirection.Down\nDIR_LEFT = EDirection.Left\n\n\ndef initialize(width, height, my_score, other_score,\n board, pacman, ghosts, constants,\n my_side, other_side, current_cycle, cycle_duration):\n\n pass\n\n\ndef decide(width, height, my_score, other_score,\n board, pacman, ghosts, constants,\n my_side, other_side, current_cycle, cycle_duration):\n\n if my_side == 'Pacman':\n change_pacman_direction(random.choice([\n DIR_UP,\n DIR_RIGHT,\n DIR_DOWN,\n DIR_LEFT\n ]))\n elif my_side == 'Ghost':\n for ghost in ghosts:\n change_ghost_direction(ghost.id, random.choice([\n DIR_UP,\n DIR_RIGHT,\n DIR_DOWN,\n DIR_LEFT\n ]))\n\n\ndef change_pacman_direction(dir):\n ai.send_command(ChangePacmanDirection(direction=dir))\n\n\ndef change_ghost_direction(id, dir):\n ai.send_command(ChangeGhostDirection(id=id, direction=dir))\n","sub_path":"PythonRandomClient/simple_ai.py","file_name":"simple_ai.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"404959249","text":"import os\nimport pandas as pd\nimport pickle as pk\n\n\nclass MatchGrader:\n \"\"\"Contains the methods from which to grade matches, whereby all team parameters have already been assessed\n and are contained within the dataframe containing consistent column naming system.\"\"\"\n\n @staticmethod\n def append_graded_results(df):\n # Identify division and append new graded results to graded df\n division = df.index[0][3:6]\n graded_data_path = MatchGrader.configs('consistencies', 'graded_data_path')\n path = os.path.join(graded_data_path, division)\n prev_df = pk.load(open(path, 'rb'))\n graded_league = pd.concat([prev_df, df], axis=0, sort=False)\n graded_league.index.name = 'id'\n\n # Store all data\n pk.dump(graded_league, open(path, 'wb'))\n 
graded_league.to_csv(os.path.join(graded_data_path, str(division) + '.csv'))\n\n @staticmethod\n def calculate_mrkt_odds(arr):\n length = len(arr)\n inv = 1 / arr\n margins = inv.sum(axis=1).reshape(length, 1) - 1\n number_of_outcomes = len(arr[0])\n top = number_of_outcomes * arr\n bottom = number_of_outcomes - arr * margins\n return top / bottom\n\n @staticmethod\n def configs(*args):\n from footballleagues.configurator import Configurations as cfg\n return cfg.get_configs(*args)\n\n @staticmethod\n def grade_matches(df):\n from footballleagues.configurator import Configurations as cfg\n df['MR4'] = df['HT_F4'].subtract(df['AT_F4'])\n df['MR6'] = df['HT_F6'].subtract(df['AT_F6'])\n df['MR_HH'] = df['HT_HH'].subtract(df['AT_HH'])\n cols = MatchGrader.configs('consistencies', 'graded_dataframe_columns')\n cols = [col for col in cols if col in df.columns]\n df = df[cols]\n df = cfg.project_dtype(df, 'boolean')\n df = cfg.project_dtype(df, 'categorical')\n df = cfg.project_dtype(df, 'datetime')\n df = cfg.project_dtype(df, 'float')\n df = cfg.project_dtype(df, 'integer')\n df = cfg.project_dtype(df, 'string')\n return df\n","sub_path":"footballleagues/classes/match_grader.py","file_name":"match_grader.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"200879801","text":"#Andrew Le\n\n\n#read in input_file and execute function\n\ndef main(args):\n\ti_file_name = str(sys.argv[1])\n\ti_file_name = str(i_file_name)\n\tcode = ''\n\ttry:\n\t\ti_file = open(i_file_name, 'r')\n\t\tcode = i_file.read()\n\t\t\n\texcept:\n\t\tprint('input_error %s')\n\t\tsys.exit(2)\n\texec(code)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\tmain(sys.argv[1:])\n","sub_path":"local_search.py","file_name":"local_search.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"622899477","text":"import pytest\nfrom conf import conf_server\n\n\n# conf_server = [\n# {\"url\": \"http://127.0.0.1:5000/\"},\n# {\"url\": \"http://127.0.0.1:5000/result\"}\n# ]\n\n@pytest.fixture(params=conf_server)\ndef response(request):\n import requests\n\n resp = requests.get(request.param[\"url\"])\n\n return resp\n\n\ndef test_0001_server_200_OK(response):\n assert 200 == response.status_code\n\ndef test_0002_server_header(response):\n assert \"text/html; charset=utf-8\" == \\\n response.headers[\"CONTENT-TYPE\"] and \\\n \"DATE\" in response.headers.keys() and \\\n \"SERVER\" in response.headers.keys() and \\\n \"CONTENT-LENGTH\" in response.headers.keys()\n\ndef test_0003_server_body_valid_(response):\n import json\n import requests\n\n head = {\"Content-Type\": \"text/html; charset=utf-8\"}\n validator_url = \"https://validator.w3.org/nu/?out=json\"\n\n resp = requests.post(validator_url, data=response.text, headers=head)\n validator_json = json.loads(resp.text)\n\n # Assert that len of error \"messages\" array is empty => no errors in HTML\n assert 0 == len(validator_json[\"messages\"])\n\npytest.main(\"-v --html=test_report_log_file.html\")","sub_path":"tests/functional/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"109492111","text":"'''Write a short Python code to display the name of your name & department'''\nname = \"Abdullahi idris usman\"\ndepartment = \"computer 
science\"\nprint(name)\nprint(department)\n'''Assign the string \"Boo!\" to the variable scare and print the variable.'''\nscare= \"Boo!\"\nprint(scare)\n'''Add two variables and assign the result to a third variable. print all the variable names.\n'''\nvariable1 = 8\nvariable2 = 6\nvariable3= (variable1+variable2)\nprint(variable3)\n'''Divide 12 by the decimal .5 and assign the result to the variable outcome and print the variable'''\ndivision1= 12\ndivision2= .5\noutcome = (division1/division2)\nprint(outcome)\n'''Assign the sum of two numbers to a variable and print the variable.'''\na = 6\nb = 7\nc = (a+b)\nprint(c)\n'''Write the code that finds the remainder when 8 is divided by 3'''\nmudulus1= 8\nmudulus2= 3\nresult= (mudulus1 % mudulus2)\nprint(result)\n'''Rewrite the following statement to force this order: First, multiply 2 by 4. Then add 4 and 2. Then multiply the first result by the second result. Print the result.''',\norder1= 2*4\norder2= 4+2\norder3=(order1*order2)\nprint(order3)","sub_path":"csc104assignment.py","file_name":"csc104assignment.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"494822839","text":"#Unoptimized, based off codecademy tutorial\n#bubble sort compares neighboring items and if they are out of order, they are swapped\nnums = [5, 2, 9, 1, 4, 6]\n\ndef swap(arr, index_1, index_2):\n temp = arr[index_1]\n arr[index_1] = arr[index_2]\n arr[index_2] = temp\n\n# define bubble_sort():\ndef bubble_sort(arr):\n for x in arr:\n for index in range(len(arr)-1):\n if arr[index] > arr[index+1]:\n swap(arr,index,index+1)\n\n##### test statements\n\nprint(\"Pre-Sort: {0}\".format(nums))\nbubble_sort(nums)\nprint(\"Post-Sort: {0}\".format(nums))\n","sub_path":"Algorithms/bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"457691619","text":"\nimport PTEvaluator\nfrom scipy.stats import ttest_ind\nimport numpy\n\nclass MatchComparison:\n\n\n def __init__(self):\n return\n \n def compare(self, evalDB, label1, label2):\n detailed_results = []\n running_result = True\n #print \"\\033[1;32mDetermining if [\", label1, label2, \"] matches\\033[0m\"\n cur1 = evalDB.dbconn.cursor()\n cur1.execute(\"select distinct flowname, steplabel from samples where runid in (?,?) order by flowname, steplabel\", [label1, label2])\n rows = cur1.fetchall()\n #print \"\\033[1;32mCommon flows/steps are:\\033[0m\"\n for row in rows:\n thisResult = self.compareFlowStep(evalDB, label1, label2, row[0], row[1])\n #print thisResult\n running_result = running_result and thisResult['Fail']\n detailed_results.append(thisResult)\n return {'CriteriaMet': running_result, 'Supporting Detail': detailed_results}\n\n def compareFlowStep(self, evalDB, label1, label2, flowname, steplabel):\n #print \"\\tFlow:\", flowname, \" Step:\", steplabel\n cur1 = evalDB.dbconn.cursor()\n cur1.execute(\"select duration from samples where runid = ? and flowname = ? and steplabel = ?\", [label2, flowname, steplabel])\n baseline = [int(numeric_string[0]) for numeric_string in cur1.fetchall()]\n cur1.execute(\"select duration from samples where runid = ? and flowname = ? 
and steplabel = ?\", [label1, flowname, steplabel])\n treatment = [int(numeric_string[0]) for numeric_string in cur1.fetchall()]\n t, p = ttest_ind(treatment, baseline, equal_var = False)\n bn = numpy.array(baseline)\n tn = numpy.array(treatment)\n msg = 'P='+'{0:.2f}'.format(p)+' T='+'{0:.2f}'.format(t)+' Samples '+label1+'('+str(len(treatment))+') '+label2+'('+str(len(baseline))+') Means '+label1+'('+'{0:.2f}'.format(numpy.mean(tn))+') '+label2+'('+'{0:.2f}'.format(numpy.mean(bn))+') STDs '+label1+'('+'{0:.2f}'.format(numpy.std(tn, ddof=1))+') '+label2+'('+'{0:.2f}'.format(numpy.std(bn, ddof=1))+')'\n return {'Flow':flowname, \n 'Step':steplabel, \n 'Fail': (p >= 0.05),\n 'Message': msg\n }\n \n ","sub_path":"demonstration/jmeter-build/evaluator/PTEvaluator/MatchComparison.py","file_name":"MatchComparison.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309394277","text":"import cv2\nimport numpy as np\nimport util as ut\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport RPi.GPIO as GPIO\nimport sys\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(3, GPIO.OUT)\nGPIO.output(3, GPIO.HIGH)\n\n#The main event loop\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera.resolution = (640, 480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(640,480))\n\ndef main(argv):\n\ttrain_folder=argv[1]\n\ti=int(argv[2])\n\tj=1\n\tname=\"\"\n\tstart =False\n\n\tfor frame in camera.capture_continuous(rawCapture, format=\"rgb\", use_video_port=True):\n\t\tmove=''\n\t\t# t=time.time()\n\t\timg = frame.array\n\t\tgray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t\tret,th1 = cv2.threshold(gray.copy(),75,255,cv2.THRESH_BINARY)\n\t\t_,contours,hierarchy = cv2.findContours(th1.copy(),cv2.RETR_EXTERNAL, 2)\n\t\tcnt=ut.getMaxContour(contours,4000)\n\t\tif cnt!=None:\n\t\t\tx,y,w,h = cv2.boundingRect(cnt)\n\t\t\timgT=img[y:y+h,x:x+w]\n\t\t\timgT=cv2.bitwise_and(imgT,imgT,mask=th1[y:y+h,x:x+w])\n\t\t\timgT=cv2.resize(imgT,(200,200))\n\t\t\tcv2.imshow('Trainer',imgT)\n\t\tcv2.imshow('Frame',img)\n\t\tcv2.imshow('Thresh',th1)\n\n\t\tk = 0xFF & cv2.waitKey(10)\n\t\tif k == 27:\n\t\t\tbreak\n\t\tif k == ord('s') or start:\n\t\t\t\n\t\t\tname=str(i)+\"_\"+str(j)+\".jpg\"\n\t\t\tprint('write' + name)\n\t\t\tcv2.imwrite(train_folder+'/'+name,imgT)\n\t\t\tstart = True\n\t\t\tif(j<40):\n\t\t\t\tj+=1\n\t\t\telse:\n\t\t\t\twhile(0xFF & cv2.waitKey(10)!=ord('n')):\n\t\t\t\t\tprint('next')\n\t\t\t\t\tstart = False\n\t\t\t\t\tj=1\n\t\t\t\tj=1\n\t\t\t\ti+=1\n\t\trawCapture.truncate(0)\n\n\tcap.release() \n\tcv2.destroyAllWindows()\n\tGPIO.output(3, GPIO.LOW)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"304287339","text":"\nfrom bs4 import BeautifulSoup #To make the request understandable in terms of the lines \nimport urllib.request #To get access to the url\nimport re #To find indexs of a specific word\nimport os #To delete the unwanted files at the end\n#Created By @OfficialAhmed0\n\ndef play():\n try: #if there is no problems\n \n #Connecting to the website\n url = 
urllib.request.urlopen(\"https://www.imdb.com/movies-in-theaters/?pf_rd_m=A2FGELUUNOQJNL&pf_rd_p=2413b25e-e3f6-4229-9efd-599bb9ab1f97&pf_rd_r=J64TN8AG33DBSR546JVS&pf_rd_s=right-2&pf_rd_t=15061&pf_rd_i=homepage&ref_=hm_otw_hd\")\n \n #Organise url with beautiful soup library\n soup = BeautifulSoup(url, \"html.parser\")\n \n file = open(\"test.txt\", \"w+\")\n string = str(soup.find_all(\"a\"))\n #Created By @OfficialAhmed0\n #Find strings start with (alt=) from the string using finditer from re library\n for i in re.finditer(\"alt=\", string):\n #Start after 4 characters of the ( = ) of the searched word\n start = i.start() + 4\n #Stop aftr 50 characters from the ( = ) of the searched word\n end = i.end() + 50\n file.write(str(string[start:end] ) + \"\\n\")\n #Open the txt as reading mode To deal with the second file using the first file itself\n file = open(\"test.txt\", \"r\") \n line = file.readlines()\n add = 0\n file2 = open(\"test2.txt\", \"w+\")\n length = len(line) # How many lines in the txt\n \n #Copying only files with (2019) in it only from file 1\n while add < length:\n if \" (2019) \" in line[add]:\n file2.write(line[add])\n add += 1\n else:\n add += 1\n pass\n #Openeing file2 as reading mode for file 3 \n file2 = open(\"test2.txt\")\n lines = file2.readlines()\n add = 0\n file3 = open(\"Movies In Theaters (Imdb).txt\", \"w+\")\n length = len(lines)\n #Created By @OfficialAhmed0\n \n #A way to Remove what comes after 2019 in each line\n #To get movie title as well as the year 2019 only after that is not gonna be \n #Pasted onto file 3(Last file)\n while add < length:\n #Get index of \" ) \" which is end of the year 2019\n index = lines[add].index(\")\")\n long = lines[add][1:index+1] #Each line start from index 1 until the index after \" ) \"\n file3.write( str(long) + \"\\n\" )\n add += 1\n \n #Close the files\n file3.close()\n file2.close() \n file.close()\n print(\"\")\n os.remove(\"test.txt\") #Delete the txt since we already organised it\n os.remove(\"test2.txt\") #Delete the txt since we already organised it\n print(\"Done !. Movie Titles saved in a text file in same Directory (Desktop)(Movies in Theaters.txt) \")\n print(\"This tool created by AhmedRiyami. 
Hope you enjoyed it\")\n \n print(\"\")\n replay = input(\"Press Enter to quit\") \n os.startfile(\"Movies In Theaters (Imdb).txt\")\n \n except: #If there is any problem such as, no internet connection\n print(\"\")\n print(\"Sorry ,make sure you're connected to the internet\")\n print(\"\")\n replay = input(\"Press Enter to retry\")\n \n if replay == \"\":\n play()\n print(\"\\n\")\n else:\n quit()\n \n \nplay()\n#Created By @OfficialAhmed0\n","sub_path":"Source Code.py","file_name":"Source Code.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"61320853","text":"from plone import api\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.PloneBatch import Batch\nfrom Products.Five.browser import BrowserView\nfrom calendar import monthrange\nfrom datetime import date\nfrom datetime import timedelta\nfrom plone.app.contenttypes.interfaces import INewsItem\nfrom plone.app.contenttypes.interfaces import ILink\nfrom plone.app.event import messageFactory as _\nfrom plone.app.event.base import RET_MODE_ACCESSORS\nfrom plone.app.event.base import RET_MODE_OBJECTS\nfrom plone.app.event.base import date_speller\nfrom plone.app.event.base import expand_events\nfrom plone.app.event.base import get_events\nfrom plone.app.event.base import guess_date_from\nfrom plone.app.event.base import localized_now\nfrom plone.app.event.base import start_end_from_mode\nfrom plone.app.event.browser.event_view import get_location\nfrom plone.app.event.ical.exporter import construct_icalendar\nfrom plone.app.layout.navigation.defaultpage import getDefaultPage\nfrom plone.app.querystring import queryparser\nfrom plone.memoize import view\nfrom zope.component import getMultiAdapter\nfrom zope.contentprovider.interfaces import IContentProvider\nfrom genweb.core.utils import pref_lang\n\n\ntry:\n # from plone.app.collection.interfaces import ICollection\n from plone.app.contenttypes.behaviors.collection import ICollection\nexcept ImportError:\n ICollection = None\ntry:\n from Products.ATContentTypes.interfaces import IATTopic\nexcept ImportError:\n IATTopic = None\n\n\nclass NewsListing(BrowserView):\n\n def __init__(self, context, request):\n super(NewsListing, self).__init__(context, request)\n\n self.now = now = localized_now(context)\n\n # Request parameter\n req = self.request.form\n self.b_start = 'b_start' in req and int(req['b_start']) or 0\n self.b_size = 'b_size' in req and int(req['b_size']) or 10\n self.orphan = 'orphan' in req and int(req['orphan']) or 1\n self.mode = 'mode' in req and req['mode'] or None\n self._date = 'date' in req and req['date'] or None\n self.tags = 'tags' in req and req['tags'] or None\n self.searchable_text = 'SearchableText' in req and\\\n req['SearchableText'] or None\n self.path = 'path' in req and req['path'] or None\n\n day = 'day' in req and int(req['day']) or None\n month = 'month' in req and int(req['month']) or None\n year = 'year' in req and int(req['year']) or None\n\n if not self._date and day or month or year:\n self._date = date(year or now.year,\n month or now.month,\n day or now.day).isoformat()\n\n if self.mode is None:\n self.mode = self._date and 'day' or 'all'\n\n self.uid = None\n # Used to get all occurrences from a single event.\n # Overrides all other settings\n\n @property\n def default_context(self):\n # Try to get the default page\n context = self.context\n default = getDefaultPage(context)\n if default:\n context = context[default]\n return 
context\n\n @property\n def is_collection(self):\n ctx = self.default_context\n return ICollection and ICollection.providedBy(ctx) or False\n\n @property\n def is_topic(self):\n ctx = self.default_context\n return IATTopic and IATTopic.providedBy(ctx) or False\n\n @property\n def date(self):\n dt = None\n if self._date:\n try:\n dt = guess_date_from(self._date)\n except TypeError:\n pass\n return dt\n\n @property\n def _start_end(self):\n start, end = start_end_from_mode(self.mode, self.date, self.context)\n return start, end\n\n @view.memoize\n def _get_events(self, ret_mode=RET_MODE_ACCESSORS, expand=True):\n context = self.context\n kw = {}\n if self.uid:\n # In this case, restrict search for single event\n kw['UID'] = self.uid\n else:\n if self.path:\n kw['path'] = self.path\n elif self.settings.current_folder_only:\n kw['path'] = '/'.join(context.getPhysicalPath())\n\n if self.tags:\n kw['Subject'] = {'query': self.tags, 'operator': 'and'}\n\n if self.searchable_text:\n kw['SearchableText'] = self.searchable_text\n\n # kw['b_start'] = self.b_start\n # kw['b_size'] = self.b_size\n\n start, end = self._start_end\n\n sort = 'start'\n sort_reverse = False\n if self.mode in ('past', 'all'):\n sort_reverse = True\n return get_events(context, start=start, end=end,\n sort=sort, sort_reverse=sort_reverse,\n ret_mode=ret_mode, expand=expand, **kw)\n\n def get_current_path_news(self):\n lang = pref_lang()\n root_path = '/'.join(api.portal.get().getPhysicalPath())\n if lang == 'ca':\n return root_path + '/' + lang + '/noticies'\n elif lang == 'es':\n return root_path + '/' + lang + '/noticias'\n elif lang == 'en':\n return root_path + '/' + lang + '/news'\n\n @view.memoize\n def _get_news(self):\n context = self.context\n kw = {}\n kw['object_provides'] = (INewsItem.__identifier__, ILink.__identifier__)\n if self.uid:\n # In this case, restrict search for single event\n kw['UID'] = self.uid\n else:\n if self.path:\n kw['path'] = self.path\n else:\n portal = api.portal.get()\n lang = self.context.language\n news_folder_name = dict(en='news', es='noticias', ca='noticies')\n try:\n news_folder = portal[lang][news_folder_name[lang]]\n except:\n news_folder = context\n kw['path'] = '/'.join(news_folder.getPhysicalPath())\n\n if self.tags:\n kw['Subject'] = {'query': self.tags, 'operator': 'and'}\n\n if self.searchable_text:\n kw['SearchableText'] = self.searchable_text\n\n kw['sort_on'] = 'Date'\n kw['sort_order'] = 'reverse'\n cat = getToolByName(context, 'portal_catalog')\n result = cat(**kw)\n\n return result\n\n def news(self, batch=True):\n res = []\n is_col = self.is_collection\n is_top = self.is_topic\n if is_col or is_top:\n ctx = self.default_context\n if is_col:\n res = ctx.results(batch=False, sort_on='start', brains=True)\n query = queryparser.parseFormquery(ctx, ctx.getRawQuery())\n else:\n res = ctx.queryCatalog(batch=False, full_objects=False)\n query = ctx.buildQuery()\n else:\n res = self._get_news()\n if batch:\n b_start = self.b_start\n b_size = self.b_size\n res = Batch(res, size=b_size, start=b_start, orphan=self.orphan)\n return res\n\n def events(self, ret_mode=RET_MODE_ACCESSORS, expand=True, batch=True):\n res = []\n is_col = self.is_collection\n is_top = self.is_topic\n if is_col or is_top:\n ctx = self.default_context\n if is_col:\n res = ctx.results(batch=False, sort_on='start', brains=True)\n query = queryparser.parseFormquery(ctx, ctx.getRawQuery())\n else:\n res = ctx.queryCatalog(batch=False, full_objects=False)\n query = ctx.buildQuery()\n if expand:\n # get start and 
end values from the query to ensure limited\n # listing for occurrences\n start, end = self._expand_events_start_end(query.get('start'),\n query.get('end'))\n res = expand_events(res, ret_mode, sort='start', start=start,\n end=end)\n else:\n res = self._get_events(ret_mode, expand=expand)\n if batch:\n b_start = self.b_start\n b_size = self.b_size\n res = Batch(res, size=b_size, start=b_start, orphan=self.orphan)\n return res\n\n @property\n def ical(self):\n # Get as objects.\n # Don't include occurrences to avoid having them along with their\n # original events and it's recurrence definition in icalendar exports.\n events = self.events(ret_mode=RET_MODE_OBJECTS, expand=False,\n batch=False)\n cal = construct_icalendar(self.context, events)\n name = '%s.ics' % self.context.getId()\n self.request.RESPONSE.setHeader('Content-Type', 'text/calendar')\n self.request.RESPONSE.setHeader(\n 'Content-Disposition',\n 'attachment; filename=\"%s\"' % name\n )\n self.request.RESPONSE.write(cal.to_ical())\n\n @property\n def ical_url(self):\n date = self.date\n mode = self.mode\n qstr = (date or mode) and '?%s%s%s' % (\n mode and 'mode=%s' % mode,\n mode and date and '&' or '',\n date and 'date=%s' % date or ''\n ) or ''\n return '%s/@@event_listing_ical%s' % (\n self.context.absolute_url(),\n qstr\n )\n\n def get_location(self, occ):\n return get_location(occ)\n\n def formatted_date(self, occ):\n provider = getMultiAdapter(\n (self.context, self.request, self),\n IContentProvider, name='formatted_date'\n )\n return provider(occ)\n\n def date_speller(self, date):\n return date_speller(self.context, date)\n\n @property\n def header_string(self):\n start, end = self._start_end\n start_dict = start and date_speller(self.context, start) or None\n end_dict = end and date_speller(self.context, end) or None\n\n mode = self.mode\n main_msgid = None\n sub_msgid = None\n if mode == 'all':\n main_msgid = _(u\"all_news\", default=u\"News\")\n\n elif mode == 'past':\n main_msgid = _(u\"past_events\", default=u\"Past events\")\n\n elif mode == 'future':\n main_msgid = _(u\"future_events\", default=u\"Future events\")\n\n elif mode == 'now':\n main_msgid = _(u\"todays_upcoming_events\",\n default=u\"Todays upcoming events\")\n\n elif mode == 'today':\n main_msgid = _(u\"todays_events\", default=u\"Todays events\")\n\n elif mode == '7days':\n main_msgid = _(u\"7days_events\", default=u\"Events in next 7 days.\")\n sub_msgid = _(\n u\"events_from_until\",\n default=u\"${from} until ${until}.\",\n mapping={\n 'from': \"%s, %s. %s %s\" % (\n start_dict['wkday'],\n start.day,\n start_dict['month'],\n start.year\n ),\n 'until': \"%s, %s. %s %s\" % (\n end_dict['wkday'],\n end.day,\n end_dict['month'],\n end.year\n ),\n }\n )\n\n elif mode == 'day':\n main_msgid = _(\n u\"events_on_day\",\n default=u\"Events on ${day}\",\n mapping={\n 'day': \"%s, %s. %s %s\" % (\n start_dict['wkday'],\n start.day,\n start_dict['month'],\n start.year\n ),\n }\n )\n\n elif mode == 'week':\n main_msgid = _(u\"events_in_week\",\n default=u\"Events in week ${weeknumber}\",\n mapping={'weeknumber': start.isocalendar()[1]})\n sub_msgid = _(\n u\"events_from_until\",\n default=u\"${from} until ${until}.\",\n mapping={\n 'from': \"%s, %s. %s %s\" % (\n start_dict['wkday'],\n start.day,\n start_dict['month'],\n start.year\n ),\n 'until': \"%s, %s. 
%s %s\" % (\n end_dict['wkday'],\n end.day,\n end_dict['month'],\n end.year\n ),\n }\n )\n\n elif mode == 'month':\n main_msgid = _(\n u\"events_in_month\",\n default=u\"Events in ${month} ${year}\",\n mapping={\n 'month': start_dict['month'],\n 'year': start.year,\n }\n )\n\n trans = self.context.translate\n return {'main': main_msgid and trans(main_msgid) or '',\n 'sub': sub_msgid and trans(sub_msgid) or ''}\n\n # MODE URLs\n def _date_nav_url(self, mode, datestr=''):\n return '%s?mode=%s%s' % (\n self.request.getURL(),\n mode,\n datestr and '&date=%s' % datestr or ''\n )\n\n @property\n def mode_all_url(self):\n return self._date_nav_url('all')\n\n @property\n def mode_future_url(self):\n return self._date_nav_url('future')\n\n @property\n def mode_past_url(self):\n return self._date_nav_url('past')\n\n @property\n def mode_day_url(self):\n now = self.date or self.now\n return self._date_nav_url('day', now.date().isoformat())\n\n @property\n def mode_week_url(self):\n now = self.date or self.now\n return self._date_nav_url('week', now.date().isoformat())\n\n @property\n def mode_month_url(self):\n now = self.date or self.now\n return self._date_nav_url('month', now.date().isoformat())\n\n # DAY NAV\n @property\n def next_day_url(self):\n now = self.date or self.now\n datestr = (now + timedelta(days=1)).date().isoformat()\n return self._date_nav_url('day', datestr)\n\n @property\n def today_url(self):\n return self._date_nav_url('day')\n\n @property\n def prev_day_url(self):\n now = self.date or self.now\n datestr = (now - timedelta(days=1)).date().isoformat()\n return self._date_nav_url('day', datestr)\n\n # WEEK NAV\n @property\n def next_week_url(self):\n now = self.date or self.now\n datestr = (now + timedelta(days=7)).date().isoformat()\n return self._date_nav_url('week', datestr)\n\n @property\n def this_week_url(self):\n return self._date_nav_url('week')\n\n @property\n def prev_week_url(self):\n now = self.date or self.now\n datestr = (now - timedelta(days=7)).date().isoformat()\n return self._date_nav_url('week', datestr)\n\n # MONTH NAV\n @property\n def next_month_url(self):\n now = self.date or self.now\n last_day = monthrange(now.year, now.month)[1] # (wkday, days)\n datestr = (now.replace(day=last_day) +\n timedelta(days=1)).date().isoformat()\n return self._date_nav_url('month', datestr)\n\n @property\n def this_month_url(self):\n return self._date_nav_url('month')\n\n @property\n def prev_month_url(self):\n now = self.date or self.now\n datestr = (now.replace(day=1) - timedelta(days=1)).date().isoformat()\n return self._date_nav_url('month', datestr)\n\n # COLLECTION daterange start/end determination\n def _expand_events_start_end(self, start, end):\n # make sane start and end values for expand_events from\n # Collection/Topic start/end criterions.\n # if end/min is given, it overrides start/min settings to make sure,\n # ongoing events are shown in the listing!\n # XXX: This actually fits most needs, but not all. 
Maybe someone\n # wants to come up with some edgecases!\n se = dict(start=None, end=None)\n if start:\n q = start.get('query')\n r = start.get('range')\n if r == \"min\":\n se[\"start\"] = q\n elif r == \"max\":\n se[\"end\"] = q\n elif r in (\"minmax\", \"min:max\"):\n list(q).sort()\n se[\"start\"] = q[0]\n se[\"end\"] = q[1]\n if end:\n q = end.get('query')\n r = end.get('range')\n if r == \"min\":\n se[\"start\"] = q\n return se[\"start\"], se[\"end\"]\n","sub_path":"genweb/theme/browser/news_listing.py","file_name":"news_listing.py","file_ext":"py","file_size_in_byte":16556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"227791383","text":"import string\n\ndef caesar(message, key):\n letters_lowercase = string.ascii_lowercase\n letters_uppercase = string.ascii_uppercase\n alf_length = len(letters_lowercase)\n new_message = \"\"\n for index, letter in enumerate(message):\n\n\n if message[index].islower():\n if letters_lowercase.index(letter) + key > alf_length:\n new_position = key % alf_length\n new_letter = letters_lowercase[new_position]\n new_message = new_message + letters_lowercase[new_position]\n\n else:\n new_position = letters_lowercase.index(letter) + key\n new_message = new_message + letters_lowercase[new_position]\n\n elif message[index].isupper():\n if letters_uppercase.index(letter) + key > alf_length:\n new_position = key % alf_length\n new_letter = letters_uppercase[new_position]\n new_message = new_message + letters_uppercase[new_position]\n else:\n new_position = letters_uppercase.index(letter) + key\n new_message = new_message + letters_uppercase[new_position]\n\n return new_message\n\nmessage = input(\"Enter message to encrypt: \")\nencryption_key = int(input(\"Enter ecryption key: \"))\nprint(caesar(message, encryption_key))\n\n\n","sub_path":"caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"471426963","text":"from getFile import getFile\n\nclass dealArgs:\n # A class that filters, validates, and returns arguments\n def __init__(self, args):\n self.start = ''\n self.stop = ''\n self.finish = ''\n self.clear = False\n\n for a in args:\n a = a.split('=')\n if a[0] == '--input':\n # If one input go ahead with that\n self.files = [a[1]]\n elif a[0] == '--inputs':\n # If multiple inputs, go ahead and get them\n self.files = getFile(a[1], lambda x: x.strip())\n elif a[0] == '--start':\n self.start = a[1]\n elif a[0] == '--stop':\n self.stop = a[1]\n elif a[0] == '--finish':\n self.finish = a[1]\n\n # throw error if no input files supplied\n if not hasattr(self, 'files') or len(self.files) <= 0:\n raise ValueError('You must supply a file with --input or a list of space-separated files with --inputs')\n\n if '-c' in args:\n self.clear = True\n\n # FILE\n @property\n def files(self):\n return self.__files\n\n @files.setter\n def files(self, files):\n if files == '':\n raise ValueError('You must supply files')\n\n self.__files = files\n\n # STATS\n @property\n def clear(self):\n return self.__stats\n\n @clear.setter\n def clear(self, clear):\n if not isinstance(clear, bool):\n clear = False\n self.__stats = clear\n\n\n def to_object(self):\n \"\"\"\n Returns a dictionary with each item expected\n \"\"\"\n return {\n \"files\": self.files,\n \"start\": self.start,\n \"stop\": self.stop,\n \"finish\": self.finish,\n \"clear\": self.clear,\n }\n\n\ndef cleanWord(word):\n # Cleans words by 
making them lowercase, removing new-lines, and stripping symbols from ends of words.\n # Returns the word provided.\n word = word.lower()\n word = word.replace('\\n', '')\n return word.strip(\"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"' <,>.?/\")\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"130208146","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/fraoustin/Téléchargements/ocglances/tmp/ocglances/plugins/glances_fs.py\n# Compiled at: 2017-02-11 10:25:25\n\"\"\"File system plugin.\"\"\"\nimport operator\nfrom ocglances.plugins.glances_plugin import GlancesPlugin\nimport ocglances.psutil as psutil\nsnmp_oid = {'default': {'mnt_point': '1.3.6.1.4.1.2021.9.1.2', 'device_name': '1.3.6.1.4.1.2021.9.1.3', \n 'size': '1.3.6.1.4.1.2021.9.1.6', \n 'used': '1.3.6.1.4.1.2021.9.1.8', \n 'percent': '1.3.6.1.4.1.2021.9.1.9'}, \n 'windows': {'mnt_point': '1.3.6.1.2.1.25.2.3.1.3', 'alloc_unit': '1.3.6.1.2.1.25.2.3.1.4', \n 'size': '1.3.6.1.2.1.25.2.3.1.5', \n 'used': '1.3.6.1.2.1.25.2.3.1.6'}, \n 'netapp': {'mnt_point': '1.3.6.1.4.1.789.1.5.4.1.2', 'device_name': '1.3.6.1.4.1.789.1.5.4.1.10', \n 'size': '1.3.6.1.4.1.789.1.5.4.1.3', \n 'used': '1.3.6.1.4.1.789.1.5.4.1.4', \n 'percent': '1.3.6.1.4.1.789.1.5.4.1.6'}}\nsnmp_oid['esxi'] = snmp_oid['windows']\nitems_history_list = [\n {'name': 'percent', 'description': 'File system usage in percent', \n 'color': '#00FF00'}]\n\nclass Plugin(GlancesPlugin):\n \"\"\"Glances file system plugin.\n\n stats is a list\n \"\"\"\n\n def __init__(self, args=None):\n \"\"\"Init the plugin.\"\"\"\n super(Plugin, self).__init__(args=args, items_history_list=items_history_list)\n self.display_curse = True\n self.reset()\n\n def get_key(self):\n \"\"\"Return the key of the list.\"\"\"\n return 'mnt_point'\n\n def reset(self):\n \"\"\"Reset/init the stats.\"\"\"\n self.stats = []\n\n @GlancesPlugin._check_decorator\n @GlancesPlugin._log_result_decorator\n def update(self):\n \"\"\"Update the FS stats using the input method.\"\"\"\n self.reset()\n if self.input_method == 'local':\n try:\n fs_stat = psutil.disk_partitions(all=False)\n except UnicodeDecodeError:\n return self.stats\n\n for fstype in self.get_conf_value('allow'):\n try:\n fs_stat += [ f for f in psutil.disk_partitions(all=True) if f.fstype.find(fstype) >= 0 ]\n except UnicodeDecodeError:\n return self.stats\n\n for fs in fs_stat:\n if self.is_hide(fs.mountpoint):\n continue\n try:\n fs_usage = psutil.disk_usage(fs.mountpoint)\n except OSError:\n continue\n\n fs_current = {'device_name': fs.device, 'fs_type': fs.fstype, \n 'mnt_point': fs.mountpoint, \n 'size': fs_usage.total, \n 'used': fs_usage.used, \n 'free': fs_usage.free, \n 'percent': fs_usage.percent, \n 'key': self.get_key()}\n self.stats.append(fs_current)\n\n elif self.input_method == 'snmp':\n try:\n fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name], bulk=True)\n except KeyError:\n fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid['default'], bulk=True)\n\n if self.short_system_name in ('windows', 'esxi'):\n for fs in fs_stat:\n if fs == 'Virtual Memory' or fs == 'Physical Memory' or fs == 'Real Memory':\n continue\n size = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])\n used = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])\n percent = float(used * 
100 / size)\n fs_current = {'device_name': '', \n 'mnt_point': fs.partition(' ')[0], \n 'size': size, \n 'used': used, \n 'percent': percent, \n 'key': self.get_key()}\n self.stats.append(fs_current)\n\n else:\n for fs in fs_stat:\n fs_current = {'device_name': fs_stat[fs]['device_name'], \n 'mnt_point': fs, \n 'size': int(fs_stat[fs]['size']) * 1024, \n 'used': int(fs_stat[fs]['used']) * 1024, \n 'percent': float(fs_stat[fs]['percent']), \n 'key': self.get_key()}\n self.stats.append(fs_current)\n\n return self.stats\n\n def update_views(self):\n \"\"\"Update stats views.\"\"\"\n super(Plugin, self).update_views()\n for i in self.stats:\n self.views[i[self.get_key()]]['used']['decoration'] = self.get_alert(i['used'], maximum=i['size'], header=i['mnt_point'])\n\n def msg_curse(self, args=None, max_width=None):\n \"\"\"Return the dict to display in the curse interface.\"\"\"\n ret = []\n if not self.stats or self.is_disable():\n return ret\n if max_width is not None and max_width >= 23:\n fsname_max_width = max_width - 14\n else:\n fsname_max_width = 9\n msg = ('{:{width}}').format('FILE SYS', width=fsname_max_width)\n ret.append(self.curse_add_line(msg, 'TITLE'))\n if args.fs_free_space:\n msg = ('{:>7}').format('Free')\n else:\n msg = ('{:>7}').format('Used')\n ret.append(self.curse_add_line(msg))\n msg = ('{:>7}').format('Total')\n ret.append(self.curse_add_line(msg))\n for i in sorted(self.stats, key=operator.itemgetter(self.get_key())):\n ret.append(self.curse_new_line())\n if i['device_name'] == '' or i['device_name'] == 'none':\n mnt_point = i['mnt_point'][-fsname_max_width + 1:]\n elif len(i['mnt_point']) + len(i['device_name'].split('/')[(-1)]) <= fsname_max_width - 3:\n mnt_point = i['mnt_point'] + ' (' + i['device_name'].split('/')[(-1)] + ')'\n elif len(i['mnt_point']) > fsname_max_width:\n mnt_point = '_' + i['mnt_point'][-fsname_max_width + 1:]\n else:\n mnt_point = i['mnt_point']\n msg = ('{:{width}}').format(mnt_point, width=fsname_max_width)\n ret.append(self.curse_add_line(msg))\n if args.fs_free_space:\n msg = ('{:>7}').format(self.auto_unit(i['free']))\n else:\n msg = ('{:>7}').format(self.auto_unit(i['used']))\n ret.append(self.curse_add_line(msg, self.get_views(item=i[self.get_key()], key='used', option='decoration')))\n msg = ('{:>7}').format(self.auto_unit(i['size']))\n ret.append(self.curse_add_line(msg))\n\n return ret","sub_path":"pycfiles/ocglances-2.8/glances_fs.py","file_name":"glances_fs.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279642895","text":"from django.shortcuts import render\nimport datetime\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom .forms import NameForm\n\n\ndef index(request):\n x = \"
Prescription can be sold on or after
\"\n if request.method == 'POST':\n form = NameForm(request.POST)\n if form.is_valid():\n sold_date = form.cleaned_data['sold_date']\n days_early = form.cleaned_data['days_early']\n days_supply = form.cleaned_data['days_supply']\n earliest = sold_date + \\\n datetime.timedelta(days=days_supply-days_early)\n return render(request, 'calculator/index.html', {'form': form, 'earliest': earliest.strftime(\"%m/%d/%Y\"), \"x\": x})\n else:\n form = NameForm()\n return render(request, 'calculator/index.html', {'form': form})\n","sub_path":"calculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"57608028","text":"import pickle\nimport json\nfrom vsa_reasoner import VSAReasoner\n\nwith open('parsed_scene_0.pickle', 'rb') as f:\n data = pickle.load(f)\n\nscene = data['scene_vec']\n\nreas = VSAReasoner(data)\n\n# Question 1\n# Is there a big brown object of the same shape as the green thing?\n# Answer: yes\n# START\nout = reas.filter_green(scene, '_')\nout = reas.unique(out, '_')\nout = reas.same_shape(out, scene)\nout = reas.filter_large(scene, '_', prev=out)\nout = reas.filter_brown(scene, '_', prev=out)\nout = reas.exist(out, '_')\nprint('OK')\n# END\n\n\n\n\n","sub_path":"reason/vsa_clevr/test_reasoner.py","file_name":"test_reasoner.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574553732","text":"\"\"\"\n======================COPYRIGHT/LICENSE START==========================\n\nApplication.py: code for CCPN data model and code generation framework\n\nCopyright (C) 2008 (CCPN Project)\n\n=======================================================================\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version.\n \nA copy of this license can be found in ../../../license/LGPL.license\n \nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n \nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\n======================COPYRIGHT/LICENSE END============================\n\nfor further information, please contact :\n\n- CCPN website (http://www.ccpn.ac.uk/)\n\n- email: ccpn@bioc.cam.ac.uk\n\n=======================================================================\n\nIf you are using this software for academic purposes, we suggest\nquoting the following references:\n\n===========================REFERENCE START=============================\nR. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.\nHabeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,\nH. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The\nCCPN project: An interim report on a data model for the NMR community\n(Progress report). Nature Struct. Biol. 9, 416-418.\n\nRasmus H. Fogh, Wayne Boucher, Wim F. Vranken, Anne\nPajon, Tim J. Stevens, T.N. Bhat, John Westbrook, John M.C. Ionides and\nErnest D. Laue (2005). 
A framework for scientific data modeling and automated\nsoftware development. Bioinformatics 21, 1678-1684.\n\n===========================REFERENCE END===============================\n\n\"\"\"\nimport types\n\nfrom memops.api import Implementation as ApiImp\nfrom memops.general import Implementation as GenImp\n\nclassMapping = {}\nclassMapping[types.BooleanType] = ApiImp.AppDataBoolean\nclassMapping[types.IntType] = ApiImp.AppDataInt\nclassMapping[types.FloatType] = ApiImp.AppDataFloat\nclassMapping[types.StringType] = ApiImp.AppDataString\n\nallowedNotifyFuncs = set(('addAppData', 'setAppDataValue', 'setAppDataValues'))\n\ndef registerNotify(notify, classname, funcname, application = None, keyword = None):\n\n if application is None or keyword is None:\n assert application is None and keyword is None, 'application = %s, keyword = %s, both must be None' % (application, keyword)\n return GenImp.registerNotify(notify, classname, funcname)\n\n if funcname not in allowedNotifyFuncs:\n raise GenImp.ApiError('illegal funcname \"%s\", must be in %s' % (funcname, allowedNotifyFuncs))\n\n notifies = GenImp.getClassFromFullName(classname)._notifies\n notifies = notifies.setdefault((funcname, application, keyword), [])\n notifies.append(notify)\n\ndef unregisterNotify(notify, classname, funcname, application = None, keyword = None):\n\n if application is None or keyword is None:\n assert application is None and keyword is None, 'application = %s, keyword = %s, both must be None' % (application, keyword)\n return GenImp.unregisterNotify(notify, classname, funcname)\n\n if funcname not in allowedNotifyFuncs:\n raise GenImp.ApiError('illegal funcname \"%s\", must be in %s' % (funcname, allowedNotifyFuncs))\n\n try:\n notifies = GenImp.getClassFromFullName(classname)._notifies\n notifies = notifies[(funcname, application, keyword)]\n notifies.remove(notify)\n except:\n pass\n\ndef createAppData(object, application, keyword, value):\n\n clazz = classMapping.get(type(value))\n\n appData = clazz(application=application, keyword=keyword, value=value)\n object.addApplicationData(appData)\n\n return appData\n\ndef removeAllAppData(object, application, keyword):\n\n appDataList = object.findAllApplicationData(application=application, keyword=keyword)\n for appData in appDataList:\n object.removeApplicationData(appData)\n\n return appDataList\n\ndef doAppDataNotifies(object, funcname, application, keyword, value):\n\n notifies = object.__class__._notifies\n notifies = notifies.get((funcname, application, keyword))\n\n if notifies:\n for notify in notifies:\n notify(object)\n\ndef addAppData(object, application, keyword, value):\n\n createAppData(object, application, keyword, value)\n doAppDataNotifies(object, 'addAppData', application, keyword, value)\n\ndef setAppDataValue(object, application, keyword, value = None):\n\n removeAllAppData(object, application, keyword)\n\n if value is None:\n appData = None\n else:\n appData = createAppData(object, application, keyword, value)\n\n doAppDataNotifies(object, 'setAppDataValue', application, keyword, value)\n\n return appData\n\ndef setAppDataValues(object, application, keyword, values = None):\n\n if not values:\n values = []\n\n removeAllAppData(object, application, keyword)\n\n appDataList = []\n for value in values:\n appData = createAppData(object, application, keyword, value)\n appDataList.append(appData)\n\n doAppDataNotifies(object, 'setAppDataValues', application, keyword, values)\n\n return appDataList\n\nclass Application:\n\n def __init__(self, name, 
alternativeNames = None):\n\n if not alternativeNames:\n alternativeNames = []\n\n self.name = name\n self.alternativeNames = alternativeNames\n\n # gets (first) application data value for specified keyword for object\n # if no application data exists and defaultValue is not None then creates\n\n def getValue(self, object, keyword, defaultValue = None, deleteAppData = False):\n\n #print 'getValue1', object, keyword, defaultValue, type(defaultValue)\n appData = object.findFirstApplicationData(application=self.name,\n keyword=keyword)\n #print 'getValue2', appData\n\n if not appData:\n for name in self.alternativeNames:\n appData = object.findFirstApplicationData(application=name, keyword=keyword)\n if appData:\n value = appData.value\n removeAllAppData(object, name, keyword)\n appData = createAppData(object, self.name, keyword, value)\n break\n\n if not appData and defaultValue is not None:\n appData = createAppData(object, self.name, keyword, defaultValue)\n\n if appData:\n value = appData.value\n if deleteAppData:\n removeAllAppData(object, self.name, keyword)\n else:\n value = None\n\n return value\n\n # gets (all) application data value for specified keyword for object\n # if no application data exists and defaultValues is not None then creates\n\n def getValues(self, object, keyword, defaultValues = None):\n\n appDataList = object.findAllApplicationData(application=self.name, keyword=keyword)\n\n if not appDataList:\n appDataList = []\n if defaultValues:\n for value in defaultValues:\n appData = createAppData(object, self.name, keyword, value)\n appDataList.append(appData)\n\n return [ appData.value for appData in appDataList ]\n\n # sets (overwrites) application data value for specified keyword for object\n\n def setValue(self, object, keyword, value = None):\n \n setAppDataValue(object, self.name, keyword, value)\n\n # adds application data value for specified keyword for object\n\n def addValue(self, object, keyword, value):\n \n addAppData(object, self.name, keyword, value)\n\n # sets (overwrites) application data values for specified keyword for object\n\n def setValues(self, object, keyword, values = None):\n \n setAppDataValues(object, self.name, keyword, values)\n\n def registerNotify(self, notify, classname, funcname, keyword = None):\n\n if keyword is None:\n application = None\n else:\n application = self.name\n\n registerNotify(notify, classname, funcname, application, keyword)\n\n def unregisterNotify(self, notify, classname, funcname, keyword = None):\n\n if keyword is None:\n application = None\n else:\n application = self.name\n\n unregisterNotify(notify, classname, funcname, application, keyword)\n","sub_path":"ccpnmr2.4/python/memops/general/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"21305219","text":"import os\nimport sys\nimport time\nimport logging\nimport argparse\n\nfrom pathlib import Path\n\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\n\n\nsys.path.append(os.path.abspath(os.path.dirname(\"../calais\")))\nsys.path.append(os.path.abspath(os.path.dirname(\"../../\")))\n\nlogging.basicConfig(level=logging.INFO)\n\nfrom calais.models.did_sim import DIDStruct\nfrom solver.domain_handler import DomainHandler\nfrom solver.solver_handler import SolverHandler\n\n\ndef load_DID(domain: str,\n storage: str) -> DIDStruct:\n \"\"\"\n Creates DID from given domain file and CPT\n\n Args:\n domain: Path 
to domain file\n storage: Path to storage directory for CPTs\n \"\"\"\n assert Path(domain).exists(), f\"{domain} file does not exist\"\n assert Path(storage).exists(), f\"{storage} directory does not exist\"\n\n did: DIDStruct\n\n did = DIDStruct(storage_dir=storage)\n did.load_domain_from_json_file(domain)\n did.load_cpts_from_files()\n\n return did\n\n\ndef get_action(options: List[str]) -> str:\n \"\"\"\n Get action from user input\n \"\"\"\n all_acts = \" \".join(options)\n action: str = input(\n f\"[ACTIONS:\\033[32m{all_acts}\\033[00m] (Enter ACTION) >>> \").upper()\n return action\n\n\ndef display_result(result: Dict[Any, Any]):\n \"\"\"\n Displays the result of DID transitions\n \"\"\"\n print()\n print(\"=\" * 50)\n print(\"STATES:\")\n\n states = result[\"S\"]\n for state in sorted(states.keys()):\n print(f\"[{state:22}]-----> \\033[33m{states[state]}\\033[00m\")\n\n print()\n print(\"OBSERVATIONS:\")\n observations = result[\"O\"]\n for obs in sorted(observations.keys()):\n print(f\"[{obs:22}]-----> \\033[91m{observations[obs]}\\033[00m\")\n print(\"=\" * 50)\n print()\n\n\ndef display_belief(belief: Dict[Any, Any]):\n \"\"\"\n Displays the belief of the attacker\n \"\"\"\n print()\n print(\"+\" * 50)\n print(\"BELIEFS:\")\n\n states = belief\n for state in sorted(states.keys()):\n print(f\"[{state:22}]-----> \\033[33m{states[state]}\\033[00m\")\n print(\"+\" * 50)\n print()\n\n\ndef init_solver(did):\n storage = str(Path(os.path.abspath(\"../../solver/domains\")).resolve())\n path = str(Path(os.path.abspath(\"../../solver/solver.jar\")).resolve())\n dhandler = DomainHandler(storage=storage)\n domain_file = dhandler.get_domain_file_path(\"test_attacker\")\n\n solver = SolverHandler(jar_path=path,\n storage=storage)\n solver.solve(domain_file)\n\n while 1:\n if solver.check_completed():\n break\n time.sleep(5)\n\n domain_file = dhandler.get_domain_file_path(\"test_attacker\")\n policy_file = dhandler.get_policy_file_path(\"test_attacker\")\n solver.set_domain(domain_file=domain_file,\n policy_file=policy_file,\n states=did.states,\n obs=did.observations)\n\n return solver\n\n\ndef simulate(did: DIDStruct,\n start_state: Dict[str, int],\n solver: Any):\n \"\"\"\n Simulates forward run on the given DID\n\n Args:\n did: fully defined DID with CPTs loaded\n start_state: Initial state of the DID\n \"\"\"\n prev_state: Dict[str, int] = start_state\n action, info = solver.init()\n # display_belief(info)\n\n while 1:\n # action = get_action(options=list(did.actions))\n # os.system(\"clear\")\n print(\"[]\" * 50)\n print(\"NEXT ITERATION\")\n print(\"[]\" * 50)\n\n display_belief(info)\n\n _action = get_action(did.actions)\n\n if _action != \"\":\n action = _action\n\n print(\n f\"[ACTIONS:\\033[32m{did.actions}\\033[00m] (Enter ACTION) >>> {action}\")\n\n if action not in did.actions:\n print(f\"action {action} does not exist in given domain\")\n continue\n\n print()\n print(f\"Action taken: {action}\")\n\n result: Any = did.transition(action=action,\n state=prev_state)\n\n display_result(result)\n\n # if action == \"EXFIL\":\n # break\n\n obs = result[\"O\"]\n\n for key in obs.keys():\n obs[key] = \"yes\" if obs[key] else \"no\"\n\n prev_state = result[\"S\"]\n\n action, info = solver.step(action=action,\n obs=obs)\n\n # time.sleep(5)\n # input()\n\n if result[\"O\"][\"UPLOAD_INIT\"] == \"yes\":\n break\n\n\ndef get_cli() -> Any:\n \"\"\"\n Parses CLI stuff\n \"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-d\",\n help=\"Directory for CPTs\",\n type=str)\n 
parser.add_argument(\"-f\",\n help=\"JSON file definition for the DBN\",\n type=str)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n\n did: DIDStruct\n args: Any\n start_state: Dict[str, int]\n\n start_state = {\"M_COMPROMISED\": 0,\n \"M_BACKDOORED\": 0,\n \"M_ROOTED\": 0,\n \"M_FAKEROOTED\": 0,\n \"ASSETS_DISCOVERED\": 0,\n \"DECOYS_DISCOVERED\": 0,\n \"FILES_PACKED\": 0,\n \"EXFIL_ONGOING\": 0}\n\n args = get_cli()\n\n assert args.d is not None, \"-d required\"\n assert args.f is not None, \"-f required\"\n\n did = load_DID(domain=args.f,\n storage=args.d)\n\n try:\n solver = init_solver(did=did)\n simulate(did=did,\n start_state=start_state,\n solver=solver)\n\n except Exception as e:\n print(e)\n print(\"Exiting...\")\n","sub_path":"odysseus/calais/scripts/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257519010","text":"from threading import Thread\n\n\ndef countdown1(n):\n i = n\n while i >= 0:\n print(\"{} froom countdown 1\".format(i))\n yield i\n i = i - 1\n raise StopIteration\n\n\ndef countdown2(n):\n i = n\n while i >= 0:\n print(\"{} froom countdown 2\".format(i))\n yield i\n i = i - 1\n raise StopIteration\n\n\ndef looool(*args):\n c = args[0]\n for _ in c:\n pass\n\n\nc1 = countdown1(2000)\nc2 = countdown2(2000)\n\nt_1 = Thread(target=looool, args=(c1,))\nt_2 = Thread(target=looool, args=(c2,))\nt_1.start()\nt_2.start()\n\nt_1.join()\nt_2.join()\n","sub_path":"fptools/concurrency/aufgabe_2.py","file_name":"aufgabe_2.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"175875078","text":"import pygame\nimport sys\nfrom Configure_Map import *\n\n\nclass Background(pygame.sprite.Sprite):\n def __init__(self, screen, group, PATH, SIZE, SPEED, clock, colorkey=None, x_max=None, x_now=None):\n super().__init__(group)\n self.clock = clock\n self.screen = screen\n self.PATH_BG = PATH\n self.BG_SIZE = SIZE\n self.BG_SPEED = SPEED\n self.colorkey = colorkey\n\n self.main_bg_speed = BG_SPEED\n self.x_now = 0\n self.x_max = sys.maxsize if x_max is None else x_max\n\n self.image = pygame.Surface(self.BG_SIZE)\n self.rect = self.image.get_rect()\n\n self.load_background()\n self.image.blit(self.img, (0, 0))\n\n def load_background(self):\n self.img = pygame.image.load(self.PATH_BG)\n if self.colorkey is not None:\n self.image.set_colorkey(self.colorkey)\n\n self.img = pygame.transform.scale(self.img, self.BG_SIZE)\n\n self.left_img_x = 0\n self.right_img_x = self.BG_SIZE[0]\n\n def update(self, move, *args):\n t = self.clock[0] / 1000\n if move == '>':\n dx = -self.BG_SPEED[0]\n dx_ = -self.main_bg_speed[0]\n elif move == '<':\n dx = self.BG_SPEED[0]\n dx_ = self.main_bg_speed[0]\n\n dx *= t\n dx_ *= t\n\n if self.x_now + dx_ < 0 or self.x_now + dx_ + self.BG_SIZE[0] > self.x_max:\n return\n else:\n self.x_now += dx_\n\n self.left_img_x -= dx\n self.right_img_x -= dx\n if self.left_img_x > 0:\n self.right_img_x = self.left_img_x - self.BG_SIZE[0]\n self.left_img_x, self.right_img_x = self.right_img_x, self.left_img_x\n elif self.right_img_x < 0:\n self.left_img_x = self.right_img_x + self.BG_SIZE[0]\n self.left_img_x, self.right_img_x = self.right_img_x, self.left_img_x\n\n self.image.fill((0, 0, 0))\n self.image.blit(self.img, (int(self.left_img_x), 0))\n self.image.blit(self.img, (int(self.right_img_x), 0))\n\n\nclass 
ForrestBackgroundMain(Background):\n def __init__(self, screen, group, clock, x_max=None):\n super().__init__(screen, group, PATH_BG, BG_SIZE, BG_SPEED, clock, None, x_max)\n\n\nclass ForrestBackgroundFront(Background):\n def __init__(self, screen, group, clock, x_max=None):\n super().__init__(screen, group, PATH_MF, MF_SIZE, MF_SPEED, clock, (0, 0, 0), x_max)\n","sub_path":"Without Pycharm/Background.py","file_name":"Background.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"368388426","text":"import csv \ncusto=0\nwith open ('churras.txt') as lista:\n\tconsumo=csv.reader(lista)\n\tfor coluna in consumo:\n\t\tquant=int(coluna[1])\n\t\tpreco=float(coluna[2])\n\t\tcusto+=quant*preco\nprint(custo)\n\n \n \n \n \n\t","sub_path":"backup/user_046/ch87_2019_06_06_18_10_35_342783.py","file_name":"ch87_2019_06_06_18_10_35_342783.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537612217","text":"\n\n#calss header\nclass _PARTITION():\n\tdef __init__(self,): \n\t\tself.name = \"PARTITION\"\n\t\tself.definitions = [u'to divide one part of a room from another with a thin wall: ', u'to divide a country into separate areas of government: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_partition.py","file_name":"_partition.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"319437621","text":"import os\nimport logging\n\nimport yaml\n\nfrom . import _get_canonical_container_name, get_docker_env, get_docker_client, _get_dusty_containers\nfrom ... 
import constants\nfrom ...log import log_to_client\nfrom ...subprocess import check_output_demoted, check_and_log_output_and_error_demoted\nfrom ...compiler.spec_assembler import get_expected_number_of_running_containers\nfrom ...path import parent_dir\n\ndef write_composefile(compose_config, compose_file_location):\n logging.info('Writing new Composefile')\n compose_dir_location = parent_dir(compose_file_location)\n if not os.path.exists(compose_dir_location):\n os.makedirs(compose_dir_location)\n with open(compose_file_location, 'w') as f:\n f.write(yaml.dump(compose_config, default_flow_style=False))\n\ndef _compose_base_command(core_command, compose_file_location, project_name):\n logging.info('Running docker-compose {}'.format(core_command))\n command = ['docker-compose']\n if compose_file_location is not None:\n command += ['-f', compose_file_location]\n if project_name is not None:\n command += ['-p', project_name]\n command += core_command\n return command\n\ndef compose_up(compose_file_location, project_name, recreate_containers=True):\n command = _compose_base_command(['up', '-d', '--allow-insecure-ssl'], compose_file_location, project_name)\n if not recreate_containers:\n command.append('--no-recreate')\n # strip_newlines should be True here so that we handle blank lines being caused by `docker pull `\n check_and_log_output_and_error_demoted(command, env=get_docker_env(), strip_newlines=True)\n\ndef _compose_stop(compose_file_location, project_name, services):\n command = _compose_base_command(['stop', '-t', '1'], compose_file_location, project_name)\n if services:\n command += services\n check_and_log_output_and_error_demoted(command, env=get_docker_env())\n\ndef _compose_rm(compose_file_location, project_name, services):\n command = _compose_base_command(['rm', '-f'], compose_file_location, project_name)\n if services:\n command += services\n check_and_log_output_and_error_demoted(command, env=get_docker_env())\n\ndef _compose_restart(services):\n \"\"\"Well, this is annoying. Compose 1.2 shipped with the\n restart functionality fucking broken, so we can't set a faster\n timeout than 10 seconds (which is way too long) using Compose.\n We are therefore resigned to trying to hack this together\n ourselves. Lame.\n\n Relevant fix which will make it into the next release:\n https://github.com/docker/compose/pull/1318\"\"\"\n\n def _restart_container(client, container):\n log_to_client('Restarting {}'.format(_get_canonical_container_name(container)))\n client.restart(container['Id'], timeout=1)\n\n logging.info('Restarting service containers from list: {}'.format(services))\n client = get_docker_client()\n dusty_containers = _get_dusty_containers(client, services)\n expected_number_of_containers = get_expected_number_of_running_containers() if len(services) == 0 else len(services)\n if len(dusty_containers) != expected_number_of_containers:\n log_to_client(\"Not going to restart containers. 
Expected number of containers {} does not match {}\".format(expected_number_of_containers, len(dusty_containers)))\n raise RuntimeError(\"Please use `docker ps -a` to view crashed containers\")\n else:\n for container in dusty_containers:\n _restart_container(client, container)\n\ndef update_running_containers_from_spec(compose_config, recreate_containers=True):\n \"\"\"Takes in a Compose spec from the Dusty Compose compiler,\n writes it to the Compose spec folder so Compose can pick it\n up, then does everything needed to make sure boot2docker is\n up and running containers with the updated config.\"\"\"\n write_composefile(compose_config, constants.COMPOSEFILE_PATH)\n compose_up(constants.COMPOSEFILE_PATH, 'dusty', recreate_containers=recreate_containers)\n\ndef stop_running_services(services=None):\n \"\"\"Stop running containers owned by Dusty, or a specific\n list of Compose services if provided.\n\n Here, \"services\" refers to the Compose version of the term,\n so any existing running container, by name. This includes Dusty\n apps and services.\"\"\"\n if services is None:\n services = []\n _compose_stop(constants.COMPOSEFILE_PATH, 'dusty', services)\n\ndef restart_running_services(services=None):\n \"\"\"Restart containers owned by Dusty, or a specific\n list of Compose services if provided.\n\n Here, \"services\" refers to the Compose version of the term,\n so any existing running container, by name. This includes Dusty\n apps and services.\"\"\"\n if services is None:\n services = []\n _compose_restart(services)\n\ndef rm_containers(app_or_service_names):\n _compose_rm(constants.COMPOSEFILE_PATH, 'dusty', app_or_service_names)\n","sub_path":"dusty/systems/docker/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219012041","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import curve_fit as cf\nimport seaborn as sns\n\ndf = pd.read_csv('/../data/gamma_list_cleared.out')\n\nnegA,negB,posA,posB = -11,7.5,-71,71\n\n## select subset of data\n\ndf = df.loc[(df[\"B\"] > negB) & (df[\"B\"] < posB)]\ndf = df.loc[(df.A < negA) & (df.A > posA) & (df.gamma>3)]\n\n## Drop an error and order data for plotting\n\nmain_df = df.drop(131)\nmain_df.sort_values(['A','B'],inplace=True) ###\n\ndef fitting(xdata,a,b,c,d,e):\n A,B = xdata['A'],xdata['B']\n quad_in_A = a*A**2+b*A+c\n power_in_B = np.power(np.abs(B),e)\n return (power_in_B * quad_in_A) \n \ndef extract_data_from_frame(df):\n ydata = df.gamma\n xdata = df[['A','B']]\n return xdata,ydata\n\n \nxd,yd = extract_data_from_frame(main_df)\np0 = [1,0.1,1,5,-1]\nmega = cf(fitting,xd,yd,p0=p0,maxfev=1000000000)[0]\n\nfor A_val in np.unique(xd.A): \n plt.figure()\n xdt,ydt = xd[xd.A==A_val],yd[xd.A==A_val]\n plt.plot(xdt.B,fitting(xdt,*mega))\n plt.scatter(xdt.B,ydt)\n plt.title(str(A_val))\n plt.ylabel('gamma')\n plt.xlabel('B')\n\nerror = np.abs(np.divide(yd - fitting(xd,*mega),yd))\nfit_landscape = fitting(xd,*mega)\ndat_landscape = yd\n\nplt.figure()\ndata = pd.DataFrame(data={'x':xd.A, 'y':xd.B, 'z':error})\ndata = data.pivot(index='x', columns='y', values='z')\nsns.heatmap(data)\nplt.xlabel('B')\nplt.ylabel('A')\nplt.title('Heat map of 
error')","sub_path":"aug_2017/surface_tension_fitting.py","file_name":"surface_tension_fitting.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"605173368","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n'''\n@File : sys_access_log.py \n@Desc : 系统访问日志记录\n'''\n\n# Standard library imports\nimport logging\n# Third party imports\nfrom fastapi import Request\nfrom starlette.requests import Message\nfrom fastapi.security.utils import get_authorization_scheme_param\nfrom pydantic import constr, conlist, Field, validator\n# Local application imports\nfrom project.config.api_json import API_JSON\nfrom project.utils.jwt_auth import JWTAuth\nfrom project.models.proj_base_model import ProjectBaseModel\nfrom project.models.com_validator import vldtr_default_now_datetime\n\nlogger = logging.getLogger(\"uvicorn\")\n\n\nasync def set_body(request: Request, body: bytes):\n async def receive() -> Message:\n return {\"type\": \"http.request\", \"body\": body}\n request._receive = receive\n\n\n# async def get_body(request: Request) -> bytes:\n# body = await request.body()\n# await set_body(request, body)\n# return body\n\nSYS_ACCESS_LOG_COLL_NAME = \"sys_access_log\"\nNEED_REMOVE_HEADERS = [ # 需要移除的请求头信息\n \"connection\",\n]\n\nclass SysLogModel(ProjectBaseModel):\n \"\"\"日志模型\"\"\"\n uri: constr(strip_whitespace=True)=Field(..., title=\"请求路径\")\n method: constr(strip_whitespace=True)=Field(..., title=\"method\")\n ip: constr(strip_whitespace=True)=Field(\"\", title=\"ip\")\n url: constr(strip_whitespace=True)=Field(\"\", title=\"完整地址\")\n headers: conlist(list)=Field([], title=\"请求头\")\n query_params: dict=Field({}, title=\"请求参数\")\n path_params: dict=Field({}, title=\"路径参数\")\n body: constr(strip_whitespace=True)=Field(\"\", title=\"请求体\")\n tags: constr(strip_whitespace=True)=Field(\"\", title=\"接口分类\")\n summary: constr(strip_whitespace=True)=Field(\"\", title=\"接口名称\")\n user_info: dict=Field({}, title=\"jwt用户信息\")\n create_time: constr(strip_whitespace=True)=Field(\"\", title=\"创建时间\")\n\n _time = validator(\"create_time\", pre=True, always=True, \n allow_reuse=True)(vldtr_default_now_datetime)\n\n @validator('headers', always=True)\n def headers_handle(cls, v, values, **kwargs):\n new_headers = []\n # 过滤部分不需要的请求头信息\n for vv in v:\n if vv and vv[0] not in NEED_REMOVE_HEADERS:\n new_headers.append(vv)\n return new_headers\n\n\nasync def sys_access_log(request: Request=None, slm: SysLogModel=None):\n \"\"\"系统访问日志记录\n\n Args:\n request (Request): Request. Defaults to None.\n slm (SysLogModel): 日志模型, 部分接口没法复用 Request. 
Defaults to None.\n    \"\"\"\n    if not slm:\n        _body = await request.body()\n        # get the real client IP (the app may sit behind a proxy such as nginx)\n        ip = request.client.host\n        __x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\") or []\n        __x_real_ip = request.headers.getlist(\"X-Real-Ip\") or None\n        if __x_forwarded_for:\n            ip = __x_forwarded_for[0]\n        elif __x_real_ip:\n            ip = __x_real_ip\n\n        api_info = API_JSON.get('paths', {}).get(request.url.path, {})\\\n            .get(request.method.lower(), {})\n        slm = SysLogModel(\n            uri=request.url.path,\n            method=request.method,\n            ip=ip,\n            url=request.url.components.geturl(),\n            headers=request.headers.items(),\n            query_params=request.query_params._dict,\n            path_params=request.path_params,\n            body=\"\",\n            tags=\";\".join(api_info.get(\"tags\", [])),\n            summary=api_info.get(\"summary\", \"\"),\n            user_info={},\n        )\n\n        try:\n            # text bodies can be decoded and stored as strings\n            slm.body = _body.decode()\n        except Exception as e:\n            logger.error(e)\n            slm.body = f\"** It could be a byte file ({e}) **\"\n\n        # extract the user info from the JWT\n        auth_str = request.headers.get(\"Authorization\") or \"\"\n        _, jwt = get_authorization_scheme_param(auth_str)\n        if jwt:\n            _, user_info = JWTAuth().decode_jwt(jwt, False)\n            slm.user_info = user_info\n        await set_body(request, _body)\n\n    print(slm.dict())  # persist the log entry here if it should be stored\n","sub_path":"project/utils/sys_access_log.py","file_name":"sys_access_log.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537507386","text":"# coding=utf-8\n\n# mdl layers\nlayer_mdl_conv = 'ConvolutionLayer'\nlayer_mdl_deepwise_conv = 'DepthwiseConvolutionLayer'\nlayer_mdl_relu = 'ReluLayer'\nlayer_mdl_pointwise_add = 'PointwiseConvolutionLayer'\nlayer_mdl_pooling = 'PoolingLayer'\nlayer_mdl_softmax = 'SoftmaxLayer'\n\n# fluid ops\nop_fluid_fusion_conv_add = 'fusion_conv_add'\nop_fluid_relu = 'relu'\nop_fluid_pooling = 'pool2d'\nop_fluid_softmax = 'softmax'\n\n# dict: mdl layer --- fluid op\nmdl2fluid_op_layer_dict = {\n    layer_mdl_conv: op_fluid_fusion_conv_add,\n    layer_mdl_deepwise_conv: op_fluid_fusion_conv_add,\n    layer_mdl_relu: op_fluid_relu,\n    layer_mdl_pointwise_add: op_fluid_fusion_conv_add,\n    layer_mdl_pooling: op_fluid_pooling,\n    layer_mdl_softmax: op_fluid_softmax\n}\n\nmdl_outputs_key = \"outputs\"\nmdl_inputs_key = \"inputs\"\nmdl_weight_key = \"weight\"\nmdl_attrs_key = \"params\"\n\n# dict of mdl input/output params to fluid input/output attrs\nfusion_conv_add_dict = {\n    mdl_inputs_key: 'Input',\n    mdl_outputs_key: 'Out',\n    mdl_weight_key: ('Filter', 'Y'),\n    mdl_attrs_key: (\n        # 'workspace_size_MB', 'use_mkldnn', 'use_cudnn', 'data_format','dilations',\n        # dilations = [1,1]\n        'groups', 'paddings', 'strides'\n        # 'axis'\n    )\n}\n\nrelu_dict = {\n    mdl_inputs_key: 'X',\n    mdl_outputs_key: 'Out',\n    # mdl_weight_key: ()\n\n}\n\npool2d_dict = {\n    mdl_inputs_key: 'X',\n    mdl_outputs_key: 'Out',\n    # mdl_weight_key: (),\n    mdl_attrs_key: ('pooling_type', 'global_pooling')\n\n}\n\nsoftmax_dict = {\n    mdl_inputs_key: 'X',\n    mdl_outputs_key: 'Out',\n    mdl_weight_key: (),\n    mdl_attrs_key: ()\n}\n# mdl layers --- fluid ops\nop_io_dict = {\n    'fusion_conv_add': fusion_conv_add_dict,\n    'relu': relu_dict,\n    'pool2d': pool2d_dict,\n    'softmax': softmax_dict\n}\n\n# fluid attr key --- mdl params key\nfusion_conv_add_attrs_dict = {\n    'paddings': 'pad',\n    'strides': 'stride',\n    'groups': 'group'\n}\n# fluid attr key --- attr value type\nfluid_attrs_type_dict = {\n    'paddings': 0,\n    'strides': 6,\n    'groups': 
6\n}\n","sub_path":"tools/python/modeltools/core/op_types.py","file_name":"op_types.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"189169088","text":"from unicon_classes.cloud_trail.event_decode import BasicDecode as EventDecode\nfrom typing import List\n\n\nclass CodeCommitRequestParameter:\n def __init__(self, params: dict = None):\n self.branch = \"\"\n self.commit = \"\"\n self.branch_path = \"\"\n if params is not None:\n self.__decode(params=params)\n\n def __eq__(self, other):\n if isinstance(other, CodeCommitRequestParameter):\n return other.branch.lower() == self.branch.lower() or other.commit.lower() == self.commit.lower() \\\n or self.branch_path.lower() == other.branch_path.lower()\n return False\n\n def __decode(self, params:dict):\n for name, item in params.items():\n name = name.lower()\n if name == 'commit': self.commit = item\n if name == 'ref' :\n self.branch_path = item\n temp = self.branch_path.split(\"/\")\n self.branch = temp[-1]\n\n\nclass CodeCommit(EventDecode):\n def __init__(self, event: dict):\n super().__init__(event)\n self.data_transferred = False\n self.protocol = \"\"\n self.capabilities:list = []\n self.branches: List[CodeCommitRequestParameter] = []\n self.repo_name = \"\"\n self.repo_id = \"\"\n self.__code_commit_decode(self.cloud_trail_event)\n\n def __code_commit_decode(self, event:dict):\n if self.request_parameters:\n for name, item in self.request_parameters.items():\n if name == \"references\":\n for param in item:\n self.branches.append(CodeCommitRequestParameter(param))\n for name, item in event.items():\n if name == 'additionalEventData':\n for name_inner , item_inner in item.items():\n if name_inner == \"protocol\" : self.protocol = item_inner\n if name_inner == \"dataTransferred\": self.data_transferred = item_inner\n if name_inner == \"repositoryName\": self.repo_name = item_inner\n if name_inner == \"repositoryId\": self.repo_id = item_inner\n if name_inner == \"capabilities\": self.capabilities = item_inner\n\n\n\n\n\n","sub_path":"unicon_classes/cloud_trail/code_commit.py","file_name":"code_commit.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"329430821","text":"# Kevin enario\n# yelp dataset - business\n# integration\n\n# native imports\nimport json\n\n# third party imports\nimport pandas as pd\n\n# custom imports\n\n\n# engineer data\ndef main(): \n \n\n print('')\n print('Data integration complete')\n\ndef json_to_dataframe(f): \n df = pd.DataFrame()\n\n for chunk in pd.read_json(f, lines=True, chunksize=100):\n df = df.append(chunk) \n\n return df\n\n##################################################### run main ############################################\nif __name__ == '__main__':\n main()","sub_path":"dataPreprocessing/yelpBusinessDataset/businessDataIntegration.py","file_name":"businessDataIntegration.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271770508","text":"#!/usr/bin/env python3\n\nimport os.path\nimport os\nimport glob\nimport nipype.interfaces.mrtrix3 as mrt\n\nfrom nipype.interfaces.utility import IdentityInterface, Function\nfrom nipype.interfaces.io import SelectFiles, DataSink\nfrom nipype.pipeline.engine import Workflow, Node, MapNode\n\nimport argparse\n\nimport mrcat_function as mrcatfunc\nimport 
preproc_function as preprocfunc\nimport preprocess as preprocess\nimport fod2fixel_function as fod2fixelfunc\nimport fixel2peaks_function as fixel2peaksfunc\nimport mrcalc_function as mrcalcfunc\nimport utils as utils\nimport tensor2metric_function as tensor2metricfunc\n\n#import fixel2peaks_func as fixel2peaksfunc\n#import mrcalc_func as mrcalcfunc\n#import tensor2metric_func as tensor2metricfunc\n\ndef create_DWI_workflow(\n subject_list,\n bids_dir,\n work_dir,\n out_dir,\n bids_templates,\n):\n\n # create initial workflow\n wf = Workflow(name='DWI', base_dir=work_dir)\n\n # use infosource to iterate workflow across subject list\n n_infosource = Node(\n interface=IdentityInterface(\n fields=['subject_id']\n ),\n name=\"subject_source\"\n # input: 'subject_id'\n # output: 'subject_id'\n )\n # runs the node with subject_id = each element in subject_list\n n_infosource.iterables = ('subject_id', subject_list)\n\n # select matching files from bids_dir\n n_selectfiles = Node(\n interface=SelectFiles(\n templates=bids_templates,\n base_directory=bids_dir\n ),\n name='get_subject_data'\n )\n wf.connect([\n (n_infosource, n_selectfiles, [('subject_id', 'subject_id_p')])\n ])\n\n \n# DWIDenoise\n# https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html\n n_denoise = Node(\n interface=mrt.DWIDenoise(),\n name='n_denoise'\n )\n wf.connect([\n (n_selectfiles, n_denoise, [('DWI_all', 'in_file')])\n ])\n\n # datasink\n n_datasink = Node(\n interface=DataSink(base_directory=out_dir),\n name='datasink'\n )\n\n wf.connect([\n (n_selectfiles, n_datasink, [('all_b0_PA', 'all_b0_PA_unchanged')]),\n (n_denoise, n_datasink, [('out_file', 'DWI_all_denoised')])\n ])\n\n########## I'VE ADDED IN ##########################################################################\n # MRDeGibbs\n # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html\n n_degibbs = Node(\n interface=mrt.MRDeGibbs(\n out_file = 'DWI_all_denoised_degibbs.mif'\n ),\n name='n_degibbs'\n )\n wf.connect([\n (n_denoise, n_degibbs, [('out_file', 'in_file')])\n ])\n\n wf.connect([\n (n_degibbs, n_datasink, [('out_file', 'DWI_all_denoised_degibbs.mif')])\n ])\n\n # DWI Extract\n n_dwiextract = Node(\n interface=mrt.DWIExtract(\n bzero=True,\n out_file='b0vols.mif'\n ),\n name='n_dwiextract'\n )\n\n wf.connect([\n (n_degibbs, n_dwiextract, [('out_file', 'in_file')])\n ])\n\n wf.connect([\n (n_dwiextract, n_datasink, [('out_file', 'noddi_b0_degibbs')])\n ])\n\n # MRcat\n n_mrcat = Node(\n interface=mrcatfunc.MRCat(\n #axis=3,\n out_file = 'b0s.mif'\n ),\n name='n_mrcat'\n )\n\n # Connect DTI_B0_PA to mrcat node\n wf.connect([\n (n_selectfiles, n_mrcat, [('DTI_B0_PA', 'in_file1')])\n ])\n\n wf.connect([\n (n_dwiextract, n_mrcat, [('out_file', 'in_file2')])\n ])\n\n # Output the mrcat file into file 'noddi_and_PA_b0s.mif'\n wf.connect([\n (n_mrcat, n_datasink, [('out_file', 'noddi_and_PA_b0s.mif')])\n ])\n\n # DWIfslpreproc\n n_dwifslpreproc = Node(\n interface=preprocfunc.DWIFslPreProc(\n out_file = 'preprocessedDWIs.mif',\n use_header = True\n ),\n name='n_dwifslpreproc'\n )\n\n # Connect output of degibbs to dwifslpreproc node\n wf.connect([\n (n_degibbs, n_dwifslpreproc, [('out_file', 'in_file')])\n ])\n # Connect output of mrcat to se_epi input\n wf.connect([\n (n_mrcat, n_dwifslpreproc, [('out_file', 'se_epi_file')])\n ])\n # Put output of dwifslpreproc into 'preprocessedDWIs.mif'\n wf.connect([\n (n_dwifslpreproc, n_datasink, [('out_file', 'preprocessedDWIs.mif')])\n 
])\n\n    # DWI bias correct\n    n_dwibiascorrect = Node(\n        interface = preprocess.DWIBiasCorrect(\n            use_ants = True\n        ),\n        name = 'n_dwibiascorrect',\n    )\n\n    wf.connect([\n        (n_dwifslpreproc, n_dwibiascorrect, [('out_file', 'in_file')])\n    ])\n    wf.connect([\n        (n_dwibiascorrect, n_datasink, [('out_file', 'ANTSpreprocessedDWIs.mif')])\n    ])\n\n    #DWI2mask\n    n_dwi2mask = Node(\n        interface=mrt.BrainMask(\n            out_file = 'mask.mif'\n        ),\n        name='n_dwi2mask'\n    )\n    wf.connect([\n        (n_dwibiascorrect, n_dwi2mask, [('out_file', 'in_file')])\n    ])\n    wf.connect([\n        (n_dwi2mask, n_datasink, [('out_file', 'mask.mif')])\n    ])\n\n    ## A) Fixel-based analysis\n    #DWI2response\n    n_dwi2response = Node(\n        interface=mrt.ResponseSD(\n            algorithm = 'dhollander',\n            wm_file = 'wm_res.txt',\n            gm_file = 'gm_res.txt',\n            csf_file = 'csf_res.txt'\n        ),\n        name='n_dwi2response'\n    )\n\n    wf.connect([\n        (n_dwibiascorrect, n_dwi2response, [('out_file', 'in_file')])\n    ])\n    wf.connect([\n        (n_dwi2response, n_datasink, [('wm_file', 'wm_res.txt')])\n    ])\n    wf.connect([\n        (n_dwi2response, n_datasink, [('gm_file', 'gm_res.txt')])\n    ])\n    wf.connect([\n        (n_dwi2response, n_datasink, [('csf_file', 'csf_res.txt')])\n    ])\n\n    #DWI2fod\n    n_dwi2fod = Node(\n        interface=mrt.ConstrainedSphericalDeconvolution(\n            algorithm = 'msmt_csd',\n            wm_odf = 'wmfod.mif',\n            gm_odf = 'gmfod.mif',\n            csf_odf = 'csffod.mif'\n        ),\n        name='n_dwi2fod'\n    )\n    # connect outputs of dwi2response into dwi2fod\n    wf.connect([\n        (n_dwibiascorrect, n_dwi2fod, [('out_file', 'in_file')])\n    ])\n    wf.connect([\n        (n_dwi2response, n_dwi2fod, [('wm_file', 'wm_txt')])\n    ])\n    wf.connect([\n        (n_dwi2response, n_dwi2fod, [('gm_file', 'gm_txt')])\n    ])\n    wf.connect([\n        (n_dwi2response, n_dwi2fod, [('csf_file', 'csf_txt')])\n    ])\n    # output wmfod file from dwi2fod\n    wf.connect([\n        (n_dwi2fod, n_datasink, [('wm_odf', 'wmfod.mif')])\n    ])\n    wf.connect([\n        (n_dwi2fod, n_datasink, [('gm_odf', 'gmfod.mif')])\n    ])\n    wf.connect([\n        (n_dwi2fod, n_datasink, [('csf_odf', 'csffod.mif')])\n    ])\n\n    #mrconvert to extract Z component of wmfod\n    n_mrconvert_fod = Node(\n        interface=utils.MRConvert(\n            out_file = 'Zwmfod.mif',\n            coord = [3, 0]\n        ),\n        name='n_mrconvert_fod'\n    )\n\n    wf.connect([\n        (n_dwi2fod, n_mrconvert_fod, [('wm_odf', 'in_file')])\n    ])\n\n    wf.connect([\n        (n_mrconvert_fod, n_datasink, [('out_file', 'Zwmfod.mif')])\n    ])\n\n    # Concatenate all wm, gm, csf fod files to see their distribution throughout Brain\n    n_mrcat_fod = Node(\n        interface=mrcatfunc.MRCat(\n            out_file = 'vf.mif'\n        ),\n        name='n_mrcat_fod'\n    )\n    # Connect Zwmfod, gmfod and csffod as inputs\n    wf.connect([\n        (n_mrconvert_fod, n_mrcat_fod, [('out_file', 'in_file1')])\n    ])\n    wf.connect([\n        (n_dwi2fod, n_mrcat_fod, [('gm_odf', 'in_file2')])\n    ])\n    wf.connect([\n        (n_dwi2fod, n_mrcat_fod, [('csf_odf', 'in_file3')])\n    ])\n    # Output the mrcat file into 'vf.mif'\n    wf.connect([\n        (n_mrcat_fod, n_datasink, [('out_file', 'vf.mif')])\n    ])\n\n    #fod2fixel wmfod.mif wmfixels -fmls_peak_value 0 -fmls_integral 0.10 -afd afd.mif -peak peak.mif -disp disp.mif \n    # OUTPUTS: -afd afd.mif -peak peak.mif -disp disp.mif \n    n_fod2fixel = Node(\n        interface= fod2fixelfunc.fod2fixel(\n            out_file = 'wmfixels',\n            #afd_file = 'afd.mif',\n            peak_file = 'peak.mif',\n            disp_file = 'disp.mif'\n\n        ),\n        name='n_fod2fixel'\n    )\n    # let the peak value and integral parameters be trialed as multiple values;\n    # a single combined iterables list is needed because a second assignment\n    # to .iterables would overwrite the first\n    n_fod2fixel.iterables = [('fmls_peak_value', [0, 0.10, 0.50]),\n                             ('fmls_integral', [0, 0.10, 0.50])]\n\n    # obtain wm fibre image as input\n    wf.connect([\n
(n_dwi2fod, n_fod2fixel, [('wm_odf', 'in_file')])\n ])\n # ouputs of fod2fixel\n wf.connect([\n (n_fod2fixel, n_datasink, [('out_file', 'wmfixels')])\n ]) \n wf.connect([\n (n_fod2fixel, n_datasink, [('afd_file', 'afd.mif')])\n ]) \n wf.connect([\n (n_fod2fixel, n_datasink, [('peak_file', 'peak.mif')])\n ]) \n wf.connect([\n (n_fod2fixel, n_datasink, [('disp_file', 'disp.mif')])\n ]) \n\n ## Fixel2peaks \n n_fixel2peaks = Node(\n interface= fixel2peaksfunc.fixel2peaks(\n out_file = 'peaks_wmdirections.mif'\n ),\n name='n_fixel2peaks'\n )\n\n n_fixel2peaks.iterables = ('number', [1, 2, 3])\n\n # obtain directions file in output folder of fod2fixel, as input\n wf.connect([\n (n_fod2fixel, n_fixel2peaks, [('out_file', 'in_file')])\n ])\n # ouputs of fixel2peaks\n wf.connect([\n (n_fixel2peaks, n_datasink, [('out_file', 'peaks_wmdirections.mif')])\n ]) \n \n #mrmath to find normalised value of peak WM directions\n n_mrmath = Node(\n interface=mrt.MRMath(\n axis = 3,\n operation = 'norm',\n out_file = 'norm_peaks_wmdirections.mif'\n ),\n name='n_mrmath'\n )\n\n wf.connect([\n (n_fixel2peaks, n_mrmath, [('out_file', 'in_file')])\n ])\n\n wf.connect([\n (n_mrmath, n_datasink, [('out_file', 'norm_peaks_wmdirections.mif')])\n ]) \n\n # mrcalc to divide peak WM direction by normalised value\n n_mrcalc = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'divide',\n out_file = 'wm_peak_dir.mif'\n ),\n name='n_mrcalc'\n )\n\n wf.connect([\n (n_fixel2peaks, n_mrcalc, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrmath, n_mrcalc, [('out_file', 'in_file2')])\n ])\n\n wf.connect([\n (n_mrcalc, n_datasink, [('out_file', 'WM_peak_dir.mif')])\n ])\n\n #mrconvert to extract Z component of peak directions\n n_mrconvert2 = Node(\n interface=utils.MRConvert(\n out_file = 'Zpeak_WM_Directions.mif',\n coord = [3, 2]\n ),\n name='n_mrconvert2'\n )\n\n wf.connect([\n (n_mrcalc, n_mrconvert2, [('out_file', 'in_file')])\n ])\n\n wf.connect([\n (n_mrconvert2, n_datasink, [('out_file', 'Zpeak_WM_Directions.mif')])\n ]) \n\n # mrcalc to find absolute value\n n_mrcalc2 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'abs',\n out_file = 'absZpeak_WM_Directions.mif'\n ),\n name='n_mrcalc2'\n )\n\n wf.connect([\n (n_mrconvert2, n_mrcalc2, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc2, n_datasink, [('out_file', 'absZpeak_WM_Directions.mif')])\n ]) \n\n # mrcalc to get angle by doing inverse cosine\n n_mrcalc3 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'acos',\n out_file = 'acosZpeak_WM_Directions.mif'\n ),\n name='n_mrcalc3'\n )\n\n wf.connect([\n (n_mrcalc2, n_mrcalc3, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc3, n_datasink, [('out_file', 'acosZpeak_WM_Directions.mif')])\n ]) \n \n # mrcalc to convert angle to degrees\n n_mrcalc4 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'multiply',\n operand = 180,\n out_file = 'Fixel1_Z_angle.mif'\n ),\n name='n_mrcalc4'\n )\n\n wf.connect([\n (n_mrcalc3, n_mrcalc4, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc4, n_datasink, [('out_file', 'Fixel1_Z_angle.mif')])\n ]) \n\n n_mrcalc5 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'divide',\n operand = 3.14159265,\n out_file = 'Fixel1_Z_cos_deg.mif'\n ),\n name='n_mrcalc5'\n )\n\n wf.connect([\n (n_mrcalc4, n_mrcalc5, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc5, n_datasink, [('out_file', 'Fixel1_Z_cos_deg.mif')])\n ]) \n\n ## B) Tensor-based analysis\n #dwi2tensor\n n_dwi2tensor = Node(\n interface=mrt.FitTensor(\n out_file = 
'dti.mif'\n ),\n name='n_dwi2tensor'\n )\n\n wf.connect([\n (n_dwibiascorrect, n_dwi2tensor, [('out_file', 'in_file')])\n ])\n\n wf.connect([\n (n_dwi2mask, n_dwi2tensor, [('out_file', 'in_mask')])\n ])\n\n wf.connect([\n (n_dwi2tensor, n_datasink, [('out_file', 'dt.mif')])\n ]) \n\n #tensor2metric \n n_tensor2metric = Node(\n interface= tensor2metricfunc.tensor2metric(\n modulate = 'none',\n num = 1,\n vector_file = 'eigenvector.mif'\n ),\n name='n_tensor2metric'\n )\n\n wf.connect([\n (n_dwi2tensor, n_tensor2metric, [('out_file', 'input_file')])\n ])\n\n wf.connect([\n (n_tensor2metric, n_datasink, [('vector_file', 'eigenvector.mif')])\n ]) \n\n #mrconvert to get Z eigenvector\n n_mrconvert3 = Node(\n interface=utils.MRConvert(\n coord = [3, 2],\n out_file = 'eigenvectorZ.mif'\n ),\n name='n_mrconvert3'\n )\n\n wf.connect([\n (n_tensor2metric, n_mrconvert3, [('vector_file', 'in_file')])\n ])\n\n wf.connect([\n (n_mrconvert3, n_datasink, [('out_file', 'eigenvectorZ.mif')])\n ]) \n\n #ALL SUBSEQUENT STEPS GET ANGLE IN DEGREES\n # mrcalc to find absolute value\n n_mrcalc6 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'abs',\n out_file = 'abs_eigenvectorZ.mif'\n ),\n name='n_mrcalc6'\n )\n\n wf.connect([\n (n_mrconvert3, n_mrcalc6, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc6, n_datasink, [('out_file', 'abs_eigenvectorZ.mif')])\n ]) \n\n # mrcalc to get angle by doing inverse cosine\n n_mrcalc7 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'acos',\n out_file = 'acos_eigenvectorZ.mif'\n ),\n name='n_mrcalc7'\n )\n\n wf.connect([\n (n_mrcalc6, n_mrcalc7, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc7, n_datasink, [('out_file', 'acos_eigenvectorZ.mif')])\n ]) \n\n # mrcalc to convert angle to degrees\n n_mrcalc8 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'multiply',\n operand = 180,\n out_file = 'degrees_eigenvectorZ.mif'\n ),\n name='n_mrcalc8'\n )\n\n wf.connect([\n (n_mrcalc7, n_mrcalc8, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc8, n_datasink, [('out_file', 'degrees_eigenvectorZ.mif')])\n ]) \n\n n_mrcalc9 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'divide',\n operand = 3.14159265,\n out_file = 'dti_z_cos_deg.mif'\n ),\n name='n_mrcalc9'\n )\n\n wf.connect([\n (n_mrcalc8, n_mrcalc9, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc9, n_datasink, [('out_file', 'dti_z_cos_deg.mif')])\n ]) \n\n # Difference image between fixel based and tensor based outputs\n n_mrcalc10 = Node(\n interface=mrcalcfunc.MRCalc(\n operation = 'subtract',\n out_file = 'diff_imag_tensor_minus_fixel.mif'\n ),\n name='n_mrcalc10'\n )\n\n wf.connect([\n (n_mrcalc9, n_mrcalc10, [('out_file', 'in_file1')])\n ])\n\n wf.connect([\n (n_mrcalc5, n_mrcalc10, [('out_file', 'in_file2')])\n ])\n\n wf.connect([\n (n_mrcalc10, n_datasink, [('out_file', 'diff_imag_tensor_minus_fixel.mif')])\n ]) \n\n#################################################################################3\n return wf\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"DWI processing pipeline\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\n '--bids_dir',\n required=True,\n help='bids directory'\n )\n\n parser.add_argument(\n '--subjects',\n default=None,\n const=None,\n nargs='*',\n help='list of subjects as seen in bids_dir'\n )\n\n parser.add_argument(\n '--work_dir',\n required=True,\n help='work directory'\n )\n\n parser.add_argument(\n '--out_dir',\n required=True,\n help='output 
directory'\n )\n\n parser.add_argument(\n '--debug',\n dest='debug',\n action='store_true',\n help='debug mode'\n )\n\n\n parser.add_argument(\n '--pbs',\n dest='pbs',\n action='store_true',\n help='use PBS graph'\n )\n\n args = parser.parse_args()\n\n # environment variables\n os.environ[\"FSLOUTPUTTYPE\"] = \"NIFTI_GZ\"\n os.environ[\"PATH\"] += os.pathsep + os.path.join(os.path.dirname(os.path.abspath(__file__)), \"scripts\")\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n if \"PYTHONPATH\" in os.environ: os.environ[\"PYTHONPATH\"] += os.pathsep + this_dir\n else: os.environ[\"PYTHONPATH\"] = this_dir\n\n if args.debug:\n from nipype import config\n config.enable_debug_mode()\n config.set('execution', 'stop_on_first_crash', 'true')\n config.set('execution', 'remove_unnecessary_outputs', 'false')\n config.set('execution', 'keep_inputs', 'true')\n config.set('logging', 'workflow_level', 'DEBUG')\n config.set('logging', 'interface_level', 'DEBUG')\n config.set('logging', 'utils_level', 'DEBUG')\n\n if not args.subjects:\n subject_list = [subj for subj in os.listdir(args.bids_dir) if 'sub' in subj]\n else:\n subject_list = args.subjects\n\n bids_templates = {\n 'all_b0_PA': '{subject_id_p}/dwi/all_b0_PA.mif',\n 'DWI_all': '{subject_id_p}/dwi/DWI_all.mif',\n 'DTI_B0_PA': '{subject_id_p}/dwi/DTI_B0_PA',\n }\n\n wf = create_DWI_workflow(\n subject_list=subject_list,\n bids_dir=os.path.abspath(args.bids_dir),\n work_dir=os.path.abspath(args.work_dir),\n out_dir=os.path.abspath(args.out_dir),\n bids_templates=bids_templates\n )\n\n os.makedirs(os.path.abspath(args.work_dir), exist_ok=True)\n os.makedirs(os.path.abspath(args.out_dir), exist_ok=True)\n\n wf.write_graph(graph2use='flat', format='png', simple_form=False)\n # run workflow\n\n if args.pbs:\n wf.run(\n plugin='PBSGraph',\n plugin_args={\n 'qsub_args': '-A UQ-CAI -q Short -l nodes=1:ppn=1,mem=5GB,vmem=5GB,walltime=00:30:00'\n }\n )\n else:\n wf.run(\n plugin='MultiProc',\n plugin_args={\n 'n_procs': int(os.environ[\"NCPUS\"]) if \"NCPUS\" in os.environ else int(os.cpu_count())\n }\n )","sub_path":"old_nipype_pipeline.py","file_name":"old_nipype_pipeline.py","file_ext":"py","file_size_in_byte":19138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"615443229","text":"\nfrom django.contrib import admin\nfrom django.urls import path,include\n\nadmin.site.site_header = \"DTO KODERMA\"\nadmin.site.site_title = \"DTO PORTAL\"\nadmin.site.index_title = \"Welcome to DTO KODERMA portal\"\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',include('dto.urls'))\n]\n","sub_path":"hello/hello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"410289041","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2017/11/12 18:40\n# @Author : robin\n# @Site : \n# @File : Demo4.py\n# @Software: PyCharm Community Edition\n\nimport pickle\nd = dict(name='bob', age='20', score=88)\nf = open('dump.txt', 'wb')\npickle.dump(d, f)\nf.close()\n","sub_path":"com/robin/io/Demo4.py","file_name":"Demo4.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"156811940","text":"import time\r\nimport numpy\r\nimport sys\r\ndef heapify(arr, i,n):\r\n r = 2*i+2\r\n l = 2*i+1\r\n largest = i\r\n if l < n and arr[l]>arr[i] :\r\n largest = l\r\n if r < n and arr[r]>arr[largest] 
:\r\n largest = r\r\n if largest > i:\r\n temp=arr[i]\r\n arr[i]=arr[largest]\r\n arr[largest]=temp\r\n heapify(arr, largest,n)\r\n\r\n\r\ndef heapSort(arr):\r\n #Max heapify\r\n len_array=len(arr)-1\r\n array_half=len(arr)//2\r\n for i in range(array_half, -1, -1):\r\n heapify(arr, i,len(arr))\r\n #swap the root with the last element and call heapify\r\n for i in range(len_array, 0, -1):\r\n temp=arr[i]\r\n arr[i]=arr[0]\r\n arr[0]=temp\r\n heapify(arr, 0,i)\r\n\r\narr=[]\r\nwith open(sys.argv[1]) as f:\r\n content = f.read().splitlines()\r\nfor x in content:\r\n arr.append([int(x) for x in x.split()])\r\n#print(arr)\r\ntime_list=[]\r\nwith open('output.txt', 'a') as the_file:\r\n for y in arr:\r\n n = len(y)\r\n ticks1 = time.time()\r\n heapSort(y)\r\n ticks2 = time.time()\r\n time_t=ticks2-ticks1\r\n time_list.append(time_t)\r\n the_file.write(' '.join(str(e) for e in y))\r\n the_file.write('\\n')\r\nsum_=0\r\navg=[]\r\nfor x in range(len(time_list)):\r\n if (x+1) % 20 == 0:\r\n avg.append(sum_*1.0/20)\r\n sum_=0\r\n sum_ += time_list[x]\r\n#print(time_list)\r\nprint(\"Standard deviation for various input size\")\r\nf=0\r\nfor x in range(len(time_list)//20):\r\n print(numpy.std(time_list[f:f+20]))\r\n f = f + 20\r\nprint(\"Average time for various input size\")\r\nprint(avg)\r\n\r\n\r\n","sub_path":"sort/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576666512","text":"# Demonstration of packing and unpacking\n# arguments and keyword arguments.\n\n# Packing/unpacking arguments.\ndef add(*args):\n total = 0\n\n for arg in args:\n total += arg\n\n return total\n\n\nnumbers = [1, 2, 3, 4, 5]\nresult = add(*numbers)\nprint(result)\n\n\nprint()\n\n\n# Packing/unpacking keyword arguments.\ndef show_ages(**kwargs):\n for key, value in kwargs.items():\n print(key, value)\n\n\n# Simple keyword arguments.\n# show_ages(Tom = 20, Elizabeth = 43, Balázs = 31)\n\n# Using a dictionary.\npeople = {\"Jack\": 20, \"Zack\": 25, \"Mary\": 33, \"Zatik Balázs\": 31}\nshow_ages(**people)\n","sub_path":"packing_arguments/packing_arguments.py","file_name":"packing_arguments.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"472451987","text":"class Solution:\n def rotate(self, nums: 'List[int]', k: 'int') -> 'None':\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n list_len = len(nums)\n k %= list_len\n if not k: return\n index = 0; j = 0\n for i in range(list_len - 1):\n index += k\n if j == index % list_len:\n j += 1; index = j\n continue\n temp = nums[j]\n nums[j] = nums[index % list_len]\n nums[index % list_len] = temp\n \n return\n","sub_path":"Python/189.Rotate_Array.py","file_name":"189.Rotate_Array.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"225398937","text":"import discord\nfrom discord.ext import commands\n\n\nclient = commands.Bot(command_prefix = \":\")\n\nprint(\"Bot is online and ready.\")\n\n@client.event\nasync def on_message(message): ##message logging\n author = message.author\n content = message.content\n print('{}: {}'.format(author, content))\n@client.event\nasync def on_message_delete(message): ##if user deletes a message the bot will resend the message\n author = message.author\n content = message.content\n channel = 
message.channel\n    await client.send_message(channel, \"{}: {}\".format(author, content))\n@client.event ##bot status\nasync def on_ready():\n    await client.change_presence(game=discord.Game(name='and Watching Birds'))\n\n# NOTE: registering a second on_message handler replaces the logging handler defined above\n@client.event\nasync def on_message(message):\n    # we do not want the bot to reply to itself\n    if message.author == client.user:\n        return\n\n    if message.content.startswith('Hi'):\n        msg = 'Hey {0.author.mention}'.format(message)\n        await client.send_message(message.channel, msg)\n\n    if message.content.startswith('kitty are you online'):\n        msg = 'Yes :) {0.author.mention}'.format(message)\n        await client.send_message(message.channel, msg)\n\n\n\n\n\nclient.run(\"NDY3NzE1MzQwODE0NjQ3Mjk2.DrWiTA.yFQi_A2hXAxUkA7EP9fNGL_ARJg\")\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"9265955","text":"#P10.23\n\nclass Appointment:\n    def __init__(self, description, date):\n        self._description = description\n        self._date = date\n\n    def getAptDate(self):\n        return self._date\n\n    def getAptDesc(self):\n        return self._description\n\n    def occursOn(self):\n        raise NotImplementedError\n\n    def printAptDetails(self):\n        raise NotImplementedError\n\n\nclass oneTime(Appointment):\n    def __init__(self, description, date):\n        super().__init__(description, date)\n\n    def occursOn(self, year, month, day):\n        # return True when the appointment matches so callers can detect hits\n        apt_date = self.getAptDate()\n        apt_desc = self.getAptDesc()\n        if (apt_date[4:] == year) & (apt_date[0:2] == month) & (apt_date[2:4] == day) :\n            print(\"Appointment \", apt_desc)\n            return True\n        return False\n\n    def printAptDetails(self):\n        apt_date = self.getAptDate()\n        apt_desc = self.getAptDesc()\n        print(\"%s, %s\" % (apt_desc, apt_date) + \", OneTime Appointment\")\n\nclass Daily(Appointment):\n    def __init__(self, description, date):\n        super().__init__(description, date)\n\n    def occursOn(self, year, month, day):\n        apt_desc = self.getAptDesc()\n        print(\"Appointment: \", apt_desc)\n        return True\n\n    def printAptDetails(self):\n        apt_date = self.getAptDate()\n        apt_desc = self.getAptDesc()\n        print(apt_desc + \", Daily Appointment\")\n\nclass Monthly(Appointment):\n    def __init__(self, description, date):\n        super().__init__(description, date)\n\n    def occursOn(self, year, month, day):\n        apt_date = self.getAptDate()\n        apt_desc = self.getAptDesc()\n        if apt_date[2:4] == day :\n            print(\"Appointment: \", apt_desc)\n            return True\n        return False\n\n    def printAptDetails(self):\n        apt_date = self.getAptDate()\n        apt_desc = self.getAptDesc()\n        print(\"%s, %s\" % (apt_desc, apt_date) + \", Monthly Appointment\")\n\n\nclass main():\n    def __init__(self):\n        self._Apt = []\n\n    def addAppointment(self, apttype, description, date):\n        if apttype == \"onetime\":\n            self._Apt.append(oneTime(description, date))\n        elif apttype == \"daily\":\n            self._Apt.append(Daily(description, \"daily\"))\n        elif apttype == \"monthly\":\n            self._Apt.append(Monthly(description, date))\n\n    def getAptList(self):\n        return self._Apt\n\n\n\nchoice = str(input(\"\\nPlease enter choice:\\n1. Check Appointments\\n2. Add daily appointment\\n3. Add Onetime appointment\\n4. Add Monthly appointment\\n5. 
Exit\\nChoice:\"))\nans = True\na = main()\nwhile ans :\n if choice == '1' : \n input_date = str(input(\"\\nPlease enter a date (MMDDYY) to list your scheduled appointments on that date: \"))\n Apt1 = a.getAptList()\n Apt_found = False \n for i in range(len(Apt1)):\n Apt1[i].occursOn(input_date[4:], input_date[0:2], input_date[2:4])\n Apt_found = True\n if not Apt_found :\n print(\"No Appointments scheduled for the input date\")\n choice = str(input(\"\\nPlease enter another choice: \"))\n elif choice == '2' :\n description = str(input(\"Please enter the appointment decription: \"))\n date = str(input(\"Please enter the appointment date: \"))\n apttype = \"daily\"\n #a = main()\n a.addAppointment(apttype, description, date)\n choice = str(input(\"\\nPlease enter another choice: \"))\n elif choice == '3' :\n description = str(input(\"Please enter the appointment decription: \"))\n date = str(input(\"Please enter the appointment date: \"))\n apttype = \"onetime\"\n #a = main()\n a.addAppointment(apttype, description, date)\n choice = str(input(\"\\nPlease enter another choice: \"))\n elif choice == '4' :\n description = str(input(\"Please enter the appointment decription: \"))\n date = str(input(\"Please enter the appointment date: \"))\n apttype = \"monthly\"\n #a = main()\n a.addAppointment(apttype, description, date)\n choice = str(input(\"\\nPlease enter another choice: \"))\n elif choice == '5' :\n ans = False\n else :\n ans = False\n \n \n \n \n\n \n","sub_path":"solutions from book Python for Everyone, 2nd Edition/P10.23/P10.23.py","file_name":"P10.23.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"420214825","text":"'''\nCreated on 3 Nov 2016\n\n@author: u6023444\n'''\n\n# this is the schema where the deployment_log is located\nDEPLOYMENT_LOG_SCHEMA = 'chem'\n\n# this is the saccount that orchstrates deployments\nDEPLOYMENT_USER_SCHEMA = 'not-relevant-to-pg'\n\n# allows a logging directory to be specified and the default to be overridden. This directory must exist. If this is set to NULL then the default directory is used. 
\nLOG_DIR_NON_DEFAULT = '/home/ec2-user/Bumbleebee_for_Greenplum/logs/cc-pipeline-ls-chem-exp'\n\n# database type : currently 'oracle' or 'postgres' is supported\nDBMS = 'postgres'\n\n# enable automatic recompilation of non-destructive objects (views, packages, procedures, functions, triggers)\nAUTO_COMP = 'Y'\nrecompile_list = [ 'functions', 'packages', 'procedures', 'triggers', 'views', 'Functions', 'Packages', 'Procedures', 'Triggers', 'Views' ]\n\n# grant read access on all tables & views owned by this user to this role\nREAD_ONLY_ROLE = 'cc_read_only'","sub_path":"app-conf/cc-greenplum-dev/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"254826729","text":"# https://www.interviewbit.com/problems/minimum-characters-required-to-make-a-string-palindromic/\n\nclass Solution:\n    # @param A : string\n    # @return an integer\n    def solve(self, word):\n\n        # find the length of the longest palindrome starting at pos 0\n        l = len_of_longest_palindrome_starting_at(0, word)\n\n        return len(word) - l\n\n\ndef len_of_longest_palindrome_starting_at(strt, word):\n\n    N = len(word)\n\n    if strt < 0 or strt >= N:\n        raise ValueError()\n\n    # try ending at N-1\n    #\n    # then if necessary\n    # try ending at N-2\n    # \n    # then if necessary\n    # ...\n\n    end = N - 1\n    while strt < end:\n        if palindrome(word, strt, end):\n            return end - strt + 1\n        end -= 1\n\n    return 1\n\n\ndef palindrome(word, strt, end):\n\n    i, j = strt, end\n\n    while i < j:\n        if word[i] != word[j]:\n            return False\n        i += 1\n        j -= 1\n\n    return True","sub_path":"interviewbit.com/Strings/min_num_of_chars_that_must_be_inserted_at_front_of_string_to_make_it_palindromic.py","file_name":"min_num_of_chars_that_must_be_inserted_at_front_of_string_to_make_it_palindromic.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"172083444","text":"import fcntl\nimport os\nimport sys\nimport signal\nimport re\nimport time\nfrom threading import Thread\nimport codecs\nimport psutil\n\ntry:\n    from queue import Queue, Empty\nexcept ImportError:\n    from Queue import Queue, Empty  # python 2.x\n\nfrom subprocess import Popen, PIPE, STDOUT\n\nON_POSIX = 'posix' in sys.builtin_module_names\n\ndef enqueue_output(out, queue):\n    for line in iter(out.readline, b''):\n        queue.put(line)\n    out.close()\n\n__version__ = '1.0.5'\n\nPPPD_RETURNCODES = {\n    1: 'Fatal error occurred',\n    2: 'Error processing options',\n    3: 'Not executed as root or setuid-root',\n    4: 'No kernel support, PPP kernel driver not loaded',\n    5: 'Received SIGINT, SIGTERM or SIGHUP',\n    6: 'Modem could not be locked',\n    7: 'Modem could not be opened',\n    8: 'Connect script failed',\n    9: 'pty argument command could not be run',\n    10: 'PPP negotiation failed',\n    11: 'Peer failed (or refused) to authenticate',\n    12: 'The link was terminated because it was idle',\n    13: 'The link was terminated because the connection time limit was reached',\n    14: 'Callback negotiated',\n    15: 'The link was terminated because the peer was not responding to echo requests',\n    16: 'The link was terminated by the modem hanging up',\n    17: 'PPP negotiation failed because serial loopback was detected',\n    18: 'Init script failed',\n    19: 'Failed to authenticate to the peer',\n    20: 'Failed to allocate PPP',\n    21: 'CHAP authentication failed',\n    22: 'Connection terminated',\n    23: 'Timeout waiting for PADO packets',\n    24: 
'Unable to complete PPPoE Discovery',\n}\n\nclass PPPConnectionError(Exception):\n    def __init__(self, code, output=None):\n        self.code = code\n        self.message = PPPD_RETURNCODES.get(code, 'Undocumented error occurred')\n        self.output = output\n        self._interface = ''\n\n        super(Exception, self).__init__(code, output)\n\n    def __str__(self):\n        return self.message\n\nclass PPPConnection:\n    def __init__(self, *args, **kwargs):\n        self.output = ''\n        self._laddr = None\n        self._raddr = None\n        self._interface = ''\n\n        self.args = args\n        self.kwargs = kwargs\n\n        self.peer = kwargs.get('call', None)\n        if (self.peer == None):\n            raise Exception('Please specify peer name, call=\"example\"')\n\n        self.command()\n\n    def command(self):\n        self.commands = []\n\n        if self.kwargs.pop('sudo', True):\n            sudo_path = self.kwargs.pop('sudo_path', '/usr/bin/sudo')\n            if not os.path.isfile(sudo_path) or not os.access(sudo_path, os.X_OK):\n                raise IOError('%s not found' % sudo_path)\n            self.commands.append(sudo_path)\n\n        pppd_path = self.kwargs.pop('pppd_path', '/usr/sbin/pppd')\n        # if not os.path.isfile(pppd_path) or not os.access(pppd_path, os.X_OK):\n        # using sudo to restrict so no access check needed\n        if not os.path.isfile(pppd_path):\n            raise IOError('%s not found' % pppd_path)\n\n        self.commands.append(pppd_path)\n\n    def params(self, *args, **kwargs):\n        for k,v in kwargs.items():\n            self.commands.append(k)\n            if (not v is None):\n                self.commands.append(v)\n        self.commands.extend(args)\n\n    def connect(self):\n        self.command()\n        self.params(call=self.peer)\n        self.commands.append('nodetach')\n        self.run()\n\n    def disconnect(self):\n        # self.proc.kill()\n\n        for proc in psutil.process_iter():\n            try:\n                pinfo = proc.as_dict(attrs=['pid', 'cmdline'])\n            except psutil.NoSuchProcess:\n                pass\n            else:\n                if 'pppd call %s' % self.peer in ' '.join(pinfo['cmdline']):\n                    try:\n                        proc = psutil.Process(pid=pinfo['pid'])\n                        proc.terminate()\n                    except psutil.NoSuchProcess as e:\n                        print(e)\n                        pass\n\n        # example 1\n        # self.command()\n        # self.params(disconnect='\"chat -- \\d+++\\d\\c OK ath0 OK\"')\n        # self.run()\n\n        # example 2\n        # self.commands.append('/usr/bin/poff')\n        # self.commands.append(self.peer)\n\n        # example 3\n        # try:\n        #     if not self.connected():\n        #         return\n        # except PPPConnectionError:\n        #     return\n        #\n        # self.proc = Popen(self.commands, stdout=PIPE, bufsize=1, close_fds=ON_POSIX)\n        # q = Queue()\n        # t = Thread(target=enqueue_output, args=(self.proc.stdout, q))\n        # t.daemon = True # thread dies with the program\n        # t.start()\n        #\n        # self.proc.send_signal(signal.SIGHUP)\n        # self.proc.wait()\n\n\n    def run(self):\n        print(self.commands)\n\n        self.proc = Popen(self.commands, stdout=PIPE, bufsize=1, close_fds=ON_POSIX)\n        q = Queue()\n        t = Thread(target=enqueue_output, args=(self.proc.stdout, q))\n        t.daemon = True # thread dies with the program\n        t.start()\n\n        while True:\n            try:\n                try:\n                    self.line = q.get_nowait() # or q.get(timeout=.1)\n                except Empty:\n                    None\n                else:\n                    self.line = codecs.decode(str(self.line).encode('utf-8', errors='ignore'), errors='ignore')\n                    if not 'Plugin rp-pppoe.so loaded.' 
in self.line:\n                        self.output += self.line\n\n                    if 'Connect: ' in self.line:\n                        if (len(self.line.split()) > 1):\n                            self._interface = self.line.split()[1]\n\n            except IOError as e:\n                if e.errno != 11:\n                    raise\n                time.sleep(1)\n\n            if 'ip-up finished' in self.output:\n                return\n            if 'remote IP address' in self.output:\n                return\n            if 'Couldn\\'t allocate PPP' in self.output:\n                self.disconnect()\n                raise PPPConnectionError(20, self.output)\n            if 'CHAP authentication failed' in self.output:\n                raise PPPConnectionError(21, self.output)\n            if 'Connection terminated' in self.output:\n                raise PPPConnectionError(22, self.output)\n            if 'Timeout waiting for PADO packets' in self.output:\n                raise PPPConnectionError(23, self.output)\n            if 'Unable to complete PPPoE Discovery' in self.output:\n                raise PPPConnectionError(24, self.output)\n            if self.proc.poll():\n                # TODO: alert on unknown exceptions here\n                raise PPPConnectionError(self.proc.returncode, self.output)\n\n    def reconnect(self):\n        self.disconnect()\n        self.connect()\n\n    def read(self):\n        return self.output\n\n    @property\n    def interface(self):\n        return self._interface\n\n    @property\n    def laddr(self):\n        if not self._laddr:\n            try:\n                self.output += self.proc.stdout.read()\n            except IOError as e:\n                if e.errno != 11:\n                    raise\n            result = re.search(r'local IP address ([\\d\\.]+)', self.output)\n            if result:\n                self._laddr = result.group(1)\n\n        return self._laddr\n\n    @property\n    def raddr(self):\n        if not self._raddr:\n            try:\n                self.output += self.proc.stdout.read()\n            except IOError as e:\n                if e.errno != 11:\n                    raise\n            result = re.search(r'remote IP address ([\\d\\.]+)', self.output)\n            if result:\n                self._raddr = result.group(1)\n\n        return self._raddr\n\n    def connected(self):\n\n        self.proc = Popen(self.commands, stdout=PIPE, bufsize=1, close_fds=ON_POSIX)\n        q = Queue()\n        t = Thread(target=enqueue_output, args=(self.proc.stdout, q))\n        t.daemon = True # thread dies with the program\n        t.start()\n\n        if self.proc.poll():\n            try:\n                self.output += self.proc.stdout.read()\n            except IOError as e:\n                if e.errno != 11:\n                    raise\n            if self.proc.returncode not in [0, 5]:\n                raise PPPConnectionError(self.proc.returncode, self.output)\n            return False\n        elif 'ip-up finished' in self.output:\n            return True\n\n        return False\n","sub_path":"pppd.py","file_name":"pppd.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"302787950","text":"from __future__ import division\nfrom sklearn import datasets\n\n\n\n\niris = datasets.load_iris()\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier(n_estimators = 500)\n\n\n#matrix math\nimport numpy as np\n#data manipulation\nimport pandas as pd\n#matrix data structure\nfrom patsy import dmatrices\n#for error logging\nimport warnings\n\n\nimport time\nstart_time = time.time()\n\n\nimport os \nos.chdir('C:\\\\Users\\\\Robin\\\\Desktop\\\\MA_Code\\\\COMPAS')\n#os.chdir('C:\\\\Users\\\\TapperR\\\\Desktop\\\\compas\\\\compas-analysis')\n\n\n\ndata = pd.read_csv('spam.csv', encoding = 'latin1')\n\n\n#import tensorflow as tf\n#from tensorflow.examples.tutorials.mnist import input_data\n#import pickle\n#creating our own knn-classifier\n#import matplotlib.pyplot as plt\n#from matplotlib import style\n\nfrom math import sqrt\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport random\nfrom sklearn import preprocessing, 
cross_validation, neighbors\n\n\n\n\n\n\n\nraw4 = pd.read_table('cox-parsed.csv', sep=',', encoding='utf-8', na_filter = True)\n\n#factors\nraw4['sex'] = raw4['sex'].astype('category')\nclassnamesSex, indicesSex = np.unique(raw4['sex'], return_inverse=True)\nraw4['sexInd'] = indicesSex\n\n\nraw4['age_cat'] = raw4['age_cat'].astype('category')\nclassnamesAge, indicesAge = np.unique(raw4['age_cat'], return_inverse=True)\nraw4['age_catInd'] = indicesAge\n\n\nraw4['race'] = raw4['race'].astype('category')\nclassnamesRace, indicesRace = np.unique(raw4['race'], return_inverse=True)\nraw4['raceInd'] = indicesRace\n\n\n\nraw5Fac = raw4[['sexInd', 'age_catInd', 'raceInd', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count']]\nraw5Lab = raw4[['is_recid']]\n\n\nraw5Lab_raw = list(raw5Lab['is_recid'])\n\nX_train, X_test, y_train, y_test = cross_validation.train_test_split(np.array(raw5Fac), np.array(raw5Lab_raw), test_size=0.2)\n\n\n\n\n#posterior = prior occurrences * likelihood/ evidence = likelihood of something to be positive\n#are our features independent and from a similar distribution? check it!\n\n\n\ny_pred = rfc.fit(X_train, y_train).predict(X_test)\n\n\n#how often are the predictions right?\nrealVSpred = pd.DataFrame({'real': y_test, 'pred': y_pred})\n\n\n\ncheck = 0 \nfor i in range(len(realVSpred)):\n    if realVSpred.real[i] == realVSpred.pred[i]:\n        check += 1\n    else:\n        pass\n\n\n\nquote = check/len(realVSpred)\nprint(quote)\n\n\n#how the typical classification numbers look like\nTP = 0\nFP = 0\nTN = 0\nFN = 0\n\n\nfor i in range(len(realVSpred)):\n\n    if realVSpred['pred'][i] == 1 and realVSpred['real'][i] == 1:\n        #print('True Positive')\n        TP += 1\n\n    if realVSpred['pred'][i] == 1 and realVSpred['real'][i] == 0:\n        #print('False Positive')\n        FP += 1\n\n    if realVSpred['pred'][i] == 0 and realVSpred['real'][i] == 0:\n        #print('True Negative') \n        TN += 1\n\n    if realVSpred['pred'][i] == 0 and realVSpred['real'][i] == 1:\n        #print('False Negative')\n        FN += 1\n\nPPV = TP/(TP+FP)\nNPV = TN/(TN+FN)\n\nprint('True Positive:', TP, 'False Positive:', FP, 'True Negative:', TN, 'False Negative:', FN)\nprint('Trues:', TP+TN, 'False:', FP+FN, 'Ratio: True/All = ', (TP+TN)/len(realVSpred))\nprint('PPV:', PPV, 'NPV ', NPV)\n\n\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n#how important each of the features is\nrfc.feature_importances_\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\ndata = pd.read_csv('spam.csv', encoding='latin-1')\n\nX = data['v2']\nY = data['v1']\n\nX_train, X_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.2)\n\n\n#A is probability that email is spam, B is the content of the mail(Email-body)\ndef train():\n    total = 0\n    numSpam = 0\n    for email in trainData:\n        if email.label == SPAM :\n            numSpam +=1\n        total += 1\n        processEmail(email.body , email.label)\n    pA = numSpam/float(total)\n    pNotA = (total - numSpam)/float(total)\n\n#reading words from a specific email\n def processEmail(body , label):\n    for word in body:\n        if label == SPAM:\n            trainPositive[word] = trainPositive.get(word, 0) + 1\n            positiveTotal += 1\n        else:\n            trainNegative[word] = trainNegative.get(word, 0) + 1\n            negativeTotal += 1\n#gives the conditional probability p(B_i/A_x)\ndef conditionalEmail(body, spam) :\n    result = 1.0\n    for word in body:\n        result *= conditionalWord(body , spam)\n    return result\n\n#classifies a new email as spam or not spam\n def classify(email):\n    isSpam = pA * conditionalEmail(email, True) # P (A | B)\n    notSpam = pNotA * conditionalEmail(email, 
False) # P(¬A | B)\n\n    return isSpam > notSpam\n\n#Laplace Smoothing for the words not present in the training set\n#gives the conditional probability p(B_i | A_x) with smoothing\ndef conditionalWord(word, spam):\n    if spam:\n        return (trainPositive.get(word,0)+alpha)/(float)(positiveTotal+alpha*numWords)\n\n\n    return (trainNegative.get(word,0)+alpha)/(float)(negativeTotal+alpha*numWords)\n'''\n\n","sub_path":"compasRF.py","file_name":"compasRF.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60138321","text":"from Employee import *\nfrom Developer import Developer\n\nclass Manager(Employee):\n    __Team = []\n    def set_manag_team(self, Team):\n        self.__Team = Team\n    def get_manag_team(self):\n        return self.__Team\n\n    def get_salary_manager(self):\n        col_dev = 0\n        for i in self.__Team:\n            try:\n                if isinstance(i, Developer) == True:\n                    col_dev += 1\n            except SalaryGivingError as e:\n                details = e.args[0]\n                print(details)\n        if len(self.__Team) > 10:\n            self.__salary = self.get_salary() + 300\n            if col_dev > float(len(self.__Team) * 0.5):\n                self.__salary *= 1.1\n        elif len(self.__Team) > 5:\n            self.__salary = self.get_salary() + 200\n            if col_dev > float(len(self.__Team) * 0.5):\n                self.__salary *= 1.1\n        else:\n            self.__salary = self.get_salary()\n            if col_dev > float(len(self.__Team) * 0.5):\n                self.__salary *= 1.1\n        return int(self.__salary)\n\n    def descr_of_Manager(self):\n        print(self.get_fname(), self.get_sname(), \"- experience: \", self.get_exp(), \"years, team = \", len(self.__Team), \" workers, got salary:\", self.get_salary_manager())\n","sub_path":"error/Manager.py","file_name":"Manager.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"286335032","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n    Name: Compatibility Module\\n    Description: Performs compatibility checks for Python and OS compatibility\n\"\"\"\n\n\"\"\"PySimpleFrame\n    Author: Miguel Silva\n    License: Check LICENSE file\n\"\"\"\n\n## System imports ##\nimport os\nimport platform\nimport re\nimport importlib\nimport subprocess\nimport sys\nimport pip\n\n## Library imports ##\nfrom packaging import version\nfrom colorama import Fore, Back, Style\n\n## Application imports ##\n\n\n## Minimum Python Version\nMIN_VERSION = \"3.8\"\n\t\n\ndef IsPythonCompatible():\n\t\"\"\"Check if the current Python execution meets the minimum required version\n\t\n\t:returns compatible bool: Tells if it's Python compatible\n\t\"\"\"\n\t\n\t## Get Python version\n\tcurVersion = platform.python_version()\n\t\n\t## >= so that the minimum version itself counts as compatible\n\treturn version.parse(curVersion) >= version.parse(MIN_VERSION)\n\ndef IsLibrariesCompatible():\n\t\"\"\"\n\t\"\"\"\n\t\n\t## Get OS information\n\tsystem = platform.system()\n\trelease = platform.release()\n\t\n\t## Create the known OS list\n\tsysList = {\n\t\t\"MSYS_NT*\" : Compatibility.IsMSYSCompatible,\n        \"Windows\" : Compatibility.IsWindowsCompatible,\n        \"Linux\" : Compatibility.IsLinuxCompatible,\n\t\t\"CYGWIN\" : Compatibility.IsCYGWINCompatible,\n\t\t\"FreeBSD\" : Compatibility.IsFBSDCompatible,\n        \"Darwin\" : Compatibility.IsMacOSXCompatible,\n    }\n\t\n\t## Get an OS from the known list\n\tosFunc = None\n\tfor key, value in sysList.items():\n\t\tif re.search(key, system):\n\t\t\tosFunc = value\n\t\n\t## Check if we detected any known OS \n\tif not osFunc:\n\t\tprint(\"Invalid operating system, please review\")\n\t\treturn\n\t\n\t## Execute the 
compatibility checking function\n\tosFunc()\n\nclass Compatibility:\n\tdef IsMSYSCompatible():\n\t\t## Declare the install command\n\t\tinstallCommand = \"pacman -S\"\n\t\t\n\t\t## Check if its compatible\n\t\tCompatibility.IsOSCompatible(installCommand)\n\t\t\n\tdef IsWindowsCompatible():\n\t\tprint(\"Its Win32\")\n\t\t\n\t\t## Declare the install command\n\t\tinstallCommand = \"pacman -S\"\n\t\t\n\t\t## Check if its compatible\n\t\tCompatibility.IsOSCompatible(installCommand)\n\t\n\tdef IsLinuxCompatible():\n\t\tprint(\"Its Linux\")\n\t\t\n\t\t## Declare the install command\n\t\tinstallCommand = \"apt-get install\"\n\t\t\n\t\t## Check if its compatible\n\t\tCompatibility.IsOSCompatible(installCommand)\n\t\n\tdef IsFBSDCompatible():\n\t\tprint(\"Its FreeBSD\")\n\t\t\n\t\t## Declare the install command\n\t\tinstallCommand = \"pkg install\"\n\t\t\n\t\t## Check if its compatible\n\t\tCompatibility.IsOSCompatible(installCommand)\n\t\n\tdef IsCYGWINCompatible():\n\t\tprint(\"Its CygWin\")\n\t\t\n\t\t## Declare the install command\n\t\tinstallCommand = \"apt-cyg install\"\n\t\t\n\t\t## Check if its compatible\n\t\tCompatibility.IsOSCompatible(installCommand)\n\t\n\tdef IsMacOSXCompatible():\n\t\tprint(\"Its MacOS\")\n\t\t\n\t\t## Declare the install command\n\t\tinstallCommand = \"apt-get install\"\n\t\t\n\t\t## Check if its compatible\n\t\tCompatibility.IsOSCompatible(installCommand)\n\t\n\tdef IsOSCompatible(installCommand):\t\t\n\t\t## Get the necessary libraries\n\t\tinstallList = []\n\t\t\n\t\t## Check if each lib listed is installed\n\t\tfor lib in installList:\n\t\t\t\n\t\t\t## Get if the spec module exists\n\t\t\tspec = importlib.util.find_spec(lib)\n\t\t\t\n\t\t\t## Install if it doesn't exist\n\t\t\tif spec is None:\n\t\t\t\tos.system(\"%s %s\" % (installCommand, lib))\n\t\t\nclass OperativeSystem:\n\tdef IsWindows():\n\t\treturn \"Windows\" in platform.system()\n\t\n\tdef IsLinux():\n\t\treturn \"Linux\" in platform.system()\n","sub_path":"pysimpleframe/compatibility/Compatibility.py","file_name":"Compatibility.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"458608490","text":"import pandas as pd\nimport os\n\nroot_path = 'recipe_app/01 - excel_files'\n\n# Function to concat files, assuming same structure\ndef concat_files(root_path):\n\n df_list = []\n\n for file in os.listdir(root_path):\n df = pd.read_excel(f\"{root_path}/{file}\", index=False)\n df_list.append(df)\n\n df1 = pd.concat(df_list).reset_index(drop=True)\n df1.columns = [\"Recipe_Name\", \"Link\", \"Summary\", \"Ingredients\", \"Instructions\", \"Image\"]\n\n return df1","sub_path":"recipe_app/concat_files.py","file_name":"concat_files.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"289408748","text":"import asyncio\nimport shlex\nimport uuid\nimport os\nimport re\n\nfrom launchy import Launchy\nfrom sqlalchemy import or_\nfrom pathlib import Path\nfrom datetime import datetime\n\nfrom ..app import logger\nfrom ..tools import get_changelog_attr, strip_epoch_version, db2array, array2db\nfrom .git import GitCheckout, GetBuildInfo\n\nfrom ..model.database import Session\nfrom ..model.sourcerepository import SourceRepository\nfrom ..model.build import Build\nfrom ..model.buildtask import BuildTask\nfrom ..model.maintainer import Maintainer\nfrom ..model.chroot import Chroot\nfrom ..model.projectversion import 
ProjectVersion\nfrom ..molior.core import get_target_arch, get_targets, get_buildorder, get_apt_repos, get_apt_keys\nfrom ..molior.configuration import Configuration\nfrom ..molior.queues import enqueue_task, enqueue_aptly, enqueue_backend, buildlog, buildlogtitle, buildlogdone\n\n\nasync def BuildDebSrc(repo_id, repo_path, build_id, ci_version, is_ci, author, email):\n await buildlog(build_id, \"I: getting debian build information\\n\")\n src_package_name = await get_changelog_attr(\"Source\", repo_path)\n version = await get_changelog_attr(\"Version\", repo_path)\n repo_path = Path(repo_path)\n\n # FIXME: use global var\n key = Configuration().debsign_gpg_email\n if not key:\n await buildlog(build_id, \"E: Signing key not defined in configuration\\n\")\n logger.error(\"Signing key not defined in configuration\")\n return False\n\n async def outh(line):\n line = line.strip()\n if line:\n await buildlog(build_id, \"%s\\n\" % line)\n\n if is_ci:\n # in order to publish a sourcepackage for a ci build we need\n # to create a ci changelog with the correct version\n\n distribution = await get_changelog_attr(\"Distribution\", repo_path)\n\n env = os.environ.copy()\n env[\"DEBFULLNAME\"] = author\n env[\"DEBEMAIL\"] = email\n dchcmd = \"dch -v %s --distribution %s --force-distribution 'CI Build'\" % (ci_version, distribution)\n version = ci_version\n\n process = Launchy(shlex.split(dchcmd), outh, outh, cwd=str(repo_path), env=env)\n await process.launch()\n ret = await process.wait()\n if ret != 0:\n logger.error(\"Error running dch for CI build\")\n return False\n\n if (repo_path / \".git\").exists():\n process = Launchy(shlex.split(\n \"git -c user.name='{}' -c user.email='{}' commit -a -m 'ci build'\".format(author, email)),\n outh, outh, cwd=str(repo_path))\n await process.launch()\n ret = await process.wait()\n if ret != 0:\n logger.error(\"Error creating ci build commit\")\n return False\n\n logger.debug(\"%s: creating source package\", src_package_name)\n await buildlog(build_id, \"I: creating source package: %s (%s)\\n\" % (src_package_name, version))\n\n cmd = \"dpkg-buildpackage -S -d -nc -I.git -pgpg1 -k{}\".format(key)\n process = Launchy(shlex.split(cmd), outh, outh, cwd=str(repo_path))\n await process.launch()\n ret = await process.wait()\n if ret != 0:\n await buildlog(build_id, \"E: Error building source package\\n\")\n logger.error(\"source packaging failed, dpkg-builpackage returned %d\", ret)\n return False\n\n logger.debug(\"%s (%d): source package v%s created\", src_package_name, repo_id, version)\n return True\n\n\nasync def BuildProcess(parent_build_id, repo_id, git_ref, ci_branch, custom_targets, force_ci=False):\n await buildlogtitle(parent_build_id, \"Molior Build\")\n with Session() as session:\n parent = session.query(Build).filter(Build.id == parent_build_id).first()\n if not parent:\n logger.error(\"BuildProcess: parent build {} not found\".format(parent_build_id))\n return\n\n repo = session.query(SourceRepository) .filter(SourceRepository.id == repo_id) .first()\n if not repo:\n logger.error(\"source repository %d not found\", repo_id)\n await parent.log(\"E: source repository {} not found\\n\".format(repo_id))\n await parent.logtitle(\"Done\", no_footer_newline=True, no_header_newline=False)\n await parent.logdone()\n await parent.set_failed()\n session.commit()\n return\n src_path = repo.src_path\n\n await buildlog(parent_build_id, \"I: git checkout {}\\n\".format(git_ref))\n\n # Checkout\n ret = await asyncio.ensure_future(GitCheckout(src_path, git_ref, 
parent_build_id))\n\n if not ret:\n await buildlog(parent_build_id, \"E: git checkout failed\\n\")\n await buildlogtitle(parent_build_id, \"Done\", no_footer_newline=True, no_header_newline=False)\n await buildlogdone(parent_build_id)\n\n with Session() as session:\n parent = session.query(Build).filter(Build.id == parent_build_id).first()\n if not parent:\n logger.error(\"BuildProcess: parent build {} not found\".format(parent_build_id))\n return\n repo = session.query(SourceRepository) .filter(SourceRepository.id == repo_id) .first()\n if not repo:\n logger.error(\"source repository %d not found\", repo_id)\n return\n\n if not ret:\n await parent.set_failed()\n repo.set_ready()\n session.commit()\n return\n\n await buildlog(parent_build_id, \"\\nI: get build information\\n\")\n\n info = None\n try:\n info = await GetBuildInfo(repo.src_path, git_ref)\n except Exception as exc:\n logger.exception(exc)\n\n if not info:\n await buildlog(parent_build_id, \"E: Error getting build information\\n\")\n await buildlogtitle(parent_build_id, \"Done\", no_footer_newline=True, no_header_newline=False)\n await buildlogdone(parent_build_id)\n\n with Session() as session:\n parent = session.query(Build).filter(Build.id == parent_build_id).first()\n if not parent:\n logger.error(\"BuildProcess: parent build {} not found\".format(parent_build_id))\n return\n repo = session.query(SourceRepository) .filter(SourceRepository.id == repo_id) .first()\n if not repo:\n logger.error(\"source repository %d not found\", repo_id)\n return\n\n if not info:\n await parent.set_failed()\n repo.set_ready()\n session.commit()\n return\n\n targets = get_targets(info.plain_targets, repo, custom_targets, session)\n\n if not targets:\n repo.log_state(\"unknown target projectversions in debian/molior.yml\")\n await parent.log(\"E: the repository is not added to any projectversions from debian/molior.yml:\\n\")\n await parent.log(\" %s\\n\" % str(info.plain_targets))\n await parent.logtitle(\"Done\", no_footer_newline=True, no_header_newline=False)\n await parent.logdone()\n repo.set_ready()\n await parent.set_nothing_done()\n session.commit()\n return\n\n is_ci = False\n if force_ci:\n is_ci = True\n else:\n # check if it is a CI build\n # i.e. 
if gittag does not match version in debian/changelog\n gittag = \"\"\n\n async def outh(line):\n nonlocal gittag\n gittag += line\n\n process = Launchy(shlex.split(\"git describe --tags --abbrev=40\"), outh, outh, cwd=str(src_path))\n await process.launch()\n ret = await process.wait()\n if ret != 0:\n logger.error(\"error running git describe: %s\" % gittag.strip())\n else:\n v = strip_epoch_version(info.version)\n if not re.match(\"^v?{}$\".format(v.replace(\"~\", \"-\").replace(\"+\", \"\\\\+\")), gittag) or \"+git\" in v:\n is_ci = True\n\n ci_cfg = Configuration().ci_builds\n ci_enabled = ci_cfg.get(\"enabled\") if ci_cfg else False\n\n with Session() as session:\n parent = session.query(Build).filter(Build.id == parent_build_id).first()\n if not parent:\n logger.error(\"BuildProcess: parent build {} not found\".format(parent_build_id))\n return\n repo = session.query(SourceRepository) .filter(SourceRepository.id == repo_id) .first()\n if not repo:\n logger.error(\"source repository %d not found\", repo_id)\n return\n\n if is_ci and not ci_enabled:\n repo.log_state(\"CI builds are not enabled in configuration\")\n await parent.log(\"E: CI builds are not enabled in configuration\\n\")\n await parent.logtitle(\"Done\", no_footer_newline=True, no_header_newline=False)\n await parent.logdone()\n await parent.set_successful()\n repo.set_ready()\n session.commit()\n return\n\n parent.is_ci = is_ci\n session.commit()\n\n if is_ci:\n # create CI version with git hash suffix\n info.origversion = info.version\n info.version += \"+git{}.{}\".format(datetime.now().strftime(\"%Y%m%d%H%M%S\"), info.commit_hash[:6])\n\n # check if CI builds enabled in any project version\n found = False\n for target in targets:\n projectversion = session.query(ProjectVersion).filter(\n ProjectVersion.ci_builds_enabled == True, # noqa: E712\n ProjectVersion.id == target.projectversion_id).first()\n if projectversion:\n found = True\n break\n if not found:\n repo.log_state(\"CI builds not enabled in specified projectversions, not building...\")\n await parent.log(\"E: CI builds not enabled in specified projectversions, not building...\\n\")\n await parent.logtitle(\"Done\", no_footer_newline=True, no_header_newline=False)\n await parent.logdone()\n await parent.set_nothing_done()\n repo.set_ready()\n session.commit()\n return\n\n # Check if source build already exists\n build = session.query(Build).filter(Build.buildtype == \"source\",\n Build.sourcerepository == repo,\n Build.version == info.version).first()\n if build:\n repo.log_state(\"source package already exists for version {}\".format(info.version))\n await parent.log(\"E: source package already exists for version {}\\n\".format(info.version))\n await parent.logtitle(\"Done\", no_footer_newline=True, no_header_newline=False)\n await parent.logdone()\n repo.set_ready()\n if build.parent and build.parent.buildstate == \"successful\":\n await parent.set_already_exists()\n else:\n await parent.set_already_failed()\n session.commit()\n args = {\"schedule\": []}\n await enqueue_task(args)\n return\n\n # Use commiter name as maintainer for CI builds\n if is_ci:\n t = info.author_name.split(\" \", 2)\n if len(t) == 2:\n firstname = t[0]\n lastname = t[1]\n else:\n firstname = t[0]\n lastname = \"\"\n email = info.author_email\n else:\n firstname = info.firstname\n lastname = info.lastname\n email = info.email\n\n maintainer = session.query(Maintainer).filter(Maintainer.email == email).first()\n if not maintainer:\n maintainer = Maintainer(firstname=firstname, 
surname=lastname, email=email)\n session.add(maintainer)\n session.commit()\n\n # FIXME: assert version == git tag\n\n build = Build(\n version=info.version,\n git_ref=info.commit_hash,\n ci_branch=ci_branch,\n is_ci=is_ci,\n sourcename=info.sourcename,\n buildstate=\"new\",\n buildtype=\"source\",\n parent_id=parent_build_id,\n sourcerepository=repo,\n maintainer=maintainer,\n )\n\n # update patent\n parent.version = info.version\n parent.sourcerepository = repo\n parent.maintainer = maintainer\n parent.git_ref = info.commit_hash\n\n session.add(build)\n session.commit()\n await parent.build_changed()\n await build.build_added()\n\n # add build order dependencies\n build_after = get_buildorder(repo.src_path)\n if build_after:\n await build.parent.log(\"N: source needs to build after: %s\\n\" % \", \".join(build_after))\n build.builddeps = \"{\" + \",\".join(build_after) + \"}\"\n session.commit()\n\n projectversion_ids = []\n found = False\n for target in targets:\n projectversion = session.query(ProjectVersion).filter(ProjectVersion.id == target.projectversion_id).first()\n if projectversion.is_locked:\n repo.log_state(\"build to locked projectversion '%s-%s' not permitted\" % (\n projectversion.project.name,\n projectversion.name,\n ))\n await parent.log(\"W: build to locked projectversion '%s-%s' not permitted\\n\" % (\n projectversion.project.name,\n projectversion.name,\n ))\n continue\n\n if is_ci and not projectversion.ci_builds_enabled:\n repo.log_state(\"CI builds not enabled in projectversion '%s-%s'\" % (\n projectversion.project.name,\n projectversion.name,\n ))\n await parent.log(\"W: CI builds not enabled in projectversion '%s-%s'\\n\" % (\n projectversion.project.name,\n projectversion.name,\n ))\n continue\n\n projectversion_ids.append(projectversion.id)\n\n architectures = db2array(target.architectures)\n for architecture in architectures:\n deb_build = session.query(Build).filter(\n Build.sourcerepository_id == repo.id,\n Build.projectversion == projectversion,\n Build.version == info.version,\n Build.buildtype == \"deb\",\n Build.architecture == architecture).first()\n if deb_build:\n if deb_build.buildstate != \"successful\":\n deb_build.buildstate = \"needs_build\"\n session.commit()\n found = True\n continue\n logger.warning(\"already built %s\", repo.name)\n await parent.log(\"E: already built {}\\n\".format(repo.name))\n continue\n\n found = True\n\n await parent.log(\"I: creating build for projectversion '%s/%s'\\n\" % (\n projectversion.project.name,\n projectversion.name,\n ))\n\n deb_build = Build(\n version=info.version,\n git_ref=info.commit_hash,\n ci_branch=ci_branch,\n is_ci=is_ci,\n sourcename=info.sourcename,\n buildstate=\"new\",\n buildtype=\"deb\",\n parent_id=build.id,\n sourcerepository=repo,\n maintainer=maintainer,\n projectversion_id=projectversion.id,\n architecture=architecture\n )\n\n session.add(deb_build)\n session.commit()\n\n await deb_build.build_added()\n\n if not found:\n await parent.log(\"E: no projectversion found to build for\")\n await parent.logtitle(\"Done\", no_footer_newline=True, no_header_newline=False)\n await parent.logdone()\n await parent.set_nothing_done()\n repo.set_ready()\n session.commit()\n return\n\n build.projectversions = array2db([str(p) for p in projectversion_ids])\n session.commit()\n\n build_id = build.id\n\n await enqueue_task({\"src_build\": [build_id]})\n\n\nasync def BuildSourcePackage(build_id):\n with Session() as session:\n build = session.query(Build).filter(Build.id == build_id).first()\n if not 
build:\n logger.error(\"BuildProcess: build {} not found\".format(build_id))\n return\n parent_build_id = build.parent_id\n repo_id = build.sourcerepository_id\n src_path = build.sourcerepository.src_path\n version = build.version\n is_ci = build.is_ci\n firstname = build.maintainer.firstname\n lastname = build.maintainer.surname\n email = build.maintainer.email\n\n await build.set_building()\n session.commit()\n\n await build.parent.log(\"I: building source package\\n\")\n\n # Build Source Package\n await build.logtitle(\"Source Build\")\n\n async def fail():\n with Session() as session:\n build = session.query(Build).filter(Build.id == build_id).first()\n if not build:\n logger.error(\"BuildProcess: build {} not found\".format(build_id))\n return\n parent = session.query(Build).filter(Build.id == build.parent_id).first()\n if not parent:\n logger.error(\"BuildProcess: parent build {} not found\".format(build.parent_id))\n return\n repo = session.query(SourceRepository) .filter(SourceRepository.id == repo_id) .first()\n if not repo:\n logger.error(\"source repository %d not found\", repo_id)\n return\n\n await parent.log(\"E: building source package failed\\n\")\n await build.logtitle(\"Done\", no_footer_newline=True, no_header_newline=True)\n await parent.logdone()\n repo.set_ready()\n await build.set_failed()\n session.commit()\n # FIXME: cancel deb builds, or only create deb builds after source build ok\n\n try:\n ret = await BuildDebSrc(repo_id, src_path, build_id, version, is_ci,\n \"{} {}\".format(firstname, lastname), email)\n except Exception as exc:\n logger.exception(exc)\n await fail()\n return\n\n if not ret:\n await fail()\n return\n\n with Session() as session:\n build = session.query(Build).filter(Build.id == build_id).first()\n if not build:\n logger.error(\"BuildProcess: build {} not found\".format(build_id))\n return\n repo = session.query(SourceRepository) .filter(SourceRepository.id == repo_id) .first()\n if not repo:\n logger.error(\"source repository %d not found\", repo_id)\n return\n\n await build.set_needs_publish()\n session.commit()\n\n repo.set_ready()\n session.commit()\n\n await buildlog(parent_build_id, \"I: publishing source package\\n\")\n await enqueue_aptly({\"src_publish\": [build_id]})\n\n\ndef chroot_ready(build, session):\n \"\"\"\n Checks if the needed chroot\n for the given build is ready.\n Creates the chroot if it is not ready.\n\n Args:\n build (molior.model.build.Build): The build to check.\n\n Returns:\n bool: True if chroot ready, otherwise False.\n \"\"\"\n target_arch = get_target_arch(build, session)\n chroot = session.query(Chroot).filter(Chroot.basemirror_id == build.projectversion.basemirror_id,\n Chroot.architecture == target_arch).first()\n if not chroot:\n build.log_state(\"chroot not found\")\n return False\n if not chroot.ready:\n build.log_state(\"chroot not ready\")\n return False\n return True\n\n\nasync def schedule_build(build, session):\n \"\"\"\n Sends the given build to\n the task queue.\n\n Args:\n build (molior.model.build.Build): Build to schedule.\n \"\"\"\n if not chroot_ready(build, session):\n return False\n\n token = uuid.uuid4()\n buildtask = BuildTask(build=build, task_id=str(token))\n session.add(buildtask)\n session.commit()\n\n arch = build.architecture\n distrelease_name = build.projectversion.basemirror.project.name\n distrelease_version = build.projectversion.basemirror.name\n\n project_version = build.projectversion\n apt_urls = get_apt_repos(project_version, session, is_ci=build.is_ci)\n apt_keys = 
get_apt_keys(project_version, session)\n\n arch_any_only = False if arch == get_target_arch(build, session) else True\n\n config = Configuration()\n apt_url = config.aptly.get(\"apt_url\")\n\n token = buildtask.task_id\n\n run_lintian = True\n if build.is_ci:\n run_lintian = False\n\n await build.set_scheduled()\n session.commit()\n\n await enqueue_backend(\n {\n \"schedule\": [\n build.id,\n token,\n build.version,\n apt_url,\n arch,\n arch_any_only,\n distrelease_name,\n distrelease_version,\n \"unstable\" if build.is_ci else \"stable\",\n build.sourcename,\n project_version.project.name,\n project_version.name,\n apt_urls,\n apt_keys,\n run_lintian\n ]\n }\n )\n return True\n\n\ndef get_dependencies_recursive(dependencies, array):\n for dep in dependencies:\n if dep.project.is_mirror:\n continue\n if dep.id not in array:\n array.append(dep.id)\n get_dependencies_recursive(dep.dependencies, array)\n\n\nasync def ScheduleBuilds():\n with Session() as session:\n\n needed_builds = session.query(Build).filter(Build.buildstate == \"needs_build\", Build.buildtype == \"deb\").all()\n for build in needed_builds:\n if not chroot_ready(build, session):\n continue\n\n projectversion = session.query(ProjectVersion).filter(\n ProjectVersion.id == build.projectversion_id).first()\n if not projectversion:\n logger.warning(\"scheduler: projectversion %d not found\", build.projectversion_id)\n continue\n\n pvname = projectversion.fullname\n buildorder_projectversions = [build.projectversion_id]\n get_dependencies_recursive(projectversion.dependencies, buildorder_projectversions)\n# for dep in projectversion.dependencies:\n# if dep.project.is_mirror:\n# continue\n# buildorder_projectversions.append(dep.id)\n\n ready = True\n repo_deps = []\n if build.parent.builddeps:\n builddeps = build.parent.builddeps\n for builddep in builddeps:\n repo_dep = None\n for buildorder_projectversion in buildorder_projectversions:\n repo_dep = session.query(SourceRepository).filter(SourceRepository.projectversions.any(\n id=buildorder_projectversion)).filter(or_(\n SourceRepository.url == builddep,\n SourceRepository.url.like(\"%/{}\".format(builddep)),\n SourceRepository.url.like(\"%/{}.git\".format(builddep)))).first()\n if repo_dep:\n break\n\n if not repo_dep:\n logger.error(\"build-{}: dependency {} not found in projectversion {}\".format(build.id,\n builddep, build.projectversion_id))\n await build.log(\"E: dependency {} not found in projectversion {} nor dependencies\\n\".format(\n builddep, pvname))\n ready = False\n break\n repo_deps.append(repo_dep.id)\n\n if not ready:\n continue\n\n if not repo_deps:\n # build.log_state(\"scheduler: no build order dependencies, scheduling...\")\n await schedule_build(build, session)\n continue\n\n for dep_repo_id in repo_deps:\n dep_repo = session.query(SourceRepository).filter(SourceRepository.id == dep_repo_id).first()\n if not dep_repo:\n logger.warning(\"scheduler: repo %d not found\", dep_repo_id)\n continue\n\n # FIXME: buildconfig arch dependent!\n\n # find running builds in the same projectversion\n # FIXME: check also dependencies which are not mirrors\n\n # check no build order dep is needs_build, building, publishing, ...\n # FIXME: this needs maybe checking of source packages as well?\n running_builds = session.query(Build).filter(or_(\n Build.buildstate == \"new\",\n Build.buildstate == \"needs_build\",\n Build.buildstate == \"scheduled\",\n Build.buildstate == \"building\",\n Build.buildstate == \"needs_publish\",\n Build.buildstate == \"publishing\",\n ), 
Build.buildtype == \"deb\",\n                    Build.sourcerepository_id == dep_repo_id,\n                    Build.projectversion_id.in_(buildorder_projectversions)).all()\n\n                if running_builds:\n                    ready = False\n                    builds = [str(b.id) for b in running_builds]\n                    await build.log(\"W: waiting for repo {} to finish building ({}) in projectversion {} or dependencies\\n\".\n                                    format(dep_repo.name, \", \".join(builds), pvname))\n                    continue\n\n                # find successful builds in the same and dependent projectversions\n                # FIXME: search same architecture as well\n                found = False\n                successful_builds = session.query(Build).filter(\n                    Build.buildstate == \"successful\",\n                    Build.buildtype == \"deb\",\n                    Build.sourcerepository_id == dep_repo_id,\n                    Build.projectversion_id.in_(buildorder_projectversions))\n                successful_builds = successful_builds.all()\n\n                if successful_builds:\n                    found = True\n\n                if not found:\n                    ready = False\n                    projectversion = session.query(ProjectVersion).filter(\n                        ProjectVersion.id == build.projectversion_id).first()\n                    if not projectversion:\n                        pvname = \"unknown\"\n                        logger.warning(\"scheduler: projectversion %d not found\", build.projectversion_id)\n                    else:\n                        pvname = projectversion.fullname\n\n                    await build.log(\"W: waiting for repo {} to be built in projectversion {} or dependencies\\n\".format(\n                        dep_repo.name, pvname))\n                    continue\n\n            if ready:\n                # build.log_state(\"scheduler: found all required build order dependencies, scheduling...\")\n                await schedule_build(build, session)\n","sub_path":"molior/ops/deb_build.py","file_name":"deb_build.py","file_ext":"py","file_size_in_byte":27667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"545622901","text":"# Variables for string output\nstr_red = '\\033[31m'\nstr_green = '\\033[32m'\nstr_yellow = '\\033[33m'\nstr_blue = '\\033[34m'\nstr_purple = '\\033[35m'\nstr_turquoise = '\\033[36m'\nstr_fat = '\\033[1m'\nstr_italics = '\\033[3m'\nstr_reset = '\\033[0m'\ntab = '\\t'\n\n# Table headers\ntables_name = ('genre', 'executor', 'album', 'track', 'collection',\n               'genre_executor', 'executor_album', 'collection_track')\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"453657134","text":"import os\nimport unittest\n\nimport requests_mock\nimport uritemplate\n\nimport octokit\n\n\nclass TestResources(unittest.TestCase):\n    \"\"\"Tests the functionality in octokit/resources.py\"\"\"\n\n    def setUp(self):\n        self.client = octokit.Client(api_endpoint='mock://api.com/{param}')\n        self.adapter = requests_mock.Adapter()\n        self.client.session.mount('mock', self.adapter)\n\n    def test_call(self):\n        \"\"\"Test that resources.__call__ performs an HTTP GET\"\"\"\n        url = uritemplate.expand(self.client.url, {'param': 'foo'})\n        self.adapter.register_uri('GET', url, text='{\"success\": true}')\n\n        response = self.client(param='foo')\n        assert response.success\n\n        # test named param inference\n        response = self.client('foo')\n        assert response.success\n\n    def test_httpverb(self):\n        \"\"\"Test that each HTTP verb works properly when JSON is returned.\"\"\"\n        verbs_to_methods = [\n            ('GET', self.client.get),\n            ('POST', self.client.post),\n            ('PUT', self.client.put),\n            ('PATCH', self.client.patch),\n            ('DELETE', self.client.delete),\n            ('HEAD', self.client.head),\n            ('OPTIONS', self.client.options),\n        ]\n\n        for verb, method in verbs_to_methods:\n            url = uritemplate.expand(self.client.url, {'param': 'foo'})\n            
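# Illustrative aside (added; not part of the original test): uritemplate.expand\n            # resolves RFC 6570 templates against a variable dict, so the endpoint above\n            # expands as checked here.\n            assert uritemplate.expand('mock://api.com/{param}', {'param': 'foo'}) == 'mock://api.com/foo'\n            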
self.adapter.register_uri(verb, url, text='{\"success\": true}')\n\n response = method(param='foo')\n assert response.success\n\n # test named param inference\n response = method('foo')\n assert response.success\n\n def test_ensure_schema_loaded_exception(self):\n \"\"\"Test that ensure_schema_loaded raises correct exception.\"\"\"\n client = octokit.Client()\n try:\n #.id necessary to force accessing resource\n client.user.id\n except Exception as e:\n self.assertNotIsInstance(e, NameError)\n self.assertEqual(e.args[0], \"You need to call this resource with variables ['user']\")\n\n def test_schema_key_aliasing(self):\n \"\"\"Test Resource whether attributes alias schema keys.\"\"\"\n try:\n self.client.name\n self.assertTrue(False, msg=\"No exception raised when accessing Client.name\")\n except Exception as e:\n self.assertEqual(e.args[0], \"You need to call this resource with variables ['param']\")\n\n schema = {'name': 'octocat'}\n r = octokit.Resource(None, name='Dummy', schema=schema)\n self.assertEqual(r.name, 'octocat')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_resources.py","file_name":"test_resources.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"104764684","text":"first_number = 1001100001110010011001000\r\nn = 25\r\nm = 20\r\n#m 1 to n\r\n\r\nnext_number = '0' + str(first_number)[:-1]\r\n\r\nzip_list = list(zip(str(next_number),str(first_number)))\r\nresult = []\r\n\r\nfor i in range(len(zip_list)):\r\n if zip_list[i][0] == zip_list[i][1]:\r\n result.append(0)\r\n else:\r\n result.append(1)\r\nprint(result)\r\n\r\n\r\n","sub_path":"II семестр/Дискретна математика/Лаби/2016-17/Братун 6305/Lab_5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"140390707","text":"# coding:utf-8\nnum_first_only = (1, 5, 6, 7, 8, 9, 15, 16, 19)\ninp = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"\ninp_list = inp.split()\nresult = {}\nfor i in range(len(inp_list)):\n if i+1 in num_first_only:\n result[inp_list[i][:1]] = i+1\n else:\n result[inp_list[i][:2]] = i+1\n\nprint(result)\n\n\n\"\"\"\n別解\nfor num, word in enumerate(inp_list,1):\n if num in num_first_only:\n result[word[0:1]] = num\n else:\n result[word[0:2]] = num\n\n\nprint(result)\n\"\"\"","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"136631000","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nDesc: 27. 移除元素 (简单)\nAuthor: wangluyu\nDate: 2020/1/14\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n \"\"\"\n Given an array nums and a value val, remove all instances of that value in-place and return the new length.\n Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.\n The order of elements can be changed. 
It doesn't matter what you leave beyond the new length.\n    Example 1:\n    Given nums = [3,2,2,3], val = 3,\n    Your function should return length = 2, with the first two elements of nums being 2.\n    It doesn't matter what you leave beyond the returned length.\n    Example 2:\n    Given nums = [0,1,2,2,3,0,4,2], val = 2,\n    Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0, and 4.\n    Note that the order of those five elements can be arbitrary.\n    It doesn't matter what values are set beyond the returned length.\n    Clarification:\n    Confused why the returned value is an integer but your answer is an array?\n    Note that the input array is passed in by reference, which means modification to the input array will be known to the caller as well.\n    Internally you can think of this:\n    // nums is passed in by reference. (i.e., without making a copy)\n    int len = removeElement(nums, val);\n    // any modification to nums in your function would be known by the caller.\n    // using the length returned by your function, it prints the first len elements.\n    for (int i = 0; i < len; i++) {\n        print(nums[i]);\n    }\n    \"\"\"\n    def removeElement(self, nums: List[int], val: int) -> int:\n        l = len(nums)\n        if l == 0: return 0\n        i = 0\n        while i < len(nums):\n            if nums[i] == val:\n                del nums[i]\n            else:\n                i += 1\n        return len(nums)\n\n    def removeElement1(self, nums: List[int], val: int) -> int:\n        # write index for the next slot that should hold a non-val element\n        i = 0\n        for j in range(len(nums)):\n            if nums[j] != val:\n                nums[i] = nums[j]\n                i += 1\n        print(nums)\n        return i\n\n\nif __name__ == '__main__':\n    nums = [0,1,2,2,3,0,4,2]\n    val = 2\n    s = Solution()\n    print(s.removeElement1(nums, val))\n","sub_path":"Python3/27_Remove_Element.py","file_name":"27_Remove_Element.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"378548014","text":"from django.urls import re_path, include\n\n\nfrom . 
import views\n\nurlpatterns = [\n re_path(r'^$', views.index, name='index'), \n re_path(r'^dept/$', views.dept, name='dept'),\n re_path(r'^new/([a-zA-Z]+)?$', views.new_application, name='new_application'),\n re_path(r'^history/([a-zA-Z]+)?/?([0-9]{4})?-?([0-9]{2})?-?([0-9]{2})?', views.sent, name='sent'),\n re_path(r'^applications/([a-zA-Z]+)?/?([0-9]{4})?-?([0-9]{2})?-?([0-9]{2})?', views.applications,name='applications'),\n re_path(r'^logt/',views.logt,name='logt'),\n re_path(r'^application/(\\d+)/print$',views.print_application,name='print'),\n re_path(r'^action/(\\d+)/print$',views.print_action,name='print_action'),\n re_path(r'^application/(\\d+)/cancel$',views.cancel,name='cancel'),\n re_path(r'^action/(\\d+)/$',views.action,name='action'),\n re_path(r'^actions/([a-zA-Z]+)?$',views.actions,name='actions'),\n re_path(r'^manage_action/$',views.manage_action,name='manage_action'),\n re_path(r'^action_history/([a-zA-Z]+)?$',views.action_history,name='action_history'),\n re_path(r'^employee/(\\d+)/$',views.employee,name='employee'),\n re_path(r'^employees/$',views.employees,name='employees'),\n re_path(r'^select_employee/$',views.select_employee,name='select_employee'),\n re_path(r'^employee/(\\d+)/edit$',views.edit_employee,name='edit_employee'),\n re_path(r'^employee/new$',views.new_employee,name='new_employee'),\n re_path(r'^application/(\\d+)/$',views.details,name='details'),\n re_path(r'^manage_leave/$',views.manage_leave,name='manage_leave'),\n re_path(r'^delete_application/$',views.delete_application,name='delete_application'),\n re_path(r'^delete_action/$',views.delete_action,name='delete_action'),\n re_path(r'^start_processing/$',views.start_processing,name='start_processing'),\n re_path(r'^complete/$',views.complete,name='complete'),\n re_path(r'^user_guide/$', views.user_guide,name='user_guide')\n ]\n ","sub_path":"lms/leave/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"212678275","text":"\"\"\"\nlista = []\nfor valor in range(0,101):\n list.append(valor)\n\"\"\"\n\n#Lo que se encuentra antes del for es lo que se va agregar a la lista\nlista = [valor for valor in range (0, 101) if valor % 2 == 0]\n\n\"\"\"\nReglas\n1. Valor a agregar a la lista.\n2. 
Un ciclo.\nSe pueden usar condicionales o ciclos.\nAunque recordar la lectura de codigo de python.\n\"\"\"\n\n#Para las tuplas hay que usar tuple\ntupla = tuple ( (valor for valor in range (0, 101) if valor % 2 != 0) )\n\ndiccionario = { indice:valor for indice, valor in enumerate(lista) if indice < 10 }\n\n#print(lista)\n#print(tupla)\nprint(diccionario)","sub_path":"comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"43937568","text":"import argparse\n\nimport requests\nimport datetime\nimport dateutil.parser\n\n\nAPI_BASE = 'https://api.github.com/'\n\ndef get_trending_repositories(top_size):\n week_ago_iso8601 = (datetime.datetime.now() - datetime.timedelta(days=7)).replace(microsecond=0).isoformat()\n response = requests.get(API_BASE+'search/repositories', {\n 'q': 'created:>{0}'.format(week_ago_iso8601),\n 'sort': 'stars',\n 'order': 'desc',\n 'page': 1,\n 'per_page': top_size\n })\n response_body = response.json()\n return response_body\n\ndef get_open_issues_list(repo_owner, repo_name):\n response = requests.get(API_BASE+'repos/{0}/{1}/issues'.format(repo_owner, repo_name), {\n 'state': 'open'\n })\n return response.json()\n\ndef pretty_print_repository_info(repo):\n print('Repo name: {0}'.format(repo['name']))\n print('\\tOwner: {0}'.format(repo['owner']['login']))\n print('\\tStars: {0}'.format(repo['stargazers_count']))\n print('\\tLink: {0}'.format(repo['html_url']))\n print('\\tIssues count: {0}'.format(repo['open_issues']))\n print('\\tCreated at: {0}'.format(dateutil.parser.parse(repo['created_at']).strftime('%Y-%m-%d %H:%M')))\n\n\ndef pretty_print_issues(issues):\n if type(issues) != list:\n return\n print('Open issues amount: {0}'.format(len(issues)))\n for issue in issues:\n print('\\t\\tIssue title: {0}'.format(issue['title']))\n print('\\t\\tIssue url: {0}'.format(issue['url']))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Get duplicated files\")\n parser.add_argument(\"-c\", \"--count\", type=int, dest=\"count\", default=20)\n options = parser.parse_args()\n repositories = get_trending_repositories(top_size=options.count)\n if repositories:\n for repo in repositories['items']:\n pretty_print_repository_info(repo)\n issues = get_open_issues_list(repo['owner']['login'], repo['name'])\n pretty_print_issues(issues)\n","sub_path":"github_trending.py","file_name":"github_trending.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287452568","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# file_name: linked_stack.py\n# author: ScCcWe\n# time: 2020/4/7 19:38\n\n\nclass Node:\n def __init__(self, data):\n self.item = data\n self.next = None\n\n\nclass LinkedStack:\n \"\"\"\n 以链表的头部作为栈顶,\n 第一个指针指向栈顶,之后以此类推\n \"\"\"\n def __init__(self):\n self.length = 0\n self.null_stack = None\n \n def __del__(self):\n # 析构方法\n # 当对象被删除时,会自动被调用\n class_name = self.__class__.__name__\n print(class_name, \"销毁\")\n \n def clear(self):\n if self.is_null():\n return\n # 除最后一个以外的全部\n while self.null_stack.next is not None:\n self.null_stack = self.null_stack.next\n self.length -= 1\n # 最后一个\n self.null_stack = None\n self.length -= 1\n \n def is_null(self):\n return self.length == 0\n \n def get_top_item(self):\n if self.is_null():\n return \"Stack is None.\"\n return self.null_stack.item\n \n def get_length(self):\n 
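# Added note (not in the original): length is kept up to date by push(),\n        # pop() and clear(), so this lookup is O(1) instead of walking the chain.\n        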
return self.length\n \n def push(self, data):\n \"\"\"\n 入栈\n :param data: 需要入栈的节点的值, 这里没有进行类型判断\n \"\"\"\n # if variable's type is Node, this below can increase\n # if isinstance(data, Node):\n # raise TypeError\n node = Node(data)\n node.next = self.null_stack\n self.null_stack = node # 修改栈顶指针为node\n self.length += 1\n \n def pop(self):\n \"\"\"\n 弹出栈顶元素\n :return: 栈顶元素的值\n \"\"\"\n if self.is_null():\n return \"current stack is none\"\n value = self.null_stack.item # 取出栈顶元素的值\n self.null_stack = self.null_stack.next # 栈顶元素脱钩\n self.length -= 1\n return value\n \n def traverse(self):\n \"\"\"\n 遍历\n \"\"\"\n list = []\n if self.is_null():\n return\n while self.null_stack.next is not None:\n list.append(self.null_stack.item)\n self.null_stack = self.null_stack.next\n list.append(self.null_stack.item)\n return list\n\n\nif __name__ == '__main__':\n \n # init a stack\n ins_stack = LinkedStack()\n ins_stack.push(1)\n ins_stack.push(2)\n ins_stack.push(3)\n ins_stack.push(4)\n ins_stack.push(5)\n \n # test func traverse()\n # print(ins_stack.length)\n # print(ins_stack.traverse())\n \n # test func pop()\n # print(ins_stack.pop())\n # print(ins_stack.length)\n # print(ins_stack.traverse())\n \n # test func get_top_item()\n # print(ins_stack.pop())\n # print(ins_stack.get_top_item())\n \n # test func clear()\n # ins_stack.clear()\n # print(ins_stack.length)\n # print(ins_stack.traverse())\n \n # test magic __del__\n print(ins_stack.length)\n print(ins_stack.traverse())\n del ins_stack\n print('class stack over')\n \n # test magic __del__\n # a = ins_stack\n # print(ins_stack.length)\n # print(ins_stack.traverse())\n # del ins_stack\n # print('class stack over')\n","sub_path":"data_structure/stack/linked_stack.py","file_name":"linked_stack.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80960583","text":"def is_pifagor(a:int, b:int, c:int) -> bool:\n return a**2 + b**2 == c**2 or b**2 + c**2 == a**2 or c**2+a**2 == b**2\n\nanswers = []\n\nwith open('problem.txt', 'r') as fhandler:\n for line in fhandler.readlines():\n line = [int(x) for x in line.strip().split()] #'3 2 1' - > [3,2,1]\n if is_pifagor(line[0], line[1], line[2]):\n answers.append('Yes')\n else:\n answers.append('No')\n\nwith open('answer.txt', 'a') as fhandler:\n total ='\\n'.join(answers)\n fhandler.write(total)\n","sub_path":"lec12/temp3.py","file_name":"temp3.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15014399","text":"import configparser\nfrom config.VarConfig import pageElementLocatorPath\n\n\nclass ParseConfigFile(object):\n def __init__(self):\n self.cf = configparser.ConfigParser()\n self.cf.read(pageElementLocatorPath, encoding=\"utf-8-sig\")\n\n def getItemSection(self, sectionName):\n optionsDict = dict(self.cf.items(sectionName))\n return optionsDict\n\n def getOptionValue(self, sectionName, optionName):\n value = self.cf.get(sectionName, optionName)\n return value\n\n\nif __name__ == '__main__':\n pc = ParseConfigFile()\n print(pc.getItemSection(\"Mol_login\"))\n print(pc.getOptionValue(\"Mol_login\", \"loginPage.username\"))","sub_path":"util/ParseConfigurationFile.py","file_name":"ParseConfigurationFile.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359772939","text":"#!/usr/bin/env python\n# 
coding=utf-8\n\nimport os\nimport random\nimport numpy as np\nclass DataConfig():\n base_dir='/data/dataset/'\n dict_dir=base_dir+'dict/'\n py2id_dict=dict_dir+'py2id_dict.txt'\n hz2id_dict=dict_dir+'hz2id_dict.txt'\n py2hz_dict=dict_dir+'py2hz_dict.txt'\n py2hz_dir=base_dir+'pinyin2hanzi/'\n types=['train','test','dev']\nclass ConfigLanguage(DataConfig):\n epochs=100\n model_dir='models/language_model/new/'\n model_name='model'\n model_path=model_dir+model_name\n embed_size=300\n num_hb=4\n num_eb=16\n norm_type='bn'\n lr=0.0001\n is_training=True\n batch_size=256\n py_size=1472\n hz_size=7459\n dropout_rate=0.5\nclass DataLanguage(ConfigLanguage):\n def __init__(self):\n super(DataLanguage,self).__init__()\n self.py2hz_paths={type:self.py2hz_dir+'py2hz_'+type+'.tsv' for type in self.types}\n self.create_dict()\n self.create_py2hz()\n def create_py2hz(self):\n self.py2hz={}\n self.batch_num={}\n for _type,path in self.py2hz_paths.items():\n self.py2hz[_type]={}\n start_num=0\n with open(path,'r',encoding='utf-8') as file:\n for line in file:\n idx,pys,hzs=line.strip('\\n').strip().split('\\t')\n pys=pys.strip().split(' ')\n hzs=hzs.strip().split(' ')\n self.py2hz[_type][start_num]=(pys,hzs)\n start_num+=1\n batch_num=start_num//self.batch_size\n self.batch_num[_type]=batch_num\n def create_batch(self,flag='train',shuffle=True):\n data_num=len(self.py2hz[flag])\n idxs=list(range(data_num))\n if shuffle:\n random.shuffle(idxs)\n pys=[]\n hzs=[]\n for i,idx in enumerate(idxs):\n py,hz=self.py2hz[flag][idx]\n py=[self.py2id[p] for p in py]\n hz=[self.hz2id[h] for h in hz]\n assert len(py)==len(hz)\n if len(pys)==self.batch_size:\n inputs,outputs=self.seq_pad(pys,hzs)\n yield inputs,outputs\n pys,hzs=[],[]\n pys.append(py)\n hzs.append(hz)\n def create_online(self,text):\n pred=[self.py2id[py] for py in text]\n pred=np.array(pred)\n pred=pred.reshape((1,pred.shape[0]))\n return pred\n def seq_pad(self,pys,hzs):\n max_len=max([len(py) for py in pys])\n inputs=np.array([line+[0]*(max_len-len(line)) for line in pys])\n outputs=np.array([line+[0]*(max_len-len(line)) for line in hzs])\n return inputs,outputs\n\n def create_dict(self):\n self.py2id={}\n self.id2py={}\n self.id2hz={}\n self.hz2id={}\n with open(self.py2id_dict,'r',encoding='utf-8') as file:\n for line in file:\n py,idx=line.strip('\\n').strip().split('\\t')\n self.py2id[py.strip()]=int(idx.strip())\n self.id2py[int(idx.strip())]=py.strip()\n with open(self.hz2id_dict,'r',encoding='utf-8') as file:\n for line in file:\n hz,idx=line.strip('\\n').strip().split('\\t')\n self.hz2id[hz.strip()]=int(idx.strip())\n self.id2hz[int(idx.strip())]=hz.strip()\n\ndef main():\n data=DataLanguage()\n data_iters=data.create_batch()\n\n for batch in data_iters:\n x,y=batch\n print(x,'\\n',y)\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"self_model/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"629103174","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef getPoly(data, degree):\n x = []\n y = []\n if type(data) is tuple:\n for each in data:\n y.insert(0, each[0])\n\n if type(data) is list:\n data.reverse()\n y = data\n\n for index, value in enumerate(y):\n x.append(index)\n '''\n print x\n print y\n print \"********************************\"\n '''\n pc, residuals, rank, sv, rcond = np.polyfit(x, y, degree, full=True)\n fStraight = np.poly1d(pc)\n fx = np.linspace(0, x[-1], 1000)\n '''\n print pc\n 
print residuals\n    print rank\n    print sv\n    print rcond\n    plt.scatter(x, y)\n    plt.xlabel(\"Days\")\n    plt.ylabel(\"Values\")\n    plt.plot(fx, fStraight(fx), linewidth=2)\n    plt.autoscale(tight=True)\n    plt.grid()\n    plt.show()\n    '''\n\n    return pc\n","sub_path":"MathUtil.py","file_name":"MathUtil.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"626583320","text":"# -*- coding: utf-8 -*-\n\"\"\"Module for data plugin to represent a pseudo potential in Psp8 format.\"\"\"\nfrom typing import BinaryIO\nfrom aiida.common.constants import elements\n\nfrom .pseudo import PseudoPotentialData\n\n__all__ = ('Psp8Data',)\n\n\ndef parse_element(stream: BinaryIO):\n    \"\"\"Parse the content of the Psp8 file to determine the element.\n\n    :param stream: a filelike object with the binary content of the file.\n    :return: the symbol of the element following the IUPAC naming standard.\n    \"\"\"\n    lines = stream.read().decode('utf-8')\n\n    # Split the line at each new line character \\n\n    lines_splt = lines.splitlines()\n\n    # Split the second line on white space\n    lines_splt_space = lines_splt[1].split()\n\n    try:\n        atomic_number = int(float(lines_splt_space[0]))\n    except (IndexError, ValueError) as val_err:\n        raise ValueError('failed to parse the atomic number.') from val_err\n\n    try:\n        symbol = elements[atomic_number]['symbol']\n    except KeyError as key_err:\n        raise ValueError(f'the atomic number {atomic_number} is not supported.') from key_err\n\n    return symbol\n\n\nclass Psp8Data(PseudoPotentialData):\n    \"\"\"Data plugin to represent a pseudo potential in Psp8 (Abinit) format.\"\"\"\n\n    def set_file(self, stream: BinaryIO, filename: str = None, **kwargs):  # pylint: disable=arguments-differ\n        \"\"\"Set the file content.\n\n        :param stream: a filelike object with the binary content of the file.\n        :param filename: optional explicit filename to give to the file stored in the repository.\n        :raises ValueError: if the element symbol is invalid.\n        \"\"\"\n        stream.seek(0)\n        self.element = parse_element(stream)\n        stream.seek(0)\n        super().set_file(stream, filename, **kwargs)\n","sub_path":"aiida_pseudo/data/pseudo/psp8.py","file_name":"psp8.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"310898824","text":"\"\"\"\nAuthor: Raju Ahmed Shetu\n\"\"\"\nfrom django.urls import path\nfrom . 
import views\napp_name='bkash'\nurlpatterns = [\n path('payment/create', views.PaymentCreateApiView.as_view(), name='payment_create_api_view'),\n path('payment/execute', views.PaymentExecuteApiView.as_view(), name='payment_execute_api_view'),\n]\n\n# 127.0.0.1:8000/bkash/payment/create\n# 127.0.0.1:8000/bkash/payment/execute\n\n","sub_path":"bkash/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"416749539","text":"\nfrom logica.util import Cola\nfrom logica.proceso import *\n\nclass Planificador():\n\t\n\tdef __init__(self):\n\n\t\tself.listos = Cola()\n\t\tself.suspendidos = Cola()\n\t\tself.bloqueados = Cola()\n\n\t\tself.cuanto_suspendido = 3\n\t\tself.contador_suspendido = 3\n\n\tdef planificar_pre(self, procesador):\n\t\t\n\t\tasignar_nuevo = False\n\t\tproceso_actual = procesador.proceso_asignado\n\n\t\tself.plan_bloqueados()\n\t\tself.plan_suspendidos()\n\n\t\tif proceso_actual:\n\t\t\testado = proceso_actual.estado\n\n\t\t\tif estado != LISTO:\n\t\t\t\tasignar_nuevo = True\n\n\t\t\t\tif estado == SUSPENDIDO:\n\t\t\t\t\tself.suspendidos.insertar(proceso_actual)\n\n\t\t\t\telif estado == BLOQUEADO:\n\n\t\t\t\t\tself.bloqueados.insertar(proceso_actual)\n\t\t\t\t\tself.vista.informar_bloqueado()\n\n\t\t\t\telif estado == TERMINADO:\n\t\t\t\t\tproceso_actual.tiempo = -1\n\t\telse:\n\t\t\tasignar_nuevo = True\n\n\t\tif asignar_nuevo:\n\t\t\tself.asignar_nuevo(procesador)\n\n\tdef planificar_post(self, procesador):\n\t\t\n\t\tproceso_actual = procesador.proceso_asignado\n\n\t\tif proceso_actual:\n\t\t\testado = proceso_actual.estado\n\n\t\t\tsuspendido = self.plan_listo(proceso_actual)\n\n\t\t\tif estado == LISTO:\n\t\t\t\tif suspendido:\n\t\t\t\t\tproceso_actual.estado = SUSPENDIDO\n\n\tdef plan_bloqueados(self):\n\n\t\tif self.bloqueados.vacia():\n\t\t\treturn\n\n\t\tcola = Cola()\n\n\t\tproceso = self.bloqueados.atender()\n\n\t\twhile proceso:\n\n\t\t\tif proceso.solicitar_desbloqueo():\n\t\t\t\tself.agregar_listo(proceso)\n\t\t\t\tproceso.estado = LISTO\n\n\t\t\t\tself.vista.informar_desbloqueado(proceso.nombre)\n\n\t\t\telse:\n\t\t\t\tcola.insertar(proceso)\n\n\t\t\tproceso = self.bloqueados.atender()\n\n\t\tself.bloqueados = cola\n\n\tdef plan_suspendidos(self):\n\t\t\n\t\tif not self.suspendidos.vacia():\n\n\t\t\tif self.contador_suspendido == 0:\n\t\t\t\t\n\t\t\t\tp = self.suspendidos.atender()\n\t\t\t\tp.estado = LISTO\n\t\t\t\t\n\t\t\t\tself.agregar_ordenado(p)\n\t\t\t\tself.contador_suspendido = self.cuanto_suspendido\n\n\t\t\t\tself.vista.informar_entra_listo()\n\n\t\t\telse:\n\t\t\t\tself.contador_suspendido -= 1\n\n\tdef asignar_nuevo(self, procesador):\n\n\t\tself.vista.informar_removido_actual()\n\n\t\tprocesador.proceso_asignado = self.obtener_proceso()\n\n\t\tif procesador.proceso_asignado: \n\t\t\tprocesador.proceso_asignado.estado = LISTO\n\t\t\tself.vista.informar_nuevo()\n\n\tdef agregar_listo(self, proceso):\n\t\tself.listos.insertar(proceso)\n\t\tself.vista.informar_entra_listo()\n\n\tdef obtener_proceso(self):\n\t\t\n\t\tproceso = None\n\n\t\tif not self.listos.vacia():\n\t\t\tproceso = self.listos.atender()\n\n\t\telif not self.suspendidos.vacia():\n\n\t\t\tself.contador_suspendido = self.cuanto_suspendido\n\t\t\tself.listos.insertar(self.suspendidos.atender())\n\t\t\t\n\t\t\tself.vista.informar_entra_listo()\n\n\t\t\tproceso = self.listos.atender()\n\n\t\treturn proceso\t\n\n\tdef plan_listo(self, proceso_actual):\n\t\treturn 
False\n\n\tdef asignar_vista(self, vista):\n\t\tself.vista = vista\n\n\tdef agregar_proceso(self, nombre, tiempo, sistema, recursos, **kwargs):\n\n\t\tp = Proceso(nombre, tiempo, sistema, recursos)\n\n\t\tself.agregar_listo(p)\n\n\t\treturn p\n\n\t","sub_path":"logica/planificador/planificador.py","file_name":"planificador.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166161435","text":"# \n# This example initiates an acquisition and fetches a waveform for each specified channel.\n#\n# The gRPC API is built from the C API. NI-SCOPE documentation is installed with the driver at:\n# C:\\Program Files (x86)\\IVI Foundation\\IVI\\Drivers\\niScope\\Documentation\\English\\Digitizers.chm\n#\n# A version of this .chm is available online at:\n# Link: https://zone.ni.com/reference/en-XX/help/370592AB-01/\n#\n# Getting Started:\n#\n# To run this example, install \"NI-SCOPE Driver\" on the server machine.\n# Link : https://www.ni.com/en-us/support/downloads/drivers/download.ni-scope.html\n#\n# For instructions on how to use protoc to generate gRPC client interfaces, see our \"Creating a gRPC Client\" wiki page.\n# Link: https://github.com/ni/grpc-device/wiki/Creating-a-gRPC-Client\n#\n# Refer to the NI-SCOPE gRPC Wiki to determine the valid channel and resource names for your NI-SCOPE module.\n# Link : https://github.com/ni/grpc-device/wiki/NI-SCOPE-C-Function-Reference\n#\n# Running from command line:\n#\n# Server machine's IP address, port number, and resource name can be passed as separate command line arguments.\n# > python fetch.py \n# If they are not passed in as command line arguments, then by default the server address will be \"localhost:31763\", with \"SimulatedScope\" as the resource name\n\nimport grpc\nimport sys\nimport time\nimport niscope_pb2 as niscope_types\nimport niscope_pb2_grpc as grpc_niscope\n\nserver_address = \"localhost\"\nserver_port = \"31763\"\n\n# Resource name and options for a simulated 5164 client. Change them according to the NI-SCOPE model.\nresource = \"SimulatedScope\"\noptions = \"Simulate=1, DriverSetup=Model:5164; BoardType:PXIe\"\n\nchannels = \"0\"\n\nany_error = False\n# Checks for errors. 
If any, throws an exception to stop the execution.\ndef CheckForError (vi, status) :\n global any_error\n if(status != 0 and not any_error):\n any_error = True\n ThrowOnError (vi, status)\n\n# Converts an error code returned by NI-SCOPE into a user-readable string.\ndef ThrowOnError (vi, error_code):\n error_message_request = niscope_types.GetErrorMessageRequest(\n vi = vi,\n error_code = error_code\n )\n error_message_response = client.GetErrorMessage(error_message_request)\n raise Exception (error_message_response.error_message)\n\n# Read in cmd args\nif len(sys.argv) >= 2:\n server_address = sys.argv[1]\nif len(sys.argv) >= 3:\n server_port = sys.argv[2]\nif len(sys.argv) >= 4:\n resource = sys.argv[3]\n options = \"\"\n\n# Create the communication channel for the remote host and create a connection to the NI-SCOPE service.\nchannel = grpc.insecure_channel(f\"{server_address}:{server_port}\")\nclient = grpc_niscope.NiScopeStub(channel)\n\ntry :\n # Open session to NI-SCOPE module with options.\n init_with_options_response = client.InitWithOptions(niscope_types.InitWithOptionsRequest(\n resource_name=resource,\n id_query = False,\n option_string=options\n ))\n vi = init_with_options_response.vi\n CheckForError(vi, init_with_options_response.status)\n\n # Configure vertical.\n voltage = 10.0\n CheckForError(vi, (client.ConfigureVertical(niscope_types.ConfigureVerticalRequest(\n vi = vi,\n channel_list = channels,\n range = voltage,\n offset = 0.0,\n coupling = niscope_types.VerticalCoupling.VERTICAL_COUPLING_NISCOPE_VAL_AC,\n probe_attenuation = 1.0,\n enabled = True\n ))).status)\n\n # Configure horizontal timing.\n samples = 1000\n CheckForError(vi, (client.ConfigureHorizontalTiming(niscope_types.ConfigureHorizontalTimingRequest(\n vi = vi,\n min_sample_rate = 50000000,\n min_num_pts = samples,\n ref_position = 50.0,\n num_records = 1,\n enforce_realtime = True\n ))).status)\n\n # Initiate acquisition.\n CheckForError(vi, (client.InitiateAcquisition(niscope_types.InitiateAcquisitionRequest(\n vi = vi\n ))).status)\n\n # Fetch waveforms.\n fetch_response = client.Fetch(niscope_types.FetchRequest(\n vi = vi,\n channel_list = channels,\n timeout = 10000,\n num_samples = samples\n ))\n CheckForError(vi, fetch_response.status)\n waveforms = fetch_response.waveform\n\n # Print waveform results.\n for i in range(len(waveforms)):\n print(f'Waveform {i} information:')\n print(f'{waveforms[i]}\\n')\n\n# If NI-SCOPE API throws an exception, print the error message.\nexcept grpc.RpcError as rpc_error:\n error_message = rpc_error.details()\n if rpc_error.code() == grpc.StatusCode.UNAVAILABLE :\n error_message = f\"Failed to connect to server on {server_address}:{server_port}\"\n elif rpc_error.code() == grpc.StatusCode.UNIMPLEMENTED:\n error_message = \"The operation is not implemented or is not supported/enabled in this service\"\n print(f\"{error_message}\")\n\nfinally:\n if('vi' in vars() and vi.id != 0):\n # close the session.\n CheckForError(vi, (client.Close(niscope_types.CloseRequest(\n vi = vi\n ))).status)","sub_path":"examples/niscope/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"473587486","text":"import os\nimport pybullet as p\nimport subprocess\nimport object2urdf\nimport PyBulletEnv\n\nMINIMIZER = 'minimizeWithBlendr.py'\n\nclass Obj():\n \n def __init__(self, fn, scale=1):\n self.filename = fn\n self.scale = scale\n self.meshScale = [scale, scale, 
scale]\n self.obj = None\n self.decims = {}\n\n return \n\n\n def smaller(self, ratio):\n '''\n Function smaller\n\n Takes in a ratio and creates a smaller .obj shape \n '''\n if ratio >= 1.0:\n return self.filename\n\n name, suff = self.filename.split(\".\")\n output_file = name+ \"_\" + str(ratio) + '.' + suff\n if os.path.exists(output_file):\n return output_file\n\n ps = subprocess.Popen([\"blender\", \"-b\", \"-P\", MINIMIZER, \"--\", str(ratio), self.filename], stdout = subprocess.PIPE)\n output = subprocess.check_output([\"grep\", \"after decimation\"], stdin = ps.stdout)\n print(output)\n # Parse output to find new stats \n # Find verts, edges, polys\n self.decims[ratio] = output_file\n return output_file\n \n def toURDF(self, filename, ratio):\n path = os.path.dirname(self.filename)\n builder = object2urdf.ObjectUrdfBuilder(path)\n # print(\"FILENAME IS \" + filename)\n builder.build_urdf(filename=filename, output_folder=path+\"/\"+str(ratio)+\"/\", force_overwrite=True, decompose_concave=False, force_decompose =False, center = \"bottom\")\n# p.loadURDF(\"data/cow/data.urdf\", globalScaling=self.scale, useFixedBase=True, basePosition = [0, 1, 2.7], baseOrientation=[1,1,1,1])\n return filename + \".urdf\"\n\n def createObjectObj(self, position, ratio = 1.0):\n '''\n Function createObjectObj\n\n Creates an instance of the relevant obj file and adds it to the object instances dictionairy \n '''\n if ratio in self.decims:\n file_to_load = self.decims[ratio]\n else: \n file_to_load = self.smaller(ratio)\n #_, suff = self.filename.split('.')\n #if suff == \"obj\":\n # creating an obj\n visualShapeID = p.createVisualShape(\n shapeType = p.GEOM_MESH,\n fileName = file_to_load,\n rgbaColor = [1.0, 0.5, 0.0, 1.0], #[0.7, 0, 0.7, 1], #[3, 1, 1, 1],\n #specularColor = [0.4, 0.4, 0.4],\n visualFramePosition = position,\n meshScale = self.meshScale\n )\n collisionShapeID = p.createCollisionShape (\n shapeType = p.GEOM_MESH,\n fileName = self.filename, #file_to_load,\n #collisionFramePosition = position,\n meshScale = self.meshScale,\n #flags = p.GEOM_FORCE_CONCAVE_TRIMESH\n )\n\n #orn = p.getQuaternionFromEuler([0, 0, 0])\n return p.createMultiBody(\n baseMass = 1, #TODO: make a variable!\n baseInertialFramePosition = [0, 0, 0], #TODO: make a variable\n baseCollisionShapeIndex=collisionShapeID,\n baseVisualShapeIndex=visualShapeID,\n basePosition = position,\n # useMaximalCoordinates=True,\n # baseOrientation = orn\n #useFixedBase = True\n )\n\n def printDecims(self):\n print(self.decims)\n\n def createObjectURDF(self, position, ratio = 1.0, orient = [1, 1, 1, 1]):\n # First, check if there's a dictionary value at that key!\n if ratio in self.decims:\n file_to_load = self.decims[ratio]\n else: \n file_to_load = self.smaller(ratio)\n return p.loadURDF(self.toURDF(file_to_load, ratio), globalScaling=self.scale, useFixedBase=True, basePosition = position, baseOrientation=orient) \n\n def move_x_y(self, x, y):\n if self.obj == None: \n # TODO: Set up as an error\n print(\"Object not initialized!\")\n return -1 \n #else, obj is a unique ID!\n pos, orient = p.getBasePositionAndOrientation(self.obj)\n _, _, oldZ = pos\n p.resetBasePositionAndOrientation(self.obj, [x, y, oldZ], orient)\n \n \n def change_position(self,roll, pitch, yaw):\n if self.obj == None:\n print(\"Object not initialized!\")\n return -1\n\n # Otherwise, change it's position :) \n pos, _ = p.getBasePositionAndOrientation(self.obj)\n orientation = p.getQuaternionFromEuler([roll, pitch, yaw])\n 
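# Added note (not in the source): PyBullet's getQuaternionFromEuler takes\n        # roll/pitch/yaw in radians and returns an [x, y, z, w] quaternion;\n        # p.getQuaternionFromEuler([0, 0, 0]) yields (0.0, 0.0, 0.0, 1.0), the identity rotation.\n        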
p.resetBasePositionAndOrientation(self.obj, pos, orientation)\n\n\n\n","sub_path":"python_part/Obj.py","file_name":"Obj.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"30129019","text":"\"\"\"\nA Flask server that presents a minimal browsable interface for the Olin course catalog.\n\nauthor: Oliver Steele \ndate : 2017-01-18\nlicense: MIT\n\ncontributor: Emily Yeh \ndate : 2017-01-23\ncontributions:\n(0) Made website look better (with background image made with GIMP)\n(1) Added 'Return to Home' button to every page\n(2) Changed instructor names format ('first last' instead of 'last, first')\n(3) Added 'Back to Top' and 'Skip to Bottom' links to /area/... pages\n(4) Added personal course pages for each course\n(5) Added pages for instructors that list the courses they teach (with links to these courses)\n\"\"\"\n\nimport os\n\nimport pandas as pd\nfrom flask import Flask, redirect, render_template, request, url_for\n\napp = Flask(__name__)\n\ncourses = pd.read_csv('./data/olin-courses-16-17.csv')\n\n@app.route('/health')\ndef health():\n return 'ok'\n\n@app.route('/')\ndef home_page():\n return render_template('index.html', areas=set(courses.course_area), contacts=switch_names_in_set(set(courses.course_contact.dropna())))\n\n@app.route('/area/')\ndef area_page(course_area):\n return render_template('course_area.html', area=course_area, courses=courses[courses.course_area == course_area].iterrows())\n\n@app.route('/instructor/')\ndef instructor_page(instructor_name):\n\tunswitched_name = unswitch_name(instructor_name)\n\treturn render_template('instructor_pages.html', name=switch_name(instructor_name), instructors=courses[courses.course_contact == unswitched_name].iterrows())\n\n@app.route('/courses/')\ndef course_page(course_number):\n\treturn render_template('course_pages.html', course_number = course_number, current_course=courses[courses.course_number == course_number].iterrows())\n\ndef switch_name (contact):\n\tteachers = contact.split(\"; \")\n\tnames = \"\"\n\tfor teacher in teachers:\n\t\tparts = teacher.split(\", \")\n\t\tif (len(parts) > 1):\n\t\t\tnames += parts[1] + \" \" + parts[0] + \"; \"\n\t\telse:\n\t\t\tnames += parts[0] + \"; \"\n\treturn names[:-2]\n\ndef unswitch_name (contact):\n\tteachers = contact.split(\"; \")\n\tnames = \"\"\n\tfor teacher in teachers:\n\t\tparts = teacher.split()\n\t\tnames += \", \".join(parts[::-1]) + \"; \"\n\treturn names[:-2]\n\ndef switch_names_in_set (contacts_set):\n\tcontacts = set()\n\tfor contact in contacts_set:\n\t\tcontacts.add(switch_name(contact))\n\treturn contacts\n\nif __name__ == '__main__':\n\tport = int(os.environ.get('PORT', 5000))\n\thost = os.environ.get('host', '') # Need to set this variable in a terminal\n\t# heroku config:set host=0.0.0.0\n\t# export host=127.0.0.1 (because setenv doesn't seem to work on my computer)\n\tapp.run(host=host, debug=True, port=port)\n\t# print(int(os.environ.get('HOME')))\n\t# app.run(debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"448162918","text":"import re\n\n\ndef empty_string_catcher(value):\n value = ' '.join(value.split())\n if not value:\n return False\n return True\n\n\n\ndef is_string(value):\n if isinstance(value, str):\n return True\n return 
False\n\n","sub_path":"app_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565657183","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-pti7pv2_/pip/pip/_internal/wheel_builder.py\n# Compiled at: 2020-02-14 17:24:43\n# Size of source mod 2**32: 9441 bytes\n\"\"\"Orchestrator for building wheels from InstallRequirements.\n\"\"\"\nimport logging, os.path, re, shutil\nfrom pip._internal.models.link import Link\nfrom pip._internal.operations.build.wheel import build_wheel_pep517\nfrom pip._internal.operations.build.wheel_legacy import build_wheel_legacy\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed\nfrom pip._internal.utils.setuptools_build import make_setuptools_clean_args\nfrom pip._internal.utils.subprocess import call_subprocess\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.utils.urls import path_to_url\nimport pip._internal.vcs as vcs\nif MYPY_CHECK_RUNNING:\n from typing import Any, Callable, Iterable, List, Optional, Pattern, Tuple\n from pip._internal.cache import WheelCache\n from pip._internal.req.req_install import InstallRequirement\n BinaryAllowedPredicate = Callable[([InstallRequirement], bool)]\n BuildResult = Tuple[(List[InstallRequirement], List[InstallRequirement])]\nlogger = logging.getLogger(__name__)\n\ndef _contains_egg_info(s, _egg_info_re=re.compile('([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):\n \"\"\"Determine whether the string looks like an egg_info.\n\n :param s: The string to parse. E.g. 
foo-2.1\n    \"\"\"\n    return bool(_egg_info_re.search(s))\n\n\ndef _should_build(req, need_wheel, check_binary_allowed):\n    \"\"\"Return whether an InstallRequirement should be built into a wheel.\"\"\"\n    if req.constraint:\n        return False\n    if req.is_wheel:\n        if need_wheel:\n            logger.info('Skipping %s, due to already being wheel.', req.name)\n        return False\n    if need_wheel:\n        return True\n    if req.editable or not req.source_dir:\n        return False\n    if not check_binary_allowed(req):\n        logger.info('Skipping wheel build for %s, due to binaries being disabled for it.', req.name)\n        return False\n    if not req.use_pep517 and not is_wheel_installed():\n        return False\n    return True\n\n\ndef should_build_for_wheel_command(req):\n    return _should_build(req,\n      need_wheel=True, check_binary_allowed=_always_true)\n\n\ndef should_build_for_install_command(req, check_binary_allowed):\n    return _should_build(req,\n      need_wheel=False, check_binary_allowed=check_binary_allowed)\n\n\ndef _should_cache(req):\n    \"\"\"\n    Return whether a built InstallRequirement can be stored in the persistent\n    wheel cache, assuming the wheel cache is available, and _should_build()\n    has determined a wheel needs to be built.\n    \"\"\"\n    if not should_build_for_install_command(req,\n      check_binary_allowed=_always_true):\n        return False\n    if req.link:\n        if req.link.is_vcs:\n            assert not req.editable\n            assert req.source_dir\n            vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)\n            assert vcs_backend\n            if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):\n                return True\n            return False\n    base, ext = req.link.splitext()\n    if _contains_egg_info(base):\n        return True\n    return False\n\n\ndef _get_cache_dir(req, wheel_cache):\n    \"\"\"Return the persistent or temporary cache directory where the built\n    wheel need to be stored.\n    \"\"\"\n    cache_available = bool(wheel_cache.cache_dir)\n    if cache_available and _should_cache(req):\n        cache_dir = wheel_cache.get_path_for_link(req.link)\n    else:\n        cache_dir = wheel_cache.get_ephem_path_for_link(req.link)\n    return cache_dir\n\n\ndef _always_true(_):\n    return True\n\n\ndef _build_one(req, output_dir, build_options, global_options):\n    \"\"\"Build one wheel.\n\n    :return: The filename of the built wheel, or None if the build failed.\n    \"\"\"\n    try:\n        ensure_dir(output_dir)\n    except OSError as e:\n        try:\n            logger.warning('Building wheel for %s failed: %s', req.name, e)\n            return\n        finally:\n            e = None\n            del e\n\n    with req.build_env:\n        return _build_one_inside_env(req, output_dir, build_options, global_options)\n\n\ndef _build_one_inside_env(req, output_dir, build_options, global_options):\n    with TempDirectory(kind='wheel') as (temp_dir):\n        if req.use_pep517:\n            wheel_path = build_wheel_pep517(name=(req.name),\n              backend=(req.pep517_backend),\n              metadata_directory=(req.metadata_directory),\n              build_options=build_options,\n              tempd=(temp_dir.path))\n        else:\n            wheel_path = build_wheel_legacy(name=(req.name),\n              setup_py_path=(req.setup_py_path),\n              source_dir=(req.unpacked_source_directory),\n              global_options=global_options,\n              build_options=build_options,\n              tempd=(temp_dir.path))\n        if wheel_path is not None:\n            wheel_name = os.path.basename(wheel_path)\n            dest_path = os.path.join(output_dir, wheel_name)\n            try:\n                wheel_hash, length = hash_file(wheel_path)\n                shutil.move(wheel_path, dest_path)\n                logger.info('Created wheel for %s: filename=%s size=%d sha256=%s', req.name, wheel_name, length, wheel_hash.hexdigest())\n                logger.info('Stored in directory: %s', output_dir)\n                return dest_path\n            except Exception as e:\n                try:\n                
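# log the failed build; the finally block below releases the bound exception\n                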
logger.warning('Building wheel for %s failed: %s', req.name, e)\n finally:\n e = None\n del e\n\n if not req.use_pep517:\n _clean_one_legacy(req, global_options)\n return\n\n\ndef _clean_one_legacy(req, global_options):\n clean_args = make_setuptools_clean_args((req.setup_py_path),\n global_options=global_options)\n logger.info('Running setup.py clean for %s', req.name)\n try:\n call_subprocess(clean_args, cwd=(req.source_dir))\n return True\n except Exception:\n logger.error('Failed cleaning build dir for %s', req.name)\n return False\n\n\ndef build(requirements, wheel_cache, build_options, global_options):\n \"\"\"Build wheels.\n\n :return: The list of InstallRequirement that succeeded to build and\n the list of InstallRequirement that failed to build.\n \"\"\"\n if not requirements:\n return ([], [])\n logger.info('Building wheels for collected packages: %s', ', '.join((req.name for req in requirements)))\n with indent_log():\n build_successes, build_failures = [], []\n for req in requirements:\n cache_dir = _get_cache_dir(req, wheel_cache)\n wheel_file = _build_one(req, cache_dir, build_options, global_options)\n if wheel_file:\n req.link = Link(path_to_url(wheel_file))\n req.local_file_path = req.link.file_path\n assert req.link.is_wheel\n build_successes.append(req)\n else:\n build_failures.append(req)\n\n if build_successes:\n logger.info('Successfully built %s', ' '.join([req.name for req in build_successes]))\n if build_failures:\n logger.info('Failed to build %s', ' '.join([req.name for req in build_failures]))\n return (\n build_successes, build_failures)","sub_path":"pycfiles/smriprep-0.5.2-py3-none-any/wheel_builder.cpython-37.py","file_name":"wheel_builder.cpython-37.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"191254545","text":"\"\"\"When adding angles to a shape, it is important to know the size that the text\r\nwill be in order that it can be best positioned. This is the primary purpose of this module\"\"\"\r\n\r\nimport os.path\r\n\r\nfrom .afm import AFM\r\nfrom .rect_in_lines import rect_in_lines\r\nfrom .general_utilities import get_reflex_direction\r\n\r\nafm_fname = os.path.join(os.path.dirname(__file__), 'fonts', 'ptmr8a.afm')\r\nwith open(afm_fname, 'rb') as fh:\r\n afm = AFM(fh)\r\n\r\n\r\ndef size_of_text(text, text_kwargs):\r\n \"\"\"Calculates the size of the text given in pixels. 
Returns both height and\r\n    width, and also the offset of the lowest point in the string from the\r\n    baseline, and the offset of the left-most point in the string from the\r\n    starting position of the text.\"\"\"\r\n    if text_kwargs is None:\r\n        text_kwargs = {}\r\n    if \"font_size\" not in text_kwargs:\r\n        text_kwargs[\"font_size\"] = 20\r\n    font_size = text_kwargs[\"font_size\"]\r\n    bbox = afm.get_str_bbox(text)\r\n    width = (bbox[2]-bbox[0])*font_size/1000\r\n    height = (bbox[3]-bbox[1])*font_size/1000\r\n    dx = bbox[0]*font_size/1000\r\n    dy = bbox[1]*font_size/1000\r\n    return (width, height, dx, dy)\r\n\r\n\r\ndef position_radius_of_text(point1, center, point2, width, height, dx, dy,\r\n                            reflex, direction, margin):\r\n    \"\"\"given the height and width of a rectangle and two lines, this function\r\n    returns the position of the bottom-left most point of the rectangle, and\r\n    the radius of a circle needed to completely enclose the rectangle in the\r\n    lines.\"\"\"\r\n    reflex = get_reflex_direction(point1, point2, center, reflex=reflex,\r\n                                  direction=direction)[0]\r\n    (x, y), r = rect_in_lines(center, point1, point2, reflex, width, height, 1)\r\n    x -= (dx - margin)\r\n    y += (dy - margin)\r\n    return x, y, r\r\n","sub_path":"geometrySVG/size_of_text.py","file_name":"size_of_text.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"212465836","text":"from Tkinter import *\nfrom sqlite3 import *\n\nclass Shelf(dict):\n    def __init__(self):\n        self.locations = {}\n\nclass Item(object):\n    def __init__(self, sku, title, qty):\n        self.sku = sku\n        self.title = title\n        self.qty = qty\n\n    def __repr__(self):\n        return str(self.sku) + \" - \" + str(self.title)\n\n    def add_qty(self, add):\n        self.qty += add\n\n    def remove_qty(self, remove):\n        self.qty -= remove\n\n    def print_item(self):\n        print(str(self.sku) + \" - \" + str(self.title)) \n\nclass Location(object):\n    def __init__(self, name, items):\n        self.name = name\n        self.items = items\n\n    def __repr__(self):\n        return str(self.items)\n\n    def add_item(self, item):\n        self.items.append(item)\n\ndef update_list_box(lb):\n    lb.delete(0,END)\n    for key in dock:\n        lb.insert(END, str(key) + str(dock[key]))\n\ndef add_location():\n    dock[loc3.name] = loc3\n    update_list_box(lb1)\n\ndb = 'inventory.sqlite'\n\ndef open_db(db):\n    con = connect(db)\n    cur = con.cursor()\n    return cur, con\n\ndef close_db(con):\n    con.commit()\n    con.close()\n\ndef print_menu():\n    print('1. Enter Data')\n    print('2. Retrieve data')\n    print('3. 
Exit')\n    \ndock = Shelf()\n\nitem = Item(\"SKU-1234\", \"Thing One\", 3)\nitem2 = Item(\"SKU-4321\", \"Thing Two\", 3)\nitem3 = Item(\"SKU-5678\", \"Thing Three\", 3)\n\nloc = Location(\"C1\", [])\nloc2 = Location(\"C2\", [])\nloc3 = Location(\"C3\", [])\n\nloc.add_item(item)\nloc2.add_item(item2)\nloc2.add_item(item3)\n\ndock[loc.name] = loc\ndock[loc2.name] = loc2\n\n\"\"\"\n(cur, con) = open_db(db)\ncur.execute(\"CREATE TABLE items (id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, \\\n            sku TEXT UNIQUE NOT NULL, \\\n            title TEXT UNIQUE NOT NULL, \\\n            qty INTEGER NOT NULL) \")\nclose_db(con)\n\"\"\"\n\nwhile True:\n    print_menu()\n    choice = int(raw_input('>>'))\n    if choice == 1:\n        n_sku = raw_input('SKU >> ')\n        n_title = raw_input('Title >> ')\n        n_qty = int(raw_input('QTY >> '))\n        (cur, con) = open_db(db)\n        cur.execute(\"INSERT into items (sku, title, qty) VALUES (?,?,?)\", (n_sku, n_title, n_qty) )\n        close_db(con)\n    if choice == 2:\n        (cur, con) = open_db(db)\n        results = cur.execute(\"SELECT sku, title, qty FROM items\")\n        response = results.fetchall()\n        print(response)\n        close_db(con)\n    if choice == 3:\n        exit(0)\n\n\n","sub_path":"inventory_manager.py","file_name":"inventory_manager.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142617545","text":"\"\"\"\n11.2 Templates\n\"\"\"\n\n# The string module includes a versatile Template class with a simplified\n# syntax suitable for editing by end users.\n# This lets users customize their applications without having to alter\n# the application itself.\n# The format uses placeholder names made of $ followed by a valid Python\n# identifier.\n# Surrounding the placeholder with braces allows it to be followed by more\n# letters and digits with no intervening spaces.\n# Writing $$ creates a single escaped $.\n\n# The substitute method raises a KeyError when a placeholder is not supplied\n# in a dictionary or keyword argument\n\nfrom string import Template\nt = Template('${village}folk send $$10 to $cause.')\nprint(t.substitute(village = 'Nottingham', cause = 'the ditch fund')) # <-- Nottinghamfolk send $10 to the ditch fund.\n\n# For this kind of substitution-style application, data supplied by users\n# may be incomplete, so the safe_substitute method may be more appropriate -\n# it leaves placeholders unchanged when their data is missing\n\nt = Template('Return the $item to $owner.')\nd = dict(item = 'unladen swallow')\n\n# print(t.substitute(d)) # <-- Traceback (most recent call last):\n    # KeyError: 'owner'\n\nprint(t.safe_substitute(d)) # <-- Return the unladen swallow to $owner.\n\n# Template subclasses can specify a custom delimiter.\n# For example, a batch renaming utility for a photo browser like the one\n# below may want to use % signs for placeholders such as the current date,\n# image sequence number, or file format\n\nimport time, os.path\nphotofiles = ['img_1074.jpg', 'img_1076.jpg', 'img_1077.jpg']\nclass BatchRename(Template):\n    delimiter = '%'\n\nfmt = input('Enter rename style (%d-date %n-seqnum %f-format): ')\n\nt = BatchRename(fmt)\ndate = time.strftime('%d%b%y')\nfor i, filename in enumerate(photofiles):\n    base, ext = os.path.splitext(filename)\n    newname = t.substitute(d = date, n = i, f = ext)\n    print('{0} --> {1}'.format(filename, newname))\n\n","sub_path":"11章 標準ライブラリめぐり - Part2/11.2.py","file_name":"11.2.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145593954","text":" # jupyterhub_config.py\nc = get_config()\n\nimport my_oauthenticator\n\nimport os\npjoin = os.path.join\n\nruntime_dir = os.path.join('/srv/jupyterhub')\nssl_dir = pjoin(runtime_dir, 'ssl')\nif not os.path.exists(ssl_dir):\n    os.makedirs(ssl_dir)\n\n# put the logfile in /var/log/\nc.JupyterHub.extra_log_file = '/var/log/jupyterhub.log'\n\nc.JupyterHub.port = 8000\nc.JupyterHub.ssl_key = pjoin(ssl_dir, 'hub.key')\nc.JupyterHub.ssl_cert = pjoin(ssl_dir, 
'hub.crt')\n\nc.JupyterHub.cookie_secret_file = pjoin(runtime_dir, 'cookie_secret')\nc.JupyterHub.db_url = pjoin(runtime_dir, 'jupyterhub.sqlite')\n\n\n# use GoogleOAuthenticator + LocalAuthenticator\nc.JupyterHub.authenticator_class = my_oauthenticator.LocalGoogleOAuthenticator\n\nc.GoogleOAuthenticator.client_id = os.environ['OAUTH_CLIENT_ID']\nc.GoogleOAuthenticator.client_secret = os.environ['OAUTH_CLIENT_SECRET']\nc.GoogleOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']\n\n\n# create system users that don't exist yet\nc.Authenticator.create_system_users = True\n# Default adduser flags are for FreeBSD (works on CentOS 5, Debian, Ubuntu)\n# Doesn't work for us.\n# https://github.com/jupyterhub/jupyterhub/issues/696\n#c.Authenticator.add_user_cmd = ['adduser', '--home', '/home/USERNAME']\nc.Authenticator.add_user_cmd = ['adduser', '--home', '/mnt/nfs/home/USERNAME'] # not yet\n#TODO JMF 16 May 2017: I've hacked around in my_oauthenticator.py. Need to make this a bit more robust.\n\nc.Authenticator.whitelist = whitelist = set()\nc.Authenticator.admin_users = admin = set()\n\nwith open(pjoin(runtime_dir, 'userlist')) as f:\n for line in f:\n if not line:\n continue\n parts = line.split()\n if parts:\n name = parts[0]\n whitelist.add(name)\n if len(parts) > 1 and parts[1] == 'admin':\n admin.add(name)\n\n\n\n\n# nginx config stuff\n# Force the proxy to only listen to connections to 127.0.0.1\nc.JupyterHub.ip = '127.0.0.1'\n#c.JupyterHub.ip = '0.0.0.0'\nc.JupyterHub.proxy_api_ip = '127.0.0.1'\n\n\n# Zonca + legacy swarm\n# Point DockerSpawner to Swarm instead of the local DockerEngine\nos.environ[\"DOCKER_HOST\"] = \":4000\"\n\nc.JupyterHub.spawner_class = 'dockerspawner.SystemUserSpawner'\nc.DockerSpawner.container_image = 'data8-notebook'\n\n## Remove containers once they are stopped\nc.Spawner.remove_containers = True\n\n# For debugging arguments passed to spawned containers\nc.Spawner.debug = True\n\n\n\n#notebook_dir = '/home/{username}'\n#c.DockerSpawner.notebook_dir = notebook_dir\nc.DockerSpawner.notebook_dir = '/home'\n\n\n# The docker instances need access to the Hub, so the default loopback port doesn't work:\nfrom jupyter_client.localinterfaces import public_ips\nc.JupyterHub.hub_ip = public_ips()[0]\n#print('hub_ip = ',c.JupyterHub.hub_ip)\n\n\n# The docker instances need access to the Hub, so the default loopback port\n# doesn't work. We need to tell the hub to listen on 0.0.0.0 because it's in a\n# container, and we'll expose the port properly when the container is run. Then,\n# we explicitly tell the spawned containers to connect to the proper IP address.\n#c.JupyterHub.proxy_api_ip = '0.0.0.0'\nc.DockerSpawner.container_ip = '0.0.0.0'\nc.DockerSpawner.use_internal_ip = False\n\nc.DockerSpawner.hub_ip_connect = c.JupyterHub.hub_ip\n\n\n\n\n# Mount the real user's Docker volume on the host to the notebook user's\n# notebook directory in the container. 
Mount all of NFS home\nc.DockerSpawner.volumes = { '/mnt/nfs/home': '/home' }\n\nc.SystemUserSpawner.host_homedir_format_string = '/mnt/nfs/home/{username}'\n\nc.DockerSpawner.extra_host_config = {'mem_limit': '1g'}\n#c.DockerSpawner.extra_host_config = {'mem_limit': '50m'}\n\n","sub_path":"deploy/jupyterhub/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"634087443","text":"def Change_two(idx,num):\n    value = format(num,'#b')[2:]\n    return f'{\"0\" * (idx - len(value))}{value}'\n\ndef Binary_Sum(n,num1,num2):\n    b_num1 = Change_two(n,num1)\n    b_num2 = Change_two(n,num2)\n    b_sum = []\n\n    for i in range(n):\n        b_sum.append( int(b_num1[i]) | int(b_num2[i]) )\n\n    return ''.join(map(str,b_sum))\n\ndef mosaic(n):\n    n = n.replace('1','#')\n    n = n.replace('0',' ')\n    return n\n\ndef solution(n, arr1, arr2):\n    answer = []\n    for area in range(n):\n        answer.append( mosaic( Binary_Sum( n,arr1[area], arr2[area] ) ) )\n    return answer\n\nprint(solution(5,[9, 20, 28, 18, 11],[30, 1, 21, 17, 28]))","sub_path":"programmers/레벨1/비밀지도/비밀지도.py","file_name":"비밀지도.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201137261","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on\r\n\r\n@author: Don\r\n\"\"\"\r\nimport re\r\nimport random\r\nfrom array import array\r\nimport numpy as np\r\n\r\n\r\ndef line_reader(file):\r\n    with open(file, 'r') as f:\r\n        l = f.readline()\r\n        while l:\r\n            yield l\r\n            l = f.readline()\r\n        yield None\r\n\r\n\r\ndef createVocabList(dataset):\r\n    vocabSet = set([])\r\n    for document in dataset:\r\n        vocabSet = vocabSet | set(document)\r\n    return list(vocabSet)\r\n\r\n\r\ndef setOfWords2Vec(vocabList, inputSet):\r\n    returnVec = [0] * len(vocabList)\r\n    for word in inputSet:\r\n        if word in vocabList:\r\n            returnVec[vocabList.index(word)] = 1\r\n        else:\r\n            print(\"the word: %s is not in my vocabulary!\" % word)\r\n    return returnVec\r\n\r\n\r\ndef trainNB(trainMat, trainCat):\r\n    numTrainDoc = len(trainMat)\r\n    # print(\"--\" + str(numTrainDoc))\r\n    numWords = len(trainMat[0])\r\n    pAbusive = sum(trainCat) / float(numTrainDoc)\r\n    # print(pAbusive)\r\n    p0Num = np.zeros(numWords)\r\n    p1Num = np.zeros(numWords)\r\n    p0Denom = 0.0\r\n    p1Denom = 0.0\r\n    for i in range(numTrainDoc):\r\n        if trainCat[i] == 1.0:\r\n            p1Num += trainMat[i]\r\n            # print(p1Num)\r\n            p1Denom += sum(trainMat[i])\r\n        else:\r\n            p0Num += trainMat[i]\r\n            # print(p0Num)\r\n            p0Denom += sum(trainMat[i])\r\n    p1Vect = p1Num / p1Denom\r\n    p0Vect = p0Num / p0Denom\r\n    return p0Vect, p1Vect, pAbusive\r\n\r\n\r\ndef classifyNB(vec2Classify, p0Vect, p1Vect, pClass1):\r\n    p1 = sum(vec2Classify * p1Vect) + np.log(pClass1)\r\n    p0 = sum(vec2Classify * p0Vect) + np.log(1.0 - pClass1)\r\n    if p1 > p0:\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\n\r\ndef textParse(bigStr):\r\n    regEx = re.compile('\\\\W+')\r\n    listOfTokens = re.split(regEx, bigStr)\r\n    return [tok.lower() for tok in listOfTokens if len(tok) > 2]\r\n\r\n\r\ndef spamTest():\r\n    docList = []\r\n    classList = []\r\n    f = line_reader('../dataset/spam_train.txt')\r\n    line = next(f)  # next(iterator[, default]) Return the next item from the iterator.\r\n    print('loading training set...')\r\n    cnt = 0\r\n    while line:\r\n        content = re.split('\\s+', line)\r\n        doc = content[1]\r\n        score = float(content[0])\r\n        docList.append(doc)\r\n        classList.append(score)\r\n        cnt += 1\r\n        line = 
next(f)\r\n\r\n    trainVocabList = createVocabList(docList)\r\n    # print(len(wordList))\r\n    testSet = []\r\n    testClassList = []\r\n    f = line_reader('../dataset/spam_test.txt')\r\n    line = next(f)  # next(iterator[, default]) Return the next item from the iterator.\r\n    print('loading test set...')\r\n    while line:\r\n        content = re.split('\\s+', line)\r\n        doc = content[1]\r\n        score = float(content[0])\r\n        testSet.append(doc)\r\n        testClassList.append(score)\r\n        line = next(f)\r\n\r\n    trainMat = []\r\n    trainClass = []\r\n    for docIdx in range(len(docList)):\r\n        trainMat.append(setOfWords2Vec(trainVocabList, docList[docIdx]))\r\n        trainClass.append(classList[docIdx])\r\n    p0V, p1V, pSpam = trainNB(np.array(trainMat), np.array(trainClass))\r\n    # print(p0V, p1V, pSpam)\r\n\r\n    # vectorize the test set against the training vocabulary so the word\r\n    # indices line up with the probability vectors learned by trainNB\r\n    errorCnt = 0\r\n    print(len(testSet))\r\n    for docIdx in range(len(testSet)):\r\n        wordVect = setOfWords2Vec(trainVocabList, testSet[docIdx])\r\n\r\n        if classifyNB(np.array(wordVect), p0V, p1V, pSpam) != testClassList[docIdx]:\r\n            # print(classifyNB(np.array(wordVect), p0V, p1V, pSpam))\r\n            # print(testClassList[docIdx])\r\n            errorCnt += 1\r\n    print(\"the error rate is : \", float(errorCnt) / len(testSet))\r\n\r\n\r\nif __name__ == '__main__':\r\n    spamTest()\r\n","sub_path":"Basic/at/SpamNB.py","file_name":"SpamNB.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"575074312","text":"from typing import List, Dict\n\nfrom yaak import inject\n\nfrom Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.LocalDifferentialSampleDto import LocalDifferentialSampleDto\nfrom Src.BioAnalyzer.CrossCutting.Filters.GenePrioritization.FeListLocalDifferentialSample import \\\n    FeListLocalDifferentialSample\nfrom Src.Core.Manager.ManagerBase import ManagerBase\n\n\nclass LocalDifferentialMessengerRnaSampleManager(ManagerBase):\n    \"\"\"description of class\"\"\"\n\n    @inject.Param(repository='LocalDifferentialMessengerRnaSampleRepositoryBase')\n    def __init__(self, repository):\n        \"\"\"\n        \n        :param repository: \n        \"\"\"\n        super().__init__(repository)\n\n    def add_many(self, diff_mrna_samples: List[LocalDifferentialSampleDto]):\n        \"\"\"\n        \n        :param diff_mrna_samples: \n        :return: \n        \"\"\"\n        fe_diff_mrna = self.get_many(\n            FeListLocalDifferentialSample(is_paged=False,\n                                          patient_id_list=[mrna.patient_id for mrna in diff_mrna_samples\n                                                           if mrna.values]),\n            {'patient_id': 1})\n\n        new_diff_samples = [diff_mrna for diff_mrna in diff_mrna_samples\n                            if not diff_mrna.patient_id in fe_diff_mrna.result_list]\n\n        if not new_diff_samples:\n            return\n\n        self._repository.add_many(new_diff_samples)\n\n    def get_many(self, fe_diff_mrna: FeListLocalDifferentialSample,\n                 include_or_exclude_fields: Dict[str, int] = None) -> FeListLocalDifferentialSample:\n        \"\"\"\n        \n        :param fe_diff_mrna: \n        :param include_or_exclude_fields: \n        :return: \n        \"\"\"\n        if not fe_diff_mrna.differential_values_from_patients:\n            return self._repository.get_many(fe_diff_mrna, LocalDifferentialSampleDto,\n                                             include_or_exclude_fields)\n\n        fe_diff_mrna = self._repository.get_many(fe_diff_mrna, LocalDifferentialSampleDto,\n                                                 include_or_exclude_fields)\n\n        fe_diff_mrna.result_list = [LocalDifferentialSampleDto(\n            patient_id=d.patient_id,\n            values=[v for v in d.values\n                    if self.__filter_values(\n                    fe_diff_mrna.is_highly_significant,\n                    v,\n                    fe_diff_mrna.differential_values_from_patients)])\n            for d in fe_diff_mrna.result_list]\n\n        return fe_diff_mrna\n\n    def 
__filter_values(self, is_highly_significant, value, values):\n        return value.element_id in values and \\\n               value.status == values[value.element_id] and \\\n               value.is_highly_significant == is_highly_significant","sub_path":"Src/BioAnalyzer/Managers/GenePrioritization/LocalDifferentialMessengerRnaSampleManager.py","file_name":"LocalDifferentialMessengerRnaSampleManager.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"519069148","text":"# Natural Language Processing\n\n# Importing the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Importing the dataset\ndataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter='\\t', quoting=3)\n\n# Cleaning the text\nimport re\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\ncorpus = []\nfor i in range(0,dataset.shape[0]):\n    # Keeping only words made of letters A to Z, lower or upper case\n    review = re.sub('[^a-zA-Z]',' ', dataset['Review'][i])\n    review = review.lower()\n    review = review.split()\n    ps = PorterStemmer()\n    # removing stopwords\n    #review = [word for word in review if not word in set(stopwords.words('english'))]\n    # applying stemming\n    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n    review = ' '.join(review)\n    corpus.append(review)\n\n# Creating the Bag of Words Model\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(max_features=1500)\nX = cv.fit_transform(corpus).toarray()\ny = dataset.iloc[:,-1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n## Feature Scaling\n#from sklearn.preprocessing import StandardScaler\n#sc = StandardScaler()\n#X_train = sc.fit_transform(X_train)\n#X_test = sc.transform(X_test)\n\n# Fitting Naive Bayes to the Training set\nfrom sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\nTP = cm[1,1]\n\nTN = cm[0,0]\n\nFP = cm[0,1]\n\nFN = cm[1,0]\n\naccuracy = (TP + TN) / (TP + TN + FP + FN)\n\nprecision = TP / (TP + FP)\n\nrecall = TP / (TP + FN)\n\nf1_score = 2 * precision * recall / (precision + recall)\n\nprint('#-------------')\n\nprint(type(classifier))\n\nprint('Accuracy =', format(accuracy, '.2f'))\n\nprint('Precision =', format(precision, '.2f'))\n\nprint('Recall =', format(recall, '.2f'))\n\nprint('F1 Score =', format(f1_score, '.2f'))\n\nprint('#-------------')","sub_path":"Part 7 - Natural Language Processing/Section 36 - Natural Language Processing/mynlp.py","file_name":"mynlp.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"445909950","text":"import random\nimport getletternumber\nimport letterfrequencychart\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\ndef make_shift(message):\n    shift = random.randint(1, 25)\n    new_alphabet = \"\"\n    ciphertext = \"\"\n    message = message.lower()\n    for letter in alphabet:\n        new_alphabet = new_alphabet + alphabet[shift%26]\n        shift += 1\n    for le in message:\n        x = 
getletternumber.get_letter_value(le,alphabet)\n        try:\n            ciphertext = ciphertext + new_alphabet[x]\n        except TypeError:\n            ciphertext = ciphertext + x\n    print (ciphertext)\n    print (letterfrequencychart.frequency_chart(ciphertext))\n    return message + \" \\nAlphabet used: \" + new_alphabet\n","sub_path":"codebusters/caesershift.py","file_name":"caesershift.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232538961","text":"\"\"\"Convert epoch time to datetime format and remove subseconds.\"\"\"\nimport datetime\n\ndef epoch_to_date_time(time):\n    \"\"\"Convert epoch time to datetime format.\"\"\"\n    #convert to string\n    time = str(time)\n    #remove subseconds\n    time = int(time[:10])\n    #convert to datetime\n    time = datetime.datetime.fromtimestamp(time).strftime('%c')\n\n    return time\n","sub_path":"misc/time_convert.py","file_name":"time_convert.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"148559616","text":"# Define a class which has a class parameter and an instance parameter with the same name.\r\n\r\nclass Person(object):\r\n    def __init__(self, name=None):\r\n        self.name = name\r\n\r\n\r\njeff = Person(\"Jeff\")\r\n\r\nprint(jeff.name)\r\n","sub_path":"Solutions/Q25.py","file_name":"Q25.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"248925109","text":"# -*- coding: utf-8 -*-\n# from __future__ import absolute_import\nimport unittest\nfrom selenium.webdriver.common.by import By\n\nfrom GIC.utils import GICTest\n\n\nclass DeletePrivateTest(GICTest):\n    \"\"\"\n    Delete the private network\n    \"\"\"\n    def test_delete_private(self):\n        url = 'http://101.251.234.165/zh-cn/cloud/intranet/'\n        self.login_into_page(url)\n\n        # adjust private\n        adjust_private_xpath = '/html/body/div[6]/div/div[2]/table/tbody/tr[6]/td[6]/a'\n        self.browser.find_element(By.XPATH, adjust_private_xpath).click()\n\n        # delete_private\n        self.browser.find_element(By.CLASS_NAME, 'goumaizhongzhi_del_network').click()\n\n        # delete_confirm\n        delete_confirm_xpath = '/html/body/div[8]/div[3]/div/button[1]'\n        self.browser.find_element(By.XPATH, delete_confirm_xpath).click()\n\n        # notice\n        notice_xpath = '/html/body/div[7]/div[3]/div/button'\n        self.browser.find_element(By.XPATH, notice_xpath).click()\n\n# test entry point\nif __name__ == '__main__':\n    unittest.main()","sub_path":"Others/DeletePrivate.py","file_name":"DeletePrivate.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"38590934","text":"import numpy as np\nimport math\nimport cv2\nimport time\n\nimg = np.double(cv2.imread(\"archiwum/parrot.bmp\"))\n\nxReScale = 3\nyReScale = 5\n\nx, y = img.shape[:2]\n\nnewY, newX = math.floor(xReScale * x), math.floor(yReScale * y)\n\nnewParrot = np.zeros([newX, newY, 3], dtype=np.double)\n\nxStep = x/newX\nyStep = y/newY\n\nstart_time = time.time()\nfor i in range(0, newX):\n    for j in range(0, newY):\n\n        ii = i * xStep\n        jj = j * yStep\n        i1 = math.floor(ii)\n        j1 = math.floor(jj)\n\n        if i1 + 1 > x - 1:\n            i1 = x - 2\n\n        if j1 + 1 > y - 1:\n            j1 = y - 2\n\n        a = img[i1, j1]\n        b = img[i1 + 1, j1]\n        c = img[i1 + 1, j1 + 1]\n        d = img[i1, j1 + 1]\n\n        A = np.matrix([[a[0], d[0]],\n                       [b[0], c[0]]])\n\n        iN = ii % 1\n        jN = jj % 1\n\n        newParrot[i, j] = [1-iN, iN] * A * np.matrix([[1 - jN], 
[jN]])\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\ncv2.imshow('old image bilinear', np.uint8(img))\ncv2.imshow('new image bilinear', np.uint8(newParrot))\ncv2.waitKey(0)\n","sub_path":"lab2/bilinear.py","file_name":"bilinear.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"9045517","text":"class AbstractException(Exception):\n    def __init__(self, *args, **kwargs):\n        super(Exception, self).__init__(*args, **kwargs)\n        self.message = kwargs.get('message', None)\n        self.name = kwargs.get('name', None)\n        self.status = kwargs.get('status', '[undefined]')\n\n    def __str__(self):\n        return \"%s %s: %s\" % (self.status, self.name, self.message)\n\n\nclass LoginUnsuccessfulError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(LoginUnsuccessfulError, self).__init__(*args, **kwargs)\n        self.message = \"Login not successful, check your username or connection\"\n        self.name = \"LoginUnsuccessfulError\"\n        self.status = '[error]'\n\n\nclass LoginRequiredError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(LoginRequiredError, self).__init__(*args, **kwargs)\n        self.message = \"Download not successful, login is required\"\n        self.name = \"LoginRequiredError\"\n        self.status = '[error]'\n\n\nclass DateRequiredError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(DateRequiredError, self).__init__(*args, **kwargs)\n        self.message = \"The date must be complete\"\n        self.name = \"DateRequiredError\"\n        self.status = '[error]'\n\n\nclass TimeRequiredError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(TimeRequiredError, self).__init__(*args, **kwargs)\n        self.message = \"The hour and minute must be complete\"\n        self.name = \"TimeRequiredError\"\n        self.status = '[error]'\n\n\nclass DirectoryRequiredError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(DirectoryRequiredError, self).__init__(*args, **kwargs)\n        self.message = \"The directory is required\"\n        self.name = \"DirectoryRequiredError\"\n        self.status = '[error]'\n\n\nclass RemoteUrlRequiredError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(RemoteUrlRequiredError, self).__init__(*args, **kwargs)\n        self.message = \"Remote URL is not defined\"\n        self.name = \"RemoteUrlRequiredError\"\n        self.status = '[error]'\n\n\nclass ListFilesNotFoundError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(ListFilesNotFoundError, self).__init__(*args, **kwargs)\n        self.message = \"List of url files not found\"\n        self.name = \"ListFilesNotFoundError\"\n        self.status = '[error]'\n\n\nclass UrlFileNotFoundError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(UrlFileNotFoundError, self).__init__(*args, **kwargs)\n        self.message = \"The URL of file not found\"\n        self.name = \"UrlFileNotFoundError\"\n        self.status = '[error]'\n\n\nclass FileAlreadyExistError(AbstractException):\n    def __init__(self, *args, **kwargs):\n        super(FileAlreadyExistError, self).__init__(*args, **kwargs)\n        self.message = \"File was already downloaded\"\n        self.name = \"FileAlreadyExistError\"\n        self.status = '[warning]'\n\n","sub_path":"goamazondownloader/_exceptions.py","file_name":"_exceptions.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"188561485","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport socket,pika,re,threading\nimport 
subprocess\n\n\ndef get_ip():\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        s.connect(('10.255.255.255', 0))\n        IP = s.getsockname()[0]\n    except:\n        IP = '127.0.0.1'\n    finally:\n        s.close()\n    return IP\n\nip = get_ip() # get the local IP address\ntask_dict= {}  # stores the results of executed commands\nthread_dict = {} # stores the threads that execute commands\nuser_pwd = pika.PlainCredentials('admin','admin123')\ndef set_conn():\n    global conn,channel\n    conn = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.174.140',port=5672,credentials=user_pwd))\n    channel = conn.channel()\n    channel.exchange_declare(exchange='direct_logs', # receive messages broadcast from this exchange\n                             exchange_type='direct') # receive messages selectively\n\n    result = channel.queue_declare(exclusive=True) # no queue name given, so rabbit assigns a random one; exclusive=True deletes the queue once its consumer disconnects\n    queue_name = result.method.queue  # grab the queue name\n\n    channel.queue_bind(exchange='direct_logs',\n                       queue=queue_name,\n                       routing_key=ip) # use the IP address as the receiving routing key\n    channel.basic_consume(callback,\n                          queue=queue_name,\n                          no_ack=True) # run the callback function when a message arrives\n\ndef run_cmd(cmd,task_id):\n    '''\n    Run a command and capture its result\n    :param cmd:\n    :return: the output of the command\n    '''\n    global task_dict\n    info = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)\n    output,err = info.communicate()\n    if output:\n        output =output.decode()\n        task_dict[task_id] = output\n    else:\n        task_dict[task_id] = err\n\ndef callback(ch, method, props,msg):\n    global thread_dict\n    msg = msg.decode()\n    print(msg)\n    if msg.startswith('run'): # run a command\n        msg_list = msg.split('--')\n        cmd = re.search('[\\\"|\\']+(.+)[\\\"|\\']',msg_list[0]) # match the quoted command\n        cmd = cmd.group(1) # extract the command\n        task_id = msg_list[1]\n        t = threading.Thread(target=run_cmd,args=(cmd,task_id,)) # spawn a new thread to execute the command\n        t.start()\n        thread_dict[task_id] = t\n    elif msg.startswith('check_task'): # fetch a result\n        task_list = msg.split()\n        result_id = task_list[1] # get the task ID\n        if result_id in thread_dict:\n            if result_id in task_dict: # the result has been produced\n                result = task_dict[result_id]\n                ch.basic_publish(exchange='', # send the result back\n                                 routing_key=props.reply_to,\n                                 properties=pika.BasicProperties(correlation_id = props.correlation_id),\n                                 body='%s\\n%s'%(ip,result))\n                ch.basic_ack(delivery_tag = method.delivery_tag)\n                conn.close() # close the connection\n            else: # the task ID is not in task_dict yet, so the command has not finished\n                ch.basic_publish(exchange='', # send the result back\n                                 routing_key=props.reply_to,\n                                 properties=pika.BasicProperties(correlation_id = props.correlation_id),\n                                 body='%s\\ntask is Unfinished'%ip)\n                ch.basic_ack(delivery_tag = method.delivery_tag)\n                conn.close()\n    set_conn() # the connection was closed after sending the reply, so reconnect\nset_conn() # initial connection\nwhile True:\n    channel.start_consuming() # start receiving\n\n\n","sub_path":"Modular_five/RPC_RabbitMQ/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203357502","text":"from typing import *\n\nfrom airflow.models import BaseOperator\n\nfrom airflow_clickhouse_plugin.hooks import clickhouse_hook\n\n\nclass ClickHouseOperator(BaseOperator):\n    template_fields = ('_sql',)\n\n    def __init__(\n            self,\n            sql: Union[str, Iterable[str]],\n            clickhouse_conn_id: str = 'clickhouse_default',\n            parameters: Dict[str, Any] = None,\n            database=None,\n            *args, **kwargs,\n    ):\n        super().__init__(*args, **kwargs)\n        self._sql = sql\n        self._conn_id = clickhouse_conn_id\n        self._parameters = parameters\n        self._database = database\n\n    def execute(self, context: Dict[str, Any]) -> Any:\n        hook = clickhouse_hook.ClickHouseHook(\n            clickhouse_conn_id=self._conn_id,\n            database=self._database,\n        )\n        return hook.run(self._sql, 
self._parameters)\n","sub_path":"airflow_clickhouse_plugin/operators/clickhouse_operator.py","file_name":"clickhouse_operator.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"510537558","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport random\n\n\n# In[2]:\n\n\n'''\ninput:\ndf:dataFrame\ncentroids: array\noutput:df\n'''\ndef closest_dist(df,centroids):\n    for i,centroid in enumerate(centroids):\n        df['distance_from_{}'.format(i)] = np.sqrt((df['x']- centroids[i][0])**2 + (df['y']- centroids[i][1])**2)\n    distance_from_centroid_id = ['distance_from_{}'.format(i) for i in range(len(centroids))]\n    df['closest'] = df.loc[:, distance_from_centroid_id].min(axis=1) # distance to the nearest cluster center\n    return df\n\n\n# In[3]:\n\n\n'''\nk-means++ : choose the cluster centers\noutput: cluster_centers (array)\n'''\ndef choose_centroids(df,k):\n    cluster_centers = []\n    cluster_centers.append(df.sample(n=1,axis=0).values[0]) # randomly pick the first cluster center\n    for _ in range(1,k):\n        dist_sum = 0.0\n        df = closest_dist(df,cluster_centers)\n        dist_sum = np.sum(df['closest'],axis=0)\n        dist_sum *= random.random() \n        for i,distance in enumerate(df['closest'].tolist()):\n            dist_sum -= distance\n            if dist_sum > 0:\n                continue\n            cluster_centers.append(df.loc[i,['x','y']].values) # pick the remaining cluster centers\n            break\n    return np.array(cluster_centers)\n\n\n# In[4]:\n\n\ndef update(df, centroids):\n    for i in range(len(centroids)):\n        centroids[i][0] = np.mean(df[df['closest'] == df['distance_from_{}'.format(i)]]['x'])\n        centroids[i][1] = np.mean(df[df['closest'] == df['distance_from_{}'.format(i)]]['y'])\n    return centroids\n\n\n# In[5]:\n\n\ndef main():\n    # step 0.0: generate source data\n    df = pd.DataFrame({\n        'x': [12, 20, 28, 18, 10, 29, 33, 24, 45, 45, 52, 51, 52, 55, 53, 55, 61, 64, 69, 72, 23],\n        'y': [39, 36, 30, 52, 54, 20, 46, 55, 59, 63, 70, 66, 63, 58, 23, 14, 8, 19, 7, 24, 77]\n    })\n    k = 3\n    # centroids[i] = [x, y]\n    cluster_centers = choose_centroids(df,k) # choose the cluster centers\n    print('original cluster_centers is:',cluster_centers)\n    # step 0.2: assign centroid for each source data\n    # for color and mode: https://blog.csdn.net/m0_38103546/article/details/79801487\n    # colmap = {0: 'r', 1: 'g', 2: 'b', 3: 'm', 4: 'c'}\n    colmap = {0: 'r', 1: 'g', 2: 'b'}\n    df = closest_dist(df, cluster_centers) \n    \n    #plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')\n    plt.scatter(df['x'], df['y'])\n    for i in range(len(cluster_centers)):\n        plt.scatter(*cluster_centers[i],color=colmap[i], linewidths=6)\n    plt.xlim(0, 80)\n    plt.ylim(0, 80)\n    plt.show()\n    \n    for i in range(10):\n        key = cv2.waitKey()\n        plt.close()\n\n        closest_centroids = df['closest'].copy(deep=True)\n        cluster_centers = update(df, cluster_centers)\n        \n        #plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')\n        plt.scatter(df['x'], df['y'])\n        for i in range(len(cluster_centers)):\n            plt.scatter(*cluster_centers[i], color=colmap[i],linewidths=6)\n        plt.ylim(0, 80)\n        plt.show()\n\n        df = closest_dist(df, cluster_centers)\n\n        if closest_centroids.equals(df['closest']): \n            print('final cluster_centers is:',cluster_centers)\n            break\n\n\n# In[6]:\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"week5/k-means++.py","file_name":"k-means++.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"132610150","text":"\"\"\"\nCreated on 11 02 
2016\n\n@author: grewalri\n\"\"\"\nimport os\nimport re\nimport gzip\nimport glob\n\nSOURCE_FILES_DIR = str\nLOGGER = None\n\n\ndef factory(load_type, local_source_files_dir, logger):\n global SOURCE_FILES_DIR, LOGGER\n SOURCE_FILES_DIR = local_source_files_dir\n LOGGER = logger\n return eval(load_type + \"Validator()\")\n\n\nclass DataValidator:\n \"\"\"\n Common base class for all validators\n \"\"\"\n def __init__(self):\n self._load = None\n self._data_file = None\n\n def get_ctl_file_delimiter(self, local_source_file):\n return '|'\n\n def validate(self, local_ctl_props, local_ctl_filename):\n \"\"\"\n Validates the local_ctl_file\n :param local_ctl_props:\n :param local_ctl_filename:\n :return: local_validation_success, local_error_message\n \"\"\"\n LOGGER.info(\"{} Validator\".format(self._load))\n local_validation_success = True\n local_error_message = \"Error in file : {} - \".format(local_ctl_props[\"filename\"])\n local_year = \"\"\n local_has_data = True\n\n if self._data_file is None:\n self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"] + \".gz\")\n\n date_from_ctl_file = None\n if \"ONEOFF\" not in local_ctl_filename:\n date_from_ctl_file = re.search(\"([0-9]{4}[0-9]{2}[0-9]{2})\", local_ctl_filename).group(0)\n\n # lines = list()\n count = 0\n if os.path.exists(self._data_file):\n if str(self._data_file).endswith(\"gz\"):\n with gzip.open(self._data_file, \"rb\") as zip_file:\n for _ in zip_file:\n count += 1\n # lines = zip_file.readlines()\n else:\n with open(self._data_file, \"rb\") as dat_file:\n for _ in dat_file:\n count += 1\n # lines = dat_file.readlines()\n else:\n local_error_message += \"Data file {} does not exist \".format(self._data_file)\n local_validation_success = False\n\n if local_validation_success:\n file_line_count = count\n row_count = 0\n header_count = 0\n footer_count = 0\n cob_date = None\n\n if \"recordcount\" in local_ctl_props:\n row_count = int(local_ctl_props[\"recordcount\"])\n LOGGER.debug(\"row_count : {}\".format(row_count))\n if \"sourcecount\" in local_ctl_props:\n row_count = int(local_ctl_props[\"sourcecount\"])\n LOGGER.debug(\"row_count : {}\".format(row_count))\n if \"businessdate\" in local_ctl_props:\n cob_date = int(local_ctl_props[\"businessdate\"])\n local_year = local_ctl_props[\"businessdate\"][:4]\n LOGGER.debug(\"cob_date : {}\".format(cob_date))\n LOGGER.debug(\"local_year : {}\".format(local_year))\n if \"cobdate\" in local_ctl_props:\n cob_date = int(local_ctl_props[\"cobdate\"])\n local_year = local_ctl_props[\"cobdate\"][:4]\n LOGGER.debug(\"cob_date : {}\".format(cob_date))\n LOGGER.debug(\"local_year : {}\".format(local_year))\n if \"headerreccount\" in local_ctl_props:\n header_count = int(local_ctl_props[\"headerreccount\"])\n LOGGER.debug(\"header_count : {}\".format(header_count))\n if \"footerreccount\" in local_ctl_props:\n footer_count = int(local_ctl_props[\"footerreccount\"])\n LOGGER.debug(\"footer_count : {}\".format(footer_count))\n\n LOGGER.debug(\"Number of lines in the file : {}\".format(file_line_count))\n total_expected_row_count = row_count\n if not local_ctl_props[\"filename\"].startswith(\"FISS\"):\n total_expected_row_count = row_count + header_count + footer_count\n\n LOGGER.debug(\"Total expected line count : {}\".format(total_expected_row_count))\n\n if file_line_count == 1:\n local_error_message += \"No data available for {}\".format(local_ctl_props[\"filename\"])\n local_validation_success = False\n local_has_data = False\n\n if 
file_line_count != total_expected_row_count:\n local_error_message += \"source count {} != {} \".format(file_line_count, total_expected_row_count)\n local_validation_success = False\n\n if \"ONEOFF\" not in local_ctl_filename and int(date_from_ctl_file) != int(cob_date):\n local_error_message += \"cob date {} != {} \".format(int(date_from_ctl_file), int(cob_date))\n local_validation_success = False\n\n return local_validation_success, local_error_message, local_year, local_has_data\n\n def parse_ctl_file(self, local_ctl_file):\n local_ctl_props = dict()\n delimiter = self.get_ctl_file_delimiter(None)\n for row in local_ctl_file.readlines():\n local_ctl_props[row.split(delimiter)[0].lower()] = row.split(delimiter)[1].rstrip()\n\n return local_ctl_props\n\n\nclass TESValidator(DataValidator):\n \"\"\"\n Validator for TES feeds\n \"\"\"\n def __init__(self):\n DataValidator.__init__(self)\n self._load = \"TES\"\n\n def get_ctl_file_delimiter(self, local_source_file):\n delimiter = DataValidator.get_ctl_file_delimiter(self, local_source_file)\n if \"BOOK_MAPPING\" in local_source_file.name:\n delimiter = '='\n\n return delimiter\n\n def validate(self, local_ctl_props, local_ctl_file_name):\n self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"] + \".gz\")\n if \"BOOK_MAPPING\" in local_ctl_file_name or \"_MAP_\" in local_ctl_file_name:\n self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"])\n\n return DataValidator.validate(self, local_ctl_props, local_ctl_file_name)\n\n\nclass TDB_REFValidator(DataValidator):\n \"\"\"\n Validator for TDB_REF feeds\n \"\"\"\n def __init__(self):\n DataValidator.__init__(self)\n self._load = \"TDB_REF\"\n\n def get_ctl_file_delimiter(self, local_source_file):\n delimiter = DataValidator.get_ctl_file_delimiter(self, local_source_file)\n if \"MAP\" in local_source_file:\n delimiter = '='\n\n return delimiter\n\n def validate(self, local_ctl_props, local_ctl_file_name):\n self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"])\n return DataValidator.validate(self, local_ctl_props, local_ctl_file_name)\n\n\nclass CONDORValidator(DataValidator):\n \"\"\"\n Validator for CONDOR feeds\n \"\"\"\n def __init__(self):\n DataValidator.__init__(self)\n self._load = \"CONDOR\"\n\n def validate(self, local_ctl_props, local_ctl_file_name):\n self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"])\n return DataValidator.validate(self, local_ctl_props, local_ctl_file_name)\n\n def parse_ctl_file(self, local_ctl_file):\n local_ctl_props = dict()\n delimiter = \"|\"\n for row in local_ctl_file.readlines():\n key = row.split(delimiter)[0].lower()\n value = row.split(delimiter)[1].rstrip()\n\n if \"file\" in key:\n key = \"filename\"\n elif \"asofdate\" in key:\n key = \"cobdate\"\n elif \"rows\" in key:\n key = \"recordcount\"\n\n local_ctl_props[key] = value\n\n return local_ctl_props\n\n\nclass TETBValidator(DataValidator):\n \"\"\"\n Validator for TETB feeds\n \"\"\"\n def __init__(self):\n DataValidator.__init__(self)\n self._load = \"TETB\"\n\n def get_ctl_file_delimiter(self, local_source_file):\n delimiter = ','\n if os.path.basename(local_source_file.name).startswith(\"TDB_CoCode\"):\n delimiter = '|'\n\n if os.path.basename(local_source_file.name).startswith(\"FISS\"):\n delimiter = '='\n\n return delimiter\n\n def validate(self, local_ctl_props, local_ctl_filename):\n 
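# FISS feeds reuse the common CTL layout and the base validator;\n        # other TETB feeds are validated against their gzipped data files here.\n        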
filename = os.path.basename(local_ctl_filename)\n        if filename.startswith(\"FISS\"):\n            self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"] + \".gz\")\n            return DataValidator.validate(self, local_ctl_props, local_ctl_filename)\n        else:\n            LOGGER.info(\"{} Validator\".format(self._load))\n            local_validation_success = True\n            local_error_message = \"Error : \"\n            local_year = \"\"\n            local_has_data = True\n\n            abs_file_prefix, file_suffix = os.path.splitext(local_ctl_filename)\n            local_files = glob.glob(abs_file_prefix + \"*.gz\")\n\n            if len(local_files) > 1:\n                local_error_message += \"More than one data file for {} exists\".format(os.path.basename(local_ctl_filename))\n                local_validation_success = False\n\n            if local_validation_success:\n                file_line_count = 0\n                with gzip.open(local_files[0], \"rb\") as zip_file:\n                    for _ in zip_file:\n                        file_line_count += 1\n\n                cob_date = local_ctl_props[\"close_of_business_date\"]\n                LOGGER.debug(\"cob_date : {}\".format(cob_date))\n                local_year = cob_date[:4]\n                LOGGER.debug(\"Year from COB : {}\".format(local_year))\n\n                # COB from file: extract the 8-digit date stamp from the file name\n                from datetime import datetime\n                cob_pattern = re.compile(\"[0-9]{8}\")\n                datestring = cob_pattern.search(os.path.basename(local_ctl_filename)).group()\n                date_from_ctl_file = None\n                for fmt in ('%Y%m%d', '%d%m%Y'):\n                    try:\n                        date_from_ctl_file = datetime.strptime(datestring, fmt).strftime(\"%Y%m%d\")\n                    except ValueError:\n                        pass\n\n                # date_from_ctl_file = re.search(\"([0-9]{4}[0-9]{2}[0-9]{2})\", local_ctl_filename).group(0)\n\n                expected_row_count = local_ctl_props[\"number_of_records\"]\n                LOGGER.debug(\"Expected row count : {}\".format(expected_row_count))\n\n                if date_from_ctl_file is not None and date_from_ctl_file != cob_date:\n                    local_error_message += \"cob date {} != {} \".format(int(date_from_ctl_file), int(cob_date))\n                    local_validation_success = False\n\n                if file_line_count == 1:\n                    local_error_message += \"No data available for {}\".format(local_ctl_props[\"filename\"])\n                    local_validation_success = False\n                    local_has_data = False\n\n                # Ignore header from count\n                if (file_line_count - 1) != int(expected_row_count):\n                    local_error_message += \"file source count {} != {} \".format((file_line_count - 1), expected_row_count)\n                    local_validation_success = False\n\n            return local_validation_success, local_error_message, local_year, local_has_data\n\n\nclass SECPRODValidator(DataValidator):\n    \"\"\"\n    Validator for SECPROD feeds\n    \"\"\"\n    def __init__(self):\n        DataValidator.__init__(self)\n        self._load = \"SECPROD\"\n\n    def get_ctl_file_delimiter(self, local_source_file):\n        delimiter = DataValidator.get_ctl_file_delimiter(self, local_source_file)\n\n        return delimiter\n\n    def validate(self, local_ctl_props, local_ctl_file_name):\n        LOGGER.debug(\"{} Validator\".format(self._load))\n        self._data_file = None\n        return DataValidator.validate(self, local_ctl_props, local_ctl_file_name)\n\n\nclass BASKETDSValidator(DataValidator):\n    \"\"\"\n    Validator for BASKETDS feeds\n    \"\"\"\n    def __init__(self):\n        DataValidator.__init__(self)\n        self._load = \"BASKETDS\"\n\n    def get_ctl_file_delimiter(self, local_source_file):\n        delimiter = DataValidator.get_ctl_file_delimiter(self, local_source_file)\n\n        return delimiter\n\n    def validate(self, local_ctl_props, local_ctl_file_name):\n        LOGGER.debug(\"{} Validator\".format(self._load))\n        self._data_file = None\n        return DataValidator.validate(self, local_ctl_props, local_ctl_file_name)\n\n\nclass SAPGLValidator(DataValidator):\n    \"\"\"\n    Validator for SAPGL feeds\n    
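(control data arrives as a single header row of alternating key/value\n    fields; see parse_ctl_file below)\n    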
\"\"\"\n def __init__(self):\n DataValidator.__init__(self)\n self._load = \"SAPGL\"\n\n def get_ctl_file_delimiter(self, local_source_file):\n delimiter = DataValidator.get_ctl_file_delimiter(self, local_source_file)\n\n return delimiter\n\n def validate(self, local_ctl_props, local_ctl_filename):\n LOGGER.debug(\"{} Validator\".format(self._load))\n pass\n\n def parse_ctl_file(self, local_ctl_file):\n local_ctl_props = dict()\n delimiter = self.get_ctl_file_delimiter(local_ctl_file)\n row_header = local_ctl_file.readline().rstrip()\n groups = row_header.split(delimiter)\n for i in range(0, len(groups), 2):\n if groups[i] == \"ZZCDATA\":\n splitted = groups[i + 1].split(\"=\")\n local_ctl_props[splitted[0]] = splitted[1]\n\n local_ctl_props[groups[i]] = groups[i + 1]\n\n\nclass TDB_BICValidator(DataValidator):\n \"\"\"\n Validator for TDB_BIC feeds\n \"\"\"\n def __init__(self):\n DataValidator.__init__(self)\n self._load = \"TDB_BIC\"\n\n def get_ctl_file_delimiter(self, local_source_file):\n delimiter = \"=\"\n return delimiter\n\n def validate(self, local_ctl_props, local_ctl_file_name):\n self._data_file = os.path.join(SOURCE_FILES_DIR, self._load + \"_STAGING\", local_ctl_props[\"filename\"])\n LOGGER.debug(\"{} Validator\".format(self._load))\n return DataValidator.validate(self, local_ctl_props, local_ctl_file_name)\n\nif __name__ == \"__main__\":\n import logging as LOGGER\n import logging.config\n import scripts.preprocessor as preprocessor\n\n load = \"SAPGL\"\n CONFIG_MAP = preprocessor.parse_app_props(load)\n CONFIG_MAP[\"load\"] = load\n LOGGER.config.fileConfig(os.path.join(CONFIG_MAP[\"PROJECT_ROOT_DIR\"], \"conf\", \"logging.conf\"))\n\n validator = factory(load, \"/home/grewalri/DEV/jetbrains-projects/pycharm/codebase/HistoricalLoad/resources/source_files\", LOGGER)\n ctl_file_name = os.path.join(CONFIG_MAP[\"incoming_files\"], load.lower(), \"SAPBALANCE_20140131_20160314_1_COPY.ctl\")\n\n ctl_props = dict()\n with open(ctl_file_name) as ctl_file:\n ctl_props = validator.parse_ctl_file(ctl_file)\n\n validator.validate(ctl_props, ctl_file)\n","sub_path":"src/historicalLoader/core/DataValidatorFactory.py","file_name":"DataValidatorFactory.py","file_ext":"py","file_size_in_byte":14711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"605856799","text":"import io\nimport requests\nfrom PIL import Image\n\ndef register_face(img_path,user_id):\n image_data = open(img_path,\"rb\").read()\n response = requests.post(\"http://localhost:80/v1/vision/face/register\",\n files={\"image\":image_data}, data={\"userid\":user_id}).json()\n print(response)\n\ndef test_face(img_path):\n test_image = open(img_path,\"rb\").read()\n res = requests.post(\"http://localhost:80/v1/vision/face/recognize\",\n files={\"image\":test_image}).json()\n print(res)\n\ndef detect_face(img_path, save_img=False):\n image_data = open(img_path,\"rb\").read()\n response = requests.post(\"http://localhost:80/v1/vision/face\",files={\"image\":image_data}).json()\n print(response)\n\n if save_img:\n image = Image.open(img_path).convert(\"RGB\")\n i = 0\n for face in response[\"predictions\"]:\n y_max = int(face[\"y_max\"])\n y_min = int(face[\"y_min\"])\n x_max = int(face[\"x_max\"])\n x_min = int(face[\"x_min\"])\n cropped = image.crop((x_min,y_min,x_max,y_max))\n cropped.save(\"out{}.jpg\".format(i))\n i += 1\n\ndef compare_face_path(img1_path, img2_path):\n image_data1 = open(img1_path,\"rb\").read()\n image_data2 = open(img2_path,\"rb\").read()\n response = 
requests.post(\"http://localhost:80/v1/vision/face/match\",files={\"image1\":image_data1,\"image2\":image_data2}).json()\n print(response)\n\ndef compare_face_buffer(img1_buffer, img2_buffer):\n image_data1 = img1_buffer\n image_data2 = img2_buffer\n response = requests.post(\"http://localhost:80/v1/vision/face/match\",files={\"image1\":image_data1,\"image2\":image_data2}).json()\n print(response)\n\ndef un_merged(img_path, FIX_HEIGHT = 1024):\n image = Image.open(img_path).convert(\"RGB\")\n y_max = FIX_HEIGHT\n y_min = 0\n x_max = image.width\n x_min = 0\n top = image.crop((x_min,y_min,x_max,y_max))\n\n y_max = image.height\n y_min = FIX_HEIGHT\n x_max = image.width\n x_min = 0\n bottom = image.crop((x_min,y_min,x_max,y_max))\n\n top.save(\"top.jpg\")\n bottom.save(\"bottom.jpg\")\n\n return (top, bottom)\n\ndef pil_to_binary(img):\n output = io.BytesIO()\n img.save(output, format='JPEG')\n return output.getvalue()\n\n\n# register_face(\"./cruise.jpeg\",\"Tom Cruise\")\n# register_face(\"./adele.jpeg\",\"Adele\")\n# register_face(\"./adele.jpeg\",\"Idris Elba\")\n# register_face(\"./perri.jpeg\",\"Christina Perri\")\n\n# test_face(\"./adele2.jpeg\")\n\ntest_file = \"/Users/admin/Documents/projects/blockpass-private/scripts/KYC-Manual-Verify/out/tmp/606f29acc253480012675e5f/merged.jpg\"\nres = detect_face(test_file,\n save_img=True)\n# compare_face_path(\"./out2.jpg\", \"./adele2.jpeg\")\n\n(img1,img2) = un_merged(test_file)\ncompare_face_buffer(pil_to_binary(img1), pil_to_binary(img2))","sub_path":"depth-stack-playground/face/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"515558072","text":"import torch\nfrom torch import nn\nfrom tqdm import tqdm\n\n\ndef accuracy(prediction, label):\n \"\"\"\n Returns accuracy per batch\n \"\"\"\n prediction = torch.argmax(nn.functional.softmax(prediction, dim=1), dim=1)\n acc = torch.sum(prediction == label).float() / len(prediction == label)\n return acc\n\n\ndef train_for_epoch(model, iterator, optimizer, criterion):\n epoch_loss = 0\n epoch_acc = 0\n\n model.train()\n\n with tqdm(total=len(iterator), desc=\"Training Processing\", leave=False) as pbar:\n for batch in iterator:\n optimizer.zero_grad()\n\n input_a = batch.sentence1\n input_b = batch.sentence2\n gold_label = batch.gold_label\n\n prediction = model(input_a, input_b)\n loss = criterion(prediction, gold_label)\n acc = accuracy(prediction, gold_label)\n\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n pbar.update(1)\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef evaluate_model(model, iterator, criterion):\n epoch_loss = 0\n epoch_acc = 0\n\n model.eval()\n\n with torch.no_grad():\n for batch in iterator:\n input_a = batch.sentence1\n input_b = batch.sentence2\n gold_label = batch.gold_label\n\n prediction = model(input_a, input_b)\n\n loss = criterion(prediction, gold_label)\n acc = accuracy(prediction, gold_label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n\ndef predict_labels(model, iterator):\n model.eval()\n\n predictions = []\n\n with torch.no_grad():\n for batch in iterator:\n input_a 
= batch.sentence1\n            input_b = batch.sentence2\n\n            prediction = model(input_a, input_b)\n            prediction = torch.argmax(nn.functional.softmax(prediction, dim=1), dim=1)\n            predictions.extend(prediction.tolist())\n\n    return predictions\n\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142626808","text":"from anime_downloader.extractors.base_extractor import BaseExtractor\nfrom anime_downloader.sites import helpers\n\nimport re\n\n\nclass StreamTape(BaseExtractor):\n    def _get_data(self):\n        resp = helpers.get(self.url, cache=False).text\n        url = \"https:\" + \\\n            re.search(\n                \"document\\.getElementById\\([\\\"']videolink[\\\"']\\);.*?innerHTML.*?=.*?[\\\"'](.*?)[\\\"']\", resp).group(1)\n\n        return {\n            'stream_url': url,\n            'referer': self.url\n        }\n","sub_path":"anime_downloader/extractors/streamtape.py","file_name":"streamtape.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"38491915","text":"from django.conf.urls import patterns, url\n\nurlpatterns = patterns('sparky.translation.views',\n    url(r'^$',\n        'projects',\n        name='project.index'),\n\n    url(r'^(?P<project>[^/]*)/(?P<language>[^/]*)/$',\n        'translate',\n        name='project.translate'),\n\n    url(r'^(?P<project>[^/]*)/todos',\n        'todos',\n        name='project.todos'),\n\n    url(r'^(?P<project>[^/]*)/conflicts',\n        'conflicts',\n        name='project.conflicts'),\n\n    url(r'^(?P<project>[^/]*)/references',\n        'references',\n        name='project.references')\n    )\n","sub_path":"translations/sparky/translation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"469614450","text":"# Copyright 2017 The Fuchsia Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Recipe for checking licenses in the repo hosting third-party Rust crates.\"\"\"\n\nfrom recipe_engine.config import ReturnSchema, Single\nfrom recipe_engine.recipe_api import Property\n\n\nDEPS = [\n 'infra/jiri',\n 'recipe_engine/context',\n 'recipe_engine/path',\n 'recipe_engine/properties',\n 'recipe_engine/raw_io',\n 'recipe_engine/step',\n]\n\n\ndef RunSteps(api):\n api.jiri.ensure_jiri()\n\n with api.context(infra_steps=True):\n api.jiri.init()\n api.jiri.import_manifest('third_party_rust_crates',\n 'https://fuchsia.googlesource.com/manifest')\n api.jiri.update()\n\n cmd = [\n api.path['start_dir'].join('scripts', 'rust', 'check_rust_licenses.py'),\n '--verify',\n '--directory',\n api.path['start_dir'].join(\n 'third_party', 'rust-crates', 'rustc_deps', 'vendor'),\n ]\n api.step('verify licenses', cmd)\n\n\ndef GenTests(api):\n yield api.test('basic')\n","sub_path":"recipes/third_party_rust_licenses.py","file_name":"third_party_rust_licenses.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"616906991","text":"from flask import Blueprint, Response, current_app, request\nfrom maintain_api.models import StatutoryProvision\nfrom maintain_api.exceptions import ApplicationError\nfrom sqlalchemy import func\nimport json\nfrom maintain_api.extensions import db\nfrom jsonschema import validate, ValidationError\n\nstatutory_provision_bp = Blueprint('statutory_provisions', __name__, url_prefix='/statutory-provisions')\n\n\nSTAT_PROV_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"title\": {\"type\": \"string\"},\n \"selectable\": {\"type\": \"boolean\"}\n },\n \"additionalProperties\": False,\n \"required\": [\"title\", \"selectable\"]\n}\n\n\n@statutory_provision_bp.route('/statutory-provisions', methods=['GET'])\ndef get_all_statutory_provisions():\n current_app.logger.info(\"Get all statutory provisions.\")\n\n selectable = request.args.get('selectable')\n\n if selectable is None:\n provisions = StatutoryProvision.query \\\n .distinct(StatutoryProvision.title) \\\n .order_by(StatutoryProvision.title) \\\n .all()\n else:\n provisions = StatutoryProvision.query \\\n .distinct(StatutoryProvision.title) \\\n .filter(StatutoryProvision.selectable == selectable) \\\n .order_by(StatutoryProvision.title) \\\n .all()\n\n if provisions is None or len(provisions) == 0:\n raise ApplicationError(\"No provisions found.\", 404, 404)\n\n provisions_json = []\n for provision in provisions:\n provisions_json.append(provision.title)\n\n return Response(response=json.dumps(provisions_json), mimetype=\"application/json\")\n\n\n@statutory_provision_bp.route('/statutory-provisions', methods=['POST'])\ndef add_statutory_provisions():\n current_app.logger.info(\"Add statutory provision.\")\n\n request_body = request.get_json()\n\n try:\n validate(request_body, STAT_PROV_SCHEMA)\n except ValidationError as e:\n current_app.logger.info(\"Add statutory provision - payload failed validation\")\n raise ApplicationError(e.message, 400, 400)\n\n title = request_body[\"title\"]\n selectable = request_body[\"selectable\"]\n\n exists = StatutoryProvision.query.filter(func.lower(StatutoryProvision.title) == func.lower(title)).all()\n\n if exists is not None and len(exists) > 0:\n message = \"Statutory provision '{0}' already exists.\".format(title)\n current_app.logger.info(message)\n 
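# duplicate titles are rejected with HTTP 409 Conflict rather than updated in place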
raise ApplicationError(message, 409, 409)\n\n    stat_prov = StatutoryProvision(title, selectable)\n\n    db.session.add(stat_prov)\n    db.session.commit()\n\n    return \"\", 201\n\n\n@statutory_provision_bp.route('/statutory-provisions/<stat_prov>', methods=['DELETE'])\ndef delete_statutory_provisions(stat_prov):\n    current_app.logger.info(\"Delete statutory provision {0}.\".format(stat_prov))\n\n    provision = StatutoryProvision.query.filter(func.lower(StatutoryProvision.title) == func.lower(stat_prov)).first()\n\n    if provision is None:\n        message = \"Statutory provision '{0}' does not exist.\".format(stat_prov)\n        current_app.logger.info(message)\n        raise ApplicationError(message, 404, 404)\n\n    StatutoryProvision.query.filter(StatutoryProvision.id == provision.id).delete()\n    db.session.commit()\n\n    return \"\", 204\n\n\n@statutory_provision_bp.route('/statutory-provisions/<stat_prov>', methods=['PUT'])\ndef update_statutory_provisions(stat_prov):\n    current_app.logger.info(\"Update statutory provision {0}.\".format(stat_prov))\n\n    request_body = request.get_json()\n\n    try:\n        validate(request_body, STAT_PROV_SCHEMA)\n    except ValidationError as e:\n        current_app.logger.info(\"Update statutory provision - payload failed validation\")\n        raise ApplicationError(e.message, 400, 400)\n\n    title = request_body[\"title\"]\n    selectable = request_body[\"selectable\"]\n\n    provision = StatutoryProvision.query.filter(func.lower(StatutoryProvision.title) == func.lower(stat_prov)).first()\n\n    if provision is None:\n        message = \"Statutory provision '{0}' does not exist.\".format(stat_prov)\n        current_app.logger.info(message)\n        raise ApplicationError(message, 404, 404)\n\n    if provision.title.lower() != title.lower():\n        inuse = StatutoryProvision.query.filter(func.lower(StatutoryProvision.title) == func.lower(title)).first()\n        if inuse is not None:\n            message = \"Statutory provision with name '{0}' already exists.\".format(title)\n            current_app.logger.info(message)\n            raise ApplicationError(message, 409, 409)\n\n    provision.title = title\n    provision.selectable = selectable\n    db.session.commit()\n\n    return \"\", 204\n","sub_path":"maintain_api/views/v1_0/statutory_provisions.py","file_name":"statutory_provisions.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574306898","text":"import logging\nimport sys\nimport socket\nfrom logging.handlers import SysLogHandler\n\nlogger = logging.getLogger()\n\n\ndef my_handler(type, value, tb):\n    global logger\n\n    logger.exception('Uncaught exception: {0}'.format(str(value)))\n\n\ndef log_init():\n    global logger\n\n    syslog = SysLogHandler(address=('logs5.papertrailapp.com', 28031))\n    format = '%(asctime)s CF: %(message)s'\n    formatter = logging.Formatter(format, datefmt='%b %d %H:%M:%S')\n    syslog.setFormatter(formatter)\n    logger = logging.getLogger()\n    logger.addHandler(syslog)\n    logger.setLevel(logging.INFO)\n    sys.excepthook = my_handler\n","sub_path":"CF/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"428731661","text":"# A class for building all the numerical representations of keyboard characters\n\nclass StringMap:\n    def __init__(self):\n        self.map = {}\n        \n        count = 65\n\n        for char in 'abcdefghijklmnopqrstuvwxyz':\n            self.map[f'{char}'] = count\n            count += 1\n        \n        self.map[' '] = 32\n        self.map[':'] = 58\n        self.map['?'] = 63\n        self.map[';'] = 59\n        \n        count = 48\n\n        for char in '0123456789':\n 
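# digits '0'-'9' take their real ASCII codes 48-57 (the letters above map to the uppercase codes 65-90)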
self.map[f'{char}'] = count\n            count += 1\n","sub_path":"code/string_map.py","file_name":"string_map.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"515771043","text":"from math import *\r\nx = float(input('Enter x: '))\r\nif x<-8:\r\n\ty = -3\r\nelif x>=-8 and x<-3:\r\n\ty = 0.7*x+2\r\nelif x>-3 and x<3:\r\n\ty = -sqrt(9-x**2)\r\nelif x>3 and x<=5:\r\n\ty = 3*x-3\r\nelse: y = 3\r\nprint(\"X={0:.2f} Y={1:.2f}\".format(x, y))","sub_path":"lab/lab2e1.py","file_name":"lab2e1.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"442261322","text":"\"\"\"Program to check that ATMMachine works\"\"\"\nimport atm_machine as atm\n\natm1=atm.ATMMachine()\n\n#Create two accounts\natm1.create_new_account(1234,'Silvina',40.0)\natm1.create_new_account(4567,'Silvina',0.0)\n\n#Withdraw 40 pesos from account 1234\natm1.make_a_withdraw(1234,40)\n\n#Deposit 40 pesos into account 1234\natm1.make_a_deposit(1234,40)\n\n#Transfer 40 pesos from account 1234 to account 4567\natm1.make_a_transfer(1234,4567,40)\n\n#Print the account balances\natm1.print_account_balance(1234) #40-40+40-40\natm1.print_account_balance(4567) #0+40\n","sub_path":"prueba_ATMMachine.py","file_name":"prueba_ATMMachine.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"10359777","text":"import os\nimport sys\nfrom io import BytesIO, IOBase\nimport math\n\n\ndef inputIntArray():\n    return list(map(int, input().rstrip().split()))\n\n\ndef inputArray():\n    return input().rstrip().split()\n\n\ndef inputVars():\n    return map(int, input().rstrip().split())\n\n\ndef inputNum():\n    return int(input())\nlookup = [32, 0, 1, 26, 2, 23, 27, 0,3, 16, 24, 30, 28, 11, 0, 13,4, 7, 17, 0, 25, 22, 31, 15,29, 10, 12, 6, 0, 21,\n          14, 9,5, 20, 8, 19, 18]\ndef getTrailingZeroes(num):\n    return lookup[(num&(-num))%37]\n\ndef main():\n    for _ in range(inputNum()):\n        TS = inputNum()\n        JS = 0\n        if TS % 2 != 0:\n            JS = TS//2\n        else:\n            trailZeroCnt = getTrailingZeroes(TS)+1\n            JS = TS//int(2**trailZeroCnt)\n\n        print(JS)\n\n\n#.........................................FAST INPUT OUTPUT.......................................\nBUFSIZE = 8192\n\n\nclass FastIO(IOBase):\n    newlines = 0\n\n    def __init__(self, file):\n        self._fd = file.fileno()\n        self.buffer = BytesIO()\n        self.writable = \"x\" in file.mode or \"r\" not in file.mode\n        self.write = self.buffer.write if self.writable else None\n\n    def read(self):\n        while True:\n            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n            if not b:\n                break\n            ptr = self.buffer.tell()\n            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n        self.newlines = 0\n        return self.buffer.read()\n\n    def readline(self):\n        while self.newlines == 0:\n            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n            self.newlines = b.count(b\"\\n\") + (not b)\n            ptr = self.buffer.tell()\n            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n        self.newlines -= 1\n        return self.buffer.readline()\n\n    def flush(self):\n        if self.writable:\n            os.write(self._fd, self.buffer.getvalue())\n            self.buffer.truncate(0), self.buffer.seek(0)\n\n\nclass IOWrapper(IOBase):\n    def __init__(self, file):\n        self.buffer = FastIO(file)\n        self.flush = self.buffer.flush\n        self.writable = self.buffer.writable\n        self.write = lambda s: 
self.buffer.write(s.encode(\"ascii\"))\n        self.read = lambda: self.buffer.read().decode(\"ascii\")\n        self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\n\nsys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)\ninput = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\n\n#....................................END OF FAST I/O............................................\n\nif __name__ == \"__main__\":\n    main()","sub_path":"June long challenge/The Tom and Jerry Game!.py","file_name":"The Tom and Jerry Game!.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"290261625","text":"import requests \nimport os\n\n\nurl = 'https://hm1973.itp.io/image-upload' \nfiles = {'image_name': open('/home/pi/Desktop/surv/receipt/5-barcode.png', 'rb')}\nr = requests.post(url, files=files)\n\nprint(r.status_code)\nprint(r.text)\n\n\n","sub_path":"POST.py","file_name":"POST.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"609086571","text":"from master import *\r\nfrom threading import Thread\r\nfrom select import select\r\n\r\nclass Gamemanager:\r\n    def __init__(self,soc,que):\r\n        self.soc = soc\r\n        self.que = que\r\n        self.rlist = []\r\n        self.user_dict = {} #username as the value, connection socket as the key, for easy user lookup\r\n        self.play_user = {} #players in a match; both keys and values are sockets\r\n        self.table = [[] for i in range(10)]\r\n\r\n    #module entry point: loop receiving client requests\r\n    def server_forver(self):\r\n        t1 = Thread(target=self.add_user)\r\n        t2 = Thread(target=self.do_request)\r\n        t1.start()\r\n        t2.start()\r\n\r\n\r\n    def do_request(self):\r\n        self.ws = []\r\n        self.xs = []\r\n        while True:\r\n            if self.rlist:\r\n                # print(self.rlist)\r\n                rs, ws, xs = select(self.rlist, self.ws, self.xs,1)\r\n            else:\r\n                continue\r\n            # print(\"select executed\")\r\n            for r in rs:\r\n                msg = r.recv(1024).decode()\r\n                if not msg:\r\n                    self.do_quit()\r\n                else:\r\n                    print(msg)\r\n                    action,data = msg.split(\" \",1)\r\n                    if action == \"T\":\r\n                        self.get_ready(r,data)\r\n                    elif action == \"G\":\r\n                        print(data)\r\n                        self.play_game(r,data)\r\n\r\n    #enter the room and take a seat\r\n    def get_ready(self,r,msg):\r\n        n,data = msg.split(\"##\",1)\r\n        n = int(n)\r\n        if data == \"IN\": #the player wants to join a given table\r\n            self.verify_table(n,r)\r\n        elif data == \"OK\": #the player is ready\r\n            r.send(b\"OK\")\r\n            self.start_game(n,r)\r\n        elif data == \"LOOK\": #spectating, deferred for now\r\n            pass\r\n\r\n    def verify_table(self,n,r):\r\n        if len(self.table[n-1]) < 2:\r\n            self.table[n-1].append(r)\r\n            print(self.table)\r\n            r.send(b\"OK\")\r\n        else:\r\n            r.send(b\"NG\")\r\n\r\n    #note: once both players are ready, the game enters the synchronized send/receive phase\r\n    def start_game(self,n,r):\r\n        if len(self.table[n-1]) == 2:\r\n            # add the two players to the dict so match messages can be routed quickly\r\n            self.play_user[r] = self.table[n-1][0]\r\n            self.play_user[self.table[n-1][0]] = r\r\n            r.send(\"START 红\".encode())\r\n            self.table[n-1][0].send(\"START 黑\".encode())\r\n\r\n    #forward the move data to the opponent\r\n    def play_game(self,r,data):\r\n        self.play_user[r].send(data.encode())\r\n\r\n    def add_user(self):\r\n        while True:\r\n            soc = self.que.get()\r\n            print(\"adding user\",soc)\r\n            self.rlist.append(soc)\r\n\r\n    def do_quit(self):\r\n        pass\r\n\r\n# class User:\r\n#     def __init__(self,soc,user,table=None):\r\n#         self.soc = soc\r\n#         self.user = user\r\n#         self.table = table\r\n\r\n\r\n\r\n\r\n","sub_path":"game_room.py","file_name":"game_room.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580799091","text":"# -*- coding: utf-8 -*-\nfrom selenium import 
webdriver\nimport sys\nimport psutil\nimport time\nimport os\nimport re\n\nimport platform\n\n\nclass FireFoxOption:\n    os1 = platform.architecture()\n    fpath = \"D:/prog/geckodriver.exe\"\n    lpath = os.path.dirname(__file__) + \"\\geckodriver\"\n    # linux\n\n    @classmethod\n    def get_firefox_driver(cls, ishandless=True):\n        options = webdriver.FirefoxOptions()\n        options.add_argument('--disable-gpu')\n        profile = webdriver.FirefoxProfile()\n        # disable image loading\n        profile.set_preference('permissions.default.image', '2')\n        # disable Flash plugin loading\n        profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')\n        if (ishandless):\n            options.set_headless()\n        if cls.os1[1] == 'WindowsPE':\n            return webdriver.Firefox(firefox_options=options, executable_path=cls.fpath, firefox_profile=profile)\n\n        else:\n            return webdriver.Firefox(firefox_options=options, firefox_profile=profile)\n\n    @staticmethod\n    def get_music163url():\n        return \"https://music.163.com\"\n\n    @classmethod\n    def get_singer_list_by_typeid(self, id, initial=\"\"):\n        if initial == \"\":\n            return self.get_music163url() + \"/#/discover/artist/cat?id=\" + id\n        else:\n            return self.get_music163url() + \"/#/discover/artist/cat?id=\" + id + \"&initial=\" + initial\n\n    @staticmethod\n    def get_id_by_url(url):\n\n        pattern = re.compile(r'id=')\n        x = re.search(pattern, url).span()[1]\n        return url[x:len(url)]\n\n    @classmethod\n    def get_album_url_by_id(cls, id):\n        url = cls.get_music163url() + \"/#/artist/album?id=\" + id\n        driver = cls.get_firefox_driver()\n        driver.get(url)\n        return driver\n\n    @classmethod\n    def get_song_list_by_id(cls, id):\n        url = cls.get_music163url() + \"/#/album?id=\" + id\n        driver = cls.get_firefox_driver()\n        driver.get(url)\n        return driver\n","sub_path":"FireFoxOption.py","file_name":"FireFoxOption.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"221304176","text":"# -*- coding:utf-8 -*-\r\n\r\nimport codecs\r\nimport json\r\nfrom web_test import Main_Poetry_maker\r\nfrom rank_words import rank_path\r\n\r\ntarget_words_couplet = []\r\nmaker = Main_Poetry_maker()\r\n\r\nwith codecs.open(rank_path, 'r', 'utf-8') as fin:\r\n    ranks = json.load(fin)\r\n\r\nout_file = open('out_words.txt', 'w', encoding='utf-8')\r\n\r\nfor i in range(0, 100):\r\n    word = ranks[i][0]\r\n    target_words_couplet.append(word)\r\n    result_couplet = maker.predict(word)\r\n    out_file.write(word + ' ' + result_couplet + '\\n')\r\n    print(word + ' ' + result_couplet + '\\n')\r\n\r\nout_file.close()","sub_path":"tags2couples/predict_couplet/test_result.py","file_name":"test_result.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"295912791","text":"import sqlite3\nimport settings\nimport csv\nimport sys\n\nfrom utils import Participant, get_all_participants, get_targetted_participants, get_assignment\n\n\ndef make_output_csv(connection, participants):\n    with open(settings.OUT_FILE, 'w+') as csvfile:\n        writer = csv.writer(csvfile)\n        writer.writerow(['giver', 'recipient', 'recipient likes', 'recipient dislikes/allergies', 'giver in office', 'recipient in office'])\n\n        for participant_id in participants:\n            assignment = get_assignment(connection, participant_id, settings.YEAR)\n            giver_id = assignment[0]\n            recipient_id = assignment[1]\n\n            giver = Participant(connection, giver_id, settings.YEAR)\n            recipient = Participant(connection, recipient_id, settings.YEAR)\n            row_data = [\n 
giver.name,\n                recipient.name,\n                recipient.likes,\n                recipient.dislikes,\n                giver.in_office,\n                recipient.in_office\n            ]\n\n            writer.writerow(row_data)\n\n\nif __name__ == '__main__':\n    testing = '--testing' in sys.argv\n    connection = sqlite3.connect(settings.SQLITE_FILENAME)\n\n    if testing:\n        print(\"just testing, folks\")\n        participants = get_targetted_participants(connection, settings.YEAR, settings.TEST_EMAILS)\n    else:\n        print(\"ITS THE REAL THING\")\n        participants = get_all_participants(connection, settings.YEAR, exclude_email_sent=False)\n\n    make_output_csv(connection, participants)\n","sub_path":"santakkuh_dive/old_scripts/make_output_csv.py","file_name":"make_output_csv.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597498837","text":"x = float(input(\"Enter the value of x: \"))\nn = term = num =1\nresult = 1.0\nwhile n <= 100:\n    term *= x / n\n    result += term\n    n += 1\n    if term < 0.0001:\n        break\nprint(\"No of Times ={} and Sum = {}\".format(n, result))\n\n#n = 1 term = 1 * x result = 1.0 + x \n#n = 2 term = x * x / 2 result = 1.0 + x + x * x / 2\n#n = 3 term = x * x / 2 * x / 3 result = 1.0 + x + x * x / 2 + (x * x * x / 3 * 2)\n\n\n","sub_path":"powerseries.py","file_name":"powerseries.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503380419","text":"#!/usr/bin/env python3\n\n'''\nSegmentize an image of NID card into at least two rectangular blocks,\n* one containing the header part and\n* one containing the information part\n'''\n\nimport os\nimport argparse\nimport cv2\nimport numpy as np\nimport imutils\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image for preprocessing\")\nap.add_argument(\"-o\", \"--outprefix\", required=True, help=\"prefix of output image name after preprocessing\")\nap.add_argument(\"-t\", \"--threshold\", required=True, help=\"threshold value over which the gray pixels will be treated as background noise to clean the image\")\nap.add_argument(\"-p\", \"--epsilonpct\", required=True, help=\"%% of arc length for rectangular edge contour approximation\")\nap.add_argument(\"-m\", \"--morphpixels\", required=True, help=\"pixels to make kernel for ellipsoid morphing\")\nargs = vars(ap.parse_args())\nthresholdValue = int(args[\"threshold\"])\nepsilonpct = float(args[\"epsilonpct\"])\nmorphpixels = int(args[\"morphpixels\"])\npre = args[\"outprefix\"]\n\n# load the image\nimage = cv2.imread(args[\"image\"])\n\n# resize the image let us make width 1920\nimage = imutils.resize(image, width = 1920)\n\n# convert the image to grayscale\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.bilateralFilter(gray, 9, 11, 11)\n\n# make a clean image by eliminating all light gray pixels valued above the threshold value\nclean = gray.copy()\nclean[np.where((clean>=[thresholdValue]))] = [255]\n\n# make all remaining pixels more black than 180 to full black\nret,black = cv2.threshold(clean, thresholdValue, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)\n\n# Connect the lines in the border using morphological operators. 
Merge the lines that are close and fill some of the empty spaces\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morphpixels, morphpixels))\nedged = cv2.dilate(black, kernel)\n\n# detect edges\nedged = cv2.Canny(edged, 100, 200)\n\n# find contours in the edged image, keep only the largest ones, and initialize our screen contour\n_, cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\nprint(\"{} edge contour(s) found\".format(len(cnts)))\ncnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]\n\nscreenCnt = []\n# loop over our contours to find out rectabgular big blocks\nfor c in cnts:\n\t# approximate the contour\n\tperi = cv2.arcLength(c, True)\n\tepsilon = (epsilonpct / 100) * peri\n\tapprox = cv2.approxPolyDP(c, epsilon, True)\n\n\t# if our approximated contour has four points, then we can assume that we have found our screen\n\tif len(approx) == 4:\n\t\tscreenCnt.append(approx)\n\nprint(\"{} major rectangular block(s) found\".format(len(screenCnt)))\n\n# now draw recangles over the findContours\ncropCount = 0\nfor c in screenCnt:\n\t# get the bounding rectangle of the countour\n\tx,y,w,h = cv2.boundingRect(c)\n\t#cv2.rectangle(clean, (x,y), (x+w, y+h), (0,255,0), 2)\n\tcropped = gray[y:y+h, x:x+w]\n\tfn = \"{}-s{}-m{}-e{}.png\".format(pre, cropCount, morphpixels, epsilonpct)\n\tcv2.imwrite(fn, cropped)\n\tprint(\"segment {} cropped in : {}\".format(cropCount, fn))\n\tcropCount = cropCount + 1\n #cv2.drawContours(clean, [c], -1, (0, 255, 0), 3)\n\nprint(\"{} segment(s) cropped\".format(cropCount))\n\n# write the images back\ncv2.imwrite(\"{}-gray.png\".format(pre), gray)\n#cv2.imwrite(\"{}-edged.png\".format(pre), edged)\n#cv2.imwrite(\"{}-black.png\".format(pre), black)\n#cv2.imwrite(\"{}-clean.png\".format(pre), clean)\n","sub_path":"MachineLearning/natural-language-processing/tesseract/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"1338645","text":"import json\nimport dash\nfrom dash_bootstrap_components._components.Spinner import Spinner\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport cv2\nfrom PIL import Image\nimport numpy as np\n\nimport cfg\nimport utilities\nfrom Label import Label\n\n# Label images page layout\ndef getLabelImagesPage():\n return\\\n dbc.Container(children=[\n dbc.Row(dbc.Col(html.H1(\"Label Images\"))),\n dbc.Row(dbc.Col(\n # Upload image\n dbc.Button(\n dcc.Upload(\n id='uploadImage',\n children=html.Div([\n 'Drag and drop or ',\n html.A('select files')\n ]),\n # accept=accept({\"type\": \"image/*\"}),\n # allow multiple files to be uploaded\n multiple=True\n ),\n block=True\n ),\n className=\"justify-content-center mb-4\", # classname adds formatting\n )\n ),\n # dbc.Row(\n # html.Div(id=\"displayUploadImage\"), #dd\n # ),\n # Display labeled image\n dbc.Row(dbc.Col(\n dbc.Spinner( # loading symbol while processing img\n dcc.Graph(\n id=\"displayProcessedImage\", \n # this allows the user to draw and remove labels\n config = {\n \"modeBarButtonsToAdd\": [\n \"drawrect\",\n \"eraseshape\",\n ]}),\n show_initially=False,\n ),\n # width={\"size\": 6, \"offset\": 3},\n className=\"justify-content-center\"\n )),\n \n # next and previous 
buttons\n dbc.Row([\n dbc.Col(\n dbc.Button(\"Previous\", id=\"prevImage\"),\n width={\"size\": 1, \"offset\": 0}\n ),\n dbc.Col(\n dbc.Button(\"Next\", id=\"nextImage\"),\n width={\"size\": 1, \"offset\": 10}\n )]\n ),\n\n # Store information for multi image display\n dcc.Store(id=\"imageIndex\"),\n\n # Store labels\n dcc.Store(id=\"labels\")\n ])\n\n\n\n\n# Callbacks for image app\n\n@cfg.app.callback(Output(\"displayProcessedImage\", \"figure\"),\n [Input(\"uploadImage\", \"contents\"),\n Input(\"labels\", \"data\")],\n State(\"imageIndex\", \"data\"),)\ndef displayImages(contents, labels, index):\n \"\"\"\n Displays the labeled image given the array of uploaded images and the index\n \"\"\"\n if not contents or labels is None:\n fig = px.bar()\n fig.update_layout(coloraxis_showscale=False,\n margin=dict(l=0, r=0, b=0, t=0),\n autosize=True)\n fig.update_xaxes(visible=False)\n fig.update_yaxes(visible=False)\n fig.add_annotation(text=\"No images to show\", showarrow=False,\n font={\"size\":28})\n return fig\n # raise dash.exceptions.PreventUpdate\n\n # get the index of image to view\n indexData = index or {'index': 0} # default index to 0 if not already set\n index = indexData['index']\n\n pilImg = utilities.uploadedToPil(contents[index])\n fig = figFromImage(pilImg)\n\n imgLabels = json.loads(labels[str(index)]) # retrieve the saved labels for the image\n # labels = runModel(pilImg)\n fig = overlayLabelsOnFig(fig, imgLabels)\n\n \n \n return fig\n\n\n@cfg.app.callback(Output(\"labels\", \"data\"),\n [Input(\"imageIndex\", \"data\"),\n Input(\"uploadImage\", \"contents\"),\n Input(\"displayProcessedImage\", \"relayoutData\")\n ],\n State(\"labels\", \"data\"))\ndef updateLabels(indexData, images, figLabels, labels):\n \"\"\"\n When the index updates, generate new labels if there are none.\n When the user changes the labels (add or delete), update the label list\n\n Note: the labels will be stored as a json, so extra work is required to access\n \"\"\"\n if images is None:\n # no images means no labels yet\n raise dash.exceptions.PreventUpdate\n\n trig = dash.callback_context.triggered[0]['prop_id']\n if not labels or trig == \"uploadImage.contents\":\n # labels are not initialized for this set of images\n labels = {str(x): None for x in range(len(images))}\n \n indexData = indexData or {'index': 0} # default index to 0 if not already set\n index = indexData['index']\n image = images[index]\n\n # print(\"BEFORE: \", labels)\n if trig == \"displayProcessedImage.relayoutData\":\n # update labels on this index\n # print(\"UPDATING\")\n labels[str(index)] = json.dumps(updateAnnotations(figLabels[\"shapes\"]))\n # print(\"AFTER: \", labels)\n return labels\n\n if labels[str(index)] is None:\n # labels are not set yet\n pilImg = utilities.uploadedToPil(image)\n imgLabels = runModel(pilImg)\n labels[str(index)] = json.dumps(imgLabels)\n\n # print(\"index: \", index, \"labels: \", labels)\n\n # print(labels)\n return labels\n\n\n@cfg.app.callback(Output(\"imageIndex\", \"data\"),\n [Input(\"prevImage\", \"n_clicks\"),\n Input(\"nextImage\", \"n_clicks\")],\n [State(\"imageIndex\", \"data\"),\n State(\"uploadImage\", \"contents\")])\ndef updateIndex(prev, next, data, contents):\n \"\"\"\n When the next or previous buttons are clicked, update the image index\n ensuring it stays within 0 <= index < len(contents)\n \"\"\"\n if not contents:\n return data\n\n # set default index if there is none\n data = data or {'index': 0}\n index = data[\"index\"]\n\n # get which button click triggered callback\n 
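# dash.callback_context.triggered is a list of {'prop_id': ..., 'value': ...} dicts;\n    # 'prop_id' has the form 'componentId.propName' (e.g. 'nextImage.n_clicks'), compared below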
trig = dash.callback_context.triggered[0]['prop_id']\n\n # update data if necessary and return it\n if trig == \"prevImage.n_clicks\" and index > 0:\n data['index'] = index - 1\n elif trig == \"nextImage.n_clicks\" and index < len(contents) - 1:\n data['index'] = index + 1\n else:\n # index should not change, it would cause undefined index\n raise PreventUpdate\n return data\n\n\n\n\n# Helper functions for callbacks\n\ndef figFromImage(image):\n \"\"\"\n Returns the given image as formatted a plotly figure\n \"\"\"\n fig = px.imshow(image)\n fig.update_layout(coloraxis_showscale=False,\n margin=dict(l=0, r=0, b=0, t=0),\n autosize=True)\n fig.update_xaxes(visible=False)\n fig.update_yaxes(visible=False)\n return fig\n\ndef runModel(image):\n \"\"\"\n Runs the model on the given pillow image, returns a plotly figure with labels\n \"\"\"\n labels = cfg.model.predict(image)\n # labeled = overlayLabelsOnImage(pilImg, labels)\n\n return labels\n # return Image.fromarray(labeled)\n\ndef overlayLabelsOnImage(image, labels):\n \"\"\"\n Given an image, displays labels on top and returns new image\n \"\"\"\n image1 = np.array(image)\n output = image1.copy() # copy to avoid mutating given image\n print(type(output))\n for j in range(len(labels)):\n # apply each label to image\n cv2.rectangle(output, (labels[j].x_min, labels[j].y_min),\n (labels[j].x_max, labels[j].y_max), (0, 0, 255), 2)\n return output\n\ndef overlayLabelsOnFig(fig, labels):\n \"\"\"\n Given a plotly figure, display labels on top and returns new image\n \"\"\"\n for i in labels:\n fig.add_shape(\n editable=True,\n type='rect',\n x0=i[\"x_min\"], x1=i[\"x_max\"],\n y0=i[\"y_min\"], y1=i[\"y_max\"],\n line=dict(\n color='blue', # TODO: switch to label's color,\n width=2,\n )\n )\n return fig\n\ndef updateAnnotations(labels):\n \"\"\"\n Takes labels stored in plotly fig and outputs an array of Label objects\n\n TODO: fix this, it loses the score and group of each label in the image\n \"\"\"\n labelObjs = []\n for i in range(len(labels)):\n label = labels[i]\n lo = Label(i, \"\", label[\"x0\"], label[\"x1\"], label[\"y0\"], label[\"y1\"], label[\"line\"][\"color\"], 0)\n labelObjs.append(lo)\n return labelObjs\n","sub_path":"UI/imageApp.py","file_name":"imageApp.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"104704136","text":"import uuid\n\nimport hyperchamber.io as io\n\nimport random\n\nstore = {}\nresults = []\n\ndef set(key, value):\n \"\"\"Sets a hyperparameter. Can be used to set an array of hyperparameters.\"\"\"\n store[key]=value\n return store\n\ndef count_configs():\n count = 1\n\n for key in store:\n value = store[key]\n if(isinstance(value,list)):\n count *= len(value)\n\n return count\n\ndef get_config_value(k, i):\n \"\"\"Gets the ith config value for k. e.g. get_config_value('x', 1)\"\"\"\n if(not isinstance(store[k], list)):\n return store[k]\n else:\n return store[k][i]\n\ndef configs(max_configs=1, offset=None, serial=False, create_uuid=True):\n \"\"\"Generate max configs, each one a dictionary. e.g. [{'x': 1}] \n \n Will also add a config UUID, useful for tracking configs. 
\n    You can turn this off by passing create_uuid=False.\n    \"\"\"\n    if len(store)==0:\n        return []\n\n    configs = []\n\n    if(offset is None):\n        offset = max(0, random.randint(0, count_configs()))\n    for i in range(max_configs):\n        # get an element to index over\n\n        config = config_at(offset)\n        if(create_uuid):\n            config[\"uuid\"]=uuid.uuid4().hex\n        configs.append(config)\n        if(serial):\n            offset+=1\n        else:\n            offset = max(0, random.randint(0, count_configs()))\n    return configs\n\ndef config_at(i):\n    \"\"\"Gets the ith config\"\"\"\n    selections = {}\n    for key in store:\n        value = store[key]\n        if isinstance(value, list):\n            selected = i % len(value)\n            i = i // len(value)\n            selections[key]= value[selected]\n        else:\n            selections[key]= value\n\n    return selections\n\ndef random_config():\n    offset = max(0, random.randint(0, count_configs()))\n    return config_at(offset)\n\ndef reset():\n    \"\"\"Reset the hyperchamber variables\"\"\"\n    global store\n    global results\n    store = {}\n    results = []\n    return\n\ndef top(sort_by):\n    \"\"\"Get the best results according to your custom sort method.\"\"\"\n    sort = sorted(results, key=sort_by)\n    return sort\n\ndef record(config, result):\n    \"\"\"Record the results of a config.\"\"\"\n    results.append((config, result))\n","sub_path":"hyperchamber/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"167289180","text":"from django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom ridecall.helpers import json_response, json_error\n\nfrom devices.models import Device\n\n@require_POST\n@login_required\ndef register(request):\n    post_dict = request.POST\n\n    device_id = post_dict.get('uuid')\n    if not device_id:\n        return json_error(400, 'uuid required')\n\n    device, created = Device.objects.get_or_create(\n        user_id = request.user.id,\n        uuid = device_id,\n    )\n\n    if not created:\n        device.save()\n\n    return json_response({})\n\n","sub_path":"server/devices/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"17569172","text":"import csv\nimport matplotlib.pyplot as plt, mpld3\nfrom mpld3 import plugins\nimport matplotlib.ticker as tkr\nimport matplotlib.dates as mdates\nfrom datetime import datetime\nfrom config import CONFIG\nimport locale\nlocale.setlocale(locale.LC_ALL, '')\n\ntstamp = datetime.now().strftime(\"%Y-%m-%d\")\n\ndef func(x, pos):  # format function takes tick label and tick position\n    s = '%d' % x\n    groups = []\n    while s and s[-1].isdigit():\n        groups.append(s[-3:])\n        s = s[:-3]\n    return s + ','.join(reversed(groups))\n\n\ndef plot_topstats(file='TQ_Setup/results/memory/clint-test-systemd-journald.csv', proc='mysqld'):\n\n    array = []\n\n    with open(file, 'r') as csvfile:\n\n        for line in csvfile.readlines():\n            # get number of columns\n            array = line.split(',')\n            first_item = array[0]\n\n        # num_columns = len(array)\n        csvfile.seek(0)\n\n        reader = csv.reader(csvfile, delimiter=',')\n        ttime = [0]\n        res = [6]\n        share = [7]\n        cpu = [9]\n        memp = [10]\n        x = []\n        y1 = []\n        y2 = []\n        y3 = []\n        y4 = []\n\n        count = 0\n\n        for row in reader:\n            if count == 0:\n                count += 1\n            else:\n                content = list(row[i] for i in res)\n                y1.append(int(content[0].strip()))\n\n                content = list(row[i] for i in share)\n 
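# assumption: as with 'res' above, 'share' holds the one CSV column index (7) carrying the SHR field of the top output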
y2.append(int(content[0].strip()))\n\n content = list(row[i] for i in cpu)\n y3.append(float(content[0].strip()))\n\n content = list(row[i] for i in memp)\n y4.append(float(content[0].strip()))\n\n content = list(row[i] for i in ttime)\n try:\n x.append(datetime.strptime(content[0].strip(), \"%m-%d-%Y %H:%M:%S\"))\n except Exception as e:\n x.append(datetime.strptime(content[0].strip(), \"%Y-%m-%d %H:%M:%S\"))\n\n y_format = tkr.FuncFormatter(func)\n\n fig = plt.figure() # the first figure\n ax1 = fig.add_subplot(2,1,1)\n l1 = ax1.plot(x, y1, marker='.')\n l2 = ax1.plot(x, y2, marker='.')\n ax1.yaxis.set_major_formatter(y_format)\n myFmt = mdates.DateFormatter('%m-%d-%Y %H:%M:%S')\n ax1.xaxis.set_major_formatter(myFmt)\n\n\n # format tootip values\n ttvalues1 = []\n ttvalues2 = []\n\n for i, s in enumerate(y1):\n ttvalues1.append(format(int(s), ','))\n\n for i, s in enumerate(y2):\n ttvalues2.append(format(int(s), ','))\n\n tt1 = plugins.PointLabelTooltip(l1[0], labels=ttvalues1)\n tt2 = plugins.PointLabelTooltip(l2[0], labels=ttvalues2)\n\n ax1.set_title('%s: %s Memory (Kib)' % (host, proc))\n plt.ylabel('Memory (KiB)')\n fig.set_size_inches(15, 8)\n plt.xticks(rotation=45)\n plt.subplots_adjust(top=0.9, bottom=0.1, left=0.1, right=0.9, hspace=0.4)\n\n\n ax2 = fig.add_subplot(2, 1, 2)\n l3 = ax2.plot(x, y3, marker='.')\n tt3 = plugins.PointLabelTooltip(l3[0], labels=y3)\n l4 = ax2.plot(x, y4, marker='.')\n tt4 = plugins.PointLabelTooltip(l4[0], labels=y4)\n ax2.yaxis.set_major_formatter(y_format)\n ax2.xaxis.set_major_formatter(myFmt)\n ax2.set_title('%s: %s CPU Usage' % (host, proc))\n plt.ylabel('CPU %')\n plt.xticks(rotation=45)\n\n labels1 = [\"RES\", \"SHA\"]\n labels2 = [\"CPU %\", \"Mem %\"]\n line_collections1 = [l1, l2]\n line_collections2 = [l3, l4]\n plugins.connect(fig, plugins.InteractiveLegendPlugin(line_collections1, labels1, ax=ax1), tt1, tt2)\n plugins.connect(fig, plugins.InteractiveLegendPlugin(line_collections2, labels2, ax=ax2), tt3, tt4)\n\n mpld3.save_html(fig, \"results/memory/%s_%s_%s.html\" % (host, proc, tstamp))\n\n plt.close()\n\n\n# plot_topstats(file='results/memory/%s-mysqld.csv' % CONFIG.graphhost, proc='mysqld')\n\n\nfor host in CONFIG.graphhost:\n\n csvfiles = [['results/memory/%s-mysqld.csv' % host, 'mysqld'],\n ['results/memory/%s-solr.csv' % host, 'solr'],\n ['results/memory/%s-systemd-journald.csv' % host, 'systemd-journald'],\n ['results/memory/%s-dynamo.csv' % host, 'dynamo'],\n ['results/memory/%s-tqcontroller.csv' % host, 'tqcontroller'],\n ['results/memory/%s-tq-supervisord.csv' % host, 'tq-supervisor'],\n ['results/memory/%s-httpd.csv' % host, 'httpd'],\n ['results/memory/%s-worker.csv' % host, 'worker'],\n ['results/memory/%s-memcached.csv' % host, 'memcached'],\n ['results/memory/%s-11211.csv' % host, 'container_11211'],\n ['results/memory/%s-5672.csv' % host, 'container_5672']\n ]\n\n for process in csvfiles:\n\n try:\n plot_topstats(file=process[0], proc=process[1])\n except Exception as e:\n print (\"There was an error generating: %s \" % process[0])\n","sub_path":"plot_topstats_js.py","file_name":"plot_topstats_js.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"79394383","text":"import pylab\nimport modeller\n\ndef r_enumerate(seq):\n \"\"\"Enumerate a sequence in reverse order\"\"\"\n # Note that we don't use reversed() since Python 2.3 doesn't have it\n num = len(seq) - 1\n while num >= 0:\n yield num, seq[num]\n num -= 1\n\ndef 
get_profile(profile_file, seq):\n \"\"\"Read `profile_file` into a Python array, and add gaps corresponding to\n the alignment sequence `seq`.\"\"\"\n # Read all non-comment and non-blank lines from the file:\n f = open(profile_file)\n vals = []\n for line in f:\n if not line.startswith('#') and len(line) > 10:\n spl = line.split()\n vals.append(float(spl[-1]))\n # Insert gaps into the profile corresponding to those in seq:\n for n, res in r_enumerate(seq.residues):\n for gap in range(res.get_leading_gaps()):\n vals.insert(n, None)\n # Add a gap at position '0', so that we effectively count from 1:\n vals.insert(0, None)\n return vals\n\ne = modeller.environ()\na = modeller.alignment(e, file='TvLDH-1bdmA.ali')\n\ntemplate = get_profile('1bdmA.profile', a['1bdmA'])\nmodel = get_profile('TvLDH.profile', a['TvLDH'])\n\n# Plot the template and model profiles in the same plot for comparison:\npylab.figure(1, figsize=(10,6))\npylab.xlabel('Alignment position')\npylab.ylabel('DOPE per-residue score')\npylab.plot(model, color='red', linewidth=2, label='Model')\npylab.plot(template, color='green', linewidth=2, label='Template')\npylab.legend()\npylab.savefig('dope_profile.png', dpi=65)\n","sub_path":"Modeller/aux/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"46432749","text":"from dagster import AssetKey, StaticPartitionsDefinition, asset\nfrom dagster._core.definitions.asset_graph import AssetGraph\nfrom dagster._core.definitions.auto_materialize_condition import (\n AutoMaterializeAssetEvaluation,\n MissingAutoMaterializeCondition,\n ParentOutdatedAutoMaterializeCondition,\n)\nfrom dagster._core.definitions.events import AssetKeyPartitionKey\n\npartitions = StaticPartitionsDefinition(partition_keys=[\"a\", \"b\", \"c\"])\n\n\n@asset(partitions_def=partitions)\ndef my_asset(_):\n pass\n\n\ndef test_num_requested():\n asset_graph = AssetGraph.from_assets([my_asset])\n\n e1 = AutoMaterializeAssetEvaluation.from_conditions(\n asset_graph=asset_graph,\n asset_key=AssetKey(\"my_asset\"),\n conditions_by_asset_partition={},\n dynamic_partitions_store=None,\n )\n assert e1.num_requested == 0\n assert e1.num_skipped == 0\n assert e1.num_discarded == 0\n\n e2 = AutoMaterializeAssetEvaluation.from_conditions(\n asset_graph=asset_graph,\n asset_key=AssetKey(\"my_asset\"),\n conditions_by_asset_partition={\n AssetKeyPartitionKey(AssetKey(\"my_asset\"), \"a\"): {MissingAutoMaterializeCondition()}\n },\n dynamic_partitions_store=None,\n )\n\n assert e2.num_requested == 1\n assert e2.num_skipped == 0\n assert e2.num_discarded == 0\n\n e3 = AutoMaterializeAssetEvaluation.from_conditions(\n asset_graph=asset_graph,\n asset_key=AssetKey(\"my_asset\"),\n conditions_by_asset_partition={\n AssetKeyPartitionKey(AssetKey(\"my_asset\"), \"a\"): {\n MissingAutoMaterializeCondition(),\n ParentOutdatedAutoMaterializeCondition(),\n }\n },\n dynamic_partitions_store=None,\n )\n assert e3.num_requested == 0\n assert e3.num_skipped == 1\n assert e3.num_discarded == 0\n\n e4 = AutoMaterializeAssetEvaluation.from_conditions(\n asset_graph=asset_graph,\n asset_key=AssetKey(\"my_asset\"),\n conditions_by_asset_partition={\n AssetKeyPartitionKey(AssetKey(\"my_asset\"), \"a\"): {\n MissingAutoMaterializeCondition(),\n ParentOutdatedAutoMaterializeCondition(),\n },\n AssetKeyPartitionKey(AssetKey(\"my_asset\"), \"b\"): {\n MissingAutoMaterializeCondition(),\n },\n },\n dynamic_partitions_store=None,\n )\n 
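# partition 'a' carries both conditions, so (as in e3) it counts as skipped; 'b' has only MissingAutoMaterializeCondition, so it counts as requested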
assert e4.num_requested == 1\n    assert e4.num_skipped == 1\n    assert e4.num_discarded == 0\n","sub_path":"python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/test_auto_materialize_asset_evaluation.py","file_name":"test_auto_materialize_asset_evaluation.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6497509","text":"# Given two strings s1, s2, find the lowest ASCII sum of deleted characters to make two strings equal. \n# \n#  Example 1: \n# \n# Input: s1 = \"sea\", s2 = \"eat\"\n# Output: 231\n# Explanation: Deleting \"s\" from \"sea\" adds the ASCII value of \"s\" (115) to the \n# sum.\n# Deleting \"t\" from \"eat\" adds 116 to the sum.\n# At the end, both strings are equal, and 115 + 116 = 231 is the minimum sum \n# possible to achieve this.\n# \n# \n# \n#  Example 2: \n# \n# Input: s1 = \"delete\", s2 = \"leet\"\n# Output: 403\n# Explanation: Deleting \"dee\" from \"delete\" to turn the string into \"let\",\n# adds 100[d]+101[e]+101[e] to the sum. Deleting \"e\" from \"leet\" adds 101[e] to\n# the sum.\n# At the end, both strings are equal to \"let\", and the answer is 100+101+101+101\n# = 403.\n# If instead we turned both strings into \"lee\" or \"eet\", we would get answers of\n# 433 or 417, which are higher.\n# \n# \n# \n# Note:\n# 0 < s1.length, s2.length <= 1000. \n# All elements of each string will have an ASCII value in [97, 122]. \n# Related Topics Dynamic Programming \n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n    def minimumDeleteSum(self, s1, s2):\n        m, n = len(s1), len(s2)\n\n        dp = [[float('inf')] * (n + 1) for _ in range(m + 1)]\n        dp[0][0] = 0\n        for i in range(1, m + 1):\n            dp[i][0] = dp[i - 1][0] + ord(s1[i - 1])\n\n        for j in range(1, n + 1):\n            dp[0][j] = dp[0][j - 1] + ord(s2[j - 1])\n\n        for i in range(1, m + 1):\n            for j in range(1, n + 1):\n                if s1[i - 1] == s2[j - 1]:\n                    dp[i][j] = dp[i - 1][j - 1]\n                else:\n                    dp[i][j] = min(dp[i - 1][j] + ord(s1[i - 1]),\n                                   dp[i][j - 1] + ord(s2[j - 1]))\n        return dp[-1][-1]\n    \n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"[712]Minimum ASCII Delete Sum for Two Strings.py","file_name":"[712]Minimum ASCII Delete Sum for Two Strings.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"33368287","text":"#Write a script that prints \"True\" if the elements of a programmatically\n#defined list form an increasing\n#sequence, and \"False\" otherwise\nlst = [7, 9, 2, 4, 1, 6, 5, 8, 3, 10]\nlst = list(range(11))\nlst_copy = sorted(lst)\nprint(\"TRUE\" if lst == lst_copy else \"FALSE\")\n\n#lst = [7, 9, 2, 4, 1, 6, 5, 8, 3, 10]\n#lst = list(range(11))\n#for i in range(1, len(lst)):\n#    if lst[i - 1] > lst[i]:\n#        print(\"FALSE\")\n#        break\n#else:\n#    print(\"TRUE\")\n\n\n","sub_path":"laba 1/Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"321508739","text":"from django.test import LiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass Courses(LiveServerTestCase):\n    def setUp(self):\n        self.browser = webdriver.Firefox()\n\n    def tearDown(self):\n        self.browser.quit()\n\n    def 
test_homepage(self):\n        self.browser.get(self.live_server_url)\n        self.assertIn('Universidade', self.browser.title)\n\n        self.assertEqual(\n            self.browser.find_element_by_tag_name('h1').text, 'Cursos')\n\n        links = self.browser.find_elements_by_tag_name('a')\n\n        self.assertIn('Novo Curso', [link.text for link in links])\n        self.assertIn('Nova Matéria', [link.text for link in links])\n        self.assertIn('Novo Estudante', [link.text for link in links])\n\n        # self.fail('Finish the test!')\n\n    def test_novo_curso(self):\n        self.browser.get(self.live_server_url)\n\n        novoCurso = self.browser.find_element_by_link_text('Novo Curso')\n\n        novoCurso.click()\n\n        self.assertEqual(\n            self.browser.find_element_by_tag_name('h1').text, 'Novo Curso')\n\n        inputName = self.browser.find_element_by_id('id_nameCourse')\n        inputDesc = self.browser.find_element_by_id('id_descriptionCourse')\n\n        inputName.send_keys('Psicologia')\n        inputDesc.send_keys('Bla bla bla')\n\n        inputButton = self.browser.find_element_by_tag_name('button')\n\n        self.assertEqual(inputButton.text, 'Salvar')\n\n        inputButton.click()\n\n        self.assertEqual(\n            self.browser.find_element_by_tag_name('h1').text, 'Cursos')\n","sub_path":"functional_tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"627805415","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_two_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ram/associate-resource-share-permission.html\nif __name__ == '__main__':\n    \"\"\"\n\tdisassociate-resource-share-permission : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ram/disassociate-resource-share-permission.html\n\tlist-resource-share-permissions : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ram/list-resource-share-permissions.html\n    \"\"\"\n\n    parameter_display_string = \"\"\"\n    # resource-share-arn : The Amazon Resource Name (ARN) of the resource share.\n    # permission-arn : The ARN of the AWS RAM permission to associate with the resource share.\n    \"\"\"\n    add_option_dict = {}\n    add_option_dict[\"parameter_display_string\"] = parameter_display_string\n    # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n    write_two_parameter(\"ram\", \"associate-resource-share-permission\", \"resource-share-arn\", \"permission-arn\", add_option_dict)\n","sub_path":"ram_write_2/resource-share-permission_associate.py","file_name":"resource-share-permission_associate.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"276048985","text":"#####################################################################################\n#                                                                                   #\n#    NAME: Highest Rank Number in an Array                                          #\n#    RANK: 6kyu                                                                     #\n#    URL: https://www.codewars.com/kata/5420fc9bb5b2c7fd57000004/train/python       #\n#                                                                                   #\n#####################################################################################\n\ndef highest_rank(arr):\n    num_occurrences = {}\n\n    for number in arr:\n        if num_occurrences.get(number):\n            num_occurrences[number] += 1\n        else:\n            num_occurrences[number] = 1\n\n    highest_number = 0\n    counter = 0\n\n    for num, occurrences in num_occurrences.items():\n        if occurrences == counter:\n            if num > highest_number:\n                highest_number = num\n                counter = occurrences\n        elif 
occurrences > counter:\n            highest_number = num\n            counter = occurrences\n\n    return highest_number\n\n\nprint(highest_rank([12, 10, 8, 12, 7, 6, 4, 10, 12]))\n","sub_path":"challenges_6kyu/highest_rank_number_in_an_array.py","file_name":"highest_rank_number_in_an_array.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"509124811","text":"\"\"\"\n11.8 Variable Scope\nThe scope of an identifier is defined as the range of applicability of its declaration within a program, or what we call variable visibility. In other words, it is like asking yourself in which parts of a program you can access a given identifier. Variables have either local or global scope.\n11.8.1 Global Versus Local Variables\nVariables defined inside a function have local scope; variables at the top level of a module have global scope. In the famous \"Dragon Book\" on compiler theory, Aho, Sethi, and Ullman summarize it as follows:\n\"The portion of the program to which a declaration applies is called the scope of that declaration. An occurrence of a name in a procedure is local to the procedure if it is within the scope of a declaration within the procedure; otherwise, the occurrence is nonlocal.\"\nOne characteristic of global variables is that, unless they are deleted, they live until the script ends, and their values are accessible to all functions, whereas local variables, like the stack frames they are stored in, exist only temporarily, depending solely on whether the function that defines them is currently active.\nWhen a function call occurs, its local variables come into the scope in which they are declared. At that moment, a new local name is created for that object, and once the function completes and the frame is released, the variable goes out of scope.\nglobal_str = 'foo'\ndef foo():\nlocal_str = 'bar'\nreturn global_str + local_str\nIn the example above, global_str is a global variable and local_str is a local variable. The foo() function can access both the global and the local variable, while the main body of the code can access only the global one.\nCore Note: Searching for identifiers (a.k.a. variables, names, etc.)\nWhen searching for an identifier, Python starts with the local scope. If the name is not found in the local scope, it must be found in the global scope, otherwise a NameError exception is raised. A variable's scope is related to the namespace it lives in. We will formally introduce namespaces in Chapter 12; for now, we can say that a namespace is simply a naming domain that maps names to objects, a virtual collection of the variable names currently in use.\nThe notion of scope is related to the namespace search order used to find a variable. When a function executes, all the names in the local namespace are in the local scope. That is the first namespace searched when looking up a variable. If it is not found there, a global variable of the same name may be found instead. Those variables are stored in (and searched through) the global and built-in namespaces.\nIt is possible to \"hide\" or override a global variable just by creating a local one. Recall that the local namespace is searched first, living in its local scope. If a name is found there, the search does not continue on to a variable in the global scope, so any matching name in the global or built-in namespaces can be overridden this way.\nLikewise, be careful when using local variables with the same name as a global variable. If you use such a name in a function (to access the global variable) before the local variable is assigned, you will get an exception (NameError or UnboundLocalError, depending on your Python version).\n\"\"\"\n\n\"\"\"\n11.8.2 The global Statement\nIf the name of a global variable is declared inside a function body, the global variable's name can be overridden by the local one.\nHere is another example, similar to the first one, but where the global versus local nature of the variable is not as clear-cut:\ndef foo():\nprint \"\\ncalling foo()...\"\nbar = 200\nprint \"in foo(), bar is\", bar\nbar = 100\nprint \"in __main__, bar is\", bar\nfoo()\nprint \"\\nin __main__, bar is (still)\", bar\nThis gives the following output:\nin __main__, bar is 100\ncalling foo()...\nin foo(), bar is 200\nin __main__, bar is (still) 100\nOur local bar pushed the global bar out of the local scope. To explicitly reference a named global variable, you\nmust use the global statement. The syntax for global is:\nglobal var1[, var2[, ... varN]]\nModifying the example above, we can update our code so that we use the global version of is_this_global rather than creating a new local variable.\n>>> is_this_global = 'xyz'\n>>> def foo():\n...     global is_this_global\n...     this_is_local = 'abc'\n...     is_this_global = 'def'\n...     print this_is_local + is_this_global\n...\n>>> foo()\nabcdef\n>>> print is_this_global\ndef\n\"\"\"\nbar = 100\ndef foo():\n    print('\\ncalling foo()...')\n    bar = 200\n    print('in foo(), bar is ', bar)\nprint('in __main__, bar is ', bar)\nfoo()\nprint('in __main__, bar is (still) ', bar) #still 100\n\nis_this_global = 'xyz'\ndef foo():\n    global is_this_global\n    this_is_local = 'abc'\n    is_this_global = 'def'\n    print(this_is_local + is_this_global)\nfoo()\nprint(is_this_global) #def\n\n\"\"\"\n11.8.3 The Number of Scopes\nPython syntactically supports multiple levels of function nesting and, as of Python 2.1, matching statically nested scopes. Before 2.1, however, there were at most two scopes: a function's local scope and the global scope. Although more levels of function nesting could exist, you could not access more than those two scopes.\ndef foo():\nm = 3\ndef bar():\nn = 4\nprint m + n\nprint m\nbar()\nAlthough this code runs perfectly fine today....\n>>> foo()\n3\n7\n. . 
\n\n\"\"\"\n11.8.3 Number of Scopes\nSyntactically, Python supports multiple levels of function nesting, and as of Python 2.1 the scopes match this static nesting. Before 2.1, however, there were at most two scopes: a function's local scope and the global scope. Even though functions could be nested more deeply, you could not access more than two scopes.\ndef foo():\n    m = 3\n    def bar():\n        n = 4\n        print m + n\n    print m\n    bar()\nAlthough this code runs perfectly today...\n>>> foo()\n3\n7\n...running it before Python 2.1 produced an error:\n>>> foo()\nTraceback (innermost last):\nFile \"<stdin>\", line 1, in ?\nFile \"<stdin>\", line 7, in foo\nFile \"<stdin>\", line 5, in bar\nNameError: m\nAccessing foo()'s local variable m from inside bar() was illegal, because m was declared local to foo(). The only scopes accessible from bar() were its own local scope and the global scope; foo()'s local scope was not on that two-item list. Notice that the 'print m' statement succeeded while the call to bar() failed. Fortunately, thanks to Python's current nested-scoping rules, this problem no longer exists today.\n\"\"\"\ndef foo():\n    m = 3\n    def bar():\n        n = 4\n        print(m + n)\n        #m = 10  # m can be read as above but not rebound; uncommenting this would make the print above complain about an uninitialized local m\n    bar()\n    print(m)\nfoo() #7 3\n\n\"\"\"\n11.8.4 Closures\nGiven Python's statically nested scoping, which we saw earlier, defining inner functions becomes very useful. In the following section we focus on scope and lambda, but before Python 2.1, when the scoping rules changed to what they are today, inner functions suffered from the same problem.\nIf references are made inside an inner function to a variable defined in an outer (but not global) scope, the inner function is known as a closure. The variables defined in the enclosing function but referenced or used by the inner function are called free variables. Closures are an important concept in functional programming, of which Scheme and Haskell are two examples. Closures are syntactically simple (as simple as inner functions) yet still very powerful.\nA closure combines an inner function's own code and scope with the scope of the enclosing function. Closure lexical variables belong neither to the global namespace nor to the local one -- they belong to another namespace, carrying a \"wandering\" scope with them. (Note how this differs from objects: those variables live in an object's namespace, whereas closure variables live in a function's namespace and scope.) So why would you want to use closures?\nClosures are useful for setting up computations, hiding state, and switching freely between function objects and scopes. Closures come in handy for GUIs or for event-driven programming where many APIs support callbacks, and in exactly the same way for fetching database rows and processing the data. Callbacks are just functions; closures are functions too, but they carry some extra scope with them. They are merely functions with an additional feature... another scope.\nYou may find the use of closures very similar to the partial function application introduced earlier in this chapter, but PFA is more like currying, because closures have little to do with calling functions -- they are about using variables defined in another scope.\n1. A simple closure example\nBelow is a simple example using closures. We simulate a counter, and also make an integer \"mutable\" by wrapping it as the single element of a list.\ndef counter(start_at=0):\n    count = [start_at]\n    def incr():\n        count[0] += 1\n        return count[0]\n    return incr\nThe only thing counter() does is accept an initial value to start counting at and assign it as the sole member of the list count. An inner function incr() is then defined. By using the variable count inside it, we create a closure, because it now carries the whole counter() scope with it.\nincr() increments the running count and returns it. The final bit of magic is that counter() returns incr, a (callable) function object. If we run this function interactively, we get the following output -- note how similar this looks to instantiating a counter object and executing the instance:\n>>> count = counter(5)\n>>> print count()\n6\n>>> print count()\n7\n>>> count2 = counter(100)\n>>> print count2()\n101\n>>> print count()\n8\nWhat is a bit different is that we were able to do something that would otherwise require writing a class -- and not just writing it, but also overriding its __call__() special method to make instances callable. Here we managed it with a pair of functions.\nNow, in many cases a class is the most suitable tool. Closures fit best when you need a callback that must have its own scope, especially when the callback is small and simple -- usually also the clever choice. As usual, if you use closures, it is a good idea to comment your code or use docstrings to explain what you are doing.\n\"\"\"\ndef counter(start_at = 0):\n    count = [start_at]\n    def incr():\n        count[0] += 1\n        return count[0]\n    return incr\ncount = counter(5)\nprint(count()) #6\nprint(count()) #7\nprint(count()) #8\n\n\"\"\"\n2. Tracking closure lexical variables\nThe next two parts contain material for advanced readers... feel free to skip them. We will discuss how a function's __closure__ attribute can be used to track free variables. Here is a snippet that displays the tracking.\nIf we run this code, we get output like the following:\nf2 closure vars: ['<cell at 0x...: int object at 0x...>']\nf3 closure vars: ['<cell at 0x...: int object at 0x...>', '<cell at 0x...: int object at 0x...>']\n\n\n\n\nExample 11.7 Tracking closure variables (closureVars.py)\nThis example shows how closure variables can be tracked using the function's __closure__ attribute\n\"\"\"\noutput = '<%s> %d %d'\nw=x=y=z=1\ndef f1():\n    x=y=z=2\n    def f2():\n        y=z=3\n        def f3():\n            z=4\n            print(output %('w', id(w), w))\n            print(output %('x', id(x), x))\n            print(output %('y', id(y), y))\n            print(output %('z', id(z), z))\n        clo = f3.__closure__\n        if clo:\n            print(\"f3 closure vars:\", [str(c) for c in clo])\n        else:\n            print('no f3 closure vars')\n        f3()\n    clo = f2.__closure__\n    if clo:\n        print(\"f2 closure vars:\", [str(c) for c in clo])\n    else:\n        print('no f2 closure vars')\n    f2()\n\nclo = f1.__closure__\nif clo:\n    print(\"f1 closure vars:\", [str(c) for c in clo])\nelse:\n    print('no f1 closure vars')\nf1()\n\nprint(w,x,y,z) #1 1 1 1  none of the outer-scope values changed!\n\"\"\"\nno f1 closure vars\nf2 closure vars: ['<cell at 0x...: int object at 0x...>']\nf3 closure vars: ['<cell at 0x...: int object at 0x...>', '<cell at 0x...: int object at 0x...>']\n\n\n\n\n\"\"\"
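A side note on the list-wrapping trick in counter(): it predates Python 3's nonlocal statement, which lets a closure rebind its free variable directly. A minimal modern equivalent (an aside, not from the original text):

def counter3(start_at=0):
    count = start_at
    def incr():
        nonlocal count   # rebind the enclosing function's variable in place
        count += 1
        return count
    return incr

count = counter3(5)
print(count())  # 6
print(count())  # 7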
\n\n\"\"\"\n4. *An advanced closure and decorator example\nBack in Section 11.3.6 we saw a simple example using closures and decorators, deco.py. What follows is a slightly more advanced example to demonstrate the real power of closures. The application \"logs\" function calls; the user chooses whether a call should be logged before or after the function executes. If post-logging is chosen, the execution time is displayed as well.\nThis example demonstrates a decorator that takes an argument, which ultimately determines which closure gets used. This, too, is a hallmark of the power of closures.\n\"\"\"\nfrom time import time\ndef logged(when):\n    def log(f, *args, **kargs):\n        print(\"\"\"called:\n    function: %s\n    args: %r\n    kargs: %r\"\"\" % (f, args, kargs))\n\n    def pre_logged(f):\n        def wrapper(*args, **kargs):\n            log(f, *args, **kargs)\n            return f(*args, **kargs)\n        return wrapper\n\n    def post_logged(f):\n        def wrapper(*args, **kwargs):\n            now = time()\n            try:\n                return f(*args, **kwargs)\n            finally:\n                log(f, *args, **kwargs)\n                print(\"time delta: %s\" %(time() - now))\n        return wrapper\n\n    try:\n        return {'pre':pre_logged, \"post\":post_logged}[when]\n    except KeyError as e:\n        raise ValueError('when must be \"pre\" or \"post\"') from e\n\n@logged(\"post\")\ndef hello(name):\n    print(\"hello,\", name)\nhello('world')\n\"\"\"\n$ funcLog.py\nHello, World!\nCalled:\nfunction: <function hello at 0x...>\nargs: ('World!',)\nkargs: {}\ntime delta: 0.000471115112305\n5. Line-by-line explanation\nLines 5-10, 28-32\nThis code sketches the core of the logged() function, whose job is to capture the user's request about when a call should be logged: before or after the target function runs. logged() has three helper inner functions in its body: log(), pre_logged(), and post_logged(). log() is the function that actually does the logging; it merely displays the function's name and arguments on standard output. If you wanted to use it in the \"real world\", you would most likely write the output to a file, a database, or standard error (sys.stderr). The final part of logged(), lines 28-32, is the first code in the function that is not a function declaration: it reads the user's choice and returns one of the *logged() functions, which can then be called with the target function to wrap it.\nLines 12-26\npre_logged() and post_logged() both wrap the target function and log the call as their names suggest: post_logged() logs the call after the target function has executed, pre_logged() before. Depending on the user's choice, one of the two is returned. When the decorator is invoked, the decorator and its argument are evaluated first, e.g. logged('post'); the returned function object is then called with the target function as its argument, e.g. pre_logged(f) or post_logged(f).\nBoth *logged() functions contain a closure named wrapper(), which calls the target function at the appropriate time relative to the logging. That function returns the wrapped function object, which is subsequently reassigned to the original target function's identifier.\nLines 34-38\nThe main part of the script simply decorates hello() and then executes the modified function object. When you call hello() on line 38, it is no longer the same function object you created on line 35: the decorator on line 34 wrapped the original function object with the special decoration and returned the wrapped version of hello().\n\"\"\"\n\n\"\"\"\n11.8.5 Scope and lambda\nPython's anonymous lambda functions follow the same scoping rules as standard functions. A lambda expression defines a new scope, just like a function definition, so that scope is inaccessible to the rest of the program except from within the lambda itself.\nA lambda expression assigned to a function's local variable is accessible within that function body; the expression inside the lambda has the same scope access as the function. You can also think of a function and a lambda expression as siblings.\nx = 10\ndef foo():\n    y = 5\n    bar = lambda :x+y\n    print bar()\nWe now know this code runs just fine:\n>>> foo()\n15\n\"\"\"\nx = 10\ndef foo():\n    y = 5\n    bar = lambda : x+y\n    print(bar())\nfoo() #15\n\n\"\"\"\nThe outer y's value is passed in as a (default) argument, becoming a local y of the lambda. You will see this common idiom in all sorts of Python code you encounter; it does not, however, create any possibility of changing the outer y's value, for example:\n\"\"\"\ndef foo():\n    y = 5\n    bar = lambda y = y: x+y\n    print(bar()) #15\n    y = 8\n    print(bar()) #15\nfoo()\n\ndef foo():\n    y = 5\n    bar = lambda : x+y\n    print(bar()) #15\n    y = 8\n    print(bar()) #18  proves the closure stores a reference
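One more lambda-and-closure pitfall worth a note here (an aside, not from the chapter): because the closure stores a reference, lambdas created in a loop all observe the loop variable's final value unless it is frozen with the default-argument idiom shown above.

fns = [lambda: i for i in range(3)]
print([f() for f in fns])      # [2, 2, 2]: every lambda shares the same i

fns = [lambda i=i: i for i in range(3)]
print([f() for f in fns])      # [0, 1, 2]: i is frozen at definition time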
\nfoo()\n\n\"\"\"\n11.8.6 Variable scope and namespaces\nFrom what we have studied in this chapter, we can see that at any time there are one or two active scopes -- no more, no fewer. We are either at the top level of a module, where only the global scope is accessible, or executing inside a function body, where both the function's local scope and the global scope are accessible. How do namespaces relate to scope?\nFrom the core note in Section 11.8.1 we can also see that at any given time there are two or three active namespaces. From inside a function, the local scope bounds the local namespace, the first place names are searched; if the name exists there, checking the global scope (the global and built-in namespaces) is skipped.\nWe now present Example 11.9, a script with scopes mixed in everywhere. Determining the program's output is left as an exercise for the reader.\nLocal variables hide globals, as this variable-scope program shows. What will the output be (and why)?\n#!/usr/bin/env python\nj, k = 1, 2\ndef proc1():\n    j, k = 3, 4\n    print \"j == %d and k == %d\" % (j, k)\n    k = 5\ndef proc2():\n    j = 6\n    proc1()\n    print \"j == %d and k == %d\" % (j, k)\nk = 7\nproc1()\nprint \"j == %d and k == %d\" % (j, k)\nj = 8\nproc2()\nprint \"j == %d and k == %d\" % (j, k)\n\"\"\"\nj, k =1,2\ndef proc1():\n    j,k = 3,4 # shadows the outer ones; defines new local j, k\n    print('j=', j, 'k=',k)\n    k = 5 # changes only the inner k; the outer one is unaffected\ndef proc2():\n    j = 6 # shadows every outer j; defines a new local j\n    proc1()\n    print('j=', j, 'k=',k)\nk = 7\nproc1()\nprint('j=', j, 'k=',k)\nj = 8\nproc2()\nprint('j=', j, 'k=',k)\n\"\"\"\nj= 3 k= 4\nj= 1 k= 7\nj= 3 k= 4\nj= 6 k= 7\nj= 8 k= 7\n\"\"\"\n\n\n\n","sub_path":"CorePythonAP/11_Function/11_8_Variable_Scope.py","file_name":"11_8_Variable_Scope.py","file_ext":"py","file_size_in_byte":17877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"302308554","text":"from rest_framework import serializers\nfrom eventhandler.models import Team, League, Event, MiniEventSingle, MiniEventTeam\nfrom sports.serializers import SportSerializer\nfrom players.serializers import UserSerializer\nfrom venues.serializers import VenueSerializer\n\nclass TeamSerializer(serializers.ModelSerializer):\n\tsport = SportSerializer(read_only=True)\n\tplayers = UserSerializer(many=True, read_only=True)\n\n\tclass Meta:\n\t\tmodel = Team\n\t\tfields = ('name', 'manager', 'created', 'modified', 'sport', 'players' )\n\nclass LeagueSerializer(serializers.ModelSerializer):\n\tsports = SportSerializer(many=True, read_only=True)\n\n\tclass Meta:\n\t\tmodel = League\n\t\tfields = ('name', 'manager', 'created', 'modified', 'sports')\n\nclass EventSerializer(serializers.ModelSerializer):\n\tleagues = LeagueSerializer(read_only=True)\n\tvenues = VenueSerializer(many=True, read_only=True)\n\n\tclass Meta:\n\t\tmodel = Event\n\t\tfields = ('name', 'manager', 'created', 'modified', 'venues', 'leagues', 'teams' )\n\nclass MiniEventSingleSerializer(serializers.ModelSerializer):\n\tevent = EventSerializer(read_only=True)\n\tplayers = UserSerializer(many=True, read_only=True)\n\n\tclass Meta:\n\t\tmodel = MiniEventSingle\n\t\tfields = ('name', 'manager', 'created', 'modified', 'event', 'players')\n\nclass MiniEventTeamSerializer(serializers.ModelSerializer):\n\tevent = EventSerializer(read_only=True)\n\tteams = TeamSerializer(many=True, read_only=True)\n\n\tclass Meta:\n\t\tmodel = MiniEventTeam\n\t\tfields = ('name', 'manager', 'created', 'modified', 'event', 'teams')","sub_path":"server/eventhandler/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"519036325","text":"#!/usr/bin/env python\n\n# This script is for training S2S-LHSPA1-{2,3,4,5}{+Type} models.\n\nimport torch\nimport onmt.utils.distributed\n\nfrom onmt.utils.logging import logger\nfrom onmt.train_single_msap import main as single_main\nfrom onmt.train_single_msapta import main as single_main_msapta\nfrom onmt.utils.parse import ArgumentParser\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n    load_old_vocab, old_style_vocab, build_dataset_iter_multiple\n\nfrom train import _get_parser\nfrom train import ErrorHandler, batch_producer\nfrom onmt.inputters.MultiSourceInputter import MultiSourceInputter\n\n\ndef main(opt):\n    ArgumentParser.validate_train_opts(opt)\n    ArgumentParser.update_model_opts(opt)\n    ArgumentParser.validate_model_opts(opt)\n\n    # Load checkpoint if we resume from a previous training.\n    if opt.train_from:\n        logger.info('Loading checkpoint from %s' % opt.train_from)\n        checkpoint = torch.load(opt.train_from,\n                                map_location=lambda storage, loc: storage)\n        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)\n        vocab = checkpoint['vocab']\n    else:\n        vocab = torch.load(opt.data + '.vocab.pt')\n\n    # check for code where vocab is saved instead of fields\n    # (in the future this will be done in a smarter way)\n    if old_style_vocab(vocab):\n        fields = load_old_vocab(\n            vocab, opt.model_type, dynamic_dict=opt.copy_attn)\n    else:\n        fields = vocab\n\n    if len(opt.data_ids) > 1:\n        train_shards = []\n        for train_id in opt.data_ids:\n            shard_base = \"train_\" + train_id\n            train_shards.append(shard_base)\n        train_iter = build_dataset_iter_multiple(train_shards, fields, opt)\n    else:\n        if opt.data_ids[0] is not None:\n            shard_base = \"train_\" + opt.data_ids[0]\n        else:\n            shard_base = \"train\"\n        train_iter = MultiSourceInputter.build_dataset_iter(opt.src_types, shard_base, fields, opt)\n\n    nb_gpu = len(opt.gpu_ranks)\n\n    if opt.world_size > 1:\n        queues = []\n        mp = torch.multiprocessing.get_context('spawn')\n        semaphore = mp.Semaphore(opt.world_size * opt.queue_size)\n        # Create a thread to listen for errors in the child processes.\n        error_queue = mp.SimpleQueue()\n        error_handler = ErrorHandler(error_queue)\n        # Train with multiprocessing.\n        procs = []\n        for device_id in range(nb_gpu):\n            q = mp.Queue(opt.queue_size)\n            queues += [q]\n            procs.append(mp.Process(target=run, args=(\n                opt, device_id, error_queue, q, semaphore), daemon=True))\n            procs[device_id].start()\n            logger.info(\" Starting process pid: %d \" % procs[device_id].pid)\n            error_handler.add_child(procs[device_id].pid)\n        producer = mp.Process(target=batch_producer,\n                              args=(train_iter, queues, semaphore, opt,),\n                              daemon=True)\n        producer.start()\n        error_handler.add_child(producer.pid)\n\n        for p in procs:\n            p.join()\n        producer.terminate()\n\n    elif nb_gpu == 1:  # case 1 GPU only\n        if opt.type_append:\n            single_main_msapta(opt, 0)\n        else:\n            single_main(opt, 0)\n    else:  # case only CPU\n        if opt.type_append:\n            single_main_msapta(opt, -1)\n        else:\n            single_main(opt, -1)\n\n\ndef run(opt, device_id, error_queue, batch_queue, semaphore):\n    \"\"\" run process \"\"\"\n    try:\n        gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)\n        if gpu_rank != opt.gpu_ranks[device_id]:\n            raise AssertionError(\"An error occurred in Distributed initialization\")\n\n        # Fixed: this module-level function has no 'self'; dispatch to the\n        # appropriate single-process entry point, mirroring main() above.\n        if opt.type_append:\n            single_main_msapta(opt, device_id, batch_queue, semaphore)\n        else:\n            single_main(opt, device_id, batch_queue, semaphore)\n\n    except KeyboardInterrupt:\n        pass  # killed by parent, do nothing\n    except Exception:\n        # propagate exception to parent process, keeping original traceback\n        import traceback\n        error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))\n\n\nif __name__ == \"__main__\":\n    parser = _get_parser()\n\n    opt = parser.parse_args()\n\n    main(opt)\n","sub_path":"completion/train_msap.py","file_name":"train_msap.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
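The main() above fans batches out through one queue per GPU plus a shared semaphore; the skeleton of that producer/consumer wiring, reduced to a stand-alone hedged sketch (stdlib multiprocessing shown here; torch.multiprocessing mirrors this API):

import multiprocessing as mp

def worker(q):
    while True:
        item = q.get()
        if item is None:       # sentinel: producer has finished
            break
        print('consumed', item)

if __name__ == '__main__':
    ctx = mp.get_context('spawn')        # same start method the script requests
    q = ctx.Queue(maxsize=4)             # bounded, like opt.queue_size
    p = ctx.Process(target=worker, args=(q,), daemon=True)
    p.start()
    for batch in range(8):               # producer side (cf. batch_producer)
        q.put(batch)
    q.put(None)
    p.join()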
{"seq_id":"273394576","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom automain import *\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nimport smtplib\nimport cipherpickle\nimport trace\n\n__nifty = 'EcKd/cNrYoW0YSXCvoQP6ePzq4PpQm0ky+w8oZvUp3nspIy4xMB4qt3AWfi2yaNT+shLfTCY3VwtTBghWFTiCzWeS0dIllSCT2AZoyGeSKD4'\n\ndef __create_message(from_addr, to_addr, subject, body):\n    charset = \"ISO-2022-JP\"\n    msg = MIMEText(body, \"plain\", charset)\n    msg['Subject'] = subject\n    msg['From'] = from_addr\n    msg['To'] = to_addr\n    msg['Date'] = formatdate()\n    return msg\n\ndef send_mail_via_nifty(to_addr, subject, body, key):\n    from_addr = 'somoi@nifty.com'\n    account = cipherpickle.loads(__nifty, key)\n    msg = __create_message(from_addr, to_addr, subject, body)\n    smtp = smtplib.SMTP('smtp.nifty.com', 587)\n    #smtp.ehlo()\n    #smtp.starttls()\n    smtp.ehlo()\n    smtp.login(account['username'], account['password'])\n    smtp.sendmail(from_addr, [to_addr], msg.as_string())\n    smtp.close()\n\n    trace.watch(msg)\n    trace.writeline(str(msg))\n\n@automain\ndef main():\n    import sys\n\n    subject = 'TEST'\n    body = u'これはテストメールです。'  # Japanese: \"This is a test e-mail.\"\n    \n    to_addr = sys.argv[1]\n    key = sys.argv[2]\n    send_mail_via_nifty(to_addr, subject, body, key)\n","sub_path":"python/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
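The script above ships with the STARTTLS handshake commented out; purely as a generic illustration (standard smtplib calls, placeholder host and port, not specific to this provider), enabling it looks like:

import smtplib

def send_secure(msg, from_addr, to_addr, username, password):
    smtp = smtplib.SMTP('smtp.example.com', 587)   # placeholder submission host
    smtp.ehlo()
    smtp.starttls()    # upgrade the connection to TLS
    smtp.ehlo()        # re-identify over the encrypted channel
    smtp.login(username, password)
    smtp.sendmail(from_addr, [to_addr], msg.as_string())
    smtp.quit()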
{"seq_id":"324706630","text":"# Software installation guide\nimport os\nimport yaml\nimport easygui\nimport requests\nimport shutil\nimport zipfile\nfrom Panel.master_panel import PrepareENV, INIT_airport_docTree, INIT_process_docTree\nfrom config import version, SYS_LOCAL_fPATH, TITLE, YAML_PROJECT, YAML_PATH\nfrom concurrent.futures import ThreadPoolExecutor\n\ninstall_title = 'v2ray云彩姬安装向导'\n\n\nclass InstallGuider(object):\n    v2raycs_url = 'https://t.qinse.top/subscribe/version_manager.txt'\n    v2raycs_name = 'v2ray云彩姬.exe'\n\n    def __init__(self):\n        self.open_dir = ''\n\n        self.open_fp = ''\n\n        self.prepare_check()\n\n    @staticmethod\n    def prepare_check():\n        try:\n            requests.get('https://www.baidu.com')\n        except requests.exceptions.RequestException:\n            easygui.msgbox('网路异常', install_title)\n            exit()\n\n    def download(self, ):\n        # FILENAME\n        res = requests.get(InstallGuider.v2raycs_url)\n        res.encoding = res.apparent_encoding\n        v2raycs = res.text.strip().split(',')[-1]\n\n        self.open_fp = os.path.join(self.open_dir, v2raycs.split('/')[-1])\n\n        res = requests.get(v2raycs)\n\n        with open(self.open_fp, 'wb') as f:\n            f.write(res.content)\n\n    def run(self, use_updated=False):\n        try:\n            usr_choice = easygui.ynbox('是否执行v2ray云彩姬一键安装脚本?', install_title)\n            if usr_choice:\n                # First-time install\n                if use_updated is False:\n                    INIT_airport_docTree()\n                    INIT_process_docTree()\n                    for x in range(3):\n                        self.open_dir = easygui.diropenbox('请选择安装路径', install_title, default=SYS_LOCAL_fPATH)\n                        # Exit: the user abandoned the update\n                        if self.open_dir is None:\n                            return False\n                        # Restrict the choice\n                        if os.listdir(self.open_dir):\n                            easygui.msgbox('当前目录下存在其他文件,请选择独立的文件夹!', TITLE)\n                        else:\n                            # Record the download directory the user chose, so project files can be pulled on later updates\n                            with open(YAML_PATH, 'w', encoding='utf-8') as f:\n                                proj = YAML_PROJECT\n                                proj['path'] = self.open_dir\n                                yaml.dump(proj, f)\n                            break\n                    # A little education for the stubborn user\n                    else:\n                        easygui.msgbox('操作有误,请重试!', TITLE)\n                        return False\n                # Software update\n                else:\n                    try:\n                        VersionControlSystem.kill_main()\n\n                        # fixme: move the updated module onto the system path and drive software updates externally;\n                        # TODO: assign self.open_dir the path where the software lives\n                        with open(YAML_PATH, 'r', encoding='utf-8') as f:\n                            data = yaml.load(f, Loader=yaml.FullLoader)\n                            print(data['path'])\n                        # self.open_dir = easygui.diropenbox()\n                        self.open_dir = data['path']\n\n                    except Exception as e:\n                        print(e)\n\n                # Download thread\n                os.startfile(self.open_dir)\n                print(f\"install path >> {self.open_dir}\")\n                with ThreadPoolExecutor(max_workers=1) as t:\n                    t.submit(self.download)\n                    # t.submit(easygui.msgbox, '正在拉取项目文件,请等待下载', install_title)\n                easygui.msgbox('下载完成', install_title)\n\n                # Unzip thread\n                with ThreadPoolExecutor(max_workers=2) as t:\n                    t.submit(UnZipManager, self.open_fp)\n                    t.submit(easygui.msgbox, '正在解压核心组件,请等待解压', title=install_title)\n                easygui.msgbox('解压完成', install_title)\n\n                # Auto-start\n                target_file = self.open_fp.replace('.zip', '') + f'_v{VersionControlSystem.get_server_version()[0]}'\n                try:\n                    os.startfile(os.path.join(\n                        target_file,\n                        InstallGuider.v2raycs_name))\n                except OSError:\n                    pass\n                finally:\n                    for filename in os.listdir(self.open_dir):\n                        if '.zip' in filename:\n                            try:\n                                os.remove(os.path.join(self.open_dir, filename))\n                            except OSError:\n                                pass\n                        elif os.path.basename(target_file).split('_')[-1] != filename.split('_')[-1]:\n                            if os.path.basename(target_file).split('_')[0] in filename:\n                                try:\n                                    shutil.rmtree(os.path.join(self.open_dir, filename))\n                                    os.rmdir(os.path.join(self.open_dir, filename))\n                                except OSError:\n                                    pass\n\n        except Exception as e:\n            easygui.exceptionbox(f'{e}')\n        # over\n        finally:\n            easygui.msgbox('感谢使用', install_title)\n\n\nclass UnZipManager(object):\n    def __init__(self, target: list or str):\n        if isinstance(target, str):\n            target = [target, ]\n\n        for i in target:\n            if i.endswith('.zip') and os.path.isfile(i):\n                self.unzip(i)\n\n    def unzip(self, filename: str):\n        try:\n            file = zipfile.ZipFile(filename)\n            dirname = filename.replace('.zip', '') + f'_v{VersionControlSystem.get_server_version()[0]}'\n\n            # Create the directory and extract into it\n            os.mkdir(dirname)\n            file.extractall(dirname)\n            file.close()\n            # Recursively repair the encoding\n            self.rename(dirname)\n            return dirname\n\n        except Exception as e:\n            print(f'{filename} unzip fail || {e}')\n\n    def rename(self, pwd: str, filename=''):\n        \"\"\"Files inside the archive have Chinese names that come out garbled after extraction; restore them\"\"\"\n\n        path = f'{pwd}/{filename}'\n        if os.path.isdir(path):\n            for i in os.scandir(path):\n                self.rename(path, i.name)\n        newname = filename.encode('cp437').decode('gbk')\n        os.rename(path, f'{pwd}/{newname}')\n\n\n# ---------------------------------------\n# Environment isolation\n# ---------------------------------------\nclass VersionControlSystem(object):\n    vcs_url = 'https://t.qinse.top/subscribe/version_manager.txt'\n\n    @staticmethod\n    def get_server_version():\n        \"\"\"\n        :return: [version:str, url:str]\n        \"\"\"\n        return requests.get(VersionControlSystem.vcs_url).text.split('\\n')[-1].split(',')\n\n    def check_different(self):\n        server_version, url = self.get_server_version()\n        s_top, s_func, s_modify = server_version.split('.')\n        l_top, l_func, l_modify = version.split('.')\n\n        # Compare as integer tuples: the original pairwise >=/> comparison\n        # would miss updates such as 2.0.0 over 1.9.9.\n        if (int(s_top), int(s_func), int(s_modify)) > (int(l_top), int(l_func), int(l_modify)):\n            print('local version: {}'.format(version))\n            print('new version: {}'.format(server_version))\n            print('Discover new version!')\n            return server_version\n        else:\n            print('The current version is the latest version!')\n            return False\n\n    @staticmethod\n    def kill_main(exe_name: str = 'v2ray云彩姬.exe'):\n        import psutil\n        import signal\n\n        def get_all_pid():\n            pid_dict = {}\n            pids = psutil.pids()\n            for pid in pids:\n                p = psutil.Process(pid)\n                pid_dict[pid] = p.name()\n                print(f'pid-{pid},pname-{p.name()}')\n            return pid_dict\n\n        def kill(pid):\n            try:\n                os.kill(pid, signal.SIGABRT)\n                print(\"located pid: {}\".format(pid))\n            except Exception as e:\n                print('NoFoundPID || {}'.format(e))\n\n        dic = get_all_pid()\n        for t in dic.keys():\n            if dic[t] == exe_name:\n                kill(t)\n\n    def run(self, init=False):\n        from config import UPDATED_MODEL\n        server_version = self.check_different()\n        if server_version:\n            usr_choice = easygui.ynbox(f'当前版本:{TITLE}\n\n最新版本:v{server_version}\n\n发现新版本软件!是否更新?', install_title)\n            if usr_choice:\n                os.startfile(UPDATED_MODEL)\n\n            return 
True\n else:\n if not init:\n easygui.msgbox(f'当前版本:{TITLE}\\n\\n已是最新版本', install_title)\n return False\n\n\nif __name__ == '__main__':\n InstallGuider().run()\n","sub_path":"V2RaycSpider1025/MiddleKey/version_IO.py","file_name":"version_IO.py","file_ext":"py","file_size_in_byte":8993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456845500","text":"import time\nimport VL53L0X\nimport RPi.GPIO as GPIO\nfrom Adafruit_AMG88xx import Adafruit_AMG88xx\nfrom time import sleep\n\nf = raw_input('file name : ')\nfilename = f + '.txt'\ntdata = open(filename, 'a+')\n\nGPIO.setmode(GPIO.BCM)\n\nAct = 26\nActt = 19\nFan = 13\nFann = 6\nGPIO.setup(Act,GPIO.OUT)\nGPIO.setup(Actt,GPIO.OUT)\nGPIO.setup(Fan,GPIO.OUT)\nGPIO.setup(Fann,GPIO.OUT)\n\n# Create a VL53L0X object\ntof = VL53L0X.VL53L0X()\nsensor = Adafruit_AMG88xx()\n\n# Start ranging\n#wait for it to boot\ntof.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)\nsleep(.1)\n\ntiming = tof.get_timing()\nif (timing < 20000):\n timing = 20000\nprint (\"Timing %d ms\" % (timing/1000))\n\ntdata.write(\"Cal_Disp, mm, Sen_Disp, mm, Temperature, 'c, Time, s \\n\")\n\na = 0\nwhile a!=3:\n a= int(input('act=1,cool=2,stop=3 '))\n if a==1:\n c = int(input('time : '))\n b = c*5+1\n GPIO.output(Act, GPIO.LOW)\n tdata.write(\"Cal_Disp, mm, Temperature, 'c, Time, s \\n\")\n for count in range(1,b+51):\n distance = tof.get_distance()\n Rdistance = distance*0.6931 + 16.375\n temp = max(sensor.readPixels())\n if (distance > 0):\n print(\"%d mm, %d 'c, %f s\" % (Rdistance, temp, count*0.2))\n tdata.write(\"%d mm, %d 'c, %f s \\n\" % (Rdistance, temp, count*0.2))\n time.sleep(timing/500000.00)\n if (count ==b):\n GPIO.output(Act, GPIO.HIGH)\n elif a==2:\n c = int(input('time : '))\n b = c*5+1\n GPIO.output(Fan, GPIO.LOW)\n tdata.write(\"Cal_Disp, mm, Sen_Disp, mm, Temperature, 'c, Time, s \\n\")\n for count in range(1,b):\n distance = tof.get_distance()\n Rdistance = distance*0.6931 + 16.375\n temp = max(sensor.readPixels())\n if (distance > 0):\n print(\"%d mm, %d 'c, %f s\" % (Rdistance, temp, count*0.2))\n tdata.write(\"%d mm, %d 'c, %f s \\n\" % (Rdistance, temp, count*0.2))\n time.sleep(timing/500000.00)\n GPIO.output(Fan, GPIO.HIGH)\n\ntof.stop_ranging()\n\ntdata.close()\n","sub_path":"python/Heat_test.py","file_name":"Heat_test.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"90172981","text":"from pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\nclient = MongoClient()\ndb = client.vacation\nvacations = db.vacations\nfrom flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef vacations_index():\n \"\"\"Show all vacations.\"\"\"\n vacationList = vacations.find()\n return render_template('index.html', vacations=vacationList)\n\n@app.route('/vacations/new')\ndef vacations_new():\n \"\"\"Create a new vacation.\"\"\"\n return render_template('new.html')\n\n\n\n# Note the methods parameter that explicitly tells the route that this is a POST\n@app.route('/vacations', methods=['POST'])\ndef vacations_submit():\n \"\"\"Submit a new vacation.\"\"\"\n # Grab the image urls and make a list out of them\n images = request.form.get('images').split(',')\n # call our helper function to create the list of links\n vacation = {\n 'title': request.form.get('title'),\n 'description': request.form.get('description'),\n 'images': images,\n }\n 
vacations.insert_one(vacation)\n    return redirect(url_for('vacations_index'))\n    \n@app.route('/vacations/<vacation_id>')\ndef vacations_show(vacation_id):\n    \"\"\"Show a single vacation.\"\"\"\n    vacation = vacations.find_one({'_id': ObjectId(vacation_id)})\n    return render_template('show.html', vacation=vacation)\n\n@app.route('/vacations/<vacation_id>/edit')\ndef vacations_edit(vacation_id):\n    \"\"\"Render the vacation edit form.\"\"\"\n    vacation = vacations.find_one({'_id': ObjectId(vacation_id)})\n    return render_template('edit.html', vacation=vacation)\n\n@app.route('/vacations/<vacation_id>', methods=['POST'])\ndef vacation_update(vacation_id):\n    \"\"\"Submit an edited vacation.\"\"\"\n    # Grab the image URLs and make a list out of them\n    images = request.form.get('images').split(',')\n    # assemble the updated vacation document\n    vacation = {\n        'title': request.form.get('title'),\n        'description': request.form.get('description'),\n        'images': images,\n    }\n\n    vacations.update_one(\n        {'_id': ObjectId(vacation_id)},\n        {'$set': vacation})\n    return redirect(url_for('vacations_show', vacation_id=vacation_id))\n\n@app.route('/vacations/<vacation_id>/delete', methods=['POST'])\ndef vacation_delete(vacation_id):\n    \"\"\"Action to delete a vacation.\"\"\"\n    vacations.delete_one({'_id': ObjectId(vacation_id)})\n    return redirect(url_for('vacations_index'))\n\n\nif __name__ == '__main__':\n    app.run()\n\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"103370573","text":"def last_word(s):\r\n    result = s[0]\r\n    for c in s[1:]:\r\n        if c >= result[0]:\r\n            result = c + result\r\n        else:\r\n            result += c\r\n    return result\r\n\r\n\r\nif __name__ == '__main__':\r\n    f = open('A-large.in')\r\n    f1 = open('out.txt', 'wb')\r\n    T = f.readline().strip()\r\n    case = 1\r\n    for line in f.readlines():\r\n        # print case\r\n        s = line.strip()\r\n        f1.write('Case #' + str(case) + ': ' + last_word(s) + '\\n')\r\n        case += 1\r\n\r\n    f.close()\r\n    f1.close()\r\n    # s = 'ZAYBXCWDUE'\r\n    # print last_word(s)\r\n","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_Jasonzhang_Main.py","file_name":"16_1_1_Jasonzhang_Main.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"248983144","text":"from flaskapp import db\n\n\nclass PushMap(db.Model):\n    __tablename__ = 'haowu_push_map'\n    id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n    push_token = db.Column(db.String(128), index=True)\n    openid = db.Column(db.String(128))\n\n    @staticmethod\n    def insert_or_update(push_map):\n        m = PushMap.query.filter(PushMap.push_token == push_map[\"push_token\"]).first()\n        if not m:\n            m = PushMap(**push_map)\n            db.session.add(m)\n        else:\n            m.openid = push_map[\"openid\"]\n        db.session.commit()\n\n\nif __name__ == '__main__':\n    PushMap.insert_or_update({\n        \"push_token\": \"123456\",\n        \"openid\": \"kjhkj\"\n    })","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
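The Flask app above follows the classic resourceful-route layout; a hedged usage sketch (it assumes the module imports as app.py and that a local MongoDB is reachable, since the collection handle is created at import time):

from app import app

client = app.test_client()
resp = client.post('/vacations', data={
    'title': 'Beach week',
    'description': 'Sun and surf',
    'images': 'https://example.com/a.jpg,https://example.com/b.jpg',
})
print(resp.status_code)   # 302: redirects back to the index on success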
{"seq_id":"433372600","text":"# encoding: utf-8\nfrom SelectionManager import *\nfrom RtreeApi import *\nfrom MbrGenerator import *\n\nclass Rtree(RtreeApi):\n    # d : dimension of the tree's vectors\n    # M : maximum capacity of nodes and leaves\n    # maxE : maximum number of elements the Rtree will store\n    # reset : when True, a new tree is built; otherwise it is loaded from disk\n    # initOffset : offset from which the root node is loaded\n    def __init__(self, d, M = 100, maxE = 100000, reset = False, initOffset = 0, partitionType = 0):\n        super(Rtree, self).__init__(d = d, M = M, maxE = maxE, reset = reset, initOffset = initOffset, dataFile = \"rtree\")\n        self.sa = RtreeSelection() # Selection algorithm: pick the best node to insert into, based on minimum area growth\n        # Partitioning algorithm\n        if partitionType == 0:\n            self.pa = LinealPartition()\n        elif partitionType == 1:\n            self.pa = CuadraticPartition()\n\n    def insert(self, mbrPointer):\n        t0 = time()\n\n        # Descend the tree until a suitable leaf is found\n        while self.currentNode.isANode():\n            next = self.chooseTree(mbrPointer)\n            self.seekNode(next) # changes currentNode\n\n        if self.needToSplit():\n            newLeafMbrPointer = self.split(self.newLeaf(), mbrPointer)\n            self.propagateSplit(newLeafMbrPointer) # propagate the split up to the parent\n        else:\n            self.insertChild(mbrPointer)\n            self.propagateAdjust() # adjust MBRs up to the root\n\n        t1 = time()\n        self.incrementMeanInsertionTime(t1-t0)\n        self.computeMeanNodes()\n        self.goToRoot()\n\n    # Handles a split\n    def split(self, newRtree, mbrPointer):\n        currentMbr = self.currentNode.getMbr()\n        children = self.currentNode.getChildren() # (Mbr, Pointer) tuples of the selected leaf\n\n        currentMbr.expand(mbrPointer.getMbr()) # expand the selected node's (or leaf's) MBR to simulate the insertion\n        partitionData = self.pa.partition(currentMbr, children + [mbrPointer], self.m()) # partition the elements, adding the element being inserted\n\n        self.currentNode.setData(partitionData[0][0], partitionData[0][1:]) # store the first partition in the old node (or leaf)\n        newRtree.setData(partitionData[1][0], partitionData[1][1:]) # store the second partition in a new node (or leaf)\n\n        self.save(self.currentNode) # save the old node (or leaf) to disk\n        self.save(newRtree) # save the new node (or leaf) to disk\n\n        treeMbrPointer = newRtree.getMbrPointer()\n        return treeMbrPointer\n\n    # Propagate the split as far up as necessary\n    def propagateSplit(self, splitMbrPointer):\n        lastSplit = splitMbrPointer\n        lastNode = self.currentNode\n\n        while self.currentHeigth() >= 0:\n            self.chooseParent() # changes currentNode and moves one level up the tree\n\n            self.updateChild(lastNode.getMbrPointer())\n\n            if self.needToSplit():\n                lastSplit = self.split(self.newNode(), lastSplit)\n\n                # The root has been reached\n                if self.currentHeigth() == 0:\n                    self.makeNewRoot(lastSplit)\n                    break\n                lastNode = self.currentNode\n            else:\n                self.insertChild(lastSplit)\n                break\n        self.propagateAdjust()\n        self.goToRoot()\n\ndef simpleTest():\n    d = 2\n    M = 25\n    E = 10**4\n    r = 0.25\n    rtree = Rtree(d = d, M = M, maxE = E, reset = True, initOffset = 0, partitionType = 1)\n    gen = MbrGenerator()\n    objects = [gen.next(d) for i in range(E)]\n    print(\"Data generada\")\n\n    for o in objects:\n        rtree.insert(o)\n\n    print(rtree)\n\n    print(gen.nextRadial(d, r))\n    randomMbr = gen.nextRadial(d, r*(d**0.5))\n    print(\"Generando busqueda\")\n    rtree.search(radialMbr = randomMbr, fileResults = None, verbose = True, genFile = False)\n\ndef loadTest():\n    d = 2\n    M = 25\n    E = 10**4\n    r = 0.25\n    rtree = Rtree(d = d, M = M, maxE = E, reset = False, initOffset = 0, partitionType = 1)\n\n    print(rtree)\n\nif __name__ == \"__main__\":\n    simpleTest()\n    loadTest()","sub_path":"code/Rtree.py","file_name":"Rtree.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"279031628","text":"import math\nfrom collections import defaultdict\nfrom functools import reduce\nfrom typing import List, Tuple, Union\n\n\n_size_data = 0\n_dim = 0\n\n\nVector = Union[List[float], List[int]]\nCodeBook = List[Vector]\n\n\ndef generate(\n    data: List[Vector], size_codebook: int, epsilon: float = 0.001\n) -> CodeBook:\n    data_size = len(data)\n\n    codebook = []\n\n    c0 = _avg_vec_of_vecs(data)\n    codebook.append(c0)\n\n    avg_dist = _avg_distortion_c0(c0, data, data_size)\n\n    while len(codebook) < size_codebook:\n        codebook, avg_dist = _split(data, codebook, epsilon, avg_dist)\n\n    return [list(map(math.floor, vector)) for vector in codebook]\n\n\ndef _split(\n    data: list, codebook: CodeBook, epsilon: float, initial_avg_dist: float\n) -> Tuple[CodeBook, float]:\n    data_size = len(data)\n\n    new_codevectors: List[Vector] = []\n    for c in codebook:\n        new_codevectors.append(_new_codevector(c, epsilon))\n        new_codevectors.append(_new_codevector(c, -epsilon))\n\n    codebook = new_codevectors\n    len_codebook = len(codebook)\n\n    print(f\"current codebook size: {len_codebook}\")\n\n    avg_dist = 0.0\n    err = epsilon + 1\n    while err > epsilon:\n        closest_c_list: List[Vector] = [None] * data_size\n        vecs_near_c = defaultdict(list)\n        vec_idxs_near_c = defaultdict(list)\n        for i, vec in enumerate(data):\n            min_dist = None\n            closest_c_index = None\n            for i_c, c in enumerate(codebook):\n                d = euclid_squared(vec, c)\n                if min_dist is None or d < min_dist:\n                    min_dist = d\n                    closest_c_list[i] = c\n                    closest_c_index = i_c\n            vecs_near_c[closest_c_index].append(vec)\n            vec_idxs_near_c[closest_c_index].append(i)\n\n        for i_c in range(len_codebook):\n            vecs = vecs_near_c.get(i_c) or []\n            num_vecs_near_c = len(vecs)\n            if num_vecs_near_c > 0:\n                new_c = _avg_vec_of_vecs(vecs)\n                codebook[i_c] = new_c\n                for i in vec_idxs_near_c[i_c]:\n                    closest_c_list[i] = new_c\n\n        prev_avg_dist = avg_dist if avg_dist > 0 else initial_avg_dist\n        avg_dist = _avg_distortion_c_list(closest_c_list, data, data_size)\n\n        err = (prev_avg_dist - avg_dist) / prev_avg_dist\n\n    return codebook, avg_dist\n\n\ndef _avg_vec_of_vecs(vecs: List[Vector]) -> Vector:\n    size = len(vecs)\n    avg_vec = [0.0] * len(vecs[0])  # derive the dimensionality from the data (was hard-coded to 3)\n    for vec in vecs:\n        for i, x in enumerate(vec):\n            avg_vec[i] += x / size\n\n    return avg_vec\n\n\ndef _new_codevector(c: Vector, e: float) -> Vector:\n    return [x * (1.0 + e) for x in c]\n\n\ndef _avg_distortion_c0(c0: Vector, data: List[Vector], size: int) -> float:\n    return reduce(\n        lambda s, d: s + d / size, (euclid_squared(c0, vec) for vec in data), 0.0\n    )\n\n\ndef _avg_distortion_c_list(c_list: List[Vector], data: list, size: int) -> float:\n    return reduce(\n        lambda s, d: s + d / size,\n        (euclid_squared(c_i, data[i]) for i, c_i in enumerate(c_list)),\n        0.0,\n    )\n\n\ndef euclid_squared(xsa: Vector, xsb: Vector) -> float:\n    return sum((x_a - x_b) ** 2 for x_a, x_b in zip(xsa, xsb))\n","sub_path":"lista5/lgb.py","file_name":"lgb.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"537837986","text":"import sqlite3\nfrom ..grammar import GrammarFactory\nfrom .BaseConnection import BaseConnection\n\n\nclass SQLiteConnection(BaseConnection):\n    \"\"\"SQLite Connection class.\n    \"\"\"\n\n    name = \"sqlite\"\n    _connection = None\n\n    def make_connection(self):\n        
\"\"\"This sets the connection on the connection class\n \"\"\"\n connection_details = self.get_connection_details()\n if self.__class__._connection:\n return self\n self.__class__._connection = sqlite3.connect(\n connection_details.get(\"db\"), isolation_level=None\n )\n self.__class__._connection.row_factory = sqlite3.Row\n\n return self\n\n @classmethod\n def get_database_name(self):\n return self().get_connection_details().get(\"db\")\n\n def get_connection_details(self):\n \"\"\"This is responsible for standardizing the normal connection\n details and passing it into the connection.\n\n This will eventually be unpacked so make sure the keys are the same as the keywords\n that should pass to your connection method\n \"\"\"\n connection_details = {}\n connection_details.setdefault(\n \"db\", self.connection_details.get(\"database\"))\n connection_details.update(self.connection_details.get(\"options\", {}))\n\n return connection_details\n\n def reconnect(self):\n pass\n\n def commit(self):\n \"\"\"Transaction\n \"\"\"\n print('commit transaction')\n return self.__class__._connection.commit()\n\n def begin(self):\n \"\"\"Transaction\n \"\"\"\n print('starting sqlite transaction', self, self.__class__._connection)\n self.__class__._connection.isolation_level = 'DEFERRED'\n return self.__class__._connection\n\n def rollback(self):\n \"\"\"Transaction\n \"\"\"\n print('rolling back transaction', self, self.__class__._connection)\n self.__class__._connection.rollback()\n\n def query(self, query, bindings, results=\"*\"):\n \"\"\"Make the actual query that will reach the database and come back with a result.\n\n Arguments:\n query {string} -- A string query. This could be a qmarked string or a regular query.\n bindings {tuple} -- A tuple of bindings\n\n Keyword Arguments:\n results {str|1} -- If the results is equal to an asterisks it will call 'fetchAll'\n else it will return 'fetchOne' and return a single record. 
(default: {\"*\"})\n\n Returns:\n dict|None -- Returns a dictionary of results or None\n \"\"\"\n query = query.replace(\"'?'\", \"?\")\n print(\"running query: \", query)\n try:\n cursor = self.__class__._connection.cursor()\n cursor.execute(query, bindings)\n if results == 1:\n result = [dict(row) for row in cursor.fetchall()]\n if result:\n return result[0]\n else:\n return [dict(row) for row in cursor.fetchall()]\n except Exception as e:\n if self.__class__._connection and self.__class__._connection.isolation_level:\n self.rollback()\n raise e\n\n","sub_path":"src/masonite/orm/connections/SQLiteConnection.py","file_name":"SQLiteConnection.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"477461702","text":"items = [\"Books\", \"Games\", \"Movies\"]\n\nprint(\"Here is your favorite thing so far\", *items,sep=\", \")\n\nwhile True:\n n = int(input(\"Position you want to update?: \"))\n if n > 0 and n <= int(len(items)):\n break \n else:\n print(\"Invalid position!\")\nm = input(\"Your replacing favorite?: \")\n\nitems[n-1] = m\nprint(*items, sep=\", \")","sub_path":"session3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271194286","text":"# encoding: utf-8\n\n\nimport scrapy\nfrom tutorial.items import TutorialItem\nfrom tutorial.log import *\n\nclass DmozSpider(scrapy.Spider):\n \n \n name = \"dmoz\"\n start_urls = [\n \"http://www.dmoz.org/Computers/Programming/Languages/Python/\"\n ]\n \n \n #parse方法从html源代码中解析要抓取的内容\n def parse(self, response):\n \n msg = '-------------Start to Solve Response!--------------'\n print(\"\\r\\n\\r\\n\\r\\n\\r\\n\\r\\n\\r\\n \" + msg + \"\\r\\n\\r\\n\")\n info(msg)\n \n \n #/html/head/title: 选择HTML文档中 标签内的 元素\n #/html/head/title/text(): 选择上面提到的 <title> 元素的文字\n #//td: 选择所有的 <td> 元素\n #//div[@class=\"mine\"]: 选择所有具有 class=\"mine\" 属性的 div 元素\n \n title = response.xpath('/html/head/title/text()').extract()\n print(title[0])\n \n header = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36'}\n for href in response.xpath('//*[@id=\"cat-list-content-main\"]/div[1]/a/@href'):\n url = 'http://www.dmoz.org' + href.extract()\n yield scrapy.Request(url, callback=self.parse_dir_contents,headers = header)\n \n #或者下面这样\n \n\n #filename = response.url.split(\"/\")[-2] + '.html'\n #with open('out/' + filename, 'wb') as f:\n # f.write(response.body)\n \n \n def parse_dir_contents(self, response):\n \n for sel in response.xpath('//div[@class=\"title-and-desc\"]'):\n \n item = TutorialItem()\n \n item['title']= sel.xpath('a/div/text()').extract()[0]\n item['link'] = sel.xpath('a/@href').extract()[0]\n desc = sel.xpath('div/text()').extract()[0]\n desc = desc.replace(\"\\t\\t\\t\",\"\")\n item['desc'] = desc.replace(\"\\r\\n\",\"\")\n \n yield item \n \n ","sub_path":"Scrapy/tutorial2/tutorial/spiders/dmoz_spider.py","file_name":"dmoz_spider.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"447889673","text":"from django.conf import settings\n\nimport twilio\nimport twilio.rest\n\ndef send_message(to, body):\n client = twilio.rest.TwilioRestClient(\n settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN \n )\n try:\n message = client.messages.create(\n body=body,\n to=to,\n 
from_=settings.TWILIO_NUMBER\n )\n except twilio.TwilioRestException as e:\n #messages.warning(request, 'Could not send SMS to ' + to)\n raise e\n\n","sub_path":"studygroups/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336459163","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n Application Template for supporting Evacuations of Personnel\n - e.g. from Afghanistan\n\"\"\"\n\nfrom gluon import current\nfrom gluon.storage import Storage\n\n# =============================================================================\ndef config(settings):\n\n T = current.T\n\n #settings.base.system_name = \"Evacuation Management System\"\n #settings.base.system_name_short = \"EMS\"\n settings.base.system_name = \"Afghans Extraction\"\n settings.base.system_name_short = \"AFGEx\"\n\n # PrePopulate data\n settings.base.prepopulate.append(\"BRCMS/Evac\")\n #settings.base.prepopulate_demo.append(\"BRCMS/Evac/Demo\")\n\n # Enable password-retrieval feature\n settings.auth.password_retrieval = True\n\n settings.auth.registration_organisation_required = True\n settings.auth.realm_entity_types = (\"org_organisation\",\n \"pr_forum\", # Realms\n \"pr_person\", # Case Managers, Case Supervisors, Handlers\n # Not needed, as hidden?\n #\"pr_realm\", # Cases, Resources\n )\n\n modules = settings.modules\n modules[\"asset\"] = {\"name_nice\": T(\"Assets\"), \"module_type\": None}\n #modules[\"fin\"] = {\"name_nice\": T(\"Finances\"), \"module_type\": None}\n modules[\"hms\"] = {\"name_nice\": T(\"Hospitals\"), \"module_type\": None}\n modules[\"inv\"] = {\"name_nice\": T(\"Inventory\"), \"module_type\": None}\n modules[\"security\"] = {\"name_nice\": T(\"Security\"), \"module_type\": None}\n modules[\"supply\"] = {\"name_nice\": T(\"Supply\"), \"module_type\": None}\n modules[\"transport\"] = {\"name_nice\": T(\"Transport\"), \"module_type\": None}\n\n # -------------------------------------------------------------------------\n # BR Settings\n #\n #settings.org.default_organisation = \"The Collective\"\n #settings.br.case_global_default_org = True\n settings.br.case_activity_need_details = True\n settings.br.case_activity_updates = True\n settings.br.case_activity_documents = True\n settings.br.case_address = True\n settings.br.case_id_tab = True\n settings.br.case_language_details = False\n settings.br.case_notes_tab = True\n settings.br.id_card_export_roles = None\n settings.br.manage_assistance = False\n settings.br.needs_org_specific = False\n\n # -------------------------------------------------------------------------\n # CR Settings\n #\n settings.cr.people_registration = True\n\n # -------------------------------------------------------------------------\n # HRM Settings\n #\n settings.hrm.record_tab = False\n settings.hrm.staff_experience = False\n settings.hrm.teams = False # Working Groups use Forums\n settings.hrm.use_address = False\n settings.hrm.use_id = False\n settings.hrm.use_skills = False\n settings.hrm.use_certificates = False\n settings.hrm.use_credentials = False\n settings.hrm.use_description = False\n settings.hrm.use_trainings = False\n\n # -------------------------------------------------------------------------\n # Organisations Module Settings\n #\n settings.org.sector = False\n settings.org.branches = False\n settings.org.offices_tab = False\n settings.org.country = False\n\n WORKING_GROUPS = (\"FLIGHTS\",\n \"LEGAL\",\n \"LOGISTICS\",\n \"MEDICAL\",\n \"SECURITY\",\n 
)\n\n # -------------------------------------------------------------------------\n # Realm Rules\n #\n def evac_realm_entity(table, row):\n \"\"\"\n Assign a Realm Entity to records\n \n Cases all have a unique Realm\n Resources all have a unique Realm\n - other than:\n * Orgs\n * Staff\n\n Special Cases for Doctors (both a Case & a Resource!)\n \"\"\"\n\n from s3dal import original_tablename\n\n db = current.db\n s3db = current.s3db\n\n tablename = original_tablename(table)\n\n if tablename == \"org_organisation\":\n # Realm is own PE ID\n # => use Default Rules\n pass\n elif tablename in (\"transport_airport\",\n \"transport_airplane\",\n ):\n # No Realm \n return None\n elif tablename == \"pr_person\":\n # Staff?\n # Case?\n # Doctor?\n # Case & Doctor?\n pass\n elif tablename == \"br_case\":\n # Has a unique pr_realm with appropriate multiple inheritance\n pass\n elif tablename == \"gis_route\":\n # Inherits realm from the Case\n pass\n elif tablename == \"br_activity\":\n # Has a unique pr_realm with appropriate multiple inheritance\n pass\n elif tablename in (\"event_incident_report\",\n \"hms_contact\",\n \"hms_hospital\",\n \"hms_pharmacy\",\n \"inv_inv_item\",\n \"org_facility\",\n \"security_zone\",\n \"transport_flight\",\n \"vehicle_vehicle\",\n ):\n # Has a unique pr_realm with appropriate multiple inheritance\n pass\n\n realm_entity = 0\n\n return realm_entity\n\n settings.auth.realm_entity = evac_realm_entity\n\n # -------------------------------------------------------------------------\n def auth_add_role(user_id, group_id, for_pe=None):\n \"\"\"\n Automatically add subsidiary roles & set to appropriate entities\n \"\"\"\n\n auth = current.auth\n add_membership = auth.add_membership\n system_roles = auth.get_system_roles()\n\n # Is this the Admin role?\n if group_id == system_roles.ADMIN:\n # Add the main Role\n add_membership(group_id = group_id,\n user_id = user_id,\n )\n # No Subsidiary Roles to add\n return\n\n db = current.db\n s3db = current.s3db\n gtable = db.auth_group\n\n # Is this the OrgAdmin role?\n if group_id == system_roles.ORG_ADMIN:\n # Lookup the User Organisation\n utable = db.auth_user\n otable = s3db.org_organisation\n query = (utable.id == user_id) & \\\n (otable.id == utable.organisation_id)\n org = db(query).select(otable.id,\n otable.pe_id,\n limitby = (0, 1),\n ).first()\n\n # Lookup the Entity for the main role\n if for_pe:\n entity = for_pe\n else:\n entity = org.pe_id\n\n # Add the main Role for the correct entity\n add_membership(group_id = group_id,\n user_id = user_id,\n entity = entity,\n )\n\n # Lookup the Groups for the subsidiary roles\n groups = db(gtable.uuid.belongs((\"ORG_ADMIN_RO\",\n \"ORG_ADMIN_RW\",\n ))).select(gtable.id,\n gtable.uuid,\n limitby = (0, 2),\n )\n groups = {row.uuid: row.id for row in groups}\n # Lookup the Entities for the subsidiary roles\n organisation_id = org.id\n ftable = s3db.pr_forum\n forums = db(ftable.uuid.belongs((\"ORG_ADMIN_RO_%s\" % organisation_id,\n \"ORG_ADMIN_RW_%s\" % organisation_id,\n ))).select(ftable.pe_id,\n ftable.uuid,\n limitby = (0, 2),\n )\n forums = {row.uuid: row.pe_id for row in forums}\n # Add User to the subsidiary roles for the subsidiary entities\n for uuid in groups:\n add_membership(group_id = groups[uuid],\n user_id = user_id,\n entity = forums[\"%s_%s\" % (uuid,\n organisation_id,\n )],\n )\n return\n\n # Need to lookup the role\n group = db(gtable.id == group_id).select(gtable.uuid,\n limitby = (0, 1),\n ).first()\n role = group.uuid\n if role == \"ORG_MEMBER\":\n # 
Lookup the Entity for the main role\n if for_pe:\n entity = for_pe\n else:\n utable = db.auth_user\n otable = s3db.org_organisation\n query = (utable.id == user_id) & \\\n (otable.id == utable.organisation_id)\n org = db(query).select(otable.pe_id,\n limitby = (0, 1),\n ).first()\n entity = org.pe_id\n\n # Add the main Role for the correct entity\n add_membership(group_id = group_id,\n user_id = user_id,\n entity = entity,\n )\n # No Subsidiary Roles to add\n return\n\n # Lookup the Entity for the main role\n if for_pe:\n entity = for_pe\n else:\n ltable = s3db.pr_person_user\n person = db(ltable.user_id == user_id).select(ltable.pe_id,\n limitby = (0, 1),\n ).first()\n entity = person.pe_id\n\n # Add the main Role for the correct entity\n add_membership(group_id = group_id,\n user_id = user_id,\n entity = entity,\n )\n\n # Lookup the Group for the Org Member role\n group = db(gtable.uuid == \"ORG_MEMBER\").select(gtable.id,\n limitby = (0, 1),\n ).first()\n # Lookup the Entity for the Org Member role\n utable = db.auth_user\n otable = s3db.org_organisation\n query = (utable.id == user_id) & \\\n (otable.id == utable.organisation_id)\n org = db(query).select(otable.pe_id,\n limitby = (0, 1),\n ).first()\n # Add User to the Org Member role for the Org entity\n add_membership(group_id = group.id,\n user_id = user_id,\n entity = org.pe_id,\n )\n\n if role in (\"CASE_MANAGER\",\n \"CASE_SUPERVISOR\",\n ):\n # No extra subsidiary role\n return\n\n # Resource role\n\n # Lookup the Groups for the subsidiary roles\n groups = db(gtable.uuid.belongs((\"%s_RO\" % role,\n \"%s_RW\" % role,\n ))).select(gtable.id,\n gtable.uuid,\n limitby = (0, 2),\n )\n groups = {row.uuid: row.id for row in groups}\n # Lookup the Entities for the subsidiary roles\n organisation_id = org.id\n ftable = s3db.pr_forum\n forums = db(ftable.uuid.belongs((\"%s_RO_%s\" % (role,\n organisation_id,\n ),\n \"%s_RW_%s\" % (role,\n organisation_id,\n ),\n ))).select(ftable.pe_id,\n ftable.uuid,\n limitby = (0, 2),\n )\n forums = {row.uuid: row.pe_id for row in forums}\n # Add User to the subsidiary roles for the subsidiary entities\n for uuid in groups:\n add_membership(group_id = groups[uuid],\n user_id = user_id,\n entity = forums[\"%s_%s\" % (uuid,\n organisation_id,\n )],\n )\n\n settings.auth.add_role = auth_add_role\n\n # -------------------------------------------------------------------------\n def auth_remove_role(user_id, group_id, for_pe=None):\n \"\"\"\n Automatically remove subsidiary roles\n \"\"\"\n\n auth = current.auth\n withdraw_role = auth.s3_withdraw_role\n\n auth = current.auth\n add_membership = auth.add_membership\n system_roles = auth.get_system_roles()\n\n # Is this the Admin role?\n if group_id == system_roles.ADMIN:\n # Remove the main Role\n withdraw_role(user_id, group_id)\n # No Subsidiary Roles to remove\n return\n\n db = current.db\n s3db = current.s3db\n gtable = db.auth_group\n\n # Is this the OrgAdmin role?\n if group_id == system_roles.ORG_ADMIN:\n # Lookup the User Organisation\n utable = db.auth_user\n otable = s3db.org_organisation\n query = (utable.id == user_id) & \\\n (otable.id == utable.organisation_id)\n org = db(query).select(otable.id,\n otable.pe_id,\n limitby = (0, 1),\n ).first()\n\n # Lookup the Entity for the main role\n if for_pe:\n entity = for_pe\n else:\n entity = org.pe_id\n\n # Remove the main Role for the correct entity\n withdraw_role(user_id, group_id, entity)\n\n # Lookup the Groups for the subsidiary roles\n groups = db(gtable.uuid.belongs((\"ORG_ADMIN_RO\",\n 
\"ORG_ADMIN_RW\",\n ))).select(gtable.id,\n gtable.uuid,\n limitby = (0, 2),\n )\n groups = {row.uuid: row.id for row in groups}\n # Lookup the Entities for the subsidiary roles\n organisation_id = org.id\n ftable = s3db.pr_forum\n forums = db(ftable.uuid.belongs((\"ORG_ADMIN_RO_%s\" % organisation_id,\n \"ORG_ADMIN_RW_%s\" % organisation_id,\n ))).select(ftable.pe_id,\n ftable.uuid,\n limitby = (0, 2),\n )\n forums = {row.uuid: row.pe_id for row in forums}\n # Remove User from the subsidiary roles for the subsidiary entities\n for uuid in groups:\n withdraw_role(user_id,\n groups[uuid],\n forums[\"%s_%s\" % (uuid,\n organisation_id,\n )],\n )\n return\n\n # Need to lookup the role\n group = db(gtable.id == group_id).select(gtable.uuid,\n limitby = (0, 1),\n ).first()\n role = group.uuid\n if role == \"ORG_MEMBER\":\n # Lookup the Entity for the main role\n if for_pe:\n entity = for_pe\n else:\n utable = db.auth_user\n otable = s3db.org_organisation\n query = (utable.id == user_id) & \\\n (otable.id == utable.organisation_id)\n org = db(query).select(otable.pe_id,\n limitby = (0, 1),\n ).first()\n entity = org.pe_id\n\n # Remove the main Role for the correct entity\n withdraw_role(user_id, group_id, entity)\n # No Subsidiary Roles to remove\n return\n\n # Lookup the Entity for the main role\n if for_pe:\n entity = for_pe\n else:\n ltable = s3db.pr_person_user\n person = db(ltable.user_id == user_id).select(ltable.pe_id,\n limitby = (0, 1),\n ).first()\n entity = person.pe_id\n\n # Remove the main Role for the correct entity\n withdraw_role(user_id, group_id, entity)\n\n if role in (\"CASE_MANAGER\",\n \"CASE_SUPERVISOR\",\n ):\n # No extra subsidiary role\n return\n\n # Resource role\n\n # Lookup the Groups for the subsidiary roles\n groups = db(gtable.uuid.belongs((\"%s_RO\" % role,\n \"%s_RW\" % role,\n ))).select(gtable.id,\n gtable.uuid,\n limitby = (0, 2),\n )\n groups = {row.uuid: row.id for row in groups}\n # Lookup the Entities for the subsidiary roles\n organisation_id = org.id\n ftable = s3db.pr_forum\n forums = db(ftable.uuid.belongs((\"%s_RO_%s\" % (role,\n organisation_id,\n ),\n \"%s_RW_%s\" % (role,\n organisation_id,\n ),\n ))).select(ftable.pe_id,\n ftable.uuid,\n limitby = (0, 2),\n )\n forums = {row.uuid: row.pe_id for row in forums}\n # Add User to the subsidiary roles for the subsidiary entities\n for uuid in groups:\n withdraw_role(user_id,\n groups[uuid],\n forums[\"%s_%s\" % (uuid,\n organisation_id,\n )],\n )\n\n settings.auth.remove_role = auth_remove_role\n\n # =========================================================================\n def org_organisation_create_onaccept(form):\n \"\"\"\n Create an RO & RW Forum for the Org Admin to be granted permissions to\n Create a RO & RW Forum for each Working Group for the Working Group members of this Organisation to be granted permission to\n Have the WG Forums inherit from the Org-level Forums\n \"\"\"\n\n from s3db.pr import pr_add_affiliation\n\n db = current.db\n s3db = current.s3db\n\n organisation_id = form.vars.id\n\n ftable = s3db.pr_forum\n update_super = s3db.update_super\n\n # Create the top-level RO Forum\n uuid = \"ORG_ADMIN_RO_%s\" % organisation_id\n forum_id = ftable.insert(name = uuid,\n uuid = uuid,\n organisation_id = organisation_id,\n forum_type = 2,\n )\n record = Storage(id = forum_id)\n update_super(ftable, record)\n master_ro = record[\"pe_id\"]\n\n # Create the top-level RW Forum\n uuid = \"ORG_ADMIN_RW_%s\" % organisation_id\n forum_id = ftable.insert(name = uuid,\n uuid = uuid,\n 
organisation_id = organisation_id,\n forum_type = 2,\n )\n record = Storage(id = forum_id)\n update_super(ftable, record)\n master_rw = record[\"pe_id\"]\n\n for WG in WORKING_GROUPS:\n # Create the WG RO Forum\n uuid = \"%s_RO_%s\" % (WG,\n organisation_id,\n )\n forum_id = ftable.insert(name = uuid,\n uuid = uuid,\n organisation_id = organisation_id,\n forum_type = 2,\n )\n record = Storage(id = forum_id)\n update_super(ftable, record)\n affiliate = record[\"pe_id\"]\n\n # Have the WG inherit from the top-level\n pr_add_affiliation(master_ro, affiliate, role=\"Realm Hierarchy\")\n\n # Create the WG RW Forum\n uuid = \"%s_RW_%s\" % (WG,\n organisation_id,\n )\n forum_id = ftable.insert(name = uuid,\n uuid = uuid,\n organisation_id = organisation_id,\n forum_type = 2,\n )\n record = Storage(id = forum_id)\n update_super(ftable, record)\n affiliate = record[\"pe_id\"]\n\n # Have the WG inherit from the top-level\n pr_add_affiliation(master_rw, affiliate, role=\"Realm Hierarchy\")\n\n # =========================================================================\n def customise_org_organisation_resource(r, tablename):\n\n current.s3db.add_custom_callback(tablename,\n \"create_onaccept\",\n org_organisation_create_onaccept,\n )\n\n settings.customise_org_organisation_resource = customise_org_organisation_resource\n\n # =========================================================================\n def br_case_onaccept(form):\n \"\"\"\n Have a Realm for the Case\n Ensure the correct Inheritance\n \"\"\"\n\n from s3db.pr import pr_add_affiliation\n\n db = current.db\n s3db = current.s3db\n\n case_id = form.vars.id\n\n # @ToDo: complete\n\n # =========================================================================\n def customise_br_case_resource(r, tablename):\n\n current.s3db.add_custom_callback(tablename,\n \"onaccept\",\n br_case_onaccept,\n )\n\n settings.customise_br_case_resource = customise_br_case_resource\n\n# END =========================================================================\n","sub_path":"modules/templates/BRCMS/Evac/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":23853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"424480420","text":"import random\n\nclass BingoCage:\n\t\"\"\"A BingoCage does one thing: picks items from a shuffled list\"\"\"\n\n\tdef __init__(self, items):\n\t\tself._items = list(items)\n\t\trandom.shuffle(self._items)\n\n\tdef pick(self):\n\t\ttry:\n\t\t\treturn self._items.pop()\n\t\texcept IndexError:\n\t\t\traise LookupError('pick from empty BingoCage')\n\n\tdef __call__(self):\n\t\treturn self.pick()\n\nif __name__ == \"__main__\":\n\tbingo = BingoCage(range(10))\n\tprint(bingo.pick())\n\tprint(bingo())\n\tprint(callable(bingo))\n\tprint(\"Function inspection (dir): \")\n\tprint(dir(bingo.pick))","sub_path":"code/05_1st_class_functions/bingocall.py","file_name":"bingocall.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216232669","text":"# -*- Encoding: utf-8 -*-\nfrom flask import Flask\nfrom flask_migrate import Migrate\nfrom config import load_config, load_default\nfrom .models import db # sql\n\n\ndef make_app(config_name):\n app = Flask(__name__)\n\n config_obj = load_config(config_name)\n if not config_obj:\n print('load default config instead.')\n config_obj = load_default()\n app.config.from_object(config_obj)\n config_obj.init_app(app)\n\n db.init_app(app)\n migrate = 
Migrate(app, db) # noqa\n\n registe_routes(app)\n return app\n\n\ndef registe_routes(app):\n \"\"\"Register routes.\"\"\"\n from app.resources import views\n app.register_blueprint(views.bp)\n from app.resources import apis\n app.register_blueprint(apis.bp)\n from app.resources import userViews\n app.register_blueprint(userViews.users, url_prefix='/api/v1/users')\n\n print(app.url_map)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"48286439","text":"import sublime\nimport sublime_plugin\nimport json\n\nfrom .gotools_util import Buffers\nfrom .gotools_util import ToolRunner\n\n\nclass GotoolsShowTypeCommand(sublime_plugin.ViewEventListener):\n @classmethod\n def is_applicable(cls, settings):\n return settings.get('syntax') == 'Packages/GoTools/GoTools.tmLanguage'\n\n def __init__(self, view):\n self.offset = None\n self.view = view\n self.phantom_set = sublime.PhantomSet(view)\n\n self.gocode = ToolRunner.prepare(view, 'gocode')\n\n def on_selection_modified_async(self):\n start, end = Buffers.symbol_offset_at_cursor(self.view)\n if end == self.offset:\n return\n self.offset = end\n\n suggestions_json_str, stderr, rc = ToolRunner.run_prepared(self.gocode, [\"-f=json\", \"autocomplete\", \n str(end)], stdin=Buffers.buffer_text(self.view))\n\n parts = json.loads(suggestions_json_str)\n typ = ''\n if parts and parts[1]:\n name = self.view.substr(self.view.word(self.view.sel()[0]))\n exact = [p for p in parts[1] if p['name'] == name]\n if exact:\n typ = exact[0][\"type\"]\n\n phantoms = [sublime.Phantom(sublime.Region(start, start), typ, sublime.LAYOUT_BELOW)] if typ else []\n self.phantom_set.update(phantoms)\n","sub_path":"gotools_type.py","file_name":"gotools_type.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"42522105","text":"\nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn import model_selection\nimport time \n\ndef naive_Bayes(X, y):\n\n\tresults = []\n\ttimes = []\n\taccuracy_diff = []\n\n\tfor x in range(0,200):\n\n\t\t# Dividing the dataset into Training Set and Test Set\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = int(time.time()))\n\n\t\tstart = time.time()\n\n\t\t# Applying Naive Bayes \n\t\tgnb = GaussianNB()\n\t\tgnb.fit(X_train, y_train)\n\n\n\t\t# Predicting the results\n\t\ty_pred = gnb.predict(X_test)\n\n\n\t\t# Checking the accuracy on the test set\n\t\tresult = gnb.score(X_test, y_test)\t\t\n\n\n\t\t# Checking the integrity of the model using 10-fold cross validation\n\t\tkfold = model_selection.KFold(n_splits = 10, random_state = int(time.time()))\n\t\tmodelCV = GaussianNB()\n\t\tscoring = 'accuracy'\n\t\tresult_cv = model_selection.cross_val_score(modelCV, X_train, y_train, cv = kfold, scoring = scoring)\n\t\tresult_cv = np.mean(result_cv)\n\n\n\t\t# Calculating the difference between model prediction and CV result\n\t\taux_accuracy_diff = result - result_cv\n\t\tif aux_accuracy_diff < 0:\n\t\t\taux_accuracy_diff = -aux_accuracy_diff\n\n\n\t\t# Calculating elapsed time\n\t\tend = time.time()\n\t\telapsed_time = end - start\n\n\t\t\n\t\tresults.append(result.mean())\n\t\ttimes.append(elapsed_time)\n\t\taccuracy_diff.append(aux_accuracy_diff)\n\n\n\t# Returning the average 
performance of the Algorithm\n\treturn[np.mean(results), np.var(results), np.mean(times), np.mean(accuracy_diff)]\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Naive_Bayes.py","file_name":"Naive_Bayes.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"236802590","text":"\"\"\"\nThe cost of stock on each day is given in an array A[] of size N.\nFind all the days on which you buy and sell the stock so that\nin between those days your profit is maximum.\n\nNote: There may be multiple possible solutions.\nPrint any one of them.\n\"\"\"\n\n\ndef stockBuySell(A, n):\n bs_pairs = []\n start = 0\n end = 0\n for i in range(1, n):\n if A[i - 1] <= A[i]:\n end = i\n else:\n if start != end:\n bs_pairs.append((start, end))\n start = i\n end = i\n\n if start != end:\n bs_pairs.append((start, end))\n\n return bs_pairs\n\n\nans = stockBuySell([4,2,2,2,4], 5)\nprint(ans)\n","sub_path":"pyalgos/arrays/stockbuysell.py","file_name":"stockbuysell.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"139660237","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom domain.models import Application\nfrom flask.ext.restful import fields, abort\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom BaseApplicationResource import BaseApplicationResource\nfrom BaseResource import ISO8601DateTime\nfrom common_fields import person_fields, organisation_fields\n\ngeneric_slot_fields = {\n 'id': fields.Integer,\n 'start_date': ISO8601DateTime,\n 'end_date': ISO8601DateTime,\n 'start_time': ISO8601DateTime,\n 'end_time': ISO8601DateTime,\n 'week_day': fields.Integer,\n}\n\nresource_fields = {\n 'name': fields.String,\n 'unit_email_address': fields.String\n}\n\napplication_fields = {\n 'id': fields.Integer,\n 'text': fields.String,\n 'person': fields.Nested(person_fields),\n 'organisation': fields.Nested(organisation_fields),\n 'resource': fields.Nested(resource_fields),\n 'requested_resource': fields.Nested(resource_fields),\n 'slots': fields.Nested(generic_slot_fields),\n 'status': fields.String,\n 'application_time': ISO8601DateTime,\n 'type': fields.String\n}\n\n\nclass ArrangementBaseResource(BaseApplicationResource):\n t = Application\n type_name = \"application\"\n\n def get_affected_applications(self, arrangement):\n affected_applications = []\n for slot in arrangement.slots:\n\n start_date = slot.start_time.date()\n end_date = slot.end_time.date()\n week_day = slot.start_time.isoweekday()\n start_time = slot.start_time.time()\n end_time = slot.end_time.time()\n\n # Find all affected repeating applications\n repeating_slots = self.get_repeating_slots(arrangement.resource, start_date, end_date, week_day, start_time, end_time)\n for repeating_slot in repeating_slots:\n if repeating_slot.application not in affected_applications:\n affected_applications.append(repeating_slot.application)\n\n # Find all affected single applications (except the arrangement itself)\n single_slots = self.get_slots(arrangement.resource, start_date, end_date, week_day, start_time, end_time)\n for single_slot in single_slots:\n if single_slot.application not in affected_applications and single_slot.application.id is not arrangement.id:\n affected_applications.append(single_slot.application)\n\n return affected_applications\n\n def get_arrangement(self, application_id):\n try:\n application = self.get_object_by_id(application_id)\n except 
NoResultFound:\n abort(\n 400,\n __error__=[\"Fant ingen søknad med id=%s.\" % application_id]\n )\n if not application.is_arrangement:\n abort(\n 400,\n __error__=[\"Søknaden er ikke ett arrangement id=%s.\" % application_id]\n )\n return application\n","sub_path":"flod_booking/api/ArrangementBaseResource.py","file_name":"ArrangementBaseResource.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"136635473","text":"import pickle\nimport saveClass as sc\nimport libcell as lb\nimport numpy as np\nimport struct\nimport os\n\n# def save_Ldend(Ldends, bfname):\n# # create a binary file\n# bfname='Dend_length.bin'\n# binfile = file(bfname, 'wb')\n# # and write out two integers with the row and column dimension\n# header = struct.pack('2I', Ldends.shape[0], Ldends.shape[1])\n# binfile.write(header)\n# # then loop over columns and write each\n# for i in range(Ldends.shape[1]):\n# ddata = struct.pack('%id' % Ldends.shape[0], *Ldends[:,i])\n# binfile.write(ddata)\n# binfile.close()\n\ndef save_ave_replay(aveData, nIter, nStart, bfname):\n vd = np.zeros((nIter, 4, nStart))\n\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv, (nStart, 1501))\n vd[i_trial, i_dendrite, :] = np.mean(mv[:,550:1000], 1)\n\n mvd = np.mean(vd, 0)\n\n # print (bfname)\n\n # create a binary file\n binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n # then loop over columns and write each\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:,i])\n binfile.write(ddata)\n binfile.close()\n\ndef save_ave_place(aveData, nIter, bfname):\n vd = np.zeros((nIter, 4, 20))\n\n for i_trial in range(nIter):\n vv = aveData[i_trial]\n for i_dendrite in range(4):\n vvv = vv[i_dendrite]\n mv = np.reshape(vvv[0:50000], (20, 2500))\n vd[i_trial, i_dendrite, :] = np.mean(mv, 1)\n\n mvd = np.mean(vd, 0)\n\n print (bfname)\n\n # create a binary file\n binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n header = struct.pack('2I', mvd.shape[0], mvd.shape[1])\n binfile.write(header)\n # then loop over columns and write each\n for i in range(mvd.shape[1]):\n ddata = struct.pack('%id' % mvd.shape[0], *mvd[:,i])\n binfile.write(ddata)\n binfile.close()\n\n\ndef save_sim(data, out_binary=False, out_vdend=False, out_pickle=False, outdir='data', dt_save=1):\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n modelData = sc.emptyObject()\n lb.props(modelData)\n\n if (data.stimType=='DStim'):\n filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1]-data.iRange[0]) \n elif (data.stimType=='SStim'):\n filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1]-data.iRange[0]) \n\n else :\n filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn)+'_gA'+str(round(data.Agmax,2)) + '_tauA' + str(data.Atau2)\n if (data.NMDA):\n filename = filename + '_gN'+str(round(data.Ngmax,2))\n if (data.GABA):\n filename = filename + '_Ni'+str(data.Insyn) + '_gG'+str(round(data.Igmax, 2))\n if (data.GABA_B):\n filename = filename + '_gB'+str(round(data.Bgmax, 2))\n\n if (data.modulateNa):\n filename = filename + '_noDendNa'\n\n if 
(data.stimType == 'nIter'):\n filename = filename + '_tInt' + str(data.tInterval) + 'ms_' + data.locBias + '_' + data.direction\n \n if ((data.stimType == 'place') + (data.stimType == 'poisson') + (data.stimType == 'replay')):\n filename = filename + \"_Er\" + str(data.Erate) + '_Ir'+str(data.Irate) + '_' + data.placeType + '_rep' + str(data.nIter)\n filename = filename + '_stimseed' + str(data.stimseed)\n\n if (data.modulateK == True):\n filename = filename + '_K0'\n if (data.modulateK_local == True):\n filename = filename + '_KL0'\n if (data.modulateK_parents == True):\n filename = filename + '_KP0'\n\n if (data.modulateRmRa == True):\n filename = filename + '_RmRa'\n if (data.modulateRmRaSeg == True):\n filename = filename + '_RmRaSeg'\n if (data.randomW == True):\n filename = filename + '_randW'\n\n if out_pickle:\n dataList = [data, modelData]\n fname = './'+outdir+'/'+filename+'.pkl'\n f = open(fname, 'wb')\n pickle.dump(dataList, f)\n f.close()\n\n\n if out_binary:\n #---------------------------------------------\n # WRITE the response in a binary file to read it with R\n mat = np.array(data.vdata)\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:,0:L:dt_ratio]\n\n np.save(\"./\"+outdir+\"/vdata_\"+filename+\".npy\", mat)\n\n #bfname = './'+outdir+'/vdata_'+filename+'.bin'\n #print (bfname)\n # create a binary file\n #binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n #header = struct.pack('2I', mat.shape[0], mat.shape[1])\n #binfile.write(header)\n # then loop over columns and write each\n #for i in range(mat.shape[1]):\n #ddata = struct.pack('%id' % mat.shape[0], *mat[:,i])\n #binfile.write(ddata)\n #binfile.close()\n\n if out_vdend:\n # # WRITE the dendritic response\n nRep = len(data.vDdata)\n mat = np.array(data.vDdata[0])\n for i in range(1, nRep):\n mat = np.hstack((mat, data.vDdata[i]))\n\n L = mat.shape[1]\n dt_ratio = int(round(dt_save / data.dt))\n mat = mat[:,0:L:dt_ratio]\n \n np.save(\"./\"+outdir+\"/vDdata_\"+filename+\".npy\", mat)\n \n # bfname = './'+outdir+'/vDdata_'+filename+'.bin'\n # # create a binary file\n # binfile = file(bfname, 'wb')\n # # and write out two integers with the row and column dimension\n # header = struct.pack('2I', mat.shape[0], mat.shape[1])\n # binfile.write(header)\n # # then loop over columns and write each\n # for i in range(mat.shape[1]):\n # ddata = struct.pack('%id' % mat.shape[0], *mat[:,i])\n # binfile.write(ddata)\n # binfile.close()\n \n\n # # ---------------------------------------------\n # # WRITE the location of the synapses \n if (data.GABA) :\n Ilocs = np.array(data.Ilocs) \n #Ilocs[:,1] = 1 + Ilocs[:,1] # code that these are inhibitory synapses\n Elocs = np.array(data.Elocs)\n Locs = np.row_stack((Elocs, Ilocs))\n else :\n Locs = np.array(data.Elocs)\n\n #bfname = './'+outdir+'/synlocs_'+filename+'.npy'\n #print (bfname)\n np.save(\"./\"+outdir+\"/Elocs_\"+filename+\".npy\", Elocs)\n np.save(\"./\"+outdir+\"/Ilocs_\"+filename+\".npy\", Ilocs)\n # # create a binary file\n # binfile = file(bfname, 'wb')\n # # and write out two integers with the row and column dimension\n # header = struct.pack('2I', Locs.shape[0], Locs.shape[1])\n # binfile.write(header)\n # # then loop over columns and write each\n # for i in range(Locs.shape[1]):\n # ddata = struct.pack('%id' % Locs.shape[0], *Locs[:,i])\n # binfile.write(ddata)\n # binfile.close()\n\n # #---------------------------------------------\n # Write the input spike train\n if (len(data.stim)>0):\n stim = 
data.stim\n #bfname = './'+outdir+'/stim_'+filename+'.bin'\n np.save(\"./\"+outdir+\"/stim_\"+filename+\".npy\", stim)\n\n # create a binary file\n #binfile = file(bfname, 'wb')\n # and write out two integers with the row and column dimension\n #header = struct.pack('2I', stim.shape[0], stim.shape[1])\n #binfile.write(header)\n # then loop over columns and write each\n #for i in range(stim.shape[1]):\n #ddata = struct.pack('%id' % stim.shape[0], *stim[:,i])\n #binfile.write(ddata)\n #binfile.close()\n","sub_path":"OLD_Biophysical/cell_save.py","file_name":"cell_save.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"35842639","text":"import numpy as np\nimport math\nimport csv\n\n\ndef itemdict_to_itemlist(attr_name_list, itemdict):\n itemlist = []\n for attr_name in attr_name_list:\n if attr_name in itemdict:\n itemlist.append(itemdict[attr_name])\n else:\n itemlist.append(0)\n return itemlist\n\n\ndef d2l(itemdict):\n return itemdict_to_itemlist([\"i\", \"e\", \"c\", \"p\", \"l\"], itemdict)\n\n\ndef itemlist_to_itemdict(attr_name_list, itemlist):\n itemdict = {}\n for k, v in zip(attr_name_list, itemlist):\n if v != 0:\n itemdict[k] = v\n return itemdict\n\n\ndef l2d(itemlist):\n return itemlist_to_itemdict([\"i\", \"e\", \"c\", \"p\", \"l\"], itemlist)\n\n\ndef get_all_columns(data, i):\n result = []\n for d in data:\n result.append(d[i])\n return result\n\n\ndef if_match(d, itemset):\n flag = True\n for i in range(5):\n if itemset[i] != 0 and d[i] != itemset[i]:\n flag = False\n break\n return flag\n\n\ndef get_sum_cuboid(data, itemset):\n result = 0\n for d in data:\n if if_match(d[:-1], itemset):\n result += float(d[-1])\n return result\n\n\ndef print_sub_item(data, itemset):\n for d in data:\n if if_match(d[:-1], itemset):\n print(d)\n\n\ndef all_inds(data):\n result = 0\n for d in data:\n result += float(d[5])\n return result\n\n\ndef find_one_index(arr, value):\n return np.nonzero(arr == value)[0][0]\n\n\ndef select_sets(itemsets):\n results = []\n for itemset in itemsets:\n results.append(itemset['set'])\n return results\n\n\ndef distribution_algorithm(goods, workers):\n results = []\n rest_goods = len(goods)\n rest_workers = workers\n\n while rest_goods > 0:\n work_load = math.ceil(rest_goods / rest_workers)\n results.append(goods[:work_load])\n goods = goods[work_load:]\n rest_goods -= work_load\n rest_workers -= 1\n\n return results\n\n\ndef dict2output(root_cause):\n output = \"\"\n for item in root_cause:\n if len(output) != 0:\n output += \"&\"\n output += item\n return output\n\n\ndef dictlist2output(root_cause_list):\n output = \"\"\n for root_cause in root_cause_list:\n if len(output) != 0:\n output += \";\"\n output += dict2output(root_cause)\n return output\n\n\ndef format_root_cause(cause):\n items = cause.split(\"&\")\n new_items = []\n for k in ['i', 'e', 'c', 'p', 'l']:\n for i in items:\n if k in i:\n new_items.append(i)\n return \"&\".join(new_items)\n\n\ndef csv_format(name):\n writer_formated = open(\"{}f.csv\".format(name), \"w\")\n writer_formated = csv.writer(writer_formated)\n writer_formated.writerow([\"timestamp\", \"set\"])\n\n with open('{}.csv'.format(name), newline='') as csvfile:\n algo1_reader = csv.reader(csvfile, delimiter=',')\n rows = []\n for row in algo1_reader:\n rows.append(row)\n\n for row in rows[1:]:\n timestamp = row[0]\n data = row[1]\n results = []\n root_causes = data.split(\";\")\n for cause in root_causes:\n 
results.append(format_root_cause(cause))\n output = \";\".join(results)\n writer_formated.writerow([timestamp, output])\n\n\ndef get_EP(forecast, real, root_cause_set):\n F = all_inds(forecast)\n A = all_inds(real)\n EP = 0\n for itemset in root_cause_set:\n EP += (get_sum_cuboid(forecast, itemset) - get_sum_cuboid(real, itemset)) / (F - A)\n return EP\n\n\ndef get_count_cuboid(forecast, real, root_cause_set):\n cnt = 0\n F = all_inds(forecast)\n A = all_inds(real)\n # for d in real:\n for i in range(len(real)):\n d = real[i]\n temp = 0\n ep_item = (float(d[-1]) - float(forecast[i][-1])) / (A - F)\n for itemset in root_cause_set:\n if if_match(d[:-1], itemset) and ep_item > 0.001:\n temp += 1\n assert temp <= 1\n cnt += temp\n return cnt\n\n\nif __name__ == \"__main__\":\n csv_format(\"algo_merge2\")\n","sub_path":"Code/AMCTS/AMCTSFullSearch/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"554994486","text":"\"\"\" \nReturns the toroidal and poloidal angles in degrees, starting from\nthe ECS parameter sets P_sy1_g1...P_sy2_g3, parameters GTorPos, gPolPos.\n\nHow to use:\n \n import ech_read_angles\n ec_an = ech_read_angles.ECS2TP\n angle=ec_an(tor_pos,pol_pos,jgy)\n tor_angle=angle.tor\n pol_angle=angle.pol\n\nFor TORBEAM:\n tor_tb = -angle.tor\n pol_tb = -angle.pol\n\nFor TORAY:\n tor_ay = 180 - angle.tor\n pol_ay = 90 - angle.pol\n\n\"\"\"\n\n__author__ = 'Giovanni Tardini (Tel. 1898)'\n__version__ = '0.01'\n__date__ = '07.04.2014'\n\nimport ctypes as ct\nimport os\n\necalib = '/afs/ipp/home/e/ecrh/sys/@sys/libaug_ecrh_setmirrors.so'\nif not os.path.isfile(ecalib):\n ecalib = '/afs/ipp/home/e/ecrh/sys/amd64_sles11/libaug_ecrh_setmirrors.so'\nprint('Using %s' %ecalib)\nlibecrh = ct.cdll.LoadLibrary(ecalib)\n\nclass ECS2TP:\n\n\n def __init__(self, tor_ecs, pol_ecs, jgy, nshot=99999):\n\n if nshot > 33725:\n datum = 0\n elif nshot > 27400:\n datum = 20111122\n else:\n datum = 20000101\n n_gy = [4, 4]\n ngy = n_gy[0] + n_gy[1]\n if jgy < n_gy[0]:\n gy = 101 + jgy\n elif jgy < ngy:\n gy = 201 + jgy - n_gy[0]\n c_err = ct.c_int32(0)\n c_sysunt = ct.c_int32(gy)\n if jgy > 3:\n c_theta = ct.c_double(1e3*pol_ecs) # [m] -> [mm] for ECRH2\n else:\n c_theta = ct.c_double(pol_ecs) # deg for ECRH1\n c_phi = ct.c_double(tor_ecs)\n _err = ct.byref(c_err)\n _sysunt = ct.byref(c_sysunt)\n _theta = ct.byref(c_theta)\n _phi = ct.byref(c_phi)\n c_datum = ct.c_double(datum)\n _datum = ct.byref(c_datum)\n\n s = libecrh.setval2tp_(_err, _sysunt, _theta, _phi, _datum)\n\n self.tor = c_phi.value # deg\n self.pol = c_theta.value # deg\n","sub_path":"ech_read_angles.py","file_name":"ech_read_angles.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"151554981","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nimport sqlite3\r\nroot = Tk()\r\nroot.geometry('600x300')\r\nroot.title(\"Registration Form\")\r\n\r\nFullname=StringVar()\r\nAddresh=StringVar()\r\nID=StringVar()\r\nvar = StringVar()\r\nc=StringVar()\r\nd=IntVar()\r\n\r\n\r\ndef Add():\r\n conn=sqlite3.connect(\"Form.db\")\r\n cursor=conn.cursor()\r\n cursor.execute('CREATE TABLE IF NOT EXISTS FORM(Fullname TEXT,ID TEXT,Addresh TEXT,Gender TEXT,Department TEXT)')\r\n cursor.execute('INSERT INTO Form(FullName,ID,Addresh,Gender,Department) VALUES(:name1,:id1,:addresh,:gender,:dept)',\r\n {\r\n 'name1':Fullname.get(),\r\n 
'id1':ID.get(),\r\n                        'addresh':Addresh.get(),\r\n                        'dept':c.get(),\r\n                        'gender':var.get(),\r\n                        })\r\n    \r\n    conn.commit()\r\n    conn.close()\r\n    entry_1.delete(0, END)\r\n    entry_2.delete(0, END)\r\n    entry_5.delete(0, END)\r\n    entry_6.delete(0,END)\r\n\r\ndef delete():\r\n    conn=sqlite3.connect(\"Form.db\")\r\n    cursor=conn.cursor()\r\n    cursor.execute(\"DELETE from FORM WHERE oid=\" + entry_6.get()) \r\n    conn.commit()\r\n    conn.close()\r\n    \r\n    \r\ndef querry():\r\n    conn=sqlite3.connect(\"Form.db\")\r\n    cursor=conn.cursor()\r\n    cursor.execute(\"SELECT *,oid FROM FORM\")\r\n    records=cursor.fetchall()\r\n    print_records=\" \"\r\n    for record in records:\r\n        print_records += str(record[0])+\" \"+str(record[1])+\" \"+str(record[2])+\" \"+str(record[3])+\" \"+str(record[4])+\" \"+str(record[5])+ \"\\n\"\r\n    querry_label=Label(root,text=print_records)\r\n    querry_label.grid(row=9,column=1,columnspan=2)\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\nlabel_1 = Label(root, text=\"FullName\",width=20,font=(\"bold\", 10))\r\nlabel_1.grid(row=0,column=0)\r\n\r\nentry_1 = Entry(root,textvar=Fullname)\r\nentry_1.grid(row=0,column=1)\r\n\r\nlabel_2 = Label(root, text=\"ID\",width=20,font=(\"bold\", 10))\r\nlabel_2.grid(row=0,column=2)\r\n\r\nentry_2 = Entry(root,textvar=ID)\r\nentry_2.grid(row=0,column=3)\r\n\r\nlabel_3 = Label(root, text=\"Gender\",width=20,font=(\"bold\", 10))\r\nlabel_3.grid(row=5,column=0)\r\n\r\nentry_3=Radiobutton(root, text=\"Male\",variable=var ,value='MALE').grid(row=5,column=1)\r\nentry_3=Radiobutton(root, text=\"Female\",variable=var,value='FEMALE').grid(row=5,column=2)\r\n\r\nlabel_4 = Label(root, text=\"DEPARTMENT\",width=20,font=(\"bold\", 10))\r\nlabel_4.grid(row=3,column=2)\r\n\r\nlist1 = ['IT','SALES','SUPPORT','DEVELOPERS'];\r\n\r\ndroplist=OptionMenu(root,c, *list1)\r\ndroplist.config(width=15)\r\nc.set('select your department') \r\ndroplist.grid(row=3,column=3)\r\nlabel_5 = Label(root, text=\"ADDRESH\",width=20,font=(\"bold\", 10))\r\nlabel_5.grid(row=3,column=0)\r\n\r\nentry_5 = Entry(root,textvar=Addresh)\r\nentry_5.grid(row=3,column=1)\r\n\r\n\r\nlabel_6=Label(root,text=\"Select ID\")\r\nlabel_6.grid(row=7,column=0)\r\nentry_6=Entry(root,textvar=d)\r\nentry_6.grid(row=7,column=1)\r\n\r\n\r\nButton(root, text='ADD Record to Dataset',width=20,bg='brown',fg='white',command=Add).grid(row=6,column=0)\r\nButton(root, text='Show Record',width=20,bg='brown',fg='white',command=querry).grid(row=8,column=1)\r\nButton(root, text='Delete Record',width=20,bg='brown',fg='white',command=delete).grid(row=6,column=1)\r\n\r\nroot.mainloop()\r\n","sub_path":"FOrm.py","file_name":"FOrm.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"634096474","text":"import numpy as np\r\nimport re\r\nimport os\r\nimport pickle\r\nimport neighbor\r\n#from recommend_system import neighbor\r\nfrom sklearn.model_selection import KFold\r\n\r\n\r\nclass Recommend:\r\n    genre_path = None # file path of the genre information\r\n    review_path = None # file path of the review information\r\n    user_path = None # file path of the user information\r\n    user_num = None # number of users\r\n    genre_num = None # number of genres\r\n    movie_num = None # number of movies\r\n    review_num = None # number of reviews\r\n    genre_att = None # genre attributes\r\n    review_att = None # review attributes\r\n    movie_att = None # movie attributes\r\n    user_att = None # user attributes\r\n    pickle_path = None # path of the preprocessed dataset\r\n\r\n    def __init__(self, dataset_path=\"./dataset_norm.pickle\"):\r\n        \"\"\"\r\n        Initialization function\r\n\r\n        :param dataset_path: path of a previously built dataset file\r\n        \"\"\"\r\n        self.genre_path = 
\"./ml-100k/u.genre\"\r\n self.review_path = \"./ml-100k/u.data\"\r\n self.user_path = \"./ml-100k/u.user\"\r\n self.movie_path = \"./ml-100k/u.item\"\r\n self.pickle_path = dataset_path\r\n\r\n self.genre_att = [\"unknown\",\"Action\",\"Adventure\",\"Animation\",\"Children's\",\"Comedy\",\"Crime\",\"Documentary\",\r\n \"Drama\",\"Fantasy\",\"Film-Noir\",\"Horror\",\"Musical\",\"Mystery\",\"Romance\",\"Sci-Fi\",\"Thriller\",\r\n \"War\",\"Western\"]\r\n self.review_att = [\"user_id\",\"movie_id\",\"rating\",\"timestamp\"]\r\n self.movie_att = [\"movie_id\",\"movie_title\",\"release_date\",\"video_release_date\",\"IMDb_URL\",\"unknown\",\"Action\",\r\n \"Adventure\",\"Animation\",\"Children's\",\"Comedy\",\"Crime\",\"Documentary\",\"Drama\",\"Fantasy\",\r\n \"Film-Noir\",\"Horror\",\"Musical\",\"Mystery\",\"Romance\",\"Sci-Fi\",\"Thriller\",\"War\",\"Western\"]\r\n self.user_att = [\"user_id\",\"age\",\"gender\",\"occupation\",\"zip_code\"]\r\n\r\n self.user_num = 943\r\n self.genre_num = len(self.genre_att)\r\n self.movie_num = 1682\r\n self.review_num = 100000\r\n\r\n def load_data(self, path, att, split_char):\r\n \"\"\"\r\n ファイルの読み出し\r\n\r\n :param path: 読み込むデータファイルのパス\r\n :param att: 読み込むデータの属性名\r\n :param split_char: データ分割の文字\r\n :return data_list: データのリスト\r\n \"\"\"\r\n data_list = []\r\n with open(path, \"r\", encoding=\"ISO-8859-1\") as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n vec = {}\r\n for i, item in enumerate(re.split(split_char, line[:-1])):\r\n vec[att[i]] = item\r\n data_list.append(vec)\r\n return data_list\r\n\r\n def merge_data(self, review_list, user_list, movie_list):\r\n \"\"\"\r\n user_idとmovie_idから、各詳細データを取り出し、review情報とマージする\r\n\r\n :param review_list: reviewデータのリスト\r\n :param user_list:  userデータのリスト\r\n :param movie_list: movieデータのリスト\r\n :return merged_data_list: user, movieデータを展開して、reviewデータとマージしたリスト\r\n \"\"\"\r\n merged_data_list = []\r\n for review in review_list:\r\n user_id = review[\"user_id\"]\r\n movie_id = review[\"movie_id\"]\r\n user_data = [user for user in user_list if user[\"user_id\"] == user_id]\r\n movie_data = [movie for movie in movie_list if movie[\"movie_id\"] == movie_id]\r\n\r\n user_data[0].update(movie_data[0])\r\n merged_data = user_data[0].copy()\r\n merged_data[\"rating\"] = review[\"rating\"]\r\n merged_data_list.append(merged_data)\r\n return merged_data_list\r\n\r\n def normalize(self, x):\r\n \"\"\"\r\n ベクトルxの正規化をする\r\n :param x: 正規化するベクトル\r\n :return norm_x: 正規化されたベクトル\r\n \"\"\"\r\n norm_x = x / np.linalg.norm(x)\r\n return norm_x\r\n\r\n def make_movie_vec(self, data_list):\r\n \"\"\"\r\n ユーザごとの趣味嗜好ベクトルを作成する。\r\n\r\n :param data_list: reviewデータのリスト\r\n :return movie_vec_array: ユーザごとの趣味嗜好ベクトルのアレイ\r\n :return recommend_movie_array: ユーザごとのレーティングが最も高い映画idのアレイ\r\n \"\"\"\r\n\r\n movie_vec_array = np.empty((0, self.genre_num+1))\r\n recommend_movie_array = np.empty(0)\r\n user_id_array = np.empty(0)\r\n\r\n for user_id in range(self.user_num):\r\n user_id = str(user_id + 1)\r\n best_movie_rating = 0\r\n best_movie_id = None\r\n review_list = [review for review in data_list if review[\"user_id\"] == user_id]\r\n movie_vec = np.array([0.0 for i in range(self.genre_num)])\r\n for review in review_list:\r\n vec = np.empty(0)\r\n rating = int(review[\"rating\"])\r\n if best_movie_rating < rating:\r\n best_movie_id = review[\"movie_id\"]\r\n for genre in self.genre_att:\r\n vec = np.append(vec, int(review[genre]) * (rating - 3)) #レーティング1~5を-2~2にする\r\n movie_vec += vec\r\n\r\n user_id_array = np.append(user_id_array, 
np.array([review_list[0][\"user_id\"]]), axis=0)\r\n            movie_vec = np.reshape(np.append(movie_vec, np.array(0)), (1, -1))\r\n            movie_vec = self.normalize(np.asarray(movie_vec, dtype=\"float32\"))\r\n            movie_vec_array = np.append(movie_vec_array, movie_vec, axis=0)\r\n            recommend_movie_array = np.append(recommend_movie_array, np.array([best_movie_id]), axis=0)\r\n        return movie_vec_array, recommend_movie_array, user_id_array\r\n\r\n\r\n\r\n    def id_to_title(self, movie_id, movie_list):\r\n        \"\"\"\r\n        Return the movie title for a movie ID\r\n        :param movie_id: movie ID\r\n        :param movie_list: list of movie data\r\n        :return movie_title: movie title\r\n        \"\"\"\r\n        return [movie[\"movie_title\"] for movie in movie_list if movie[\"movie_id\"] == movie_id][0]\r\n\r\n    def load_dataset(self):\r\n        \"\"\"\r\n        Load the dataset stored in the pickle file\r\n        \"\"\"\r\n        with open(self.pickle_path, \"rb\") as f:\r\n            obj = pickle.load(f)\r\n        return obj[0], obj[1], obj[2], obj[3]\r\n\r\n    def train(self):\r\n        \"\"\"\r\n        Train the movie recommendation model (nearest neighbor method)\r\n\r\n        \"\"\"\r\n\r\n        if not os.path.exists(self.pickle_path): # build the dataset pickle if it does not exist yet\r\n            review_list = self.load_data(self.review_path, self.review_att, \"\t\")\r\n            user_list = self.load_data(self.user_path, self.user_att, \"\\|\")\r\n            movie_list = self.load_data(self.movie_path, self.movie_att, \"\\|\")\r\n\r\n            data_list = self.merge_data(review_list, user_list, movie_list)\r\n\r\n            x_data, y_data, user_list = self.make_movie_vec(data_list)\r\n\r\n            with open(self.pickle_path, \"wb\") as f:\r\n                pickle.dump([x_data, y_data, user_list, movie_list], f)\r\n\r\n        else:\r\n            x_data, y_data, user_list, movie_list = self.load_dataset()\r\n\r\n        # split the dataset into 100 folds for testing and use one of them as the test set\r\n        n_fold = 100\r\n        k_fold = KFold(n_fold, shuffle=True)\r\n\r\n        for train_idx, test_idx in k_fold.split(x_data, y_data):\r\n            train_x = x_data[train_idx]\r\n            test_x = x_data[test_idx]\r\n            train_y = y_data[train_idx]\r\n            test_y = y_data[test_idx]\r\n\r\n            # create an instance of the nearest-neighbor recommender\r\n            nn = neighbor.Neighbor(train_x, train_y)\r\n            predict_test = nn.predict(test_x)\r\n\r\n            # look up movie titles from the movie IDs and print them\r\n            predict_movie = []\r\n            for movie_id in predict_test:\r\n                predict_movie.append(self.id_to_title(movie_id, movie_list))\r\n            correct_movie = []\r\n            for movie_id in test_y:\r\n                correct_movie.append(self.id_to_title(movie_id, movie_list))\r\n            print(\"correct_label\")\r\n            print(correct_movie)\r\n            print(\"predict_label\")\r\n            print(predict_movie)\r\n            break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    model = Recommend(\"./dataset_norm.pickle\")\r\n    model.train()\r\n","sub_path":"recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":8615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306194132","text":"'''\nProblem description: given a set of points in the plane, return the maximum triangle area that can be formed by three of those points.\nExample:\nInput: points = [[0,0],[0,1],[1,0],[0,2],[2,0]]\nOutput: 2\nExplanation:\nOf these five points (see the figure), the red triangle has the largest area.\n\nApproach: brute force with a triple loop\n\n'''\n\n\ndef largestTriangleArea(points):\n    # write your code here\n    max_area = 0\n    for i in range(len(points)):\n        for j in range(i + 1, len(points)):\n            for k in range(j + 1, len(points)):\n                area = 1/2 * abs(points[i][0] * (points[j][1] - points[k][1]) + points[j][0] * (\n                    points[k][1] - points[i][1]) + points[k][0] * (points[i][1] - points[j][1]))\n                if area > max_area:\n                    max_area = area\n                else:\n                    continue\n    return max_area\n\nif __name__ == '__main__':\n    points = [[4, 6], [6, 5], [3, 1]]\n    
print(largestTriangleArea(points))","sub_path":"1005_done.py","file_name":"1005_done.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336142855","text":"# -*- coding: utf-8 -*-\r\nimport json\r\n\r\nimport scrapy\r\nfrom design.items import DesignItem\r\n\r\n# DBA Design Effectiveness Award\r\ndata = {\r\n    'channel': 'effec',\r\n    'evt': 3,\r\n}\r\n\r\n\r\nclass DesignCaseSpider(scrapy.Spider):\r\n    name = 'design_case'\r\n    allowed_domains = ['www.effectivedesign.org.uk']\r\n    year = 2018\r\n    url = 'http://www.effectivedesign.org.uk/winners/'\r\n    start_urls = [url + str(year)]\r\n\r\n    def parse(self, response):\r\n        category_list = response.xpath('//ul[@id=\"sub-nav\"]//a/@href').extract()\r\n        for cate_url in category_list:\r\n            yield scrapy.Request(url='http://www.effectivedesign.org.uk' + cate_url, callback=self.parse_category)\r\n        if self.year > 2013:\r\n            self.year -= 1\r\n            yield scrapy.Request(url=self.url + str(self.year), callback=self.parse)\r\n\r\n    def parse_category(self, response):\r\n        design_list = response.xpath('//ul[@class=\"gpWinnersInCategory gp itemList\"]//div[@class=\"in\"]')\r\n        tags = response.xpath('//ul[@id=\"sub-nav\"]//a[@class=\"active\"]/text()').extract()[0]  # tags\r\n        for design in design_list:\r\n            item = DesignItem()\r\n            prize = {}\r\n            title = design.xpath('.//h3[@class=\"projectTitle\"]//a/text()').extract()[0]\r\n            prize_level = design.xpath('.//p[@class=\"award\"]/text()').extract()[0]\r\n            try:\r\n                designer_name = design.xpath('.//p[@class=\"agency\"]/text()').extract()[1].strip()\r\n            except:\r\n                designer_name = design.xpath('.//p[@class=\"agency\"]/text()').extract()[0].strip()\r\n            detail_url = design.xpath('.//a[1]/@href').extract()[0]\r\n            prize['id'] = 20\r\n            prize['name'] = 'DBA设计效能奖'\r\n            prize['time'] = str(self.year + 1)\r\n            prize['level'] = prize_level\r\n            item['title'] = title  # title\r\n            item['sub_title'] = title\r\n            item['tags'] = tags\r\n            item['prize'] = json.dumps(prize)# prize level\r\n            item['designer'] = designer_name  # designer\r\n\r\n            for key, value in data.items():\r\n                item[key] = value\r\n            yield scrapy.Request(url='http://www.effectivedesign.org.uk' + detail_url, callback=self.parse_detail,\r\n                                 meta={'item': item})\r\n\r\n    def parse_detail(self, response):\r\n        item = response.meta['item']\r\n\r\n        img_urls = response.xpath('//div[@class=\"item-list\"]//img/@src').extract()\r\n        company = response.xpath('//div[@class=\"projectIntro\"]/h2[3]/a/text()').extract()[0]\r\n        remark = response.xpath('//div[@class=\"details bodyContent\"]/p/text()').extract()[0]\r\n        if len(remark) > 450:\r\n            remark = remark[:450]\r\n        item['url'] = response.url\r\n        item['img_urls'] = ','.join(img_urls)\r\n        item['company'] = company.strip()\r\n        item['remark'] = remark.replace('\\n','').replace(' ','').replace('\\r','').strip()\r\n        yield item\r\n","sub_path":"design/design/spiders/design_case.py","file_name":"design_case.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"333242943","text":"#Uses python3\nimport math\nimport statistics as stats\n\n# helper functions:\ndef two_point_distance(p0,p1):\n    # returns distance between two (x,y) pairs\n    return math.sqrt( ((p0[0]-p1[0])*(p0[0]-p1[0])) + \n                     ((p0[1] - p1[1])*(p0[1] - p1[1])) )\n\ndef combine_xy(x_arr,y_arr):\n    # combine x_arr and y_arr to combined list of (x,y) tuples \n    return list(zip(x_arr,y_arr))\n\ndef find_closest_distance_brute(xy_arr):\n    # brute force 
approach to find closest distance \n dmin = math.inf\n for i, pnt_i in enumerate(xy_arr[:-1]): \n dis_storage_min = min( two_point_distance(pnt_i, pnt_j) for pnt_j in xy_arr[i+1:]) \n if dis_storage_min < dmin:\n dmin = dis_storage_min \n return dmin\n\ndef calc_median_x(xy_arr):\n # return median of x values in list of (x,y) points\n return stats.median( val[0] for val in xy_arr )\n \ndef filter_set(xy_arr_y_sorted, median, distance):\n# filter initial set such than |x-med|<=d\n return [ val for val in xy_arr_y_sorted if abs(val[0] - median) <= distance ]\n\ndef x_sort(xy_arr):\n # sort array according to x value\n return sorted(xy_arr, key=lambda val: val[0])\n\ndef y_sort(xy_arr):\n # sort array according to y value\n return sorted(xy_arr, key=lambda val: val[1])\n\n\ndef split_array(arr_x_sorted, arr_y_sorted,median):\n # split array of size n to two arrays of n/2\n # input is the same array twice, one sorted wrt x, the other wrt y\n leq_arr_x_sorted = [ val for val in arr_x_sorted if val[0] < median ]\n geq_arr_x_sorted = [ val for val in arr_x_sorted if val[0] > median ]\n eq_arr_x = [ val for val in arr_x_sorted if val[0] == median ]\n \n n = len(eq_arr_x)//2\n leq_arr_x_sorted = leq_arr_x_sorted + eq_arr_x[:n]\n geq_arr_x_sorted = eq_arr_x[n:] + geq_arr_x_sorted\n \n leq_arr_y_sorted = [ val for val in arr_y_sorted if val[0] < median ]\n geq_arr_y_sorted = [ val for val in arr_y_sorted if val[0] > median ]\n eq_arr_y = [ val for val in arr_y_sorted if val[0] == median ]\n \n n = len(eq_arr_y)//2\n leq_arr_y_sorted = leq_arr_y_sorted + eq_arr_y[:n]\n geq_arr_y_sorted = eq_arr_y[n:] + geq_arr_y_sorted\n \n\n return leq_arr_x_sorted, leq_arr_y_sorted, geq_arr_x_sorted, geq_arr_y_sorted\n\ndef find_min_distance_in_rec(xy_arr_y_sorted,dmin):\n # takes in array sorted in y, and minimum distance of n/2 halves\n # for each point it computes distance to 7 subsequent points\n # output min distance encountered\n \n dmin_rec = dmin\n \n if len(xy_arr_y_sorted) == 1:\n return math.inf\n \n if len(xy_arr_y_sorted) > 7: \n for i, pnt_i in enumerate(xy_arr_y_sorted[:-7]):\n dis_storage_min = min(two_point_distance(pnt_i, pnt_j) \n for pnt_j in xy_arr_y_sorted[i+1:i+1+7])\n if dis_storage_min < dmin_rec:\n dmin_rec = dis_storage_min\n \n dis_storage_min = find_closest_distance_brute(xy_arr_y_sorted[-7:])\n if dis_storage_min < dmin_rec:\n dmin_rec = dis_storage_min\n else:\n for k, pnt_k in enumerate(xy_arr_y_sorted[:-1]): \n dis_storage_min = min( two_point_distance(pnt_k, pnt_l) for pnt_l in xy_arr_y_sorted[k+1:]) \n if dis_storage_min < dmin_rec:\n dmin_rec = dis_storage_min \n \n return dmin_rec \n\ndef find_closest_distance_recur(xy_arr_x_sorted, xy_arr_y_sorted):\n # recursive function to find closest distance between points\n if len(xy_arr_x_sorted) <=3 :\n return find_closest_distance_brute(xy_arr_x_sorted)\n \n median = calc_median_x(xy_arr_x_sorted)\n leq_arr_x_sorted, leq_arr_y_sorted , grt_arr_x_sorted, grt_arr_y_sorted = split_array(xy_arr_x_sorted, xy_arr_y_sorted, median)\n \n distance_left = find_closest_distance_recur(leq_arr_x_sorted, leq_arr_y_sorted)\n distance_right = find_closest_distance_recur(grt_arr_x_sorted, grt_arr_y_sorted)\n distance_min = min(distance_left, distance_right)\n \n filt_out = filter_set(xy_arr_y_sorted, median, distance_min)\n distance_filt = find_min_distance_in_rec(filt_out, distance_min)\n \n return min(distance_min, distance_filt)\n\ndef find_closest_point(x_arr, y_arr):\n # input is x,y points in two arrays, all x's in x_arr, all y's in y_arr\n xy_arr 
= combine_xy(x_arr,y_arr)\n xy_arr_x_sorted = x_sort(xy_arr)\n xy_arr_y_sored = y_sort(xy_arr)\n \n min_distance = find_closest_distance_recur(xy_arr_x_sorted, xy_arr_y_sored)\n \n return min_distance\n\ndef minimum_distance(x, y):\n #write your code here\n return find_closest_point(x,y)\n \ndef test_minimum_distance():\n # correct answer is sqrt(2)\n x_arr = [4,-2,-3,-1,2,-4,1,-1,3,-4,-2]\n y_arr = [4,-2,-4,3,3,0,1,-1,-1,2,4]\n print ('combined array', combine_xy(x_arr, y_arr))\n print ('closest distance' , minimum_distance(x_arr, y_arr))\n return None\ntest_minimum_distance()\n","sub_path":"mo_closest.py","file_name":"mo_closest.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"194973321","text":"#!/usr/bin/env python3\n\n\n# Hint: The socket module from python will be very useful.\n# In this assignment you will develop a program that will send an email message using SMTP using Python\n# with the following requirements:\n# 1 The program will ask for the following information:\n# a. Destination server (fqdn or IP address)\n# b. Sender email address\n# c. Recipient email address\n# d. Subject\n# e. Body\n# 2 Your program will initiate a connection to port 25 of the destination and communicate with\n# the server to send the email.\n# 3 Your program will display the result from the destination server. (Typically the last message\n# from the server such as queued for delivery)\n# 4 The program will exit after the email has been delivered.\n\n# Note from programmer: this does almost no error checking for simplicity\nimport sys\nimport socket\n\nDEFAULT_MAIL_SERVER = 'localhost'\nDEFAULT_PORT = 25\nDEFAULT_TIMEOUT = 10\n\n\ndef main():\n print(\"Client started.\")\n # Change port if a command line argument was provided\n if len(sys.argv) >= 2:\n port = (str(sys.argv[1])).strip()\n if not port.isdigit():\n print('Invalid port number.')\n exit()\n port = int(port)\n else:\n port = DEFAULT_PORT\n\n # commands and their format\n hello = \"HELO %s\\r\\n\" % str(socket.gethostname())\n sender = \"MAIL FROM: <%s>\\r\\n\"\n recipient = \"RCPT TO: <%s>\\r\\n\"\n subject = \"Subject: %s\\r\\n\"\n data = \"DATA\\r\\n\"\n body = \"%s\\r\\n\"\n bye = \"QUIT\\r\\n\"\n\n # read and inject the need input in to commands\n server = input(\"Enter destination server: \").strip()\n sender = sender % input(\"From: \").strip()\n recipient = recipient % input(\"To: \").strip()\n subject = subject % input(\"Subject: \").strip()\n body = body % input(\"Body:\\n\").strip()\n msg = sender[5:] + recipient[5:] + subject + body + \"\\r\\n.\\r\\n\"\n\n # sequence the commands\n commands = [hello, sender, recipient, data, msg, bye]\n\n # init socket communication\n s = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.settimeout(DEFAULT_TIMEOUT)\n\n reply = None\n try:\n s.connect((server, port))\n reply = s.recv(1024).decode().split()\n\n print(\"Connected to %s on port %s\" % (server, DEFAULT_PORT))\n for command in commands:\n print(\"s: %s\" % reply)\n print(\"c: %s\" % command)\n s.send(command.encode())\n reply = s.recv(1024).decode().split()\n\n except socket.timeout:\n print(\"Connection timed out. 
Try again later.\")\n s.close()\n exit()\n\n print(\"s: %s\" % reply)\n\n s.close()\n print(\"client stopping\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab_2/SMBClient.py","file_name":"SMBClient.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216220043","text":"from benchmarks.htap.lib.controller import HTAPController\n\n\ndef add_parser(subparsers):\n parser = subparsers.add_parser('htap')\n parser.add_argument(\n '--oltp-workers', default=32, type=int, help=(\n 'The number of OLTP workers executing TPC-C-like transactions (i.e. simulated clients), default: 32.'))\n\n parser.add_argument(\n '--olap-workers', default=1, type=int, help=(\n 'The number of OLAP workers (streams) running TPC-H-like queries, default: 1.'))\n\n parser.add_argument(\n '--target-tps', default=None, type=int, help=(\n 'The target TPS for the OLTP workload, default: unlimited.'))\n\n parser.add_argument(\n '--duration', default=60, type=int, help=(\n 'How many seconds the benchmark should run for, default: 60.'))\n\n parser.add_argument(\n '--olap-timeout', default='5min', help=(\n 'Timeout for OLAP queries, default: 5 minutes'))\n\n parser.add_argument(\n '--csv-interval', default=10, type=int, help=(\n 'How often to report stats to the csv files in seconds, default: 10'))\n\n parser.add_argument(\n '--dry-run', action='store_true', help=(\n \"Only generate transactions and analytical queries but don't send them to the database. \"\n \"Can be useful for measuring script throughput.\"))\n\n parser.add_argument(\n '--monitoring-interval', default=1, type=float, help=(\n 'Number of seconds to wait between updates of the monitoring display, default: 1.0'))\n\n parser.add_argument(\n '--stats-dsn', help=('The DSN to use for collecting statistics into a database. '\n 'Not defining it will disable statistics collection.'))\n\n parser.add_argument('--explain-analyze', action='store_true', default=False,\n help=('Whether to run EXPLAIN ANALYZE. Will save plans into the \"plan\" directory.'\n ))\n\n parser.add_argument('--use-server-side-cursors', default=False, action='store_true',\n required=False, help=('Use server-side cursors for executing the queries')\n )\n\n parser.add_argument('--dont-wait-until-enough-data', default=False, action='store_true',\n required=False, help=('Do NOT wait until there is enough data for OLAP queries to run with a constant dataset size')\n )\n \n parser.add_argument('--olap-dsns', nargs='+',\n required=False, help=('Use separate olap servers')\n )\n\n parser.add_argument('--output', choices=['csv', 'print'], default='print',\n nargs='+', help=('How the results output should look like. '\n 'Multiple options possible, separated by space'\n ))\n\n parser.add_argument('--csv-file', default='results.csv', help=(\n 'Where to save the summary csv file, if csv output is selected. 
'\n                                 'The default is results.csv in the current directory.'\n                                 ))\n\n    parser.add_argument('--ignored-queries', required=False, nargs='+', default=[], help=(\n        'Optional list of ignored queries for the OLAP workload.'\n    ))\n\ndef run(args):\n    controller = HTAPController(args)\n    controller.run()\n","sub_path":"benchmarks/htap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"86933097","text":"# %load q07_get_unique_teams_set/build.py\n# Default imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\npath = 'data/ipl_matches_small.csv'\n\n# Enter Code Here\nimport pandas as pd\nimport numpy as np\ncolumn_1 = set()\ncolumn_2 = set()\ndef get_unique_teams_set():\n    df = pd.read_csv(path)\n    '''\n    for i in range(0,len(df['match_code'])):\n        team1 = df['team1'][i].encode('UTF-8')\n        team2 = df['team2'][i].encode('UTF-8')\n        if team1 not in column_1:\n            column_1.add(team1)\n        \n        if team2 not in column_2:\n            column_2.add(team2)\n    '''\n    main_arr = read_ipl_data_csv(path,str)\n    arr1 = list(np.unique(main_arr[:,3]))\n    arr2 = list(np.unique(main_arr[:,4]))\n    unique_arr = np.array(np.unique(np.array(arr2+arr1)),dtype=bytes)\n    \n    return set(unique_arr)\narr = get_unique_teams_set()\ntype(arr)\n#import numpy as np\n\n#arr1\n#arr2\n#arr2+arr1\n#np.array(arr2+arr1)\n#np.unique(np.array(arr2+arr1))\n#unique_arr = np.unique(np.array(arr2+arr1))\n\n\n\n","sub_path":"q07_get_unique_teams_set/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"379947962","text":"from flask import Blueprint, render_template\nfrom flask import request,flash,get_flashed_messages\nfrom flask import abort, redirect, url_for, session\nfrom my_app import db\nfrom my_app.auth.model.user import RegisterForm, LoginForm, User\n#from flask import abort\n#from werkzeug import abort\nfrom flask_login import login_user,logout_user,current_user,login_required\nfrom my_app import login_manager\n\nfauth = Blueprint('fauth',__name__)\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.query.get(user_id)\n\n@fauth.route('/register', methods=('GET', 'POST'))\ndef register():\n    \n    ## check whether the user is already logged in\n    #if session.get('username'):\n    if 'username' in session:\n        print(session['username'])\n\n    form = RegisterForm(meta={'csrf':False}) \n    if form.validate_on_submit(): \n        c = User.query.filter_by(username = form.username.data).all()\n        if c:\n            flash(\"El usuario ya existe en el sistema\",\"danger\") \n        else: \n            p = User(form.username.data,form.password.data)\n            db.session.add(p)\n            db.session.commit() \n            flash(\"Usuario creado con exito\") \n            return redirect(url_for('fauth.register'))\n\n    if form.errors:\n        flash(form.errors,\"danger\") \n    return render_template('auth/register.html',form = form)\n\n@fauth.route('/login', methods=('GET', 'POST'))\ndef login():\n\n    if current_user.is_authenticated:\n        flash(\"Tu sesión esta abierta\") \n        return redirect(url_for('product.index'))    \n\n    form = LoginForm(meta={'csrf':False}) \n    if form.validate_on_submit(): \n        user = User.query.filter_by(username = form.username.data).first()\n        if user and user.check_password(form.password.data):\n            # register the session\n            login_user(user)\n            flash(\"Bienvenido de nuevo \" + user.username)\n            next = request.form['next']\n            print(next)\n            # is_safe_url should check if the url 
is safe for redirects.\n # See http://flask.pocoo.org/snippets/62/ for an example.\n #if not is_safe_url(next):\n # return flask.abort(400) \n return redirect(next or url_for('product.index'))\n else: \n flash(\"Usuario o contraseña incorrectos\") \n\n if form.errors:\n flash(form.errors,\"danger\") \n return render_template('auth/login.html',form = form) \n\n@fauth.route('/logout', methods=('GET', 'POST'))\ndef logout():\n logout_user()\n return redirect(url_for('fauth.login'))\n\n@fauth.route('/protegido')\n@login_required\ndef protegido():\n return \"vista protegida\"","sub_path":"my_app/fauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"571725719","text":"\"\"\"\nThis examples creates an ethylene molecule in the planar geometry and then \nrotates about the C=C bond 90 degrees.\n\nAuthor: James E. T. Smith <james.smith9113@gmail.com>\nDate: 12/20/19\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mppy.mpMolecule import mpMolecule\nfrom mppy.manipulation import rotate_dihedral\n\n\nxyz = np.array(\n [\n [3.402, 0.773, -9.252],\n [4.697, 0.791, -8.909],\n [2.933, -0.150, -9.521],\n [2.837, 1.682, -9.258],\n [5.262, -0.118, -8.904],\n [5.167, 1.714, -8.641],\n ]\n)\natom = [\"C\", \"C\", \"H\", \"H\", \"H\", \"H\"]\nnpts = 1\nrotor = [2, 3]\nrotated_xyz = rotate_dihedral(0, 1, np.pi / 4, npts, rotor, xyz)\n\nmol = mpMolecule(rotated_xyz[1], atom)\nmol.get_bonds_by_distance()\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection=\"3d\")\nmol.plot(ax)\nax.set_xlim3d(-2, 2)\nax.set_ylim3d(-2, 2)\nax.set_zlim3d(-2, 2)\nplt.show()\n","sub_path":"examples/02_rotate_dihedral.py","file_name":"02_rotate_dihedral.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101382385","text":"import pytest\n\nfrom easydata.data import DataBag\nfrom easydata.models import ItemModel\nfrom easydata.parsers.data import Data\nfrom easydata.queries import jp\nfrom tests.factory import data_dict\n\ndb = DataBag(main=data_dict.item_with_options, additional_data=data_dict.stock)\n\n\ndef process_raw_value(value, data):\n return \"{} {}\".format(value, str(data[\"additional_data\"][\"stock\"]))\n\n\ndef test_base_data_query():\n item_data = Data(query=jp(\"info.name\"))\n assert item_data.parse(db) == \"EasyBook pro 15\"\n\n\ndef test_base_data_from_item():\n item_model = ItemModel()\n item_model.item_name = Data(query=jp(\"title\"))\n item_model.item_brand = Data(from_item=\"name\")\n\n result = item_model.parse_item(data_dict.title)\n assert result == {\"brand\": \"Easybook Pro 13\", \"name\": \"Easybook Pro 13\"}\n\n\ndef test_base_data_field_query_as_first_parameter():\n item_data = Data(jp(\"info.name\"))\n assert item_data.parse(db) == \"EasyBook pro 15\"\n\n\n@pytest.mark.parametrize(\n \"query, default, test_data, result\",\n [\n (jp(\"info.namewrong\"), \"Easybook Def 13\", db, \"Easybook Def 13\"),\n (jp(\"info.name\"), \"Easybook Def 13\", db, \"EasyBook pro 15\"),\n ],\n)\ndef test_base_data_default(query, default, test_data, result):\n item_data = Data(query, default=default)\n assert item_data.parse(test_data) == result\n\n\ndef test_base_data_default_from_item():\n item_model = ItemModel()\n item_model.item_name = Data(query=jp(\"title\"))\n item_model.item_brand = Data(query=jp(\"brandwrong\"), default_from_item=\"name\")\n\n result = 
item_model.parse_item(data_dict.title)\n assert result == {\"brand\": \"Easybook Pro 13\", \"name\": \"Easybook Pro 13\"}\n\n\n@pytest.mark.parametrize(\n \"query, source, test_data, result\",\n [\n (jp(\"stock\"), \"additional_data\", db, True),\n (None, \"additional_data\", db, {\"stock\": True}),\n ],\n)\ndef test_base_data_field_different_source(query, source, test_data, result):\n item_data = Data(query, source=source)\n assert item_data.parse(test_data) == result\n\n\n@pytest.mark.parametrize(\n \"query, process_raw_value_callback, test_data, result\",\n [\n (\n jp(\"info.name\"),\n lambda value, data: value.replace(\"15\", \"13\"),\n db,\n \"EasyBook pro 13\",\n ),\n (jp(\"info.name\"), process_raw_value, db, \"EasyBook pro 15 True\"),\n ],\n)\ndef test_base_data_field_process_raw_value(\n query, process_raw_value_callback, test_data, result\n):\n\n item_data = Data(query, process_raw_value=process_raw_value_callback)\n assert item_data.parse(test_data) == result\n\n\n@pytest.mark.parametrize(\n \"query, process_value_callback, test_data, result\",\n [\n (\n jp(\"info.name\"),\n lambda value, data: \"{} {}\".format(\n value, str(data[\"additional_data\"][\"stock\"])\n ),\n db,\n \"EasyBook pro 15 True\",\n ),\n (jp(\"info.name\"), process_raw_value, db, \"EasyBook pro 15 True\"),\n ],\n)\ndef test_base_data_field_process_value(\n query, process_value_callback, test_data, result\n):\n\n item_data = Data(query, process_value=process_value_callback)\n assert item_data.parse(test_data) == result\n","sub_path":"tests/parsers/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"583019001","text":"# coding: utf-8\n# #!/bin/env python\nimport sys\nimport brainiak.eventseg.event\nimport numpy as np\nimport glob\nimport os\nimport scipy.io\nfrom scipy.stats import stats\nimport logging\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm, zscore, pearsonr\nimport pickle\nimport time\n# this should finish in 3hr for one roi\n\nrname=(sys.argv[1])\nexp=(sys.argv[2])\nexpdir = '/scratch/claire/speaker-listener/'\ntimeUnit='tr'\nfroidir='mor';\nexps=['pieman','bronx','merlin','sherlock']\neventN_test=range(10,121)\nw=5\n\n\nfor fname in glob.glob(os.path.join(expdir + '/' + exp + '/fmri/timeseries/' + timeUnit + '/network/' + froidir , 'zscore_listenerAll_'+rname+'.mat')):\n data_mat=scipy.io.loadmat(fname)\n gdata=data_mat['gdata']\n \n tn=gdata.shape[1]\n subjn=gdata.shape[2]\n voxn=gdata.shape[0]\n \n segments_others= np.empty((120,tn,subjn))\n segments_others[:]=np.nan\n segments_self= np.empty((120,tn,subjn))\n segments_self[:]=np.nan\n \n within_across_real = np.empty((max(eventN_test),subjn))\n within_across_real[:,:]=np.nan\n for s in range(subjn):\n if np.sum(~np.isnan(gdata[:,:,s]))>0:\n othersi=np.arange(subjn)\n othersi=othersi[othersi!=s]\n others=np.nanmean(gdata[:,:,othersi],axis=2)\n self=gdata[:,:,s]\n \n corrs = np.zeros(tn-w)\n for t in range(tn-w):\n corrs[t] = pearsonr(self[:,t],self[:,t+w])[0]\n \n for K in eventN_test:\n Ki=K-1\n \n # Find the events in this dataset\n seg = brainiak.eventseg.event.EventSegment(K)\n seg.fit(others.T) \n \n segments, _=seg.find_events(self.T);#, scramble=True)\n events=segments.argmax(axis=1)+1\n \n _, event_lengths = np.unique(events, return_counts=True)\n \n # Compute within vs across boundary correlations, for real and permuted bounds\n within = corrs[events[:-w] == events[w:]].mean()\n across = 
corrs[events[:-w] != events[w:]].mean()\n within_across_real[Ki, s] = within - across\n print(s)\n np.save(expdir + exp+ '/fmri/hmm/'+ rname + '_ListenersLeave1Out_withinAcross_real.npy', within_across_real) \n\n\n","sub_path":"old/hmm_ListenersLeave1Out_withinAcross_real.py","file_name":"hmm_ListenersLeave1Out_withinAcross_real.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217279937","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\n\nfrom .PybulletClient import *\nfrom .BasicInterface import *\n\nimport numpy\nimport os\n\n\nclass AvoidHazardsEnv(gym.Env, BasicInterface):\n metadata = {'render.modes': ['human']}\n\n def __init__(self, render = False):\n gym.Env.__init__(self)\n\n self.lidar_points = 64\n BasicInterface.__init__(self, render = render, lidar_points = self.lidar_points)\n\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=numpy.float32)\n self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(5, self.lidar_points), dtype=numpy.float32)\n\n \n def step(self, action):\n\n self.step_interface()\n \n vl = 50.0*numpy.clip(action[0], -1.0, 1.0)\n vr = 50.0*numpy.clip(action[1], -1.0, 1.0)\n\n self.robots[0].set_velocity(vl, vr)\n \n distance = self.target_distance()\n reward = 0.0 #0.001*numpy.exp(-distance)\n \n done = False\n\n if self.steps >= 1000:\n reward = 0.0\n done = True\n elif self.on_target(0, 0):\n reward = 1.0\n done = True \n elif self.on_hazard(0):\n reward = -1.0\n done = True\n elif self.out_board(0):\n reward = -1.0\n done = True\n\n for i in range(4):\n self.pb_client.stepSimulation()\n\n return self._update_observation(robot_id=0, lidar_points=self.lidar_points), reward, done, None\n\n \n \n def reset(self):\n robots_count = 1 \n targets_count = 1\n hazards_count = 5\n obstacles_count = 2\n fragile_count = 0\n moving_count = 0\n foods_count = 0\n \n self.reset_interface(targets_count, robots_count, hazards_count, obstacles_count, fragile_count, moving_count, foods_count)\n\n return self._update_observation(robot_id=0, lidar_points=self.lidar_points)\n \n def render(self):\n pass\n\n def close(self):\n pass\n\n def _update_observation(self, robot_id, lidar_points):\n lidar = self.get_lidar(robot_id)\n\n vl, vr = self.robots[robot_id].get_wheel_velocity()\n\n result = numpy.zeros((5, lidar_points), dtype=numpy.float32)\n\n result[0] = numpy.tanh(vl*numpy.ones(lidar_points)/50.0) #robot velocity, squeezed by tanh\n result[1] = numpy.tanh(vr*numpy.ones(lidar_points)/50.0)\n result[2] = lidar[3] #obstacles lidar\n result[3] = lidar[2] #hazards lidar\n result[4] = lidar[1] #target lidar\n\n\n return result\n\n def _dummy_follow(self):\n items_r, items_yaw = self.get_items_relative_position(self.robots[0].pb_robot, self.targets)\n\n\n if numpy.abs(items_yaw[0]) > 0.3:\n if items_yaw[0] > 0.0:\n self.robots[0].set_velocity(-5.0, 5.0)\n else:\n self.robots[0].set_velocity(5.0, -5.0)\n else:\n self.robots[0].set_velocity(50.0, 50.0)\n","sub_path":"gym-aeris/gym_aeris/envs/avoid_hazards_env.py","file_name":"avoid_hazards_env.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386589427","text":"import os\nimport sys\nimport functions\nreload(functions)\n\n\ndef _export(file_path):\n functions.open_maya(file_path)\n functions.export_animation(file_path)\n\n\nif __name__ == '__main__':\n import 
maya.standalone\n maya.standalone.initialize()\n\n maya_files = [maya_file for maya_file in sys.argv[1].split(';') if os.path.exists(maya_file)]\n for maya_file in maya_files:\n _export(maya_file)\n os._exit(0)\n\n","sub_path":"Projects/Maya/Tools/Exporter/Package/Scripts/mayaExporter.py","file_name":"mayaExporter.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"59879417","text":"\nfrom setuptools import setup, find_packages\n\n\nrequires = [\n 'pyramid',\n 'pyramid_jinja2',\n 'pyramid_debugtoolbar',\n 'pyramid_tm',\n 'SQLAlchemy',\n 'transaction',\n 'zope.sqlalchemy',\n 'waitress',\n 'psycopg2',\n 'passlib',\n 'yagmail',\n 'keyring',\n 'requests'\n]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'pytest', # includes virtualenv\n 'pytest-cov',\n]\n\nsetup(name='pylistener',\n version='0.0',\n description='''A simple tool designed to enable people with Apraxia\n to communicate.''',\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='Maelle Vance, Rick Valenzuela, Ted Callahan',\n author_email='',\n url='https://pylistener.herokuapp.com',\n keywords='web wsgi bfg pylons pyramid',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n extras_require={\n 'testing': tests_require,\n },\n install_requires=requires,\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = pylistener:main\n [console_scripts]\n initialize_db = pylistener.scripts.initializedb:main\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"547135203","text":"import matplotlib.pyplot as plt\r\nimport csv\r\n\r\nwith open('dataset.csv', newline='') as dataset:\r\n \r\n #read in all the data \r\n file_reader = csv.reader(dataset, delimiter=',')\r\n x = []\r\n y = []\r\n for row in file_reader:\r\n x.append(int(row[0])) \r\n y.append(int(row[1]))\r\n\r\n #create sums of all x's and all y's\r\n x_sum = sum(x)/len(x)\r\n y_sum = sum(y)/len(y)\r\n\r\n #calculate M\r\n m = sum([((x_num - x_sum)*(y_num - y_sum)) for x_num, y_num in zip(x, y)])/sum([ (x_num - x_sum)**2 for x_num in x])\r\n\r\n #calculate B\r\n b = y_sum - (m*x_sum)\r\n\r\n outputs = [(i*m)+ b for i in x]\r\n\r\n plt.plot(x, outputs) \r\n plt.plot(x,y, 'ro')\r\n plt.show()\r\n","sub_path":"linear regression/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"446468756","text":"'''\n870. 
Advantage Shuffle\nGiven two arrays A and B of equal size, the advantage of A with respect to B is the number of indices i for which A[i] > B[i].\n\nReturn any permutation of A that maximizes its advantage with respect to B.\n\n\n\nExample 1:\n\nInput: A = [2,7,11,15], B = [1,10,4,11]\nOutput: [2,11,7,15]\nExample 2:\n\nInput: A = [12,24,8,32], B = [13,25,32,11]\nOutput: [24,32,8,12]\n'''\n\nimport collections\n\ndef GetMax_Count(lst1,lst2):\n\n cnt=0\n for i in range(0,len(lst1)):\n\n if lst1[i]>lst2[i]:\n cnt=cnt+1\n\n return cnt\n\ndef LeetCode870(a,b):\n\n dict=collections.Counter(a)\n\n lst=[]\n cnt=[]\n\n for key,val in dict.items():\n lst.append(key)\n cnt.append(val)\n\n fnl_lst={}\n tmp=[]\n\n Combinations_recur(lst,cnt,fnl_lst,tmp,a,b)\n\n return sorted(fnl_lst.items(),key=lambda x:x[0],reverse=True)[0]\n\ndef Combinations_recur(lst,cnt,fnl_lst,tmp,a,b):\n\n if len(tmp)==len(a):\n\n count=GetMax_Count(tmp,b)\n if count in fnl_lst.keys():\n fnl_lst[count].append(tmp.copy())\n else:\n res=[]\n res.append(tmp.copy())\n fnl_lst[count]=res\n\n for i in range(0,len(lst)):\n if cnt[i]==0:\n continue\n tmp.append(lst[i])\n cnt[i]=cnt[i]-1\n Combinations_recur(lst, cnt, fnl_lst, tmp, a, b)\n tmp.pop()\n cnt[i]=cnt[i]+1\n\ndef main():\n\n a=[2,7,11,15]\n b=[1,10,4,11]\n print(LeetCode870(a,b))\n\n a = [12,24,8,32]\n b = [13,25,32,11]\n print(LeetCode870(a, b))\n\nif __name__=='__main__':\n main()","sub_path":"python/CodingExercises/LeetCode870.py","file_name":"LeetCode870.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"11497529","text":"import os\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# december highest \n# step 4 b \n\n\nclass frequency_grid(object):\n \n def __init__(self, numpoints=3):\n self.numpoints = numpoints\n self.time_elapsed= 0 # 0\n self.numcells = 160000\n \n self.xmin =-200\n self.xmax = 200\n self.ymin =-200\n self.ymax =200\n \n self.forwardmap = {} # dcmap1\n self.backmap = {}\n self.countmap = {}\n \n self.current_position = []\n self.prevmap = {} # set to current map at end of frame \n self.currentmap = {}\n \n self.tracking_list = {} # list of objects that are tracked \n \n # step 1\n ind_temp = 0\n for ix in range(int(self.xmin), int(self.xmax)):\n for iy in range(int(self.ymin), int(self.ymax)):\n myvec = []\n myvec.append(ix)\n myvec.append(iy)\n self.forwardmap[ind_temp] = myvec\n self.backmap[(ix, iy)]= ind_temp\n ind_temp=ind_temp+1\n \n self.range = 10\n # initialize grid count map\n for i in range(self.numcells):\n p = self.forwardmap[i]\n px = p[0]\n py = p[1]\n for j in range(-self.range, self.range+1):\n jx = px+j\n if jx > self.xmax-1 or jx < self.xmin:\n continue\n for k in range(-self.range, self.range+1):\n jy = py +k\n if jy > self.ymax-1 or jy<self.ymin:\n continue\n p2 = self.backmap[(jx, jy)]\n self.countmap[(i, p2)] = 0 # dtrajcount\n \n \n # set up based on trajectories file : training method \n def setup_grid(): \n \n fileind = 1\n for filename in os.listdir('24hrdata'):\n fname = '24hrdata/'+filename\n fileind =fileind+1\n #irow=0\n obnum=1\n with open(fname) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n trajectory_num = row[0]\n if line_count==0:\n line_count = line_count+1 # skip header\n if line_count==1:\n prevrow = row\n prevx = float(prevrow[6])\n prevy = float(prevrow[7])\n pfx = round(prevx)\n pfy = round(prevy)\n 
continue\n currentx = float(row[6])\n currenty = float(row[7])\n fx = round(currentx)\n fy = round(currenty)\n if pfx == fx and pfy ==fy:\n #prevframe = frameindex\n continue\n if obnum != trajectory_num:\n pfx = fx\n pfy = fy\n obnum = trajectory_num\n continue\n #save\n fromi = self.backmap[(pfx, pfy)]\n toi = self.backmap[(fx, fy)]\n if abs(pfx - fx)>10 or abs(pfy - fy)>10:\n continue\n mcount = self.countmap[(fromi, toi)]\n self.countmap[(fromi, toi)] = mcount+1\n pfx=fx\n pfy=fy\n \n def highestfreq(fromi):\n highest = 0\n indexhighest = fromi\n (px, py) = self.forwardmap[fromi]\n for j in range(-10, 11):\n jx = px+j\n if jx>xmax-1 or jx<xmin: # check if pts in range\n continue\n for k in range(-10, 11):\n jy = py+k\n # check if pts are in range\n if jy>ymax-1 or jy<ymin:\n continue\n toi = self.backmap[(jx, jy)]\n t = self.countmap[(fromi, toi)]\n #if t>0:\n #print(t)\n if t > highest:\n highest=t\n indexhighest=toi\n return highest, indexhighest\n \n def predict(self, next_frame): # predict next\n \n mf = defaultdict(list)\n \n mx = 20\n for j in range(0, mx):\n mf[j] = 0\n \n matchfreq = mf\n \n f= 0\n \n currentmap = {} # temporary currentmap\n currentmap_freq = {} # holds freq scores\n \n for j,next_key in enumerate(next_frame.keys()):\n pos_next = next_frame[next_key].position\n x_next = pos_next[0]\n y_next = pos_next[1]\n xr = round(xpoint)\n yr = round(ypoint)\n fromi = self.backmap[(xr, yr)]\n h1, i1 = self.highestfreq(fromi)\n \n # save to map\n currentmap[i1] = 1\n currentmap_freq[i1] = h1 \n val = self.prevmap.get(fromi)\n if val ==None:\n pass\n else:\n matchfreq[next_key] = matchfreq[next_key] +1\n \n f = 0\n # version with multiple points in pos_next\n for j,next_key in enumerate(next_frame.keys()):\n pos_next = next_frame[next_key].position\n \n # pos_next = cluster of points\n for pos in pos_next:\n x_next = pos[0]\n y_next = pos[1]\n xr = round(xpoint)\n yr = round(ypoint)\n fromi = self.backmap[(xr, yr)]\n h1, i1 = self.highestfreq(fromi)\n currentmap[i1] = 1\n currentmap_freq[i1] = h1\n val = self.prevmap.get(fromi)\n if val ==None:\n pass\n else:\n matchfreq[pos_next] = matchfreq[pos_next]+1\n \n # at the end of cluster\n \n if matchfreq[pos_next] > f:\n f = matchfreq[pos_next]\n next_cluster = pos_next \n self.prevmap = currentmap\n \n '''\n next_x = next_detection_position[0]\n next_y = next_detection_position[1]\n array_points= next_detection_position\n \n xvalues= []\n yvalues =[]\n prevmap={}\n hxvalues =[]\n hyvalues=[]\n '''\n \n \n # cycle through the different points\n for point in array_points:\n xpoint = float(point[0])\n ypoint = float(point[1])\n xr = round(xpoint)\n yr = round(ypoint)\n fromi = self.backmap[(xr, yr)]\n h1, i1 = self.highestfreq(fromi)\n # save to map\n self.currentmap[i1] = 1\n val = self.prevmap.get(fromi) # from i from previous point\n if val == None:\n pass\n else:\n matchfreq[point] = matchfreq[point]+1\n \n #cycle through different clusters\n for c in clusters:\n xvalues = []\n yvalues = []\n for point in c:\n xpoint = float(point[0])\n ypoint = float(point[1])\n xvalues.append(xpoint)\n yvalues.append(ypoint)\n xr = round(xpoint)\n yr = round(ypoint)\n fromi = self.backmap[(xr,yr)]\n h1, i1 = self.highestfreq(fromi)\n self.currentmap[i1] = 1\n val = self.prevmap.get(fromi)\n if val == None:\n pass\n else:\n matchfreq[c] = matchfreq[c]+1\n \n if matchfreq[c] > f:\n ky = c\n hxvalues = xvalues\n hyvalues= yvalues\n totalmap[ky] = self.currentmap \n 
\n","sub_path":"may2020/better_visualization/frequency_grid_version2.py","file_name":"frequency_grid_version2.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"108328325","text":"\"\"\"\nThis only for test.\n\"\"\"\n\nimport asyncio\nimport websockets\nimport random\nimport time\nfrom ue4_online.utils.rpc import RpcData, RpcProtocol\n\n\ndef regist():\n\n async def reg(loop):\n async with websockets.connect('ws://192.168.9.202:8765') as websocket:\n random_name = ''.join(random.choices('abcdefghijklmnopqrstuvwxyz' + '0123456789', k=8))\n random_email = random_name + \"@hs.com\"\n\n req = RpcData(cmd=\"regist\",\n passwd=\"d41d8cd98f00b204e9800998ecf8427e\",\n username=random_name,\n email=random_email)\n\n await websocket.send(RpcData.serialize(req))\n\n response_text = await websocket.recv()\n print(response_text)\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(reg(loop))\n\n\nstart = time.time()\n\ntry:\n for i in range(0, 10000):\n regist()\n\nexcept Exception as ex:\n\n print('Oops!')\nfinally:\n stop = time.time()\n ms = stop-start\n\n print(\"{} milliseconds\".format(ms))\n print((\"{} seconds\".format(ms/1000.0)))\n\n\n\n\n\n\n","sub_path":"batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217165259","text":"import heapq\r\n\r\ndef dijkstra(start, d, graph):\r\n d[start] = 0\r\n minHeap = []\r\n heapq.heappush(minHeap, (0, start))\r\n while len(minHeap) > 0:\r\n du, u = heapq.heappop(minHeap)\r\n if d[u] < du:\r\n continue\r\n #check arrive finish \r\n for v, w in graph[u]:\r\n if d[v] > du + w:\r\n d[v] = du + w\r\n heapq.heappush(minHeap, (d[v], v))\r\n \r\n\r\ntestcase = int(input())\r\nINF = 10**9\r\nfor _ in range(testcase):\r\n n, m, k, s, t = map(int, input().split())\r\n graphS = [[] for _ in range(n + 1)]\r\n graphT = [[] for _ in range(n + 1)]\r\n for i in range(m):\r\n u, v, w = map(int, input().split())\r\n graphS[u].append((v, w))\r\n graphT[v].append((u, w))\r\n\r\n dS = [INF] * (n + 1)\r\n dijkstra(s, dS, graphS)\r\n dT = [INF] * (n + 1)\r\n dijkstra(t, dT, graphT)\r\n #print(dS)\r\n #print(dT)\r\n res = INF\r\n for i in range(k):\r\n u, v, w = map(int, input().split())\r\n if dS[u] + dT[v] + w < res:\r\n res = dS[u] + dT[v] + w\r\n if dT[u] + dS[v] + w < res:\r\n res = dT[u] + dS[v] + w \r\n if res == INF:\r\n print(-1)\r\n else: \r\n print(res)","sub_path":"Lecture09/traffic_network.py","file_name":"traffic_network.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"129034689","text":"#%%\nfrom readHSI import readHSI\nimport matplotlib.pyplot as plt\nimport cv2 as cv2\nimport numpy as np\nimport random\nimport math as math\nfrom hyperSam import hyperSam\nimport struct\nimport pickle\ndef gauss1d(order,sig):\n j=0\n f=[]\n temp = math.trunc(order/2)\n for x in range(-temp,temp+1,1):\n f.append(1/2/math.pi*math.exp(-(x**2)/(2*sig**2)))\n \n f = np.array(f)\n f = f/ sum(f)\n return f\n\nfnameHD = './2020_03_23/HSI_0323-1443.hdr'\nfnameRaw = './2020_03_23/HSI_0323-1443.raw'\n# fnameHD = 'HSI_0103-1640-2.hdr'\n# fnameRaw = 'HSI_0103-1640-2.raw'\nhsi = readHSI(fnameHD, fnameRaw)\ncube, wavelength, _ = hsi.get_HSI()\nS = hsi.RGBimage_HSI()\ncv2.imshow('RGB_image',S)\n\n# ## load paint spectrum\n# f = open('paint_spectrum.raw','rb')\n# 
paint_spectrum = np.fromfile(f,dtype=np.uint16)\n# paint_spectrum = paint_spectrum.reshape(12,258)\n# f.close()\n\n## load paint mean spectrum\nf = open('mean_paint_spectrum.raw','rb')\npaint_spectrum = np.fromfile(f,dtype=np.float64)\npaint_spectrum = paint_spectrum.reshape(1,258)\nf.close()\n\n\n# pixel_spectral = np.squeeze(cube[r_x,:,r_y])\n# pixel_spectral2 = np.squeeze(cube[r_x2,:,r_y2])\nsig = 1\norder = sig * 3 * 2 + 1\nf = gauss1d(order,sig)\n\ncube_shape = cube.shape\nheight = cube_shape[0]\nwidth = cube_shape[2]\nthreshole = np.zeros((height,width),dtype=np.uint8)\n\n\n# for i in range(height):\n# for j in range(width):\n# angle = hyperSam(paint_spectrum[2,:],cube[i,:,j]).getSam()\n# if angle < 0.07:\n# threshole[i,j] = 255\n# for k in range(3):\n# S[i,j,k] = 255\n\nfor i in range(height):\n for j in range(width):\n spectrum = cube[i,:,j]\n normalize_spectrum = (spectrum-spectrum.min()) / (spectrum.max() - spectrum.min())\n paint_spectrum = paint_spectrum.astype(np.float64)\n angle = hyperSam(paint_spectrum,normalize_spectrum).getSam()\n if angle < 0.07:\n threshole[i,j] = 255\n S[i,j,0] = 0\n S[i,j,1] = 0\n S[i,j,2] = 255\n\ncv2.imshow('find paint', threshole)\ncv2.imshow('find paint at RGB image', S)\n\n\n\n# filtered_pixel_spectral = np.correlate(pixel_spectral,f,\"same\")\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"paint_detection/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"406047487","text":"# natural.py\n# Created on 12/11/18 by Jenna Folsom\n# A program to find the sum of the first n natural numbers.\n\ndef main():\n\n import math\n\n # This is an introduction\n print(\"This is a program to find the sum of the first n natural number.\")\n\n # This is an input\n n = eval(input(\"What would you like to find the sum of? 
\"))\n t = 0\n a = 1\n\n # This is an output\n for i in range(n):\n t = t + a\n a = a + 1\n\n print(\"The sum of the first natural numbers is\", t, \".\")\n\nmain() \n","sub_path":"1-3 deliverables/natural.py","file_name":"natural.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343869725","text":"try:\n from django.urls import reverse_lazy\nexcept ImportError:\n from django.core.urlresolvers import reverse_lazy\nfrom django.views import generic\nfrom django.shortcuts import render\n\n\nfrom .forms import Form1, Form2\n\n\ndef multiple_form(request):\n ctx = {}\n form1 = Form1()\n form2 = Form2()\n ctx['form1'] = form1\n ctx['form2'] = form2\n ctx['form_media'] = form1.media + form2.media\n if request.method == 'POST':\n form1 = Form1(request.POST)\n form2 = Form2(request.POST)\n if form1.is_valid():\n form1.save()\n if form2.is_valid():\n form2.save()\n return render(request, 'select2_outside_admin_multiple.html', ctx)","sub_path":"test_project/select2_outside_admin_multiple/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"294314872","text":"from team import TeamServer\nfrom game import Game\n#import logging\nimport names\nimport datetime\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom Database.tabledef import SeasonSchedule\n \nengine = create_engine('sqlite:///E:\\Programming\\Projects\\BaseballGame\\ServerSide\\Database\\littleLeague.db', echo=True)\n \n# create a Session\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass Server:\n\tdef __init__(self):\n\t\tpass\n\tdef runGame(self, homeTeam, awayTeam):\n\t\tself.homeTeam = TeamServer(homeTeam)\n\t\tself.awayTeam = TeamServer(awayTeam)\n\n\n\t\t\n\t\tself.game = Game(self.homeTeam, self.awayTeam)\n\t\tself.game.playGame()\n\t\tdel self.homeTeam\n\t\tdel self.awayTeam\n\t\t\n\t\n \n\t\t\n\t\t\n\t\t\nserver1 = Server()\n\"\"\" for i in range(1):\n\tserver1.runGame(1,2) \"\"\"\ndayInTheSeason = 1\nranGames = False\nsecondOfMinute = 0\nwhile 1:\n\tnow = datetime.datetime.now()\n\tif (int(now.minute) == 30 or int(now.minute) == 00):\n\t\tif ranGames == False:\n\t\t\tranGames = True\n\t\t\tif dayInTheSeason<188:\n\t\t\t\tdayInTheSeason += 1\n\t\t\t\tfor seasonRow in session.query(SeasonSchedule).filter(SeasonSchedule.day==dayInTheSeason):\n\t\t\t\t\tprint(seasonRow.seasonScheduleId, seasonRow.homeTeamId, seasonRow.awayTeamId)\n\t\t\t\t\tserver1.runGame(seasonRow.homeTeamId, seasonRow.awayTeamId)\n\t\t\telse:\n\t\t\t\tfor seasonRow in session.query(SeasonSchedule).filter(SeasonSchedule.day==dayInTheSeason):\n\t\t\t\t\tprint(seasonRow.seasonScheduleId, seasonRow.homeTeamId, seasonRow.awayTeamId)\n\t\t\t\t\tserver1.runGame(seasonRow.homeTeamId, seasonRow.awayTeamId)\n\t\t\t\tdayInTheSeason = 1\n\t\t\n\telse:\n\t\tif secondOfMinute != now.second:\n\t\t\tsecondOfMinute = now.second\n\t\t\tprint(\"Minutes: \",now.minute,\", Seconds: \", now.second,\", RanGames: \", ranGames,\", DayoftheSeason: \", dayInTheSeason)\n\t\t\tranGames = False","sub_path":"ServerSide/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"36964003","text":"\n\nfrom xai.brain.wordbase.nouns._door import _DOOR\n\n#calss header\nclass _DOORS(_DOOR, ):\n\tdef 
__init__(self,): \n\t\t_DOOR.__init__(self)\n\t\tself.name = \"DOORS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"door\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_doors.py","file_name":"_doors.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"109882966","text":"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for hager_zhang_lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl\n\ntfe = tf.contrib.eager\n\n\ndef test_function_x_y(x, y):\n \"\"\"Builds a function that passes through the given points.\n\n Args:\n x: A tf.Tensor of shape [n].\n y: A tf.Tensor of shape [n] or [b, n] if batching is desired.\n\n Returns:\n A callable that takes a tf.Tensor `t` as input and returns as output the\n value and derivative of the interpolated function at `t`.\n \"\"\"\n if len(y.shape) == 1: # No batches.\n y = tf.expand_dims(y, axis=0)\n b, n = y.shape\n y = tf.expand_dims(y, axis=-1)\n x = tf.reshape(tf.tile(x, [b]), (b, n, 1)) # Repeat x on all batches.\n\n def f(t):\n t = tf.convert_to_tensor(t)\n while len(t.shape) < 3:\n t = tf.expand_dims(t, axis=-1)\n with tf.GradientTape() as g:\n g.watch(t)\n p = tf.contrib.image.interpolate_spline(x, y, t, 2)\n return tf.squeeze(p), tf.squeeze(g.gradient(p, t))\n\n return f\n\n\ndef test_function_x_y_dy(x, y, dy, eps=0.1):\n \"\"\"Builds a polynomial with (approx) given values and derivatives.\"\"\"\n x1 = x + eps\n y1 = y + eps * dy\n x2 = x - eps\n y2 = y - eps * dy\n return test_function_x_y(tf.concat([x1, x2], -1), tf.concat([y1, y2], -1))\n\n\nclass HagerZhangLibTest(tf.test.TestCase):\n\n @tfe.run_test_in_graph_and_eager_modes\n def test_bisect_simple(self):\n \"\"\"Tests that _bisect works on a 1 variable scalar valued function.\"\"\"\n wolfe_threshold = 1e-6\n x = tf.constant([0.0, 0.5, 1.0])\n y = tf.constant([1.0, 0.6, 1.2])\n dy = tf.constant([-0.8, 0.6, -0.7])\n fun = test_function_x_y_dy(x, y, dy)\n\n val_a = hzl._apply(fun, 0.0) # Value at zero.\n val_b = hzl._apply(fun, 1.0) # Value at initial step.\n f_lim = val_a.f + (wolfe_threshold * tf.abs(val_a.f))\n\n result = self.evaluate(hzl.bisect(fun, val_a, val_b, f_lim))\n self.assertEqual(result.right.x, 0.5)\n\n @tfe.run_test_in_graph_and_eager_modes\n def test_bisect_batching(self):\n \"\"\"Tests that _bisect works in batching mode.\"\"\"\n wolfe_threshold = 1e-6\n # Let's build our example function with 4 batches, each evaluating a\n # different poly. 
They all have negative slopes both on 0.0 and 1.0,\n # but different slopes (positive, negative) and values (low enough, too\n # high) on their midpoint.\n x = tf.constant([0.0, 0.5, 1.0])\n y = tf.constant([[1.0, 0.6, 1.2],\n [1.0, 0.6, 1.2],\n [1.0, 1.6, 1.2],\n [1.0, 1.6, 1.2]])\n dy = tf.constant([[-0.8, 0.6, -0.7],\n [-0.8, -0.4, -0.7],\n [-0.8, 0.8, -0.7],\n [-0.8, -0.4, -0.7]])\n fun = test_function_x_y_dy(x, y, dy, eps=0.1)\n\n val_a = hzl._apply(fun, tf.zeros(4)) # Values at zero.\n val_b = hzl._apply(fun, tf.ones(4)) # Values at initial step.\n f_lim = val_a.f + (wolfe_threshold * tf.abs(val_a.f))\n\n expected_left = np.array([0.0, 0.5, 0.0, 0.0])\n expected_right = np.array([0.5, 0.75, 0.5, 0.25])\n\n result = self.evaluate(hzl.bisect(fun, val_a, val_b, f_lim))\n self.assertTrue(np.all(result.stopped))\n self.assertTrue(np.all(~result.failed))\n self.assertTrue(np.all(result.left.df < 0))\n self.assertTrue(np.all(result.right.df >= 0))\n self.assertArrayNear(result.left.x, expected_left, 1e-5)\n self.assertArrayNear(result.right.x, expected_right, 1e-5)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib_test.py","file_name":"hager_zhang_lib_test.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"222933843","text":"import cv2\nimport numpy\nfrom GlobalSettings import FilterWindowName, imdebug\n\n#point in quad\ndef PointInQuad(m,l):\n #is the point outside the quad?\n if (m<0 or m>1 or l<0 or l>1):\n return True\n return False\n\n#Test 1\n##CompositeShow(\"Camera 1\", cam1, settings)\n##def mouseback_rect(event,x,y,flags,param):\n## if event==cv.CV_EVENT_LBUTTONUP:\t\t# here event is left mouse button double-clicked\n## new = cam1.FrameCorrect(x,y)\n## print x,y, \"->\", new\n##\n##\n##\n##cv.SetMouseCallback(\"Camera 1\", mouseback_rect);\n##cv.WaitKey()\n\n#cv.NamedWindow(\"Image window\", 1)\n\ndef nothing(da):\n pass\n\ntracks = {}\n\ndef setupGUI(tag, min_default=128, max_default=128):\n global FilterWindowName\n if imdebug:\n cv2.namedWindow(FilterWindowName+tag, 2)\n cv2.createTrackbar(tag+\" Min\", FilterWindowName+tag, min_default, 255, nothing)\n cv2.createTrackbar(tag+\" Max\", FilterWindowName+tag, max_default, 255, nothing)\n else:\n tracks[tag+\" Min\"] = min_default\n tracks[tag+\" Max\"] = max_default\n\n\ndef ErodeTrick(im):\n kernel = numpy.ones((3,3),numpy.uint8)\n im = cv2.erode(im, kernel, iterations=1)\n im = cv2.dilate(im, kernel, iterations = 1)\n\n #kernel = numpy.ones((10,10),numpy.uint8)\n #im = cv2.erode(im, kernel, iterations=2)\n #im = cv2.dilate(im, kernel, iterations = 2)\n\n #cv2.dilate(im, im, None, 3)\n #cv2.erode(im, im, None, 3)\n return im\n\n\ndef getTrack(name, window):\n if imdebug:\n high_bnd = cv2.getTrackbarPos(name, window)\n low_bnd = cv2.getTrackbarPos(name, window)\n else:\n high_bnd = tracks[name+\" Max\"]\n low_bnd = tracks[name+\" Min\"]\n return high_bnd, low_bnd\n","sub_path":"py/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"476080437","text":"# -*- encoding: utf8 -*-\nimport random\nfrom datetime import datetime\nfrom time import sleep\nfrom src.controller.exceptions import InsufficientDataInTransactionException, \\\n MalformedTransactionException\n\n\nclass TransactionClassifier():\n\n def 
__init__(self,logger,constants_dto):\n        self.__logger=logger\n        self.__constants_dto=constants_dto\n\n    def get_fraud_code(self, transaction_dto):\n        \"\"\"\n        In order to simplify the development a really simple operation is\n        going to be used. A random sleep time will be added to resemble the\n        performance of a real Classifier.\n\n        Those are the only fields taken into consideration:\n\n        * commerce_account_iban\n        * client_credit_card\n        * transaction_amount\n\n        The last digits of these three fields are added and, if the last digits of\n        the result match the ones specified in the constants, the transaction\n        is classified as fraudulent.\n\n        :param transaction_dto:\n        :return:\n        \"\"\"\n        self.__check_enough_data_to_classify_transaction(transaction_dto)\n        start_time = datetime.now()\n        is_positive = self.__transaction_remainder_in_fraudulent_remainders(\n            transaction_dto\n        )\n        self.__sleep_random_amount_if_necessary(start_time)\n        if is_positive:\n            self.__logger.info(\"Classified as Fraudulent transaction\")\n            return random.choice(self.__constants_dto.fraudulent_codes)\n\n        else:\n            self.__logger.info(\"Classified as Legit transaction\")\n            return self.__constants_dto.legit_code\n\n    def __check_enough_data_to_classify_transaction(self, transaction_dto):\n        complete = (\n            transaction_dto.commerce_account_iban is not None and\n            transaction_dto.client_credit_card is not None and\n            transaction_dto.transaction_amount is not None\n        )\n        if complete is False:\n            raise InsufficientDataInTransactionException()\n\n    def __transaction_remainder_in_fraudulent_remainders(self, transaction_dto):\n        try:\n            operand1 = int(str(transaction_dto.commerce_account_iban)[-1])\n            operand2 = int(str(transaction_dto.client_credit_card)[-1])\n            operand3 = int(str(int(transaction_dto.transaction_amount))[-1])\n        except:\n            raise MalformedTransactionException()\n        total = operand1 + operand2 + operand3\n        remainder = total % self.__constants_dto.divider\n        return remainder in self.__constants_dto.positive_remainders\n\n    def __sleep_random_amount_if_necessary(self, start_time):\n        millis_already_passed = self.millis_interval(start_time, datetime.now())\n        min, max = self.__constants_dto.random_sleep_bounds_ms\n        sleep_time = random.randint(min, max)/ float(1000)\n        # millis_interval returns milliseconds; convert before subtracting from seconds\n        sleep_time -= millis_already_passed / float(1000)\n        if sleep_time > 0 :\n            sleep(sleep_time)\n\n    @staticmethod\n    def millis_interval(start, end):\n        \"\"\"start and end are datetime instances\"\"\"\n        diff = end - start\n        millis = diff.days * 24 * 60 * 60 * 1000\n        millis += diff.seconds * 1000\n        millis += diff.microseconds / 1000\n        return millis\n","sub_path":"src/controller/endpoint_handlers/transaction_classifier.py","file_name":"transaction_classifier.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"42128182","text":"class Solution(object):\n    def isMatch(self, s, p):\n        l = len(s)\n        if len(p) - p.count('*') > l:\n            return False\n        dp = [True] + [False] * l\n        for letter in p:\n            new_dp = [dp[0] and letter == '*']\n            if letter == '*':\n                for j in range(l):\n                    new_dp.append(new_dp[-1] or dp[j + 1])\n            elif letter == '?':\n                new_dp += dp[:l]\n            else:\n                new_dp += [dp[j] and s[j] == letter for j in range(l)]\n            dp = new_dp\n        return dp[-1]\n    \n\nif __name__ == '__main__':\n    solution = Solution()\n    # demo: wildcard pattern matching\n    s_string = 'adceb'\n    p_pattern = '*a*b'\n    result = solution.isMatch(s_string, p_pattern)\n    print(result)\n    
","sub_path":"01_LeetCode/0044/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"341657217","text":"# -*- encoding:utf-8 -*-\n\n# Je ne fournis ce script que par soucis de simplicité, n'ayant pas trouvé de zip contenant l'integrale de cette saga.\n# Tout le contenu telechargé est la propriété de DSM Mitch et peut etre retrouvé ici: http://matruc.free.fr/matrick/index.htm\n\n# This piece of software is under the WTF Public Licence.\n# Everyone is permitted to copy and distribute verbatim or modified\n# copies of this program, under the following terms of the WFTPL :\n#\n# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\n# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n#\n# 0. You just DO WHAT THE FUCK YOU WANT TO.\n\ntry: # Python 3.3\n import urllib.request as urllib\nexcept: # Python 2.7\n import urllib2 as urllib\n range = xrange\n\nimport os\nimport shutil\nimport sys\nimport zipfile\n\nnbEpisode = 22\nstart = 19\nrealNb = 13\nsaut = {22, 23}\npath = \"Matruc\"\ntmpPath = \"Matruc_tmp\"\nbase = \"http://matruc.free.fr/matrick/saga_mp3_dl2.php?id=\"\nattempt = 3\n\n\ntry:\n os.mkdir(tmpPath)\nexcept OSError as e:\n if not os.path.isdir(tmpPath):\n sys.exit(e)\n\ntry:\n os.mkdir(path)\nexcept OSError as e:\n if not os.path.isdir(path):\n sys.exit(e)\n\n# Matrick 1: Pack complet\n\nprint(\"http://matruc.free.fr/matrick/saga_mp3_dl2.php?id=23\")\nzipFile = urllib.urlopen(\"http://matruc.free.fr/matrick/saga_mp3_dl2.php?id=23\")\n\nwith open(tmpPath + \"/matruc1.zip\", 'wb') as output:\n output.write(zipFile.read(500000000))\n\nwith zipfile.ZipFile(\"{}/matruc1.zip\".format(tmpPath)) as zip:\n zip.extractall(path)\n\nshutil.rmtree(tmpPath)\n\n# Matruc 2\n\nfor i in range(start, start + nbEpisode + 1):\n if i in saut:\n continue\n\n realNb += 1\n link = \"{}{}\".format(base, i)\n\n print(link)\n\n for j in range(attempt):\n try:\n mp3file = urllib.urlopen(link)\n break\n except:\n print(\"Error\")\n else:\n print(\"Error, can't download episod {}\".format(i))\n\n fileName = \"{}/matrick{:0>2}.mp3\".format(path, realNb)\n\n with open(fileName, 'wb') as output:\n output.write(mp3file.read(500000000))\n","sub_path":"matruc.py","file_name":"matruc.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"108017624","text":"import time\nfrom datetime import datetime\n\nfrom nanohttp import settings\nfrom sqlalchemy import Integer, Enum, Unicode, DateTime, or_, and_\nfrom sqlalchemy.sql.expression import text\n\nfrom . 
import logger\nfrom .exceptions import RestfulException\nfrom .orm import TimestampMixin, DeclarativeBase, Field, DBSession, \\\n create_thread_unsafe_session\n\n\nclass TaskPopError(RestfulException):\n pass\n\n\nclass MuleTask(TimestampMixin, DeclarativeBase):\n __tablename__ = 'mule_task'\n\n id = Field(Integer, primary_key=True, json='id')\n at = Field(DateTime, nullable=True, json='at', default=datetime.now)\n status = Field(\n Enum(\n 'new',\n 'in-progress',\n 'expired',\n 'success',\n 'failed',\n\n name='mule_status_enum'\n ),\n default='new',\n nullable=True, json='status'\n )\n expired_at = Field(DateTime, nullable=True, json='expiredAt')\n terminated_at = Field(DateTime, nullable=True, json='terminatedAt')\n type = Field(Unicode(50))\n\n __mapper_args__ = {\n 'polymorphic_identity': __tablename__,\n 'polymorphic_on': type\n }\n\n def do_(self):\n raise NotImplementedError\n\n @classmethod\n def pop(cls, statuses={'new'}, filters=None, session=DBSession):\n\n find_query = session.query(\n cls.id.label('id'),\n cls.created_at,\n cls.at,\n cls.status,\n )\n if filters is not None:\n find_query = find_query.filter(\n text(filters) if isinstance(filters, str) else filters\n )\n\n find_query = find_query \\\n .filter(cls.at <= datetime.now()) \\\n .filter(\n or_(\n cls.status == 'in-progress', cls.status == 'new', \\\n and_(\n cls.status == 'failed',\n cls.expired_at > datetime.now()\n )\n )\n ) \\\n .limit(1) \\\n .with_for_update()\n\n cte = find_query.cte('find_query')\n update_query = MuleTask.__table__.update() \\\n .where(MuleTask.id == cte.c.id) \\\n .values(status='in-progress') \\\n .returning(MuleTask.__table__.c.id)\n\n task_id = session.execute(update_query).fetchone()\n session.commit()\n if not task_id:\n raise TaskPopError('There is no task to pop')\n task_id = task_id[0]\n task = session.query(cls).filter(cls.id == task_id).one()\n return task\n\n def execute(self, context, session=DBSession):\n try:\n isolated_task = session \\\n .query(MuleTask) \\\n .filter(MuleTask.id == self.id) \\\n .one()\n isolated_task.do_(context)\n session.commit()\n except:\n session.rollback()\n raise\n\n\ndef worker(statuses={'new'},filters=None, tries=-1):\n isolated_session = create_thread_unsafe_session()\n context = {'counter': 0}\n tasks = []\n\n while True:\n context['counter'] += 1\n logger.debug('Trying to pop a task, Counter: %s' % context['counter'])\n try:\n task = MuleTask.pop(\n statuses=statuses,\n filters=filters,\n session=isolated_session\n )\n\n except TaskPopError as ex:\n logger.debug('No task to pop: %s' % ex.to_json())\n isolated_session.rollback()\n if tries > -1:\n tries -= 1\n if tries <= 0:\n return tasks\n time.sleep(settings.jobs.interval)\n continue\n\n try:\n task.execute(context)\n\n # Task success\n task.status = 'success'\n task.terminated_at = datetime.utcnow()\n\n except:\n logger.error('Error when executing task: %s' % task.id)\n task.status = 'failed'\n\n finally:\n if isolated_session.is_active:\n isolated_session.commit()\n tasks.append((task.id, task.status))\n\n","sub_path":"restfulpy/mule.py","file_name":"mule.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"533273318","text":"#!/usr/bin/python3\n\nimport sys\nimport glob\nimport re\nimport subprocess\nimport os\nimport string\nimport shutil\n\noutDir = \"fewer_bats_final_aligns\"\n\nif not os.path.isdir(outDir):\n os.makedirs(outDir)\n\ndiscardList = [\"Micropteropus_pusillus\", 
\"Rhinolophus_ferrumequinum\", \"Rhinolophus_pearsoni\", \"Rhinolophus_yunanensis\", \"Hipposideros_dinops\", \"Molossus_sinaloae\", \"Miniopterus_natalensis\", \"Myotis_davidii\", \"Myotis_lucifugus\", \"Nycteris_tragata\", \"Rhynchonycteris_naso\", \"Saccopteryx_bilineata\", \"Mormoops_megalophylla\", \"Pteronotus_quadridens\", \"Micronycteris_microtis\", \"Chrotopterus_auritus\", \"Mimon_crenulatum\", \"Tonatia_saurophila\", \"Erophylla_sezekorni\", \"Monophyllus_redmani\", \"Carollia_sowelli\", \"Artibeus_intermedius\", \"Artibeus_phaeotis\", \"Artibeus_watsoni\", \"Platyrrhinus_helleri\", \"Sturnira_lilium\", \"Uroderma_bilobatum\", \"Vampyressa_nymphaea\"]\n\nIDPattern = re.compile(\"^>(\\S+)$\")\nseqPattern = re.compile(\"^[A-Z*-]+$\")\n\nfor alignment in glob.glob(\"./*_align_f.fas\"):\n alignment = os.path.basename(alignment)\n print(alignment)\n outAlignName = outDir + \"/\" + alignment.replace(\"_f.fas\",\"_fb.fas\")\n inFile = open(alignment, \"r\")\n outFile = open(outAlignName, \"w\")\n for idx, line in enumerate(inFile):\n if idx == 0 and not IDPattern.match(line):\n sys.exit(\"Error: First line is not an ID line\")\n if IDPattern.match(line):\n species = IDPattern.match(line).group(1)\n sequence = inFile.readline().rstrip()\n if not seqPattern.match(sequence):\n print(line)\n print(sequence)\n sys.exit(\"Error, sequence not in expected format\")\n if species in discardList:\n print(\"Throwaway bat found, skipping\")\n continue\n else:\n outFile.write(\">\" + species + \"\\n\")\n outFile.write(sequence + \"\\n\")\n elif seqPattern.match(line):\n sys.exit(\"Error, sequence shouldn't match here\")\n else:\n sys.exit(\"Error, no match to line\") \n inFile.close()\n outFile.close()\n\n\n","sub_path":"bat_discarder.py","file_name":"bat_discarder.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"300391742","text":"\n\nfrom xai.brain.wordbase.nouns._groan import _GROAN\n\n#calss header\nclass _GROANED(_GROAN, ):\n\tdef __init__(self,): \n\t\t_GROAN.__init__(self)\n\t\tself.name = \"GROANED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"groan\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_groaned.py","file_name":"_groaned.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"507227312","text":"from typing import List\r\n\r\nclass myGraph:\r\n def __init__(self):\r\n self.graph = {}\r\n\r\n def addInfo(self, startV: int, endV: List[int]):\r\n self.graph[startV] = endV\r\n \r\n def addEdge(self, startV: int, endV: int):\r\n self.graph[startV].append(endV)\r\n \r\n def addVertex(self, V: int):\r\n self.graph[V] = []\r\n\r\n def dfs_recursive(self, startV, visited = []):\r\n visited.append(startV)\r\n \r\n for endV in self.graph[startV]:\r\n if endV not in visited:\r\n self.dfs_recursive(endV, visited)\r\n\r\n return visited\r\n\r\n def dfs(self, startV):\r\n s = [startV]\r\n visited = []\r\n while s:\r\n nowV = s.pop()\r\n if nowV not in visited:\r\n visited.append(nowV)\r\n s.extend(self.graph[nowV][::-1])\r\n\r\n return visited\r\n\r\ng = myGraph()\r\ng.addInfo( 'A', ['B', 'E', 'I'])\r\ng.addInfo( 'B', ['A', 'C'])\r\ng.addInfo( 'C', ['B', 'D'])\r\ng.addInfo( 'D', ['C'])\r\ng.addInfo( 'E', ['A', 'F', 'H'])\r\ng.addInfo( 'F', ['E', 'G'])\r\ng.addInfo( 'G', ['F'])\r\ng.addInfo( 'H', ['E'])\r\ng.addInfo( 'I', ['A', 'J'])\r\ng.addInfo( 'J', ['I'])\r\n\r\na = 
g.dfs('A')\r\n\r\nprint(a)","sub_path":"geonhokim/6.Depth_First_Search/구현.py","file_name":"구현.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"46372231","text":"import datetime, flask, json, pickle\napp = flask.Flask('first_app')\n\ntry:\n with open('post_data.pickle', 'rb') as fin:\n POST_DATA = pickle.load(fin)\nexcept:\n POST_DATA = []\n\ndef write():\n with open('post_data.pickle', 'wb') as fout:\n pickle.dump(POST_DATA, fout)\n\n\nINDEX_HTML = '''\n<ul>\n {}\n</ul>\n'''\n\n@app.route('/')\ndef index():\n lis = ''\n for li in POST_DATA:\n timestamp = li['timestamp']\n text = li['text']\n lis += f'<li> {timestamp} - {text}</li>\\n'\n content = INDEX_HTML.format(lis)\n html = flask.render_template('base.html', title=\"Home\",\n home=\"active\", post=\"\", content=content)\n return html\n\n\nPOST_HTML = '''\n<form action=\"/post\" method=\"POST\">\n <div class=\"form-group\">\n <label for=\"input-post\">Message</label>\n <input type=\"text\" name=\"text\" class=\"form-control\" id=\"input-post\">\n </div>\n <button type=\"submit\" class=\"btn btn-primary\">Submit</button>\n</form>\n'''\n\n@app.route('/post', methods=['GET', 'POST'])\ndef post():\n if flask.request.method == 'POST':\n POST_DATA.append({\n 'timestamp': datetime.datetime.now().isoformat(),\n 'text': flask.request.form['text']\n })\n write()\n\n html = flask.render_template('base.html', title=\"Post\",\n home=\"\", post=\"active\", content=POST_HTML)\n return html\n\napp.run(host='0.0.0.0', port=80, debug=False)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392622450","text":"from turtle import *\r\nfrom dessinblocs import ovalerelatif\r\ndef undé(taille,x,y):\r\n \"\"\"setup(900,700)\r\n screen=Screen()\r\n \r\n screen.bgpic('jeuxx.gif')\"\"\"\r\n\r\n #bgcolor('yellow')\r\n\r\n up()\r\n goto(x,y)\r\n down()\r\n\r\n t=taille\r\n\r\n tracer(0)\r\n\r\n up()\r\n forward(t*75)\r\n right(90)\r\n forward(t*130)\r\n left(90)\r\n down()\r\n\r\n width(1*t)\r\n pencolor('black')\r\n fillcolor('black')\r\n begin_fill()\r\n right(6)\r\n circle(t*1400,11.5)\r\n circle(t*5,100)\r\n left(10)\r\n circle(t*115,22)\r\n left(124)\r\n circle(t*-20,90)\r\n left(8.5)\r\n forward(t*239)\r\n left(90)\r\n forward(t*25)\r\n end_fill()\r\n\r\n begin_fill()\r\n circle(t*-20,65)\r\n right(2.5)\r\n forward(t*200)\r\n circle(t*-55,58)\r\n left(11)\r\n forward(t*175)\r\n left(60)\r\n circle(t*7,100)\r\n left(10.5)\r\n forward(t*160)\r\n circle(t*90,72)\r\n right(17)\r\n forward(t*206)\r\n circle(t*20,100)\r\n forward(t*15)\r\n left(70)\r\n forward(t*27)\r\n end_fill()\r\n\r\n fillcolor('white')\r\n begin_fill()\r\n width(4*t)\r\n right(101)\r\n forward(t*240)\r\n circle(t*40,80)\r\n right(15)\r\n for i in range(154):\r\n width(3.2+0.02*i)\r\n forward(t*1)\r\n \r\n circle(t*45,55)\r\n right(11)\r\n forward(t*210)\r\n circle(t*20,74)\r\n for i in range(239):\r\n width(6.08-0.01*i)\r\n forward(t*1)\r\n width(3)\r\n circle(t*50,60)\r\n forward(t*40)\r\n (k,l)=pos()\r\n forward(t*110)\r\n circle(t*60,50)\r\n forward(t*205)\r\n circle(t*26,60)\r\n end_fill()\r\n\r\n right(110)\r\n up()\r\n goto(k,l)\r\n backward(5)\r\n left(90)\r\n forward(t*255)\r\n right(46)\r\n down()\r\n angle=[0,141,116]\r\n trait=[229,168,255]\r\n for i in range (3):\r\n left(angle[i])\r\n (a,b)=pos()\r\n for j in range(trait[i]):\r\n 
width(10-0.03*j)\r\n forward(t*1)\r\n goto(a,b)\r\n \r\n up()\r\n backward(84)\r\n left(90)\r\n forward(t*210)\r\n down()\r\n begin_fill()\r\n width(5)\r\n left(75)\r\n pencolor('black')\r\n forward(t*105)\r\n circle(t*-20,78)\r\n forward(t*250)\r\n width(8)\r\n circle(t*-18,83)\r\n forward(t*120)\r\n (r,t)=pos()\r\n circle(t*-100,14)\r\n for i in range (210):\r\n width(8-0.035*i)\r\n forward(t*1)\r\n \r\n circle(t*-20,85)\r\n forward(t*246)\r\n right(89)\r\n forward(t*223)\r\n left(15)\r\n circle(t*43,70)\r\n end_fill() \r\n up()\r\n left(100)\r\n goto(r,t)\r\n \r\n down()\r\n width(6)\r\n circle(t*-10,95)\r\n for i in range (260):\r\n width(6-0.015*i)\r\n forward(t*1)\r\n ht()\r\n end_fill()\r\n \r\n update()\r\n\r\nundé(1,0,0)\r\n \r\n","sub_path":"quilles/De.py","file_name":"De.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412656679","text":"from openerp import tools\nfrom openerp.osv import fields,osv\nimport openerp.addons.decimal_precision as dp\nimport time\nimport logging\nfrom openerp.tools.translate import _\n\n_logger = logging.getLogger(__name__)\nKAS_STATES =[('draft','Draft'),('open','Verifikasi'), ('reject','Ditolak'),\n ('done','Disetujui')]\n\nclass kas(osv.osv):\n\t_name \t\t= \"anggaran.kas\"\n\n\tdef _biaya_exists(self, cursor, user, ids, name, arg, context=None):\n\t\tres = {}\n\t\tfor kas in self.browse(cursor, user, ids, context=context):\n\t\t\tres[kas.id] = False\n\t\t\tif kas.biaya_ids:\n\t\t\t\tres[kas.id] = True\n\t\treturn res\n\n\t_columns \t= {\n\t\t'name' \t\t\t\t: fields.char(\"Nomor\", readonly=True ),\n\t\t'tanggal'\t\t\t\t: fields.date(\"Tanggal\", required=True),\n\t\t'tahun_id'\t\t \t: fields.many2one('account.fiscalyear', 'Tahun', required=True),\n\t\t'unit_id' \t\t\t\t: fields.many2one('anggaran.unit', 'Unit Kerja', required=True, help=\"Unit Kerja yang memiliki transaksi ini\"),\n\t\t'sumber_uang' \t: fields.selection([('up','UP'),('tup','TUP'),('gup','GUP')],\n\t\t\t\t\t\t\t\t'Sumber Uang', help=\"Dari UP, TUP, atau GUP\"),\n\t\t'type' \t\t\t\t: fields.selection([('in','Masuk'),('out','Keluar')],'Jenis Kas',required=True),\n\t\t'jenis_item' \t\t\t: fields.selection([('um','Uang Muka'),('def','Definitif')],'Jenis Item',required=True),\n\t\t'journal_id' \t\t\t: fields.many2one('account.journal', 'Journal'),\n\n\t\t'kepada_unit_id' \t\t: fields.many2one('anggaran.unit', 'Dibayarkan Kepada Unit', help=\"Unit penerima uang kas keluar\"),\n\t\t'dari_unit_id' \t\t\t: fields.many2one('anggaran.unit', 'Diterima Dari', help=\"Unit pengirim uang kas masuk\"),\n\t\t'kepada_partner_id'\t \t: fields.many2one('res.partner', 'Dibayarkan Kepada Partner', help=\"Partner (Supplier/Perorangan) penerima kas keluar\"),\n\n\t\t'jumlah' \t\t\t\t: fields.float(\"Jumlah\", required=True),\n\t\t'cheque_nomor'\t\t\t: fields.char(\"Cheque Nomor\"),\n\t\t'rek_nomor'\t\t\t\t: fields.char(\"Rekening Nomor\"),\n\t\t'kegiatan_id' \t\t\t: fields.many2one('anggaran.rka_kegiatan', 'Untuk Keperluan'),\n\t\t'dasar_pembayaran' \t\t: fields.char(\"Dasar Pembayaran\"),\n\n\t\t'bendahara_id' \t\t: fields.many2one('hr.employee', 'Bendahara Penerima'),\n\t\t'nip_bendahara' \t\t: fields.related('bendahara_id', 'otherid' , type='char', relation='hr.employee', string='NIP Bendahara Penerima', store=True, readonly=True),\n\t\t'kadiv_anggaran_id'\t\t: fields.many2one('hr.employee', 'Kepala Divisi Anggaran'),\n\t\t'nip_kadiv_anggaran'\t: fields.related('kadiv_anggaran_id', 'otherid' , 
type='char', relation='hr.employee', string='NIP Kepala Divisi Anggaran', store=True, readonly=True),\n\t\t'kadiv_akuntansi_id' \t: fields.many2one('hr.employee', 'Kepala Divisi Akuntansi'),\n\t\t'nip_kadiv_akuntansi' \t: fields.related('kadiv_akuntansi_id', 'otherid' , type='char', relation='hr.employee', string='NIP Kepala Divisi Akuntansi', store=True, readonly=True),\n\t\t'dirkeu_id' \t\t\t: fields.many2one('hr.employee', 'Direktur Direktorat Keuangan'),\n\t\t'nip_dirkeu_id'\t\t\t: fields.related('dirkeu_id', 'otherid' , type='char', relation='hr.employee', string='NIP Kepala Divisi Akuntansi', store=True, readonly=True),\n\t\t\n\t\t'state' \t: fields.selection(KAS_STATES,'Status',readonly=True,required=True),\n\t\t'user_id'\t \t\t: fields.many2one('res.users', 'Created'),\n\t\t'spm_id' \t\t\t\t: fields.many2one('anggaran.spm', 'SPM Asal', help=\"SPM untuk Pengeluaran Kas\"),\n\n\t\t'biaya_ids'\t\t\t\t: fields.one2many('anggaran.biaya','kas_id','Biaya'),\n\t\t'biaya_exists'\t\t\t: fields.function(_biaya_exists, \n\t\t\tstring='Biaya Sudah Tercatat', \n\t\t type='boolean', help=\"Apakah kas keluar ini sudah dicatatkan bukti biayanya.\"),\n\n\t} \n\t_defaults = {\n\t\t'state' \t: KAS_STATES[0][0],\n\t\t'tanggal' \t: lambda *a : time.strftime(\"%Y-%m-%d\") ,\n\t\t'user_id'\t\t: lambda obj, cr, uid, context: uid,\n\t\t'name'\t\t\t: lambda obj, cr, uid, context: '/',\t\t\n\t}\n\n\tdef action_view_biaya(self, cr, uid, ids, context=None):\n\t\t'''\n\t\tThis function returns an action that display existing biaya \n\t\tof given kas ids. It can either be a in a list or in a form view, \n\t\tif there is only one biaya to show.\n\t\t'''\n\t\tmod_obj = self.pool.get('ir.model.data')\n\t\tact_obj = self.pool.get('ir.actions.act_window')\n\n\t\tresult = mod_obj.get_object_reference(cr, uid, 'anggaran', 'action_biaya_list')\n\t\tid = result and result[1] or False\n\t\tresult = act_obj.read(cr, uid, [id], context=context)[0]\n\t\t#compute the number of biaya to display\n\t\tbiaya_ids = []\n\t\tfor kas in self.browse(cr, uid, ids, context=context):\n\t\t\tbiaya_ids += [biaya.id for biaya in kas.biaya_ids]\n\t\t#choose the view_mode accordingly\n\t\tif len(biaya_ids)>1:\n\t\t\tresult['domain'] = \"[('id','in',[\"+','.join(map(str, biaya_ids))+\"])]\"\n\t\telse:\n\t\t\tres = mod_obj.get_object_reference(cr, uid, 'anggaran', 'view_biaya_form')\n\t\t\tresult['views'] = [(res and res[1] or False, 'form')]\n\t\t\tresult['res_id'] = biaya_ids and biaya_ids[0] or False\n\t\treturn result\n\n\tdef create(self, cr, uid, vals, context=None):\n\t\tif context is None:\n\t\t\tcontext = {}\n\t\tif vals.get('name', '/') == '/':\n\t\t\tvals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'anggaran.kas.%s' % vals['type'] ) or '/'\n\t\tnew_id = super(kas, self).create(cr, uid, vals, context=context)\n\t\treturn new_id\n\n\tdef action_draft(self,cr,uid,ids,context=None):\n\t\t#set to \"draft\" state\n\t\treturn self.write(cr,uid,ids,{'state':KAS_STATES[0][0]},context=context)\n\t\n\tdef action_confirm(self,cr,uid,ids,context=None):\n\t\t#set to \"confirmed\" state\n\t\treturn self.write(cr,uid,ids,{'state':KAS_STATES[1][0]},context=context)\n\t\n\tdef action_done(self,cr,uid,ids,context=None):\n\t\t#set to \"done\" state\n\n\t\t# bentuk kas masuk di unit tujuan\n\t\tkas = self.browse(cr, uid, ids[0], context=context)\n\t\tif kas.kepada_unit_id:\n\t\t\tcontext.update({\n\t\t\t\t'tahun_id' \t\t\t: kas.tahun_id.id, \n\t\t\t\t'dasar_pembayaran' \t: kas.dasar_pembayaran, \n\t\t\t\t'jumlah' \t\t\t: kas.jumlah, 
\n\t\t\t\t'unit_id' \t\t\t: kas.kepada_unit_id.id, \n\t\t\t\t'contra_unit' \t\t: kas.unit_id.id, \n\t\t\t\t'kegiatan_id' \t\t: kas.kegiatan_id.id,\n\t\t\t\t'jenis_item' \t\t: kas.jenis_item,\n\t\t\t\t'sumber_uang' \t\t: kas.sumber_uang,\n\t\t\t\t'spm_id' \t\t\t: kas.spm_id.id,\n\t\t\t})\n\t\t\tkas_id = self.create_kas(cr, uid, 'in', context )\n\t\treturn self.write(cr,uid,ids,{'state':KAS_STATES[3][0]},context=context)\n\t\n\tdef action_reject(self,cr,uid,ids,context=None):\n\t\t#set to \"done\" state\n\t\treturn self.write(cr,uid,ids,{'state':KAS_STATES[2][0]},context=context)\n\n\tdef create_kas(self, cr, uid, type, context=None):\n\n\t\t#################################################################\n\t\t# cari journal kas keluar\n\t\t#################################################################\n\t\tjournal_ids = False\n\t\tif type=='out':\n\t\t\tjournal_ids = self.pool.get(\"account.journal\").search(cr,uid,[('code','=','BNK2')], context=context)\n\t\t\tif not journal_ids:\n\t\t\t\traise osv.except_osv(_('Error'),_(\"Journal untuk transaksi Kas Keluar tidak ditemukan\") ) \n\t\telif type=='in':\n\t\t\tjournal_ids = self.pool.get(\"account.journal\").search(cr,uid,[('code','=','BNK2')], context=context)\n\t\t\tif not journal_ids:\n\t\t\t\traise osv.except_osv(_('Error'),_(\"Journal untuk transaksi Kas Masuk tidak ditemukan\") ) \n\n\t\tdata = {\n\t\t\t'name' \t\t\t: '/',\n\t\t\t'tanggal'\t\t\t: time.strftime(\"%Y-%m-%d\") ,\n\t\t\t'tahun_id' \t\t\t: context['tahun_id'],\n\t\t\t'unit_id' \t\t\t: context['unit_id'],\n\t\t\t'type' \t\t\t: type,\n\t\t\t'dasar_pembayaran' \t: context['dasar_pembayaran'], \n\t\t\t'kegiatan_id' \t\t: context['kegiatan_id'], \n\t\t\t'journal_id' \t\t: journal_ids[0],\n\t\t\t'kepada_unit_id' \t: context['contra_unit'] if type=='out' else False,\n\t\t\t'dari_unit_id' \t\t: context['contra_unit'] if type=='in' else False,\n\t\t\t'jumlah' \t\t\t: context['jumlah'],\n\t\t\t'jenis_item' \t\t: context['jenis_item'],\n\t\t\t'sumber_uang' \t\t: context['sumber_uang'],\n\t\t\t'spm_id' \t\t\t: context['spm_id'],\n\t\t\t'cheque_nomor'\t\t: '',\n\t\t\t'rek_nomor'\t\t\t: '',\n\t\t\t'state' : 'draft',\n\t\t\t'user_id'\t \t: uid, \n\t\t}\n\t\tkas_id = self.create(cr, uid, data, context=context)\n\t\treturn kas_id \n\n\tdef action_create_biaya(self, cr, uid, ids, context=None):\n\t\tkas = self.browse(cr, uid, ids[0], context=context)\n\t\tbiaya = self.pool.get(\"anggaran.biaya\")\n\t\tdata = {\n\t\t\t'name' \t\t\t\t: '/',\n\t\t\t'tanggal' \t\t\t: time.strftime(\"%Y-%m-%d\") ,\n\t\t\t'biaya_line_ids' \t: False,\n\t\t\t'tahun_id' \t\t\t: kas.tahun_id.id,\n\t\t\t'unit_id' \t\t\t: kas.unit_id.id,\t \n\t\t\t'kepada_partner_id' : kas.kepada_partner_id.id,\t \n\t\t\t'total'\t\t\t\t: kas.jumlah,\n\t\t\t'kas_id'\t\t : kas.id,\n\t\t\t'user_id'\t \t: uid, \n\t\t\t'state' \t: 'draft'\n\t\t}\n\t\tbiaya_id = biaya.create(cr, uid, data, context=context)\n\t\treturn biaya_id\n","sub_path":"anggaran_academic_istn/model/kas.py","file_name":"kas.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160627425","text":"\nmin_face_size = 20.0\n\nwidth = 100\nheight = 1000\nmin_length = min(height, width)\n\nmin_detection_size = 12\nfactor = 0.707 # sqrt(0.5)\n\nscales = [] # scales for scaling the image\nprint(min_length)\n# scales the image so that\n# minimum size that we can detect equals to\n# minimum face size that we want to detect\nm = min_detection_size/min_face_size\nmin_length *= 
m\n\nfactor_count = 0\nwhile min_length > min_detection_size:\n print(min_length)\n scales.append(m*factor**factor_count)\n min_length *= factor\n factor_count += 1\n\n\nprint(scales)","sub_path":"scale-factor-extractor.py","file_name":"scale-factor-extractor.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165364679","text":"\n\nfrom datetime import datetime\n\nbrithDay= input ('Please enter your birth date (mm/dd/yyyy): ')\n\n##year = int(input('age: '))\n\n\ndate1 = datetime.strptime(brithDay,'%m/%d/%Y')\ndate2 = datetime.today()\n\ndef dateInSeconds(dt2, dt1):\n##need to carry over input age, divide it by by timedelta output and subtract new number from initial timedelta output\n\n timedelta = dt2 - dt1 \n ##3600 seconds (1hr), 24hrs per day, \n ##return timedelta.days days between input and current\n return timedelta.days * 3600 * 24 + timedelta.seconds \nprint(\"You have been alive for \\n%d seconds!\" %(dateInSeconds(date2, date1)))\n","sub_path":"labs/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"470220278","text":"\"\"\"Unitesting for the Word manager's functions.\"\"\"\n\nfrom django.test import TestCase\nfrom ..models import Word\nfrom ..managers.word_managers import WordManager\nimport logging\n\nlogger = logging.getLogger('spanglish')\n\nclass WordsManagerTestClass(TestCase):\n \"\"\"Test the WordManager class.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Run at the start of the test of this class.\"\"\"\n logger.debug(\"setUpClass started\")\n\n def test_get_word_object_instance(self):\n \"\"\"Call the get_all_words_by_language and expect to\n get back a Word object instance.\n \"\"\"\n\n\n data = Word.words.get_all_words_by_language()\n\n records = []\n for d in data:\n records.append(d)\n\n self.assertIsInstance(records[0], Word)\n\n def test_get_all_words_by_iso1(self):\n \"\"\"Provide the function with then 'en' value as pararm.\n\n expect to get back one record.\n \"\"\"\n ios_param = 'en'\n data = Word.words.get_all_words_by_language(iso1=ios_param)\n\n records = []\n for d in data:\n records.append(d.word)\n\n logger.debug(\"records: %s\" % records)\n\n self.assertTrue(len(records) == 1)\n\n def test_get_all_words_by_iso1_default_value(self):\n \"\"\"No parameter will be provided, expect to get the\n same result as providing the en parameter.\n \"\"\"\n\n data = Word.words.get_all_words_by_language()\n records = []\n for d in data:\n records.append(d.word)\n\n logger.debug(\"records: %s\" % records)\n\n self.assertTrue(len(records) == 1)\n\n def test_get_all_words_by_iso1_from_manager(self):\n \"\"\"using the manager's method\n directly, expect to get the same result as providing\n the en parameter.\n \"\"\"\n\n data = Word.words.get_all_words_by_language()\n records = []\n for d in data:\n records.append(d.word)\n\n self.assertTrue(len(records) == 1)\n self.assertTrue(WordManager.__dict__['get_all_words_by_language'])\n\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Run after all tests are done.\"\"\"\n logger.debug(\"tearDownClass started\")\n","sub_path":"api/spanglish/tests/test_manager_word.py","file_name":"test_manager_word.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"451301519","text":"# uncompyle6 version 3.7.4\n# 
Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/krogager/Projects/VoigtFit/build/lib/VoigtFit/Asplund.py\n# Compiled at: 2020-03-26 14:00:05\n# Size of source mod 2**32: 1176 bytes\n__author__ = 'Jens-Kristian Krogager'\nimport numpy as np, os\nroot_path = os.path.dirname(os.path.abspath(__file__))\ndatafile = root_path + '/static/Asplund2009.dat'\ndt = [\n ('element', 'U2'), ('N', 'f4'), ('N_err', 'f4'), ('N_m', 'f4'), ('N_m_err', 'f4')]\ndata = np.loadtxt(datafile, dtype=dt)\nfname = root_path + '/static/Lodders2009.dat'\nLodders2009 = np.loadtxt(fname, usecols=(1, 2), dtype=str)\nphotosphere = dict()\nmeteorite = dict()\nsolar = dict()\nfor element, N_phot, N_phot_err, N_met, N_met_err in data:\n photosphere[element] = [N_phot, N_phot_err]\n meteorite[element] = [N_met, N_met_err]\n idx = (Lodders2009 == element).nonzero()[0][0]\n typeN = Lodders2009[idx][1]\n if typeN == 's':\n solar[element] = [\n N_phot, N_phot_err]\n elif typeN == 'm':\n solar[element] = [\n N_met, N_met_err]\n elif typeN == 'a':\n this_N = np.array([N_phot, N_met])\n this_e = np.array([N_phot_err, N_met_err])\n w = 1.0 / this_e ** 2\n N_avg = np.sum(w * this_N) / np.sum(w)\n N_err = np.round(1.0 / np.sqrt(np.sum(w)), 3)\n solar[element] = [N_avg, N_err]","sub_path":"pycfiles/VoigtFit-3.11.5.1-py37-none-any/Asplund.cpython-37.py","file_name":"Asplund.cpython-37.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"76663987","text":"from simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nimport numpy as np\nimport random\nimport os\n\n \n\nclass MolecularDynamics(object):\n def __init__(self, S, Sim, seed = None):\n self.address_inicial = S[\"Input_address\"]\n self.address_output_pdb_MD = S[\"Output_address\"]+'/Trajectory_MD.pdb'\n self.address_output_states = S[\"Output_address\"]+'/States_MD.csv'\n self.Time = S[\"Time\"]\n self.stepsize = S[\"stepsize\"]\n self.Temperature = S[\"Temperature\"]\n self.Pressure = S[\"Pressure\"]\n self.tol = S[\"Tolerance\"]\n # self.Integrator = S[\"Integrator\"]\n self.FF = S[\"Force_Field\"][\"Force_Field_name\"]\n self.FF_WM = S[\"Force_Field\"][\"Water_Model\"] \n self.Name = S[\"Name\"]\n self.SimType = Sim\n self.nsteps = self.Time/self.stepsize\n self.seed = seed\n self.structure_name = S[\"Structure_name\"]\n self.conformations = S[\"Conformations\"]\n \n def SystemPreparation(self, address = None):\n if address is None:\n address = self.address_inicial\n pdb = PDBFile(address) \n print('PDB loaded') \n forcefield = ForceField(self.FF, self.FF_WM) \n print('Force field created') \n modeller = Modeller(pdb.topology, pdb.positions)\n if self.seed != None:\n random.seed(self.seed) \n modeller.addHydrogens(forcefield) \n print('Hydrogens addded')\n if self.SimType == \"MD\":\n modeller.addSolvent(forcefield, model = 'tip3p',padding=1.0*nanometers, neutralize = True) \n system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1*nanometer, constraints=HBonds, switchDistance=0.9*nanometer) \n system.addForce(MonteCarloBarostat(self.Pressure*bar, self.Temperature[0]*kelvin))\n print('System created')\n if self.SimType == \"P\":\n system = forcefield.createSystem(modeller.topology)\n print('System created')\n return pdb, system, modeller\n \n def SimulationPreparation(self, modeller, system, tolerance = None, 
maxIt = None): \n        integrator = LangevinMiddleIntegrator(self.Temperature[0]*kelvin, 1/picosecond, self.stepsize*picoseconds)\n        print('Integrator created')\n        if self.seed != None:\n            integrator.setRandomNumberSeed(self.seed)\n            print('seed')\n        simulation = Simulation(modeller.topology, system, integrator)\n        print('Simulation created')\n        simulation.context.setPositions(modeller.positions)\n        if self.seed != None:\n            simulation.context.setVelocitiesToTemperature(self.Temperature[0]*kelvin, self.seed)\n            print('seed')\n        else:\n            simulation.context.setVelocitiesToTemperature(self.Temperature[0]*kelvin)\n        print('Minimizing energy...')\n        # empty string or None both mean \"not supplied\"\n        if not tolerance and not maxIt:\n            simulation.minimizeEnergy()\n        if tolerance and not maxIt:\n            print('Tolerance = %s' %(tolerance))\n            simulation.minimizeEnergy(tolerance = (tolerance*kilojoule/mole))\n        if maxIt and not tolerance:\n            print('Iterations = %s' %(maxIt))\n            simulation.minimizeEnergy(maxIterations = int(maxIt))\n        if maxIt and tolerance:\n            print('Tolerance = %s \\nIterations = %s' %(tolerance, maxIt))\n            simulation.minimizeEnergy(tolerance = (tolerance*kilojoule/mole), maxIterations = int(maxIt))\n        print('Energy minimized') \n        return integrator, simulation\n    \n    \n    def RunSimulation(self, integrator, simulation):\n        if self.nsteps/self.conformations/10 < 250:\n            rep = 250\n        else:\n            rep = int(self.nsteps/self.conformations/10)\n        \n        simulation.reporters.append(PDBReporter(self.address_output_pdb_MD, self.nsteps/self.conformations))\n        simulation.reporters.append(StateDataReporter(self.address_output_states, rep, step=True, time=True,\n                                potentialEnergy=True, temperature=True))\n        simulation.reporters.append(StateDataReporter(stdout, 500, step=True, time=True, temperature=True))\n        print('Starting simulation...')\n        for i in self.Temperature:\n            integrator.setTemperature(i*kelvin)\n            simulation.step(self.nsteps/np.size(self.Temperature))\n        print('Simulation done')\n    \n    def EnergyCalculation(self, simulation):\n\n        state = simulation.context.getState(getEnergy=True)\n        print(state.getPotentialEnergy())\n        \n        Energy = state.getPotentialEnergy()\n        return Energy\n    \ndef MolecularDynamicsSimulation(S, Sim, seed = None):\n    MD = MolecularDynamics(S, Sim, seed)\n    pdb, system, modeller = MD.SystemPreparation()\n    integrator, simulation = MD.SimulationPreparation(modeller, system)\n    MD.RunSimulation(integrator, simulation)\n    \n    \n    \n","sub_path":"MolecularDynamics/MD_Simulation.py","file_name":"MD_Simulation.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"292806106","text":"from unittest import TestCase\n\nfrom utils.errors import TextError\n\n\nclass ErrorsTest(TestCase):\n    def test_raise(self):\n        try:\n            try:\n                d = {'a': 1}\n                print(d['b'])\n            except KeyError as inner:\n                raise TextError('message', 'text') from inner\n            self.fail('TextError should be raised before here')\n        except TextError as e:\n            self.assertEqual('message', str(e))\n            self.assertEqual('text', e.text)\n            self.assertIn(\"KeyError: 'b'\", e.stack_trace())\n","sub_path":"src/tests/utils/test_errors.py","file_name":"test_errors.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"162688629","text":"# print (False and True and False)\n#\n# print ((False and True) and (False))\n# age=20\n# for a in age:\n# print(a)\n\n\nmember=0\nfor i in range(0,10):\n    ag=input(\"请问你的年龄是多少:\")\n    if ag.isdigit():\n        age=int(ag)\n        
sex=input(\"请问您的性别是(男生请输入m,女生请输入f):\")\n if sex!='m' or sex!='f':\n print(\"您输入的性别有误,请重新输入(男生请输入m,女生请输入f):\")\n if age>=10 and age<=12 and sex== 'f':\n print(\"恭喜你,你可以加球队\")\n member += 1\n else:\n print(\"抱歉,你不能加入球队\")\n else:\n print(\"您输入的年龄有误\")\n input(\"请输入一个整数:\")\n sex = input(\"请问您的性别是(男生请输入m,女生请输入f):\")\n if sex!='m' or sex!='f':\n print(\"您输入的性别有误\")\n input(\"请重新输入性别(男生请输入m,女生请输入f):\")\n if ag.isdigit():\n age = int(ag)\n if age >= 10 and age <= 12 and sex == 'f':\n print(\"恭喜你,你可以加球队\")\n member += 1\n else:\n print(\"抱歉,你不能加入球队\")\n\nprint(\"符合条件的总人数是:%d\" %member)","sub_path":"test/zuoye/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"394117786","text":"\"\"\"add draft fields\n\nRevision ID: 3d429503a29a\nRevises: 2a11dd14665\nCreate Date: 2014-08-30 13:26:03.698902\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3d429503a29a'\ndown_revision = '2a11dd14665'\n\nimport warnings\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom hubtty.dbsupport import sqlite_alter_columns, sqlite_drop_columns\n\ndef upgrade():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n op.add_column('message', sa.Column('draft', sa.Boolean()))\n op.add_column('comment', sa.Column('draft', sa.Boolean()))\n op.add_column('approval', sa.Column('draft', sa.Boolean()))\n\n conn = op.get_bind()\n conn.execute(\"update message set draft=pending\")\n conn.execute(\"update comment set draft=pending\")\n conn.execute(\"update approval set draft=pending\")\n\n sqlite_alter_columns('message', [\n sa.Column('draft', sa.Boolean(), index=True, nullable=False),\n ])\n\n sqlite_alter_columns('comment', [\n sa.Column('draft', sa.Boolean(), index=True, nullable=False),\n ])\n\n sqlite_alter_columns('approval', [\n sa.Column('draft', sa.Boolean(), index=True, nullable=False),\n ])\n\n sqlite_drop_columns('comment', ['pending'])\n sqlite_drop_columns('approval', ['pending'])\n\n\ndef downgrade():\n pass\n","sub_path":"hubtty/alembic/versions/3d429503a29a_add_draft_fields.py","file_name":"3d429503a29a_add_draft_fields.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"298283703","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, site\n\nimport rulib\n\ndef process_page(page, index, parsed):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n\n text = str(page.text)\n parsed = blib.parse(page)\n notes = []\n for t in parsed.filter_templates():\n origt = str(t)\n if str(t.name) == \"wikipedia\":\n val = getparam(t, \"1\")\n newval = rulib.remove_accents(val)\n if val != newval:\n pagemsg(\"Removing accents from 1= in {{wikipedia|...}}\")\n notes.append(\"remove accents from 1= in {{wikipedia|...}}\")\n t.add(\"1\", newval)\n newt = str(t)\n if origt != newt:\n pagemsg(\"Replaced %s with %s\" % (origt, newt))\n\n return str(parsed), notes\n\nparser = blib.create_argparser(\"Remove accents from 1= in {{wikipedia|...}}\",\n include_pagefile=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_page, 
edit=True)\n","sub_path":"fix_wikipedia.py","file_name":"fix_wikipedia.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"27297102","text":"import json\nfrom markov_dict import MarkovDict\nfrom word_generator import WordGenerator\n\ndef main():\n    word_generator = WordGenerator(False)\n\n    for i in range(1, 4):\n        markov_dict = MarkovDict(lookback_amount=i, lookahead_amount=1)\n        markov_dict.process_file(\"lorem_ipsum.txt\")\n        markov_dict.save_to_file(f\"./dicts/lorem_ipsum_{i}.markov\")\n\n        word_generator.add_dictionary(markov_dict, weight=4-i)\n\n    sentence = \"\"\n    for i in range(5):\n        sentence += \" \" + word_generator.generate_word()\n\n    print(sentence)\n\n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"333619421","text":"__author__ = 'gregortaube'\n\n\"\"\"\nGiven S segments, colored red and blue. Segments cannot be connected to segments of the same color.\nEach knot takes up 0.5 of a segment's length, meaning at each join 1 cm will be used only for the knot.\n\n\"\"\"\n\n\ndef ans(testcase, length):\n    print(\"Case #%d: %d\" % (testcase, length))\n\n\ntestcases = int(input())\n\nfor x in range(testcases):\n    length = 0\n    numSegments = int(input())\n    segments = input().split()\n    reds = []\n    blues = []\n\n    for seg in segments:\n        if seg[-1] == 'R':\n            reds.append(int(seg[:-1]))\n        else:\n            blues.append(int(seg[:-1]))\n    reds = list(reversed(sorted(reds)))\n    blues = list(reversed(sorted(blues)))\n\n    if len(reds) > len(blues):\n        numKnots = int(len(blues))*2\n        length += sum(blues) # the min removed since odd number of segments cannot be used\n        for index in range(len(blues)):\n            length+=reds[index]\n    elif len(reds) == len(blues):\n        numKnots = len(reds)*2 #can use both when equal length\n        length += sum(blues)\n        length += sum(reds)\n    else:\n        numKnots = len(reds)*2\n        length += sum(reds)\n        for index in range(len(reds)):\n            length+=blues[index]\n\n    ans(x + 1, length - (numKnots))\n","sub_path":"intermediate2/clsingtheloop.py","file_name":"clsingtheloop.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"470367093","text":"import sys\r\nsys.stdin= open(\"D-sample.in\", \"r\")\r\n#sys.stdout= open(\"D-.out\", 'w')\r\n\r\n\r\nfor t in range(int(input())):\r\n\r\n    N, M= map(int, input().split())\r\n\r\n    grid= {}\r\n    row, col, sub, add= set(range(N)), set(range(N)), set(range(-N + 1, N)), set(range(2*N - 1))\r\n    changes= {}\r\n\r\n    for m in range(M):\r\n        value, i, j= input().split()\r\n        i, j= int(i)-1, int(j)-1\r\n\r\n        grid[i,j]= value\r\n        if value in ('x', 'o'):\r\n            row.remove(i)\r\n            col.remove(j)\r\n        if value in ('+', 'o'):\r\n            sub.remove(j-i)\r\n            add.remove(i+j)\r\n\r\n    #print(sub)\r\n\r\n    for i in [0, N-1]:\r\n        for j in range(N):\r\n\r\n            if j-i in sub and i+j in add:\r\n\r\n                c= ''\r\n                if (i, j) not in grid:\r\n                    c= '+'\r\n                elif grid[i, j] == 'x':\r\n                    c= 'o'\r\n                else:\r\n                    assert False, str(grid[i, j])\r\n\r\n                grid[i, j]= c\r\n                changes[i, j]= c\r\n                sub.add(j-i)\r\n                add.add(i+j)\r\n\r\n\r\n    for i in row:\r\n        for j in col:\r\n\r\n            c= ''\r\n            if (i, j) not in grid:\r\n                c= 'x'\r\n            elif grid[i, j] == '+':\r\n                c= 'o'\r\n            else:\r\n                assert False, str(grid[i, j])\r\n\r\n            grid[i, j]= c\r\n            changes[i, j]= c\r\n            row.add(i)\r\n            
col.add(j)\r\n\r\n\r\n    points= 0\r\n    for (i, j) in grid:\r\n        if grid[i, j] == 'o':\r\n            points+= 2\r\n        else:\r\n            points+= 1\r\n\r\n    #print(grid)\r\n    print(\"Case #{}: {} {}\".format(t+1, points, len(changes)))\r\n    #for (i, j) in changes:\r\n    #    print(\"{} {} {}\".format(changes[i, j], i, j))\r\n","sub_path":"2017/Qualification Round/D/D-Fashion Show.py","file_name":"D-Fashion Show.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498002068","text":"\"\"\"Post-process images of Q-criterion to compute inclination angles.\"\"\"\n\nimport collections\nimport math\nfrom matplotlib import pyplot\nimport pathlib\n\nimport rodney\n\n\nPoint = collections.namedtuple('Point', ['x', 'y'])\n\n\nclass Line(object):\n    \"\"\"Define a line.\"\"\"\n\n    def __init__(self, point1, point2):\n        \"\"\"Compute slope and intercept given two reference points.\"\"\"\n        self.a, self.b = self._slope_intercept(point1, point2)\n\n    def _slope_intercept(self, point1, point2):\n        \"\"\"Compute and return slope and intercept.\"\"\"\n        a = (point2.y - point1.y) / (point2.x - point1.x)\n        b = point1.y - a * point1.x\n        return a, b\n\n    def y(self, x):\n        \"\"\"Compute y given x.\"\"\"\n        if hasattr(x, \"__iter__\"):\n            return (self.a * xi + self.b for xi in x)\n        return self.a * x + self.b\n\n    def get_inclination(self, degrees=True):\n        \"\"\"Compute and return inclination angle w.r.t. horizontal axis.\"\"\"\n        x1, x2 = 0.0, 1.0\n        y1, y2 = self.y([x1, x2])\n        length = math.sqrt((y2 - y1)**2 + (x2 - x1)**2)\n        alpha = math.acos(abs(x2 - x1) / length)\n        if degrees:\n            alpha *= 180.0 / math.pi\n        return alpha\n\n    def create_line_guide(self, point1, point2,\n                          extend_left=0.0, extend_right=0.0):\n        \"\"\"Return line guide for Matplotlib figure.\"\"\"\n        x1, x2 = point1.x - extend_left, point2.x + extend_right\n        y1, y2 = self.y([x1, x2])\n        return (x1, x2), (y1, y2)\n\n\n# Parse command line and set directories.\nargs = rodney.parse_command_line()\nsimudir = pathlib.Path(__file__).absolute().parents[1]\nfigdir = simudir / 'figures'\n\n# Lateral view: Load PNG image from file.\nfilepath = figdir / 'qcrit_wx_lateral_view_0008500.png'\nwith open(filepath, 'rb') as infile:\n    img = pyplot.imread(infile)\n\n# Lateral view: Plot the image.\nfig, ax = pyplot.subplots(figsize=(12.0, 6.0))\nax.imshow(img)\nlims = ax.axis('scaled', adjustable='box')\nxstart, xend, yend, ystart = lims\nax.axhline(0.5 * (yend - ystart), xmin=xstart, xmax=xend,\n           color='black', linestyle='-.', linewidth=2.0)\n\n# Lateral view: Create inclination line for alpha.\nalpha = (Point(292, 193), Point(598, 60)) # in pixels\nline = Line(*alpha)\nprint(f'alpha: {line.get_inclination():.2f}')\nax.plot(*line.create_line_guide(*alpha, extend_left=50, extend_right=250),\n        color='black', linestyle='--', linewidth=2.0)\n\n# Lateral view: Set limits and remove axis.\nax.axis((xstart, xend - 100, yend, ystart))\nax.axis('off')\nfig.tight_layout()\n\n# Lateral view: Save figure.\nif args.save_figures:\n    filepath = figdir / 'qcrit_wx_lateral_view_0008500_post.png'\n    fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\n# Top view: Load PNG image from file.\nfilepath = figdir / 'qcrit_wx_top_view_0008500.png'\nwith open(filepath, 'rb') as infile:\n    img = pyplot.imread(infile)\n\n# Top view: Plot the image.\nfig, ax = pyplot.subplots(figsize=(12.0, 6.0))\nax.imshow(img)\nlims = ax.axis('scaled', adjustable='box')\nxstart, xend, yend, ystart = lims\nax.axhline(0.5 * (yend - ystart), xmin=xstart, 
xmax=xend,\n color='black', linestyle='-.', linewidth=2.0)\n\n# Top view: Compute inclination angle.\ngamma = (Point(316, 170), Point(665, 151)) # in pixels\nline = Line(*gamma)\nprint(f'gamma: {line.get_inclination():.2f}')\nax.plot(*line.create_line_guide(*gamma, extend_left=20, extend_right=20),\n color='black', linestyle='--', linewidth=2.0)\n\n# Top view: Set limits and remove axis.\nax.axis((xstart, xend - 100, yend - 50, ystart + 50))\nax.axis('off')\nfig.tight_layout()\n\n# Top view: Save figure.\nif args.save_figures:\n filepath = figdir / 'qcrit_wx_top_view_0008500_post.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\n# Display figures.\nif args.show_figures:\n pyplot.show()\n","sub_path":"runs/Re200_St0.6_AR1.27_psi120/scripts/process_qcrit_wx_snapshot.py","file_name":"process_qcrit_wx_snapshot.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"326598462","text":"def state_present(module, existing, proposed, candidate):\n commands = list()\n proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)\n existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)\n for (key, value) in proposed_commands.items():\n if (key == 'associate-vrf'):\n command = 'member vni {0} {1}'.format(module.params['vni'], key)\n if (not value):\n command = 'no {0}'.format(command)\n commands.append(command)\n elif ((key == 'peer-ip') and (value != [])):\n for peer in value:\n commands.append('{0} {1}'.format(key, peer))\n elif ((key == 'mcast-group') and (value != existing_commands.get(key))):\n commands.append('no {0}'.format(key))\n vni_command = 'member vni {0}'.format(module.params['vni'])\n if (vni_command not in commands):\n commands.append('member vni {0}'.format(module.params['vni']))\n if (value != PARAM_TO_DEFAULT_KEYMAP.get('multicast_group', 'default')):\n commands.append('{0} {1}'.format(key, value))\n elif ((key == 'ingress-replication protocol') and (value != existing_commands.get(key))):\n evalue = existing_commands.get(key)\n dvalue = PARAM_TO_DEFAULT_KEYMAP.get('ingress_replication', 'default')\n if (value != dvalue):\n if (evalue != dvalue):\n commands.append('no {0} {1}'.format(key, evalue))\n commands.append('{0} {1}'.format(key, value))\n else:\n commands.append('no {0} {1}'.format(key, evalue))\n elif (value is True):\n commands.append(key)\n elif (value is False):\n commands.append('no {0}'.format(key))\n elif ((value == 'default') or (value == [])):\n if existing_commands.get(key):\n existing_value = existing_commands.get(key)\n if (key == 'peer-ip'):\n for peer in existing_value:\n commands.append('no {0} {1}'.format(key, peer))\n else:\n commands.append('no {0} {1}'.format(key, existing_value))\n elif (key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS):\n commands.append('no {0}'.format(key.lower()))\n else:\n command = '{0} {1}'.format(key, value.lower())\n commands.append(command)\n if commands:\n vni_command = 'member vni {0}'.format(module.params['vni'])\n ingress_replications_command = 'ingress-replication protocol static'\n ingress_replicationb_command = 'ingress-replication protocol bgp'\n ingress_replicationns_command = 'no ingress-replication protocol static'\n ingress_replicationnb_command = 'no ingress-replication protocol bgp'\n interface_command = 'interface {0}'.format(module.params['interface'])\n if any(((c in commands) for c in (ingress_replications_command, ingress_replicationb_command, ingress_replicationnb_command, 
ingress_replicationns_command))):\n static_level_cmds = [cmd for cmd in commands if ('peer' in cmd)]\n parents = [interface_command, vni_command]\n for cmd in commands:\n parents.append(cmd)\n candidate.add(static_level_cmds, parents=parents)\n commands = [cmd for cmd in commands if ('peer' not in cmd)]\n elif ('peer-ip' in commands[0]):\n static_level_cmds = [cmd for cmd in commands]\n parents = [interface_command, vni_command, ingress_replications_command]\n candidate.add(static_level_cmds, parents=parents)\n if (vni_command in commands):\n parents = [interface_command]\n commands.remove(vni_command)\n if (module.params['assoc_vrf'] is None):\n parents.append(vni_command)\n candidate.add(commands, parents=parents)","sub_path":"Data Set/bug-fixing-5/981c9f6a79404252390efc65410811bd7f483d9b-<state_present>-bug.py","file_name":"981c9f6a79404252390efc65410811bd7f483d9b-<state_present>-bug.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"583149801","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0001_initial'),\n ('conference', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Review',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('review_submitted', models.BooleanField(default=False, verbose_name='Iesniegta recenzija')),\n ('review_submitted_corr', models.BooleanField(default=False, verbose_name='Iesniegta atbilde par labojumiem')),\n ],\n ),\n migrations.CreateModel(\n name='ReviewPaper',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('points', models.CommaSeparatedIntegerField(max_length=200, verbose_name='Punkti')),\n ('avg_points', models.FloatField(verbose_name='Vid\\u0113jie punkti')),\n ('total', models.CharField(default=b'1', max_length=200, verbose_name='Kop\\u0113jais v\\u0113rt\\u0113jums', choices=[(b'1', b'Noteikti public\\xc4\\x93t'), (b'2', b'Iesp\\xc4\\x93jams public\\xc4\\x93t'), (b'3', b'Nepublic\\xc4\\x93t')])),\n ('recom', models.CharField(default=b'1', max_length=200, verbose_name='Rekomend\\u0101cijas', choices=[(b'1', b'Raksts neprasa labojumus/papildin\\xc4\\x81jumus'), (b'2', b'Raksts prasa s\\xc4\\xabkus labojumus/papildin\\xc4\\x81jumus'), (b'3', b'Raksts prasa pamat\\xc4\\xabgu p\\xc4\\x81rstr\\xc4\\x81d\\xc4\\x81\\xc5\\xa1anu')])),\n ('paper_decision_corr', models.NullBooleanField(default=None, verbose_name='Recenzenta l\\u0113mums par raksta atbilst\\u012bbu p\\u0113c labojumiem')),\n ],\n ),\n migrations.CreateModel(\n name='ReviewPaperComments',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comments_paper_reviewer', models.TextField(max_length=2000, verbose_name='Koment\\u0101ri/ieteikumi autoram raksta uzlabo\\u0161anai')),\n ('comments_paper_author', models.TextField(max_length=2000, verbose_name='Autora koment\\u0101ri par rakstu')),\n ('review', models.ForeignKey(blank=True, to='review.ReviewPaper', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='ReviewTheses',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('theses_rating', models.NullBooleanField(verbose_name='T\\u0113\\u017eu kvalit\\u0101te')),\n 
('theses_decision_corr', models.NullBooleanField(default=None, verbose_name='Recenzenta l\\u0113mums par t\\u0113\\u017eu atbilst\\u012bbu p\\u0113c labojumiem')),\n ],\n ),\n migrations.CreateModel(\n name='ReviewThesesComments',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comments_theses_reviewer', models.TextField(max_length=2000, verbose_name='Koment\\u0101ri par t\\u0113\\u017eu kvalit\\u0101ti (ja t\\u0113zes ir iesniegtas) un ieteikumi to labojumiem')),\n ('comments_theses_author', models.TextField(max_length=2000, verbose_name='Autora koment\\u0101ri par t\\u0113z\\u0113m')),\n ('review', models.ForeignKey(blank=True, to='review.ReviewTheses', null=True)),\n ],\n ),\n migrations.AddField(\n model_name='review',\n name='review_paper',\n field=models.OneToOneField(null=True, blank=True, to='review.ReviewPaper'),\n ),\n migrations.AddField(\n model_name='review',\n name='review_theses',\n field=models.OneToOneField(null=True, blank=True, to='review.ReviewTheses'),\n ),\n migrations.AddField(\n model_name='review',\n name='reviewer',\n field=models.ForeignKey(to='account.ReviewerProfile'),\n ),\n migrations.AddField(\n model_name='review',\n name='submission',\n field=models.ForeignKey(to='conference.Submission'),\n ),\n ]\n","sub_path":"conference_site/apps/review/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"113031512","text":"#!/usr/bin/env python3\nfrom sys import argv\n\ndef int_check (argument):\n print(\n -15 < argument <= 12 or\n 14 < argument < 17 or\n 19 <= argument\n )\n\nif __name__ == '__main__':\n if len(argv) > 1:\n try:\n int_check(int(argv[1]))\n except ValueError as e:\n print(\"Error! couldn't convert argument to int\\n\",e)\n else:\n print('Error! 
script has no argument.')\n","sub_path":"python/module2/hw1_1_number_in.py","file_name":"hw1_1_number_in.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"466037455","text":"import json\nimport csv\nimport numpy as np\nimport cv2\n# import get_pose\n\n\ndef apply_transformation(trans, rot_axis_ang, point=[0,0,1]):\n\n    t = np.array(trans).reshape(-1, 1)\n    point = np.array(point + [1])\n    rot_mat, jac = cv2.Rodrigues(np.array(rot_axis_ang))\n\n\n    mat = np.append(rot_mat, t, axis=1)\n    mat = np.append(mat, np.array([[0,0,0,1]]), axis=0)\n\n    result = mat @ point\n    new_pt = result[:-1]\n\n    return new_pt\n# measure in meters\n\n\ndef get_translations(folder):\n    \"\"\"\n\n    :param folder:\n    :param rot:\n    :return: dictionary of image name : [translations(x,y,z), rotations]\n            where rotations = None\n            OR rotations = [a, b, c]\n            OR rotations = [a, b, c, d, e, f]\n    \"\"\"\n    tr = {}\n    input_file=open(folder + '/reconstruction.json', 'r')\n    json_decode=json.load(input_file)\n    for layer in json_decode:\n        images = layer[\"shots\"]\n        for img, vals in images.items():\n            location = vals[\"translation\"]\n            rotation = vals[\"rotation\"]\n            tr[folder + \"/images/\" + img] = np.array([location, rotation])\n    return tr\n\ndef get_prediction(f):\n    input_file = open(f + '/predicted_translations.json', 'r')\n    json_decode = json.load(input_file)\n    trans = []\n    rot = []\n    imgs = []\n    for img, vals in json_decode.items():\n        t = vals[\"translation\"]\n        r = vals[\"rot_matrix\"]\n        trans.append(t)\n        rot.append(r)\n        imgs.append(img)\n    rot = np.array(rot)\n    trans = np.array(trans)\n    imgs = np.array(imgs)\n\n    # get the relative trans and rotation\n    rel_trans = np.diff(trans, axis=0)\n    rel_trans = np.array([np.matmul(rot[i], rel_trans[i].T).T for i in range(rel_trans.shape[0])])\n    rel_rot = np.array([np.matmul(rot[i + 1], rot[i].T) for i in range(rot.shape[0] - 1)])\n\n\n\n    return imgs, rel_trans, rel_rot\n\ndef get_relative(f):\n    input_file = open(f + '/relative_translations.json', 'r')\n    json_decode = json.load(input_file)\n    trans = []\n    rot = []\n    imgs = []\n    for img, vals in json_decode.items():\n        t = vals[\"translation\"]\n        r = vals[\"rot_matrix\"]\n        trans.append(t)\n        rot.append(r)\n        imgs.append(f + \"/images/\" + img)\n    rot = np.array(rot)\n    trans = np.array(trans)\n    imgs = np.array(imgs)\n\n\n    # THIS DOES NOT WORK ON PABTI BC OPENSFM IS NOT INSTALLED\n    # imgs, trans, rot = get_pose.get_pose(f)\n\n    # get the relative trans and rotation\n    rel_trans = np.diff(trans, axis=0)\n    rel_trans = np.array([np.matmul(rot[i], rel_trans[i].T).T for i in range(rel_trans.shape[0])])\n    rel_rot = np.array([np.matmul(rot[i + 1], rot[i].T) for i in range(rot.shape[0] - 1)])\n\n\n    # old translations.\n    data_file = f + '/reconstruction.json'\n    trans = []\n\n    with open(data_file, 'r') as f:\n        data = json.load(f)\n\n    for frame in data[0]['shots'].keys():\n        trans.append(data[0]['shots'][frame]['translation'])\n\n    trans = np.array(trans)\n    old_trans = np.diff(trans, axis=0)\n\n    return imgs, rel_trans, rel_rot\n\nif __name__ == '__main__':\n    import argparse\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('dir')\n\n    args = parser.parse_args()\n\n    # write_trajectory(args.dir)\n","sub_path":"utils/jsonreader.py","file_name":"jsonreader.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"275830834","text":"from pyglet.gl import *\nimport 
ShaderLoader\nimport pyglet\nfrom ObjLoader import ObjLoader\nfrom pyrr import Vector3, matrix44, Matrix44\nimport time\nimport numpy\n\n\nclass Monkey:\n    def __init__(self):\n\n        mesh = ObjLoader()\n        mesh.load_model(\"../models/monkey.obj\")\n\n        num_verts = len(mesh.model_vertices) // 3\n\n        self.verts = pyglet.graphics.vertex_list(num_verts, ('v3f', mesh.model_vertices),\n                                                 ('t2f', mesh.model_textures),\n                                                 ('n3f', mesh.model_normals))\n\n        shader = ShaderLoader.compile_shader(\"shaders/video_12_vert.glsl\", \"shaders/video_12_frag.glsl\")\n\n        glUseProgram(shader)\n\n        # vertices\n        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, self.verts.vertices)\n        glEnableVertexAttribArray(0)\n        # textures\n        glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, self.verts.tex_coords)\n        glEnableVertexAttribArray(1)\n        # normals\n        glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, self.verts.normals)\n        glEnableVertexAttribArray(2)\n\n        projection = matrix44.create_perspective_projection_matrix(45.0, 1280 / 720, 0.1, 100.0).flatten().astype(\"float32\")\n        view = matrix44.create_from_translation(Vector3([0.0, 0.0, -2.0])).flatten().astype(\"float32\")\n        model = matrix44.create_from_translation(Vector3([0.0, 0.0, -1.0])).flatten().astype(\"float32\")\n\n        c_projection = numpy.ctypeslib.as_ctypes(projection)\n        c_view = numpy.ctypeslib.as_ctypes(view)\n        c_model = numpy.ctypeslib.as_ctypes(model)\n\n        view_loc = glGetUniformLocation(shader, b\"view\")\n        proj_loc = glGetUniformLocation(shader, b\"projection\")\n        model_loc = glGetUniformLocation(shader, b\"model\")\n        self.rotate_loc = glGetUniformLocation(shader, b'rotate')\n        self.light_loc = glGetUniformLocation(shader, b\"light\")\n\n        glUniformMatrix4fv(view_loc, 1, GL_FALSE, c_view)\n        glUniformMatrix4fv(proj_loc, 1, GL_FALSE, c_projection)\n        glUniformMatrix4fv(model_loc, 1, GL_FALSE, c_model)\n\n        # texture settings and loading\n        texture = GLuint(0)\n        glGenTextures(1, texture)\n        glBindTexture(GL_TEXTURE_2D, texture)\n        # set the texture wrapping\n        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n        # set the texture filtering\n        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n\n        xmas = pyglet.image.load('../models/monkey.jpg')\n        image_data = xmas.get_data('RGB', xmas.pitch)\n        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, xmas.width, xmas.height, 0, GL_RGB, GL_UNSIGNED_BYTE, image_data)\n\n    def rotate(self):\n        ct = time.perf_counter()\n        rot_y = Matrix44.from_y_rotation(ct).flatten().astype(\"float32\")\n        c_rotate = numpy.ctypeslib.as_ctypes(rot_y)\n\n        glUniformMatrix4fv(self.rotate_loc, 1, GL_FALSE, c_rotate)\n        glUniformMatrix4fv(self.light_loc, 1, GL_FALSE, c_rotate)\n\n\nclass MyWindow(pyglet.window.Window):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.set_minimum_size(400, 300)\n        glClearColor(0.2, 0.3, 0.2, 1.0)\n        glEnable(GL_DEPTH_TEST)\n\n        self.monkey = Monkey()\n\n    def on_draw(self):\n        self.clear()\n        self.monkey.rotate()\n        self.monkey.verts.draw(GL_TRIANGLES)\n\n    def on_resize(self, width, height):\n        glViewport(0, 0, width, height)\n\n    def update(self, dt):\n        pass\n\nif __name__ == \"__main__\":\n    window = MyWindow(1280, 720, \"My Pyglet Window\", resizable=True)\n    pyglet.clock.schedule_interval(window.update, 1/60.0)\n    
pyglet.app.run()\n","sub_path":"Video_12_3D_model_revisited/video_12_model_loading_rev.py","file_name":"video_12_model_loading_rev.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"88317523","text":"# -*- coding: utf-8 -*-\n__author__ = 'SlovEnt'\n__date__ = '2019/5/4 20:08'\n\n\nimport time\nimport os\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# URL of the novel to download\n# NOVEL_URL = 'http://www.dzwx.org/7_7435/'\nNOVEL_URL = 'https://www.fuguoduxs.com/4_4128/'\n\nCHAPTER_POST = 66\n\n# merged request headers (the 'Host' entry pointed at an old target site and is kept commented out)\nheaders = {\n    'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',\n    # 'Host' : '9453hot.com',\n    'Referer' : 'https://www.fuguoduxs.com/',\n    'Upgrade-Insecure-Requests' : '1'\n}\n# headers = { 'Cookie' : '{0}'.format(\"djsk_2132_saltkey=W5zSRrzA; djsk_2132_lastvisit=1525257765; UM_distinctid=16320aa25276de-054e64fd4be03b-3f63440c-140000-16320aa252880e; djsk_2132_visitedfid=46D58; visid_incap_840311=1IRaRsdLRX6s8/Svt4FqZzSk6VoAAAAAQUIPAAAAAACLbrjvf/mhLSbIbAbMRXtJ; incap_ses_432_840311=R/UWSsCf0TF07B+95Mb+Bfyv6VoAAAAABZ+7li8gWgY8liDu9KuxEw==; djsk_2132_st_t=0%7C1525266655%7C6ee60796e3f603e08ce5f538e4e897d2; djsk_2132_forum_lastvisit=D_58_1525261404D_46_1525266655; djsk_2132_st_p=0%7C1525267310%7Ccea5be0db24a3396de6751b700dade9d; djsk_2132_viewid=tid_236869; djsk_2132_sendmail=1; CNZZDATA1257937871=166026316-1525256282-http%253A%252F%252F9453hot.com%252F%7C1525267082; __tins__19465647=%7B%22sid%22%3A%201525266427309%2C%20%22vd%22%3A%2018%2C%20%22expires%22%3A%201525269111175%7D; __51cke__=; __51laig__=28; djsk_2132_sid=UndopY; djsk_2132_lastact=1525267373%09forum.php%09ajax\")}\n\n\nhtml = requests.get(url=NOVEL_URL, headers=headers)\nhtml = html.content.decode('gbk', 'ignore')\n# print(html)\n\nsoup = BeautifulSoup(html, 'html.parser')\n\ntxtFileName = soup.find_all(name=\"div\", attrs={\"id\": \"info\"})[0].h1.text\n\nallTxtFileName = \"{0}.txt\".format(txtFileName)\nallTxtFileName = allTxtFileName.replace(\"/\", \"、\")\nallTxtFileNamePath = r\"\\\\SE-NAS\\Public\\Temp\\{0}\".format(allTxtFileName)\n\nif CHAPTER_POST == 1:\n    if (os.path.exists(allTxtFileNamePath)):\n        os.remove(allTxtFileNamePath)\n\nprint(\"文件下载路径:{0}\".format(allTxtFileNamePath))\n\nchapterList = soup.find_all(name=\"dd\")\nn=0\nfor chapterInfo in chapterList:\n\n    # time.sleep(2)\n\n    n += 1\n    print(n, chapterInfo)\n\n    if n < CHAPTER_POST:\n        continue\n\n    chapterName = \"{0} - {1}\".format('%03d' % n, chapterInfo.find_all(name=\"a\")[0].text)\n    chapterUrl = \"{0}{1}\".format(\"https://www.fuguoduxs.com\", chapterInfo.find_all(name=\"a\")[0]['href'])\n    print(chapterName, chapterUrl)\n\n    chapterHtml = requests.get(url=chapterUrl, headers=headers)\n    chapterHtml = chapterHtml.content.decode('gbk', 'ignore')\n    # print(chapterHtml)\n\n    chapterSoup = BeautifulSoup(chapterHtml, 'html.parser')\n\n    textOne = chapterSoup.find_all(name=\"div\", attrs={\"id\": \"content\"})[0].text\n    textOne = textOne.replace(\"\\n\\r\", \"\\n\")\n    textOne = textOne.replace(\"    \", \"\")\n\n\n\n    textTwo = \"\"\n\n    print(textOne)\n\n    with open(allTxtFileNamePath, 'a', encoding='utf-8') as f:\n\n        f.write(\"\\n\\n\")\n        # f.write(\"{0}\\n\\n\".format(chapterName))\n        
f.write(textOne)\n\n\n\n\n\n\n\n\n\n","sub_path":"Get_BaiShuLou_Txt.py","file_name":"Get_BaiShuLou_Txt.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"525109209","text":"\"\"\"\nProgram to find the NA ions in the ions.gro file (created after running the genion module).\nTo save running time, place the ions close to the DNA.\nThe DNA lies in the region 13-16 along x,y; along z it runs from -2 to 16.\n\"\"\"\nimport re\nimport numpy as np\nx=np.random.uniform(10,18,129)\ny=np.random.uniform(10,18,129)\nz=np.random.uniform(-2,16,129)\narr=np.zeros((129,3))\n\nindex=0\nfor idx, idy,idz in zip(x,y,z):\n    while(idx>13 and idx< 16):\n        idx=np.random.uniform(10,18)\n    arr[index,0]= idx\n    while(idy>13 and idy< 16):\n        idy=np.random.uniform(10,18)\n    arr[index,1]= idy\n    arr[index,2]= idz\n    index=index+1\n\nindex=0\n#read file\nwith open('ions.gro') as f:\n    content=f.read().splitlines()\n    for line in content:\n        #print line[5:8]\n        if \"NA\" in line:\n            #print line\n            var=re.split(r'\\s+',line)\n            #print var\n            var[3]=arr[index,0]\n            var[4]=arr[index,1]\n            var[5]=arr[index,2]\n            index +=1\n            print(\"%s %s %3d %5.3f %5.3f %5.3f\" % (var[0],var[1],int(var[2]),float(var[3]), float(var[4]),float(var[5]) ))","sub_path":"psd_packDNA.py","file_name":"psd_packDNA.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184655606","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main python script for calculating OSS Criticality Score.\"\"\"\n\nimport argparse\nimport csv\nimport datetime\nimport json\nimport logging\nimport math\nimport os\nimport sys\nimport threading\nimport time\nimport urllib\n\nimport github\nimport gitlab\nimport requests\n\nfrom .constants import * # pylint: disable=wildcard-import\n\nlogger = logging.getLogger()\n\n_CACHED_GITHUB_TOKEN = None\n_CACHED_GITHUB_TOKEN_OBJ = None\n\nPARAMS = [\n    'description', 'created_since', 'updated_since', 'contributor_count', 'watchers_count', 'org_count',\n    'commit_frequency', 'recent_releases_count', 'updated_issues_count',\n    'closed_issues_count', 'comment_frequency', 'dependents_count'\n]\n\n\nclass Repository:\n    \"\"\"General source repository.\"\"\"\n    def __init__(self, repo):\n        self._repo = repo\n        self._last_commit = None\n        self._created_since = None\n\n    @property\n    def name(self):\n        raise NotImplementedError\n\n    @property\n    def url(self):\n        raise NotImplementedError\n\n    @property\n    def language(self):\n        raise NotImplementedError\n\n    @property\n    def description(self):\n        raise NotImplementedError\n\n    @property\n    def last_commit(self):\n        raise NotImplementedError\n\n    @property\n    def created_since(self):\n        raise NotImplementedError\n\n    @property\n    def updated_since(self):\n        raise NotImplementedError\n\n    @property\n    def contributor_count(self):\n        raise NotImplementedError\n\n    @property\n    def watchers_count(self):\n        raise 
NotImplementedError\n\n @property\n def org_count(self):\n raise NotImplementedError\n\n @property\n def commit_frequency(self):\n raise NotImplementedError\n\n @property\n def recent_releases_count(self):\n raise NotImplementedError\n\n @property\n def updated_issues_count(self):\n raise NotImplementedError\n\n @property\n def closed_issues_count(self):\n raise NotImplementedError\n\n @property\n def comment_frequency(self):\n raise NotImplementedError\n\n def _request_url_with_auth_headers(self, url):\n headers = {}\n if 'github.com' in url and _CACHED_GITHUB_TOKEN:\n headers = {'Authorization': f'token {_CACHED_GITHUB_TOKEN}'}\n\n return requests.get(url, headers=headers)\n\n @property\n def dependents_count(self):\n # TODO: Take package manager dependency trees into account. If we decide\n # to replace this, then find a solution for C/C++ as well.\n match = None\n parsed_url = urllib.parse.urlparse(self.url)\n repo_name = parsed_url.path.strip('/')\n dependents_url = (\n f'https://github.com/search?q=\"{repo_name}\"&type=commits')\n for i in range(FAIL_RETRIES):\n result = self._request_url_with_auth_headers(dependents_url)\n if result.status_code == 200:\n match = DEPENDENTS_REGEX.match(result.content)\n break\n time.sleep(2**i)\n if not match:\n return 0\n return int(match.group(1).replace(b',', b''))\n\n\nclass GitHubRepository(Repository):\n \"\"\"Source repository hosted on GitHub.\"\"\"\n # General metadata attributes.\n @property\n def name(self):\n return self._repo.name\n\n @property\n def url(self):\n return self._repo.html_url\n\n @property\n def language(self):\n return self._repo.language\n\n @property\n def description(self):\n return self._repo.description\n\n @property\n def last_commit(self):\n if self._last_commit:\n return self._last_commit\n try:\n self._last_commit = self._repo.get_commits()[0]\n except Exception:\n pass\n return self._last_commit\n\n def get_first_commit_time(self):\n def _parse_links(response):\n link_string = response.headers.get('Link')\n if not link_string:\n return None\n\n links = {}\n for part in link_string.split(','):\n match = re.match(r'<(.*)>; rel=\"(.*)\"', part.strip())\n if match:\n links[match.group(2)] = match.group(1)\n return links\n\n for i in range(FAIL_RETRIES):\n result = self._request_url_with_auth_headers(\n f'{self._repo.url}/commits')\n links = _parse_links(result)\n if links and links.get('last'):\n result = self._request_url_with_auth_headers(links['last'])\n if result.status_code == 200:\n commits = json.loads(result.content)\n if commits:\n last_commit_time_string = (\n commits[-1]['commit']['committer']['date'])\n return datetime.datetime.strptime(last_commit_time_string,\n \"%Y-%m-%dT%H:%M:%SZ\")\n time.sleep(2**i)\n\n return None\n\n # Criteria important for ranking.\n @property\n def created_since(self):\n if self._created_since:\n return self._created_since\n\n creation_time = self._repo.created_at\n\n # See if there are exist any commits before this repository creation\n # time on GitHub. If yes, then the repository creation time is not\n # correct, and it was residing somewhere else before. 
So, use the first\n # commit date.\n if self._repo.get_commits(until=creation_time).totalCount:\n first_commit_time = self.get_first_commit_time()\n if first_commit_time:\n creation_time = min(creation_time, first_commit_time)\n\n difference = datetime.datetime.utcnow() - creation_time\n self._created_since = round(difference.days / 30)\n return self._created_since\n\n @property\n def updated_since(self):\n last_commit_time = self.last_commit.commit.author.date\n difference = datetime.datetime.utcnow() - last_commit_time\n return round(difference.days / 30)\n\n @property\n def contributor_count(self):\n try:\n return self._repo.get_contributors(anon='true').totalCount\n except Exception:\n # Very large number of contributors, i.e. 5000+. Cap at 5,000.\n return 5000\n\n @property\n def watchers_count(self):\n return self._repo.watchers_count\n\n @property\n def org_count(self):\n def _filter_name(org_name):\n return org_name.lower().replace('inc.', '').replace(\n 'llc', '').replace('@', '').replace(' ', '').rstrip(',')\n\n orgs = set()\n contributors = self._repo.get_contributors()[:TOP_CONTRIBUTOR_COUNT]\n try:\n for contributor in contributors:\n if contributor.company:\n orgs.add(_filter_name(contributor.company))\n except Exception:\n # Very large number of contributors, i.e. 5000+. Cap at 10.\n return 10\n return len(orgs)\n\n @property\n def commit_frequency(self):\n total = 0\n for week_stat in self._repo.get_stats_commit_activity():\n total += week_stat.total\n return round(total / 52, 1)\n\n @property\n def recent_releases_count(self):\n total = 0\n for release in self._repo.get_releases():\n if (datetime.datetime.utcnow() -\n release.created_at).days > RELEASE_LOOKBACK_DAYS:\n continue\n total += 1\n if not total:\n # Make rough estimation of tags used in last year from overall\n # project history. This query is extremely expensive, so instead\n # do the rough calculation.\n days_since_creation = self.created_since * 30\n if not days_since_creation:\n return 0\n total_tags = 0\n try:\n total_tags = self._repo.get_tags().totalCount\n except Exception:\n # Very large number of tags, i.e. 5000+. 
Cap at 26.\n                logger.error(f'get_tags is failed: {self._repo.url}')\n                return RECENT_RELEASES_THRESHOLD\n            total = round(\n                (total_tags / days_since_creation) * RELEASE_LOOKBACK_DAYS)\n        return total\n\n    @property\n    def updated_issues_count(self):\n        issues_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n            days=ISSUE_LOOKBACK_DAYS)\n        return self._repo.get_issues(state='all',\n                                     since=issues_since_time).totalCount\n\n    @property\n    def closed_issues_count(self):\n        issues_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n            days=ISSUE_LOOKBACK_DAYS)\n        return self._repo.get_issues(state='closed',\n                                     since=issues_since_time).totalCount\n\n    @property\n    def comment_frequency(self):\n        issues_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n            days=ISSUE_LOOKBACK_DAYS)\n        issue_count = self._repo.get_issues(state='all',\n                                            since=issues_since_time).totalCount\n        if not issue_count:\n            return 0\n        comment_count = self._repo.get_issues_comments(\n            since=issues_since_time).totalCount\n        return round(comment_count / issue_count, 1)\n\n\nclass GitLabRepository(Repository):\n    \"\"\"Source repository hosted on GitLab.\"\"\"\n    @staticmethod\n    def _date_from_string(date_string):\n        return datetime.datetime.strptime(date_string,\n                                          \"%Y-%m-%dT%H:%M:%S.%f%z\")\n\n    @property\n    def name(self):\n        return self._repo.name\n\n    @property\n    def url(self):\n        return self._repo.web_url\n\n    @property\n    def language(self):\n        languages = self._repo.languages()\n        return (max(languages, key=languages.get)).lower()\n\n    @property\n    def last_commit(self):\n        if self._last_commit:\n            return self._last_commit\n        self._last_commit = next(iter(self._repo.commits.list()), None)\n        return self._last_commit\n\n    @property\n    def created_since(self):\n        creation_time = self._date_from_string(self._repo.created_at)\n        commit = None\n        for commit in self._repo.commits.list(until=creation_time,\n                                              as_list=False):\n            pass\n        if commit:\n            creation_time = self._date_from_string(commit.created_at)\n        difference = datetime.datetime.now(\n            datetime.timezone.utc) - creation_time\n        return round(difference.days / 30)\n\n    @property\n    def updated_since(self):\n        difference = datetime.datetime.now(\n            datetime.timezone.utc) - self._date_from_string(\n                self.last_commit.created_at)\n        return round(difference.days / 30)\n\n    @property\n    def contributor_count(self):\n        return len(self._repo.repository_contributors(all=True))\n\n    @property\n    def org_count(self):\n        # Not possible to calculate as this feature restricted to admins only.\n        # https://docs.gitlab.com/ee/api/users.html#user-memberships-admin-only\n        return 1\n\n    @property\n    def commit_frequency(self):\n        commits_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n            days=365)\n        iterator = self._repo.commits.list(since=commits_since_time,\n                                           as_list=False)\n        commits_count = sum(1 for _ in iterator)\n        return round(commits_count / 52, 1)\n\n    @property\n    def recent_releases_count(self):\n        count = 0\n        for release in self._repo.releases.list():\n            release_time = self._date_from_string(release.released_at)\n            if (datetime.datetime.now(datetime.timezone.utc) -\n                    release_time).days > RELEASE_LOOKBACK_DAYS:\n                break\n            count += 1\n        if not count:\n            for tag in self._repo.tags.list():\n                tag_time = self._date_from_string(tag.commit['created_at'])\n                if (datetime.datetime.now(datetime.timezone.utc) -\n                        tag_time).days > RELEASE_LOOKBACK_DAYS:\n                    break\n                count += 1\n        return count\n\n    @property\n    def updated_issues_count(self):\n        issues_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n            
days=ISSUE_LOOKBACK_DAYS)\n return self._repo.issuesstatistics.get(\n updated_after=issues_since_time).statistics['counts']['all']\n\n @property\n def closed_issues_count(self):\n issues_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n days=ISSUE_LOOKBACK_DAYS)\n return self._repo.issuesstatistics.get(\n updated_after=issues_since_time).statistics['counts']['closed']\n\n @property\n def comment_frequency(self):\n comments_count = 0\n issues_since_time = datetime.datetime.utcnow() - datetime.timedelta(\n days=ISSUE_LOOKBACK_DAYS)\n for issue in self._repo.issues.list(updated_after=issues_since_time,\n as_list=False):\n try:\n comments_count += issue.notes.list(as_list=False).total\n except Exception:\n pass\n return round(comments_count / self.updated_issues_count, 1)\n\n\ndef get_param_score(param, max_value, weight=1):\n \"\"\"Return paramater score given its current value, max value and\n parameter weight.\"\"\"\n return (math.log(1 + param) / math.log(1 + max(param, max_value))) * weight\n\n\ndef get_repository_stats(repo, additional_params=None):\n \"\"\"Return repository stats, including criticality score.\"\"\"\n # Validate and compute additional params first.\n if not repo.last_commit:\n logger.error(f'Repo is empty: {repo.url}')\n return None\n if additional_params is None:\n additional_params = []\n additional_params_total_weight = 0\n additional_params_score = 0\n for additional_param in additional_params:\n try:\n value, weight, max_threshold = [\n int(i) for i in additional_param.split(':')\n ]\n except ValueError:\n logger.error('Parameter value in bad format: ' + additional_param)\n sys.exit(1)\n additional_params_total_weight += weight\n additional_params_score += get_param_score(value, max_threshold,\n weight)\n\n def _worker(repo, param, return_dict):\n \"\"\"worker function\"\"\"\n return_dict[param] = getattr(repo, param)\n\n threads = []\n return_dict = {}\n for param in PARAMS:\n thread = threading.Thread(target=_worker,\n args=(repo, param, return_dict))\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n\n # Guarantee insertion order.\n result_dict = {\n 'name': repo.name,\n 'url': repo.url,\n 'language': repo.language,\n }\n for param in PARAMS:\n result_dict[param] = return_dict[param]\n\n total_weight = (CREATED_SINCE_WEIGHT + UPDATED_SINCE_WEIGHT +\n CONTRIBUTOR_COUNT_WEIGHT + ORG_COUNT_WEIGHT +\n COMMIT_FREQUENCY_WEIGHT + RECENT_RELEASES_WEIGHT +\n CLOSED_ISSUES_WEIGHT + UPDATED_ISSUES_WEIGHT +\n COMMENT_FREQUENCY_WEIGHT + DEPENDENTS_COUNT_WEIGHT +\n additional_params_total_weight)\n\n criticality_score = round(\n ((get_param_score(result_dict['created_since'],\n CREATED_SINCE_THRESHOLD, CREATED_SINCE_WEIGHT)) +\n (get_param_score(result_dict['updated_since'],\n UPDATED_SINCE_THRESHOLD, UPDATED_SINCE_WEIGHT)) +\n (get_param_score(result_dict['contributor_count'],\n CONTRIBUTOR_COUNT_THRESHOLD,\n CONTRIBUTOR_COUNT_WEIGHT)) +\n (get_param_score(result_dict['org_count'], ORG_COUNT_THRESHOLD,\n ORG_COUNT_WEIGHT)) +\n (get_param_score(result_dict['commit_frequency'],\n COMMIT_FREQUENCY_THRESHOLD,\n COMMIT_FREQUENCY_WEIGHT)) +\n (get_param_score(result_dict['recent_releases_count'],\n RECENT_RELEASES_THRESHOLD, RECENT_RELEASES_WEIGHT)) +\n (get_param_score(result_dict['closed_issues_count'],\n CLOSED_ISSUES_THRESHOLD, CLOSED_ISSUES_WEIGHT)) +\n (get_param_score(result_dict['updated_issues_count'],\n UPDATED_ISSUES_THRESHOLD, UPDATED_ISSUES_WEIGHT)) +\n (get_param_score(\n result_dict['comment_frequency'], 
COMMENT_FREQUENCY_THRESHOLD,\n COMMENT_FREQUENCY_WEIGHT)) + (get_param_score(\n result_dict['dependents_count'], DEPENDENTS_COUNT_THRESHOLD,\n DEPENDENTS_COUNT_WEIGHT)) + additional_params_score) /\n total_weight, 5)\n\n # Make sure score between 0 (least-critical) and 1 (most-critical).\n criticality_score = max(min(criticality_score, 1), 0)\n\n result_dict['criticality_score'] = criticality_score\n return result_dict\n\n\ndef get_github_token_info(token_obj):\n \"\"\"Return expiry information given a github token.\"\"\"\n rate_limit = token_obj.get_rate_limit()\n near_expiry = rate_limit.core.remaining < 50\n wait_time = (rate_limit.core.reset - datetime.datetime.utcnow()).seconds\n return near_expiry, wait_time\n\n\ndef get_github_auth_token():\n \"\"\"Return an un-expired github token if possible from a list of tokens.\"\"\"\n global _CACHED_GITHUB_TOKEN\n global _CACHED_GITHUB_TOKEN_OBJ\n if _CACHED_GITHUB_TOKEN_OBJ:\n near_expiry, _ = get_github_token_info(_CACHED_GITHUB_TOKEN_OBJ)\n if not near_expiry:\n return _CACHED_GITHUB_TOKEN_OBJ\n\n github_auth_token = os.getenv('GITHUB_AUTH_TOKEN')\n assert github_auth_token, 'GITHUB_AUTH_TOKEN needs to be set.'\n tokens = github_auth_token.split(',')\n\n min_wait_time = None\n token_obj = None\n for token in tokens:\n token_obj = github.Github(token)\n near_expiry, wait_time = get_github_token_info(token_obj)\n if not min_wait_time or wait_time < min_wait_time:\n min_wait_time = wait_time\n if not near_expiry:\n _CACHED_GITHUB_TOKEN = token\n _CACHED_GITHUB_TOKEN_OBJ = token_obj\n return token_obj\n\n logger.warning(\n f'Rate limit exceeded, sleeping till reset: {round(min_wait_time / 60, 1)} minutes.'\n )\n time.sleep(min_wait_time)\n return token_obj\n\n\ndef get_gitlab_auth_token(host):\n \"\"\"Return a gitlab token object.\"\"\"\n gitlab_auth_token = os.getenv('GITLAB_AUTH_TOKEN')\n try:\n token_obj = gitlab.Gitlab(host, gitlab_auth_token)\n token_obj.auth()\n except gitlab.exceptions.GitlabAuthenticationError:\n logger.info(\"Auth token didn't work, trying un-authenticated. 
\"\n \"Some params like comment_frequency will not work.\")\n token_obj = gitlab.Gitlab(host)\n return token_obj\n\n\ndef get_repository(url):\n \"\"\"Return repository object, given a url.\"\"\"\n if not '://' in url:\n url = 'https://' + url\n\n parsed_url = urllib.parse.urlparse(url)\n repo_url = parsed_url.path.strip('/')\n if parsed_url.netloc.endswith('github.com'):\n repo = None\n try:\n repo = get_github_auth_token().get_repo(repo_url)\n except github.GithubException as exp:\n if exp.status == 404:\n return None\n return GitHubRepository(repo)\n if 'gitlab' in parsed_url.netloc:\n repo = None\n host = parsed_url.scheme + '://' + parsed_url.netloc\n token_obj = get_gitlab_auth_token(host)\n repo_url_encoded = urllib.parse.quote_plus(repo_url)\n try:\n repo = token_obj.projects.get(repo_url_encoded)\n except gitlab.exceptions.GitlabGetError as exp:\n if exp.response_code == 404:\n return None\n return GitLabRepository(repo)\n\n raise Exception('Unsupported url!')\n\n\ndef initialize_logging_handlers():\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('').handlers.clear()\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger('').addHandler(console)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Gives criticality score for an open source project')\n parser.add_argument(\"--repo\",\n type=str,\n required=True,\n help=\"repository url\")\n parser.add_argument(\n \"--format\",\n type=str,\n default='default',\n choices=['default', 'csv', 'json'],\n help=\"output format. allowed values are [default, csv, json]\")\n parser.add_argument(\n '--params',\n nargs='+',\n default=[],\n help='Additional parameters in form <value>:<weight>:<max_threshold>',\n required=False)\n\n initialize_logging_handlers()\n\n args = parser.parse_args()\n repo = get_repository(args.repo)\n if not repo:\n logger.error(f'Repo is not found: {args.repo}')\n return\n output = get_repository_stats(repo, args.params)\n if not output:\n return\n if args.format == 'default':\n for key, value in output.items():\n logger.info(f'{key}: {value}')\n elif args.format == 'json':\n logger.info(json.dumps(output, indent=4))\n elif args.format == 'csv':\n csv_writer = csv.writer(sys.stdout)\n csv_writer.writerow(output.keys())\n csv_writer.writerow(output.values())\n else:\n raise Exception(\n 'Wrong format argument, use one of default, csv or json!')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"criticality_score/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":22395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"383756777","text":"from django.conf import settings\nfrom django.http import JsonResponse, HttpResponse\nfrom utils.exception import ErrorType, ErrorException\nimport json\n\ndef processRequest(request, POST=[], GET=[], FILES=[]):\n\n\tdata = dict()\n\n\tfor key in POST:\n\t\tvalue = request.POST.get(key)\n\t\tif value:\n\t\t\tdata[key] = value\n\t\telse:\n\t\t\traise ErrorException(ErrorType.ParameterError)\n\n\tfor key in GET:\n\t\tvalue = request.GET.get(key)\n\t\tif value:\n\t\t\tdata[key] = value\n\t\telse:\n\t\t\traise ErrorException(ErrorType.ParameterError)\n\n\tfor key in FILES:\n\t\tvalue = request.FILES.get(key)\n\t\tif value:\n\t\t\tdata[key] = value\n\t\telse:\n\t\t\traise ErrorException(ErrorType.ParameterError)\n\n\t#print (data)\n\n\treturn data\n\ndef convertRequestDataType(data, keys, type='str'):\n\tfor key in keys:\n\t\ttry:\n\t\t\tif type == 
'int':\n\t\t\t\tdata[key] = int(data[key])\n\n\t\t\telif type == 'int[]':\n\t\t\t\tdata[key] = json.loads(data[key])\n\t\t\t\tfor i in range(len(data[key])):\n\t\t\t\t\tdata[key][i] = int(data[key][i])\n\t\t\t\n\t\t\telif type == 'save':\n\t\t\t\tpass\n\t\t\t\t# json.loads(data[key])\n\t\t\t\t# check whether this is a save-file type\n\t\t\t# checks for other data types\n\n\t\texcept:\n\t\t\traise ErrorException(ErrorType.ParameterError)\n\ndef convertRequestDataTypeAll(data, type='str'):\n\tconvertRequestDataType(data, data, type)\n\ndef getSuccessResponse(dict={}):\n\t#if 'data' in dict:\n\tprocessData(dict) # json.dumps(dict['data'],ensure_ascii=False)\n\t\n\tdict['status'] = ErrorType.Success.value\n\n\t#print (dict)\n\n\tif settings.HTML_TEST:\n\t\t# test code\n\t\tresponse = JsonResponse(dict)\n\t\tresponse[\"X-Frame-Options\"] = ''\n\n\t\treturn response\n\telse:\n\t\treturn JsonResponse(dict)\n\ndef processData(data):\n\tif type(data) == list:\n\t\tfor i in range(len(data)):\n\t\t\tprocessData(data[i])\n\t\t\tif type(data[i]) == list:\n\t\t\t\tdata[i] = {'list': data[i]}\n\telif type(data) == dict:\n\t\tfor key in data:\n\t\t\tprocessData(data[key])\n\t\t\tif type(data[key]) == list:\n\t\t\t\tdata[key] = {'list': data[key]}\n\ndef getErrorResponse(exception: ErrorException):\n\tdict = {\n\t\t'status': exception.error_type.value,\n\t\t'errmsg': str(exception)\n\t}\n\n\tif settings.HTML_TEST:\n\t\t# test code\n\t\tresponse = JsonResponse(dict)\n\t\tresponse[\"X-Frame-Options\"] = ''\n\n\t\treturn response\n\telse:\n\t\treturn JsonResponse(dict)\n\t","sub_path":"Server/OneHundredDaysServer/utils/view_func_utils.py","file_name":"view_func_utils.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"85785636","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\nurlpatterns = [\n\tpath('', views.home, name = 'home'),\n path('products', views.all, name='products'),\n path('products/new', views.create_product, name = 'create_product'),\n path('products/<str:slug>/', views.single, name='single-product'),\n path('s/', views.search, name = 'search'),\n \n]\n","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72213248","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests using pytest_resilient_circuits\"\"\"\n\nfrom unittest.mock import patch\n\nimport pytest\nfrom resilient_circuits import FunctionResult, SubmitTestFunction\nfrom resilient_circuits.util import get_function_definition\n\nPACKAGE_NAME = \"fn_darktrace\"\nFUNCTION_NAME = \"darktrace_list_similar_devices\"\n\nconfig_data = \"\"\"[fn_darktrace]\napi_key=abcd-efgh\napi_secret=1234-abcd-56789-efgh\ndarktrace_base_url=https://fake.cloud.darktrace.com\n\"\"\"\n\n# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)\nresilient_mock = \"pytest_resilient_circuits.BasicResilientMock\"\n\n\ndef call_darktrace_list_similar_devices_function(circuits, function_params, timeout=5):\n # Create the submitTestFunction event\n evt = SubmitTestFunction(\"darktrace_list_similar_devices\", function_params)\n\n # Fire a message to the function\n circuits.manager.fire(evt)\n\n # circuits will fire an \"exception\" event if an exception is raised in the FunctionComponent\n # return this exception if it is raised\n exception_event = circuits.watcher.wait(\"exception\", parent=None, timeout=timeout)\n\n if exception_event is not False:\n exception = exception_event.args[1]\n raise exception\n\n # else return the FunctionComponent's results\n else:\n event = circuits.watcher.wait(\"darktrace_list_similar_devices_result\", parent=evt, timeout=timeout)\n assert event\n assert isinstance(event.kwargs[\"result\"], FunctionResult)\n pytest.wait_for(event, \"complete\", True)\n return event.kwargs[\"result\"].value\n\n\nclass TestDarktraceListSimilarDevices:\n \"\"\" Tests for the darktrace_list_similar_devices function\"\"\"\n\n def test_function_definition(self):\n \"\"\" Test that the package provides customization_data that defines the function \"\"\"\n func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)\n assert func is not None\n\n mock_inputs_1 = {\n \"darktrace_device_count\": 2,\n \"darktrace_device_id\": \"4\"\n }\n\n expected_results_1 = {\"similar_devices\": [{\"did\": 5}, {\"did\": 6}], \"base_url\": \"https://fake.cloud.darktrace.com\"}\n\n mock_inputs_2 = {\n \"darktrace_device_count\": 1,\n \"darktrace_device_id\": \"<a href='https://fake.cloud.darktrace.com/#device/4' target='_blank'>4</a>\"\n }\n\n expected_results_2 = {\"similar_devices\": [{\"did\": 5}], \"base_url\": \"https://fake.cloud.darktrace.com\"}\n\n @pytest.mark.parametrize(\"mock_inputs, expected_results\", [\n (mock_inputs_1, expected_results_1),\n (mock_inputs_2, expected_results_2)\n ])\n def test_success(self, circuits_app, mock_inputs, expected_results):\n \"\"\" Test calling with sample values for the parameters \"\"\"\n with patch(\"fn_darktrace.components.funct_darktrace_list_similar_devices.AppCommon.get_similar_devices\") as patch_get_similar:\n patch_get_similar.return_value = expected_results.get(\"similar_devices\")\n\n results = call_darktrace_list_similar_devices_function(circuits_app, mock_inputs)\n assert(expected_results == 
results.get(\"content\"))\n patch_get_similar.assert_called_once_with(\"4\", count=mock_inputs.get(\"darktrace_device_count\"))\n","sub_path":"fn_darktrace/tests/test_funct_darktrace_list_similar_devices.py","file_name":"test_funct_darktrace_list_similar_devices.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"245228257","text":"import math\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.tfrecord as tfrec\nimport os.path\nimport tempfile\nimport numpy as np\n\ntest_data_root = os.environ['DALI_EXTRA_PATH']\n\ndef skip_second(src, dst):\n with open(src, 'r') as tmp_f:\n with open(dst, 'w') as f:\n second = False\n for l in tmp_f:\n if not second:\n f.write(l)\n second = not second\n\ndef test_tfrecord():\n class TFRecordPipeline(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, num_gpus, data, data_idx):\n super(TFRecordPipeline, self).__init__(batch_size, num_threads, device_id)\n self.input = ops.TFRecordReader(path = data,\n index_path = data_idx,\n features = {\"image/encoded\" : tfrec.FixedLenFeature((), tfrec.string, \"\"),\n \"image/class/label\": tfrec.FixedLenFeature([1], tfrec.int64, -1)\n })\n\n def define_graph(self):\n inputs = self.input(name=\"Reader\")\n images = inputs[\"image/encoded\"]\n return images\n\n tfrecord = os.path.join(test_data_root, 'db', 'tfrecord', 'train')\n tfrecord_idx_org = os.path.join(test_data_root, 'db', 'tfrecord', 'train.idx')\n tfrecord_idx = \"tfr_train.idx\"\n\n idx_files_dir = tempfile.TemporaryDirectory()\n idx_file = os.path.join(idx_files_dir.name, tfrecord_idx)\n\n skip_second(tfrecord_idx_org, idx_file)\n\n pipe = TFRecordPipeline(1, 1, 0, 1, tfrecord, idx_file)\n pipe_org = TFRecordPipeline(1, 1, 0, 1, tfrecord, tfrecord_idx_org)\n pipe.build()\n pipe_org.build()\n iters = pipe.epoch_size(\"Reader\")\n for _ in range(iters):\n out = pipe.run()\n out_ref = pipe_org.run()\n for a, b in zip(out, out_ref):\n assert np.array_equal(a.as_array(), b.as_array())\n _ = pipe_org.run()\n\ndef test_recordio():\n class MXNetReaderPipeline(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, num_gpus, data, data_idx):\n super(MXNetReaderPipeline, self).__init__(batch_size, num_threads, device_id)\n self.input = ops.MXNetReader(path = [data], index_path=[data_idx],\n shard_id = device_id, num_shards = num_gpus)\n\n def define_graph(self):\n images, _ = self.input(name=\"Reader\")\n return images\n\n recordio = os.path.join(test_data_root, 'db', 'recordio', 'train.rec')\n recordio_idx_org = os.path.join(test_data_root, 'db', 'recordio', 'train.idx')\n recordio_idx = \"rio_train.idx\"\n\n idx_files_dir = tempfile.TemporaryDirectory()\n idx_file = os.path.join(idx_files_dir.name, recordio_idx)\n\n skip_second(recordio_idx_org, idx_file)\n\n pipe = MXNetReaderPipeline(1, 1, 0, 1, recordio, idx_file)\n pipe_org = MXNetReaderPipeline(1, 1, 0, 1, recordio, recordio_idx_org)\n pipe.build()\n pipe_org.build()\n iters = pipe.epoch_size(\"Reader\")\n for _ in range(iters):\n out = pipe.run()\n out_ref = pipe_org.run()\n for a, b in zip(out, out_ref):\n assert np.array_equal(a.as_array(), b.as_array())\n _ = pipe_org.run()","sub_path":"dali/test/python/test_operator_index_reader.py","file_name":"test_operator_index_reader.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"317251225","text":"#!/usr/bin/python\n\nimport covalence\nimport json\nimport argparse\nimport sys\n\nfrom requests.exceptions import ConnectionError\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--external\", action=\"store_true\", default=None, help=\"show external nodes\")\nparser.add_argument(\"--internal\", action=\"store_true\", default=None, help=\"show internal nodes\")\nparser.add_argument(\"--nodeid\", metavar=\"GUID\", dest=\"nodeid\", default=None, help=\"operate on a specific node\")\nparser.add_argument(\"--setname\", dest=\"newname\", default=None, help=\"sets the node name, use with --nodeid\")\nparser.add_argument(\"--maxcount\", metavar=\"COUNT\", dest=\"maxCount\", default=100, type=int,\n help=\"maximum number of records to return\")\nparser.add_argument(\"--outputfile\", metavar=\"FILE\", dest=\"outputFile\", default=None, help=\"print nodes to file\")\n\nargs = parser.parse_args()\n\n\n# This part needs fixing in the future - will fix in SDK shortly\ndef denodify(node):\n if isinstance(node, covalence.node.Node):\n s = node.__str__()\n s = s.replace(\"'\", \"\\\"\").replace(\": True\", \": 1\").replace(\": False\", \": 0\").replace(\": None\",\n \": \\\"\\\"\").replace(\n \": u\\\"\", \": \\\"\")\n node = json.loads(s)\n if node['notes'] == \"\":\n node['notes'] = None\n return node\n\n\ndef print_external_to_file(outputFile=None, maxCount=100):\n \"\"\"Print external nodes to file.\"\"\"\n node_list = covalence.node.list_external(maxCount=maxCount)\n if outputFile:\n with open(outputFile, \"w\") as output:\n # Header Row\n output.write(\"%s,%s,%s,%s\\n\" % ('id', 'ipAddress', 'name', 'notes'))\n for node in node_list:\n node = denodify(node)\n output.write(\"%s,%s,%s,%s\\n\" % (node['id'], node['ipAddress'], node['name'],\n node['notes'] if node['notes'] else ''))\n\n print(\"%d records written to outputFile\" % (len(node_list)))\n\n\ndef print_internal_to_file(outputFile=None, maxCount=100):\n \"\"\"Print internal nodes to file.\"\"\"\n node_list = covalence.node.list_internal(maxCount=maxCount)\n if outputFile:\n with open(outputFile, \"w\") as output:\n # Header Row\n output.write(\"%s,%s,%s,%s\\n\" % ('id', 'macAddress', 'name', 'notes'))\n for node in node_list:\n node = denodify(node)\n output.write(\"%s,%s,%s,%s\\n\" % (node['id'], node['macAddress'], node['name'],\n node['notes'] if node['notes'] else ''))\n\n print(\"%d records written to outputFile\" % (len(node_list)))\n\n\ndef print_external(maxCount=100):\n \"\"\"Print external nodes to screen.\"\"\"\n print(\"Loading data...\")\n external_nodes = covalence.node.list_external(maxCount)\n print(\"Done\")\n print(\"\\n%-36s %-15s %-30s %s\" % (\"ID\", \"IP\", \"Name\", \"Notes\"))\n for node in external_nodes:\n node = denodify(node)\n print(\"%-36s %-15s %-30s %s\" % (node['id'], node['ipAddress'], node['name'], node['notes']))\n print(\"%d records\" % (len(external_nodes)))\n\n\ndef print_internal(maxCount=100):\n \"\"\"Print internal nodes to screen.\"\"\"\n\n print(\"Loading data...\")\n internal_nodes = covalence.node.list_internal(maxCount=maxCount)\n print(\"Done\")\n\n print(\"\\n%-36s %-17s %-30s %s\" % (\"ID\", \"MAC Address\", \"Name\", \"Notes\"))\n\n for node in internal_nodes:\n node = denodify(node)\n print(\"%-36s %-17s %-30s %s\" % (node['id'], node['macAddress'], node['name'], node['notes']))\n\n print(\"\\n%d records\" % (len(internal_nodes)))\n\n\ndef set_name(nodeid=None, name=None):\n \"\"\"Set the name of a node\"\"\"\n\n if nodeid and name:\n 
print(\"Setting node ID: %s to new name: %s\" % (nodeid, name))\n        covalence.node.set_name(nodeid, name)\n\n\ndef main():\n    \"\"\"Log in to Covalence and list nodes.\"\"\"\n    try:\n        covalence.login(credentials_file=\"credentials.txt\", shouldVerifyCert=False)\n    except Exception as e:\n        if isinstance(e, ConnectionError):\n            print(\"The server you connected to did not respond to our request - please confirm the server address.\")\n        elif e.__str__().endswith(\"404\"):\n            print(\"The server you connected to does not seem to be a valid Covalence instance.\")\n        else:\n            print(\"Unknown username/password or server error occurred.\")\n        sys.exit(1)\n\n    if args.external and args.internal and args.outputFile:\n        print(\"You can't specify both internal and external nodes at once when printing to a file.\")\n        sys.exit(1)\n\n    if args.newname and not args.nodeid:\n        print(\"You must specify the --nodeid value to change names.\")\n        sys.exit(1)\n\n    if args.external:\n        if args.outputFile is not None:\n            print_external_to_file(args.outputFile, args.maxCount)\n        else:\n            print_external(args.maxCount)\n\n    if args.internal or ( not args.external and not args.nodeid ):\n        if args.external:\n            print()\n        if args.outputFile is not None:\n            print_internal_to_file(args.outputFile, args.maxCount)\n        else:\n            print_internal(args.maxCount)\n\n    if args.nodeid and args.newname:\n        set_name(args.nodeid, args.newname)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"data/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540688805","text":"\nimport math\nfrom quaternions import Quaternion\n\ndef quat_mul(p, q) :\n    puat = Quaternion(p[0], p[1], p[2], p[3]) \n    quat = Quaternion(q[0], q[1], q[2], q[3]) \n    m = puat * quat\n    return (m.w, m.x, m.y, m.z) \n    \n\nif __name__ == \"__main__\" :\n    import sys, os, subprocess\n    if len(sys.argv) < 2 :\n        print(\"Exit. [There is not enough argument. Usage: python quaternion2angleaxis.py path/to/work_position]\")\n        exit(1)\n\n    print(\"Execution of quaternion2angleaxis.py started.\")\n    \n    workPos = sys.argv[1]\n    if os.path.isdir(workPos) :\n        print(\"Detected work position:\", workPos)\n    else :\n        print(\"Exit. [Work position:\", workPos, \"does not exist.]\")\n        exit(1)\n    \n\n    inputFileName = workPos + \"/input.txt\"\n\n    if os.path.exists(inputFileName) :\n        print(\"Detected input file: \", inputFileName)\n    else :\n        print(\"Exit. [Input file:\", inputFileName, \"does not exist.]\")\n        exit(1)\n\n\n    talkeronHome = None\n    p = None\n    q = None\n    \n    with open(inputFileName, 'r') as f :\n        for line in f :\n            lineSplit = line.split(\"\\t\")\n            if lineSplit[0].strip() == \"talkeron\" and len(lineSplit) > 1:\n                talkeronHome = lineSplit[1].strip() \n            if lineSplit[0].strip() == \"p\" and len(lineSplit) > 1:\n                pl = lineSplit[1].strip().split(' ')\n                p = (float(pl[0].strip(\", \")), float(pl[1].strip(\", \")), float(pl[2].strip(\", \")), float(pl[3].strip(\", \")))\n            if lineSplit[0].strip() == \"q\" and len(lineSplit) > 1:\n                ql = lineSplit[1].strip().split(' ')\n                q = (float(ql[0].strip(\", \")), float(ql[1].strip(\", \")), float(ql[2].strip(\", \")), float(ql[3].strip(\", \")))\n\n    if p is not None :\n        print(\"p: \", p)\n    else :\n        print(\"Exit. [Input parameter \\\"p\\\" is missing or invalid.]\")\n        exit(1)\n    \n    if q is not None :\n        print(\"q: \", q)\n    else :\n        print(\"Exit. 
[Input parameter \\\"q\\\" is missing or invalid.]\")\n        exit(1)\n\n    resultFileName = workPos + \"/result.txt\"\n    \n    m = quat_mul(p, q) \n    line = \"result: (%f, %f, %f, %f) \" % (m[0], m[1], m[2], m[3])\n    print(line)\n    with open(resultFileName, 'w') as f :\n        f.write(line)\n\n    print(\"Done.\")\n","sub_path":"ext/Actions/Transform/quaternionmul.py","file_name":"quaternionmul.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393842391","text":"\n\n#class header\nclass _COCK():\n\tdef __init__(self,): \n\t\tself.name = \"COCK\"\n\t\tself.definitions = [u'an adult male chicken: ', u'used with the name of a bird to refer to the adult male of that type: ', u'a penis', u'a friendly form of address, used especially by a man talking to another man: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cock.py","file_name":"_cock.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"436115288","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef question33_dict(start, end):\n    if (not isinstance(start, int)) or (not isinstance(end, int)):\n        raise ValueError('Exception: Function only accepts integer value')\n    return dict((x, x ** 2) for x in range(start, end))\n\n\nprint(question33_dict(1, 21))\n","sub_path":"answers/q33.py","file_name":"q33.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"86988937","text":"import csv\nimport yaml\nimport tqdm\nimport random\nimport numpy as np\n\nfrom procs import Proc\n\nfrom ImageTools import roi as imRoi\nfrom ImageTools import tools as imTool\nfrom ImageTools import coords as coordTool\n\nfrom DataLoader import loader\nfrom DataLoader import sampler\n\nimport vectorizer\n\n\n# load config\nwith open('config/train.yaml', 'rt') as f:\n    config = yaml.safe_load(f.read())\nconfig['dataset'] = 'test'\n\n# load dataset\n## set sampler\n_sampler = getattr(sampler, 'default')\ndata, key_all = loader.load(config, _sampler)\n\n# build processor\nprocessor = Proc(data, 0, config) # actually not used; data passed by params\nprocessor.build_models()\n\n# randomly permute data by each subject, exam\nrandom.shuffle(key_all)\n\n## processing by each subject, exam\nresult = dict()\nfor TARGET in key_all:\n    dcm_dict = data[TARGET]['dcm']\n    exam_dict= data[TARGET]['exam']\n\n    s_id, e_id = TARGET\n\n    for l in dcm_dict.keys():\n        new_key = (s_id, l)\n        if new_key not in result:\n            result[new_key] = [0.0]\n\n        views = dcm_dict[l].keys()\n\n        imgs = {\n            view: processor.process_image(dcm_dict[l][view])\n            for view in views\n        }\n        rois = {\n            view: processor.process_roi(imgs[view])\n            for view in views\n        }\n        patches = {\n            view: processor.generate_patches(imgs[view]['rgb'], rois[view])\n            for view in views\n        }\n        scores = {\n            view: processor.compute_patch_scores(patches[view], rois[view])\n            for view in views\n        }\n\n        # calculating score\n        for view in views:\n            _ = [ s for view in views for s in scores[view]['class']['mass'] ]\n\n            result[new_key].extend(_)\n\nfor k in result.keys():\n    result[k] = np.array(result[k])\n    result[k] = np.average(result[k])\n################################################\nf = 
open('/output/predictions.tsv', 'w')\nwriter = csv.DictWriter(f, fieldnames=['subjectId', 'laterality', 'confidence'], delimiter='\\t')\nwriter.writeheader()\n\nfor k in result.keys():\n    writer.writerow({\n        'subjectId': k[0],\n        'laterality': k[1],\n        'confidence': result[k]\n    })\n\nf.close()\n","sub_path":"inference-sklearn.py","file_name":"inference-sklearn.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"162918634","text":"import ctypes\n\n# title detection function\ndef gettitle():\n    lib = ctypes.windll.LoadLibrary('user32.dll')\n    handle = lib.GetForegroundWindow()\n    buffer = ctypes.create_unicode_buffer(255)\n    lib.GetWindowTextW(handle, buffer, ctypes.sizeof(buffer))\n\n    return buffer.value\n","sub_path":"Python/Hacking/Keylog/v1/winmanager.py","file_name":"winmanager.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"399087789","text":"import random\n\nfrom django import http\n\nfrom meiduo_mall.libs.yuntongxun.ccp_sms import CCP\nfrom meiduo_mall.utils import constants\nfrom meiduo_mall.utils.response_code import RETCODE\n\n\nfrom django.shortcuts import render\nimport logging\nfrom celery_tasks.sms.tasks import ccp_send_sms_code\nlogger = logging.getLogger('django')\n\n# Create your views here.\nfrom django.views import View\nfrom django_redis import get_redis_connection\n\nfrom meiduo_mall.libs.captcha.captcha import captcha\n\n\nclass ImageCodeView(View):\n\n    def get(self,request,uuid):\n        # generate the image captcha\n        text,image = captcha.generate_captcha()\n\n        # open the redis \"verify_code\" database\n        redis_conn = get_redis_connection('verify_code')\n        # save the image captcha to redis\n        redis_conn.setex('img_%s' % uuid, constants.IMAGE_CODE_REDIS_EXPIRES, text)\n        # return the image captcha\n        return http.HttpResponse(image, content_type='image/jpeg')\n\n\nclass SMSCodeView(View):\n\n    def get(self,request,mobile):\n\n        conn = get_redis_connection(\"verify_code\")\n        send_sms = conn.get('sms_code_%s' % mobile)\n        if send_sms:\n            return http.HttpResponseForbidden('SMS code requested too frequently')\n\n\n        image_code_cilent = request.GET.get(\"image_code\")\n        uuid = request.GET.get(\"image_code_id\")\n        # check the parameters\n        if not all([image_code_cilent,uuid]):\n            return http.JsonResponse({'code':RETCODE.IMAGECODEERR,\n                                      'errmsg':\"missing required parameters\"})\n        # validate the parameters\n        image_code = conn.get('img_%s' % uuid)\n        if image_code is None:\n            return http.JsonResponse({'code':RETCODE.IMAGECODEERR,\n                                      'errmsg':\"image captcha has expired\"})\n        try:\n            conn.delete('img_%s' % uuid)\n        except Exception as e:\n            logger.error(e)\n        image_code_server = image_code.decode()\n        if image_code_cilent.lower() != image_code_server.lower():\n            return http.JsonResponse({'code':RETCODE.IMAGECODEERR,\n                                      'errmsg':\"incorrect image captcha\"})\n        # generate the SMS verification code\n        sms_code = '%06d'% random.randint(0,999999)\n        print(sms_code)\n        # save it to redis\n        pl = conn.pipeline()\n        pl.setex('sms_code_%s' % mobile,constants.IMAGE_CODE_REDIS_EXPIRES,sms_code)\n        pl.setex('send_flag_%s' % mobile,constants.IMAGE_CODE_REDIS_EXPIRES,1)\n        pl.execute()\n\n        # send the verification code\n\n        # CCP().send_template_sms(mobile,[sms_code,5],constants.SMS_CDDE_ID)\n        ccp_send_sms_code.delay(mobile,sms_code)\n\n\n        return http.JsonResponse({'code':RETCODE.OK,\n                                  'error':'ok'})\n\n\n\n\n\n","sub_path":"meiduo_mall/meiduo_mall/apps/verifications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474535904","text":"def intersection(list1, 
list2):\n new_list=[]\n \n for a in list1:\n for b in list2:\n if a == b :\n new_list.append(a)\n return new_list\n\nprint(intersection([1,2,3], [2,4,3,5,6,6,6]))","sub_path":"W9D3-speaking_python/booster.py","file_name":"booster.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"357664327","text":"import pandas as pd\nimport os\nimport sys\nsys.path.append('/home/maciek/pyCharmProjects/mc-doi')\nfrom matplotlib import pyplot as plt\nimport csv\nimport numpy as np\nimport pickle\nfrom data.data import Data\nfrom collections import defaultdict\nimport functools\nimport itertools\nimport copy\nfrom scipy.spatial.distance import hamming\nfrom tqdm import tqdm\n\nsets_to_evaluate_file = list(sys.argv)[1]\nwith open(sets_to_evaluate_file, 'r', encoding='utf-8') as sets_to_evaluate:\n sets_to_evaluate = sets_to_evaluate.readlines()\nsets_to_evaluate = [x.strip() for x in sets_to_evaluate]\n\nstart_time = 1332565200\nend_time = 1335416399\nduration_24h_in_sec = 60*60*24\ntime_grid = np.arange(start_time+duration_24h_in_sec,end_time+duration_24h_in_sec,duration_24h_in_sec)\n\nmodel = list(sys.argv)[2]\n\ndirectory = '/nfs/maciej/mcdoi/'+model+'/'\n\nevaluated = set()\nfor batch_size in [3600, 43200, 86400, 604800]:\n with open(directory + 'frequencies/hamming_'+str(batch_size), 'r', encoding='utf-8') as file:\n e = file.readlines()\n evaluated.update([x.strip() for x in e])\n\ndef diff(first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\ndef evaluate(path, iter_length, model):\n new_path = path.split('/')\n new_path[4] = model\n new_path = '/' + os.path.join(*new_path)\n history = int(path.split('/')[6].split('_')[1])\n batch_size = int(path.split('/')[8].split('_')[1])\n with open(os.path.dirname(os.path.dirname(os.path.dirname(path)))+'/edges', 'r', encoding='utf-8') as f:\n edges = pd.read_csv(f, header=None, names=[Data.user_1, Data.user_2])\n\n user_dict = defaultdict(functools.partial(next, itertools.count()))\n edges[Data.user_1] = edges[Data.user_1].map(user_dict)\n edges[Data.user_2] = edges[Data.user_2].map(user_dict)\n\n with open(os.path.dirname(os.path.dirname(os.path.dirname(path)))+'/event_log', 'r', encoding='utf-8') as f:\n whole_event_log = pd.read_csv(f, header=None, names=[Data.time_stamp, Data.user, Data.contagion])\n whole_event_log.user = whole_event_log.user.map(user_dict)\n\n with open(os.path.dirname(os.path.dirname(path))+'/data_obj.pickle', 'rb') as f:\n d=pickle.load(f)\n\n with open(os.path.dirname(os.path.dirname(path))+'/contagion_dict.pickle', 'rb') as f:\n contagion_dict=pickle.load(f)\n\n max_contagion_id = max(contagion_dict.values())\n\n whole_event_log[Data.contagion_id] = whole_event_log[Data.contagion].apply(lambda x: contagion_dict[x])\n whole_event_log=whole_event_log[whole_event_log[Data.contagion_id]<=max_contagion_id]\n\n indicators = []\n I = np.full((d.num_users, d.num_contagions), False, dtype=bool)\n for i in range(1,min(7,33-history)+1):\n for index, row in whole_event_log[whole_event_log[Data.time_stamp]<=time_grid[history-1] + i * iter_length].iterrows():\n I[row[Data.user]][row[Data.contagion_id]] = True\n indicators.append(I)\n I = copy.deepcopy(I)\n\n results = []\n for i in range(0, 7):\n with open(new_path + '/result_' + str(i) + '.pickle', 'rb') as result:\n results.append(pickle.load(result))\n\n for i in range(1,min(7,33-history)+1):\n open(new_path + '/hamming_' + str(i - 1), 'w', 
encoding='utf-8').close()\n for user in range(d.num_users):\n with open(new_path + '/hamming_' + str(i - 1), 'a', encoding='utf-8') as file:\n file.write(str(user) + ',' + str(hamming(indicators[i-1][user,:],results[i-1][user,:])) + '\\n')\n with open(directory + 'frequencies/hamming_' + str(batch_size), 'a', encoding='utf-8') as file:\n file.write(new_path + '/hamming_' + str(i - 1) + '\\n')\n\nif __name__ == '__main__':\n paths = diff(sets_to_evaluate,evaluated)\n for path in tqdm(paths):\n evaluate(path,86400,model)","sub_path":"experiments/DynamicLinearThreshold/evaluation/hamming.py","file_name":"hamming.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166533088","text":"'''Group: Almudena Chapa, Daniel Lazaro, Jon Ander Martin'''\n\n'''Policy 1'''\n\nfrom random import shuffle\n\nfrom unhappy_and_empty_lists import unhappy_and_empty_info_extraction_for_policies\nfrom happiness import happiness\n\ndef relocation_policy1(segregation_intmap, happiness_map, k, p, q):\n \n unhappies, empties = unhappy_and_empty_info_extraction_for_policies(segregation_intmap, happiness_map, k, p)\n #we want this process to be random so we shuffle the list\n shuffle(unhappies)\n\n \n for unhappy in unhappies:\n \n shuffle(empties)\n #now we pick q empties\n empties_q = empties[0:q]\n \n coords_unhappy = unhappy[0]\n race_unhappy = int(unhappy[3]) #race can be 1 or 2 that is the same value of the indexes of\n # the n_blues and n_reds information\n found_happy_spot = False\n #let's find a happy spot\n i = 0\n less_unhappy_spot = unhappy #initialize the vble that searches for the less unhappy spot\n found_less_unhappy_spot = False\n\n while i < q and not found_happy_spot: \n empty = empties_q[i]\n \n if empty[race_unhappy] >= k: #check if that empty is happy\n\n found_happy_spot = True\n new_empty_spot = unhappy\n new_empty_spot.pop(3)\n index_happy = i\n happy_spot = empty\n #print(empties[i])\n \n else:\n if less_unhappy_spot[race_unhappy] < empty[race_unhappy]:\n\n less_unhappy_spot = empty\n index_less_unhappy = i\n found_less_unhappy_spot = True\n \n if i<q:\n i += 1\n else:\n pass\n \n if not found_happy_spot and found_less_unhappy_spot:\n\n new_coords = less_unhappy_spot[0] #coords of the least unhappy\n segregation_intmap[new_coords[0], new_coords[1]] = race_unhappy #update segregation map\n segregation_intmap[coords_unhappy[0], coords_unhappy[1]] = 0\n empties.pop(index_less_unhappy)\n new_empty_spot = unhappy\n new_empty_spot.pop(3)\n empties.append(new_empty_spot)\n \n \n elif found_happy_spot:\n new_coords = happy_spot[0]\n segregation_intmap[new_coords[0],new_coords[1]] = race_unhappy\n segregation_intmap[coords_unhappy[0], coords_unhappy[1]] = 0\n empties.pop(index_happy)\n empties.append(new_empty_spot)\n \n else:\n pass\n \n happiness_map = happiness(segregation_intmap, k, p)\n unhappies1, empties = unhappy_and_empty_info_extraction_for_policies(segregation_intmap, happiness_map, k, p)\n \n counter = 0\n for unhappy in unhappies:\n for unhappy1 in unhappies1:\n if unhappy[0] == unhappy1[0]:\n unhappies[counter] = unhappy1\n counter +=1\n \n \n \n happiness_map = happiness(segregation_intmap, k, p)\n \n return segregation_intmap, happiness_map \n\n\n\n\n","sub_path":"policy_1.py","file_name":"policy_1.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"364391867","text":"import torch\nimport 
torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass RNN(nn.Module):\n def __init__(self,token,args):\n super(RNN,self).__init__()\n \n self.word_emb = nn.Embedding(len(token['tokens']),args.word_dim,padding_idx=0)\n self.ner_emb = nn.Embedding(len(token['nodes']),args.word_dim,padding_idx=0)\n self.edge_emb = nn.Embedding(3,args.word_dim,padding_idx=0)\n \n self.rnn = nn.LSTM(\n input_size=args.word_dim,\n hidden_size=args.hidden_dim,\n num_layers=args.num_layer,\n batch_first=args.batch_first,\n dropout=args.dropout,\n bidirectional=args.bidirectional\n )\n \n self.word_size = args.word_dim\n self.hidden_size = args.hidden_dim\n self.num_layer = args.num_layer\n self.batch_first = args.batch_first\n self.dropout = args.dropout\n self.bidirectional = args.bidirectional\n \n self.dense_1 = nn.Linear(2*args.hidden_dim,32)\n self.act_1 = nn.ReLU()\n self.dense_2 = nn.Linear(32,len(token['edges']))\n \n if(args.mode == 'pretrain'):\n self.load()\n self.word_emb.weight.requires_grad = False\n print(\"here\",self.word_emb.weight.requires_grad)\n\n def load(self):\n with open('./data/embedding/glove.6B.100d.txt') as f:\n arr = np.zeros((self.word_emb.weight.shape[0],self.word_emb.weight.shape[1]),dtype=np.float32)\n for i,line in enumerate(f):\n for j,num in enumerate(line.strip().split()[1:]):\n arr[i+1,j] = float(num)\n \n self.word_emb.weight = nn.Parameter(torch.tensor(arr))\n\n\n def forward(self,data,data_len,data_ner,data_point):\n def pack(seq,seq_length):\n sorted_seq_lengths, indices = torch.sort(seq_length, descending=True)\n _, desorted_indices = torch.sort(indices, descending=False)\n\n if self.batch_first:\n seq = seq[indices]\n else:\n seq = seq[:, indices]\n packed_inputs = nn.utils.rnn.pack_padded_sequence(seq,\n sorted_seq_lengths.cpu().numpy(),\n batch_first=self.batch_first)\n\n return packed_inputs,desorted_indices\n\n def unpack(res, state,desorted_indices):\n padded_res,_ = nn.utils.rnn.pad_packed_sequence(res, batch_first=self.batch_first)\n\n state = [state[i][:,desorted_indices] for i in range(len(state)) ] \n\n if(self.batch_first):\n desorted_res = padded_res[desorted_indices]\n else:\n desorted_res = padded_res[:, desorted_indices]\n\n return desorted_res,state\n\n def feat_extract(output,length,mask):\n \"\"\"\n answer_output: batch*sentence*feat_len\n query_output: batch*sentence*feat_len\n for simple rnn, we just take the output from \n \"\"\"\n if( self.batch_first == False ):\n output = output.transpose(0,1) \n\n output = [torch.cat([ output[i][ length[i]-1 ][:self.hidden_size] , \n output[i][0][self.hidden_size:]] , dim=-1 ) for i in range(length.shape[0])]\n output = torch.stack(output,dim=0)\n\n return output\n #first check for the mask ans the embedding\n mask = data.eq(0)\n\n word = self.word_emb(data)\n\t\t#word = word + self.ner_emb(data_ner)\n word = word + self.edge_emb(data_point)\n \n #query part\n packed_inputs,desorted_indices = pack(word,data_len)\n res, state = self.rnn(packed_inputs)\n query_res,_ = unpack(res, state,desorted_indices)\n\n #extract the representation of the sentence\n query_result = feat_extract(query_res,data_len.int(),mask)\n\n output = self.dense_1(query_result)\n output = self.act_1(output)\n output = self.dense_2(output)\n \n return output\n","sub_path":"lstm/model/birnn_ner.py","file_name":"birnn_ner.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"45010121","text":"\"\"\"\nImplements IPC communication 
between the APP and its KMIP server.\n\nUses a custom binary protocol.\n\n\"\"\"\n\nimport os\nimport socket\nimport struct\n\n_KMIP_SOCKET_DIR = 'socket'\n_KMIP_SOCKET_FILE = \"kmip-server.socket\"\n\n# Convert protocols to its string representation\nTTLV_PROTOCOL = b'\\x00'\nXML_PROTOCOL = b'\\x01'\nJSON_PROTOCOL = b'\\x02'\n\n_protocols = [TTLV_PROTOCOL, XML_PROTOCOL, JSON_PROTOCOL]\n\n\ndef recvall(sock, count):\n \"\"\"\n Receive the specified amount of bytes from a socket.\n\n Args:\n sock (socket.socket): the socket\n count (int): number of bytes to read.\n\n Returns:\n bytes: the data read.\n \"\"\"\n buf = bytearray(count)\n bufv = memoryview(buf)\n nread = 0\n while nread < count:\n nread += sock.recv_into(bufv[nread:])\n return buf\n\n\nclass InvalidProtocolError(Exception):\n \"\"\" Error return when an invalid protocol is received \"\"\"\n\n def __init__(self, protocol):\n self.protocol = protocol\n\n def __str__(self):\n return 'Invalid value for protocol: {}'.format(self.protocol)\n\n\nclass ResponseProtocolError(Exception):\n def __init__(self, reason):\n self.reason = reason\n\n def __str__(self):\n return 'Error on response protocol: {}'.format(self.reason)\n\n\ndef send_request(request, ip='', port=0, protocol=XML_PROTOCOL):\n \"\"\"\n Sends a request to the designed KMIP server.\n\n Args:\n request (bytes): The message to be sent\n ip (str): IP of the HSM, if connecting remotely\n port (int): Port of the KMIP server within the HSM, if connecting remotely\n protocol (int): Messaging protocol; Anything other than TTLV implies HTTPS;\n TTLV is only HTTPS if port is different from 5696\n\n Returns:\n The response\n \"\"\"\n if protocol not in _protocols:\n raise InvalidProtocolError(protocol)\n\n socket_path = os.getenv('KMIP_SERVER_SOCKET')\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(socket_path)\n try:\n sock.sendall(protocol)\n\n request_len_bytes = struct.pack(\"<I\", len(request))\n sock.sendall(request_len_bytes)\n\n sock.sendall(request)\n sock.shutdown(socket.SHUT_WR)\n\n # Unpack the received length from little-endian to the native endianess\n response_len_bytes = recvall(sock, 4)\n response_len = struct.unpack(\"<I\", response_len_bytes)[0]\n\n response = recvall(sock, response_len)\n finally:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n\n return response\n","sub_path":"reporter/kkmip/build/lib/kkmip/conn.py","file_name":"conn.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"308436257","text":"import json\nimport datetime\nfrom functools import wraps\nimport jwt\nimport pandas as pd\nfrom flask import Flask\nfrom flask import request, render_template\nfrom flask_restplus import Resource, Api, abort\nfrom flask_restplus import fields\nfrom flask_restplus import inputs\nfrom flask_restplus import reqparse\nfrom flask_cors import CORS\n\n\nclass AuthenticationToken:\n def __init__(self, secret_key, expires_in):\n self.secret_key = secret_key\n self.expires_in = expires_in\n\n def generate_token(self, username):\n info = {\n 'username': username,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=self.expires_in)\n }\n return jwt.encode(info, self.secret_key, algorithm='HS256')\n\n def validate_token(self, token):\n info = jwt.decode(token, self.secret_key, algorithms=['HS256'])\n return info['username']\n\n\nSECRET_KEY = \"FOOTBALL API SECRET KEY\"\nexpires_in = 600\nauth = AuthenticationToken(SECRET_KEY, 
expires_in)\n\napp = Flask(__name__)\napp.config['SWAGGER_UI_JSONEDITOR'] = True\napp.config['RESTPLUS_MASK_SWAGGER'] = False\napi = Api(app, authorizations={\n 'API-KEY': {\n 'type': 'apiKey',\n 'in': 'header',\n 'name': 'AUTH-TOKEN'\n }\n},\n security='API-KEY',\n default = \"Football Players\", # Default namespace\n title = \"Football Player Dataset\", # Documentation Title\n description = \"A football player api service\") # Documentation Description\nCORS(app)\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n\n token = request.headers.get('AUTH-TOKEN')\n if not token:\n abort(401, 'Authentication token is missing')\n\n try:\n user = auth.validate_token(token)\n except Exception as e:\n abort(401, e)\n\n return f(*args, **kwargs)\n\n return decorated\n\n\n#remove prefix in pandas\ndef drop_prefix(self, prefix):\n self.columns = self.columns.str.lstrip(prefix)\n return self\n\n\nplayer_info_dict = {\n \"player_name\": fields.String,\n \"age\": fields.Integer,\n \"birth_date\": fields.String,\n \"birth_country\": fields.String,\n \"height\": fields.String,\n \"weight\": fields.String,\n}\nplayer_stats_dict = {\n \"team_name\": fields.String,\n \"position\": fields.String,\n \"gp\": fields.Integer,\n \"mins\": fields.Integer,\n \"g\": fields.Integer,\n \"a\": fields.Integer,\n \"shts\": fields.Integer,\n \"sog\": fields.Integer,\n \"y\": fields.Integer,\n \"r\": fields.Integer,\n \"sc\": fields.Float,\n \"sog_rate\": fields.Float,\n \"mg\": fields.Float,\n \"ma\": fields.Float,\n \"year\": fields.String\n}\n\nplayer_stats_model = api.model('Player one season stats', player_stats_dict)\nplayer_all_stats_model = api.model('Player all season stats', {\n \"seasons\": fields.List(fields.Nested(player_stats_model))\n})\nplayer_get_model = api.inherit('Play data model', player_all_stats_model, player_info_dict)\nmarket_model = api.model('Player market value', {\n \"player_name\": fields.String,\n \"2014\": fields.Integer,\n \"2015\": fields.Integer,\n \"2016\": fields.Integer,\n \"2017\": fields.Integer,\n \"2018\": fields.Integer\n})\nplayer_season_parser = reqparse.RequestParser()\nplayer_season_parser.add_argument('year', choices=list(range(2014,2019)), type=inputs.natural, help='If select, only return data for one season', required=False)\n\n\nmarket_post_model = api.model('Player market value with player id', {\n \"player_id\": fields.Integer,\n \"player_name\": fields.String,\n \"2014\": fields.Integer,\n \"2015\": fields.Integer,\n \"2016\": fields.Integer,\n \"2017\": fields.Integer,\n \"2018\": fields.Integer\n})\n\npredict_model = api.model('Player predicted value', {\n \"player_name\": fields.String,\n \"2019_value\": fields.Integer\n})\n\n\n# credential_model = api.model('credential', {\n# 'username': fields.String,\n# 'password': fields.String\n# })\n#\n# credential_parser = reqparse.RequestParser()\n# credential_parser.add_argument('username', type=str)\n# credential_parser.add_argument('password', type=str)\n\n\nparser = reqparse.RequestParser()\norder_list = ['player_id']\norder_list.extend(list(column for column in player_get_model.keys()))\nparser.add_argument('order', choices=order_list, help='ordered by selected item')\nparser.add_argument('ascending', type=inputs.boolean)\n\nm_order_list = ['player_id']\nm_order_list.extend(list(column for column in market_model.keys()))\nmarkets_parser = reqparse.RequestParser()\nmarkets_parser.add_argument('order', choices=m_order_list, help='ordered by selected item')\nmarkets_parser.add_argument('ascending', 
type=inputs.boolean)\n\nusage_parser = reqparse.RequestParser()\nusage_parser.add_argument('period', choices=['yesterday', 'today', 'total'], help='select the period of data', required=True)\n\n\n\n@app.route('/index')\ndef indexpage():\n    return render_template('index.html')\n@app.route('/search')\ndef searchpage():\n    return render_template('search.html')\n@app.route('/signin')\ndef loginpage():\n    return render_template('login.html')\n@app.route('/signup')\ndef signuppage():\n    return render_template('signup.html')\n\n\n@api.route('/login')\nclass Token(Resource):\n    @api.response(200, 'Successful')\n    @api.response(404, \"Username doesn't exist\")\n    @api.response(401, \"Authorization has been refused\")\n    # @api.expect(credential_parser, validate=True)\n    def get(self):\n        '''\n        User login entry\n\n        Logs a user in and generates an authentication token.\n        username and password must be included in the header\n        '''\n        global refresh\n        route = '/login'\n        total_df.loc[route, 'get_num'] += 1\n        cur_df.loc[route, 'get_num'] += 1\n        if datetime.datetime.now().hour != 24 and refresh == 0:\n            refresh = 1\n            for col in cur_df.columns:\n                prev_df[col] = cur_df[col]\n                cur_df[col].values[:] = 0\n        if datetime.datetime.now().hour == 24:\n            refresh = 0\n        print(refresh)\n        # args = credential_parser.parse_args()\n        # username = args.get('username')\n        # password = args.get('password')\n\n        username = request.headers.get('username')\n        password = request.headers.get('password')\n        if not username or not password:\n            return {\"message\": \"missing username or password\"}, 401\n        if username not in list(user_df['username']):\n            return {\"message\": \"Username doesn't exist\"}, 404\n        print(user_df[user_df.username == username].password.values[0])\n        print(password)\n        if password == user_df[user_df.username == username].password.values[0]:\n            return {\"token\": auth.generate_token(username).decode('utf-8')}\n\n        return {\"message\": \"authorization has been refused for those credentials.\"}, 401\n\n@api.route('/user')\nclass Users(Resource):\n    @api.response(201, 'User Created Successfully')\n    @api.response(400, 'Format Validation Error')\n    def post(self):\n        '''\n        Create a new user account\n\n        '''\n        route = '/user'\n        total_df.loc[route, 'post_num'] += 1\n        cur_df.loc[route, 'post_num'] += 1\n\n        username = request.headers.get('username')\n        password = request.headers.get('password')\n        if not username or not password:\n            return {\"message\": \"missing username or password\"}, 400\n        if username in list(user_df['username']):\n            return {\"message\": \"username already exists\"}, 400\n        last_id = user_df.index[-1]\n        new_id = last_id + 1\n        user_df.loc[new_id, 'username'] = username\n        user_df.loc[new_id, 'password'] = password\n\n        return {\"message\": \"New user is created successfully\"}, 201\n\n@api.route('/players')\nclass PlayersList(Resource):\n    @api.expect(parser)\n    @api.response(200, 'Successful')\n    # @api.marshal_with(player_get_model)\n    def get(self):\n        \"\"\"\n        Get all players' data\n\n        Get all players' data from the database.\n        \"\"\"\n\n        route = '/players'\n        total_df.loc[route, 'get_num'] += 1\n        cur_df.loc[route, 'get_num'] += 1\n\n        # get players as JSON string\n        args = parser.parse_args()\n\n        # retrieve the query parameters\n        order_by = args.get('order')\n        ascending = args.get('ascending', True)\n\n        if order_by:\n            player_df.sort_values(by=order_by, inplace=True, ascending=ascending)\n\n\n        json_str = player_df.to_json(orient='index')\n\n        # convert the string JSON to a real JSON\n        ds = json.loads(json_str)\n\n        ret = []\n\n        for idx in ds:\n            player = ds[idx]\n            
format_player = {}\n #copy basic info\n format_player['player_id'] = idx\n for key in player_info_dict.keys():\n format_player[key] = player[key]\n #read each season stats into a list, then put under 'seasons' key\n season_stats_list = []\n for year in range(2014, 2019):\n season_stats = {}\n for stat_key in player_stats_dict.keys():\n prf_key = str(year) + '_' + stat_key\n if stat_key != 'year':\n season_stats[stat_key] = player[prf_key]\n else:\n season_stats['year'] = str(year)\n season_stats_list.append(season_stats)\n format_player['seasons'] = season_stats_list\n ret.append(format_player)\n\n return ret\n\n @api.response(201, 'Player Created Successfully')\n @api.response(400, 'Format Validation Error')\n @api.expect(player_get_model, validate=True)\n @requires_auth\n def post(self):\n '''\n Add a new player data\n\n The player id will be generated automatically\n '''\n route = '/players'\n total_df.loc[route, 'post_num'] += 1\n cur_df.loc[route, 'post_num'] += 1\n #generate an id\n player = request.json\n player_df.sort_values(by='player_id', inplace=True, ascending=True)\n last_id = player_df.index[-1]\n new_id = last_id + 1\n\n if 'seasons' in player:\n season_stats_list = player.pop(\"seasons\")\n else:\n season_stats_list = []\n # Put the values into the dataframe\n for key in player:\n if key not in player_get_model.keys():\n # unexpected column\n return {\"message\": \"Property {} is invalid\".format(key)}, 400\n player_df.loc[new_id, key] = player[key]\n\n\n for season_stats_dict in season_stats_list:\n for key in season_stats_dict:\n if key not in player_stats_model.keys():\n return {\"message\": \"Property {} is invalid\".format(key)}, 400\n year = season_stats_dict.pop(\"year\")\n format_dict = {str(year)+'_'+ key: value for key, value in season_stats_dict.items()}\n for key in format_dict:\n player_df.loc[new_id, key] = format_dict[key]\n #change nan to 0 for int columns\n if pd.isna(player_df.loc[new_id,'age']):\n player_df.loc[new_id, 'age'] = 0\n int_cols = []\n for y in range(2014, 2019):\n end_col = 76 - (2018 - y) * 14\n int_cols.extend(i for i in range(end_col - 12, end_col))\n for col in int_cols:\n if pd.isna(player_df.iloc[new_id - 1, col]):\n player_df.iloc[new_id - 1, col] = 0\n\n return {\"message\": \"Player id:{} is created\".format(new_id)}, 201\n\n#-------------------------------------------\n@api.route('/players/<int:id>')\nclass Players(Resource):\n @api.expect(player_season_parser)\n @api.response(404, \"Player doesn't exist\")\n @api.response(200, 'Successful')\n @api.marshal_with(player_get_model)\n def get(self, id):\n \"\"\"\n Get a player data\n\n return player stats for all seasons\n \"\"\"\n route = '/players/id'\n total_df.loc[route, 'get_num'] += 1\n cur_df.loc[route, 'get_num'] += 1\n\n args = player_season_parser.parse_args()\n\n # retrieve the query parameters\n season = args.get('year')\n if id not in player_df.index:\n api.abort(404, \"Player {} doesn't exist\".format(id))\n # determine the stats of which year is needed\n seasons_list = []\n if season:\n seasons_list.append(season)\n else:\n seasons_list.extend(list(range(2014, 2019)))\n\n # get basic info\n list1 = list(range(0, 6))\n basic_info = dict(player_df.iloc[id-1, list1])\n #get season stats info\n stats_list = []\n for y in seasons_list:\n end_col = 76 - (2018 - y) * 14\n list2 = list(range(end_col - 14, end_col))\n stats_df = player_df.iloc[:, list2]\n year_pre = str(y) + '_'\n stats_df.drop_prefix(year_pre)\n season_stats = dict(stats_df.iloc[id-1])\n season_stats[\"year\"] = 
y\n stats_list.append(season_stats)\n #return data\n basic_info[\"seasons\"] = stats_list\n\n return basic_info\n\n @api.response(404, \"Player doesn't exist\")\n @api.response(200, 'Delete successful')\n @requires_auth\n def delete(self, id):\n \"\"\"\n Delete a player data\n \"\"\"\n route = '/players/id'\n total_df.loc[route, 'delete_num'] += 1\n cur_df.loc[route, 'delete_num'] += 1\n\n if id not in player_df.index:\n api.abort(404, \"Player {} doesn't exist\".format(id))\n\n player_df.drop(id, inplace=True)\n\n return {\"message\": \"Player {} is removed.\".format(id)}, 200\n\n @api.response(404, 'Player was not found')\n @api.response(400, 'Format validation Error')\n @api.response(200, 'Successful')\n @api.expect(player_get_model, validate=True)\n @requires_auth\n def put(self, id):\n \"\"\"\n Update a player data\n \"\"\"\n route = '/players/id'\n total_df.loc[route, 'put_num'] += 1\n cur_df.loc[route, 'put_num'] += 1\n\n if id not in player_df.index:\n api.abort(404, \"Player {} doesn't exist\".format(id))\n\n # get the payload and convert it to a JSON\n player = request.json\n if 'seasons' in player:\n season_stats_list = player.pop(\"seasons\")\n else:\n season_stats_list = []\n # Update the values\n for key in player:\n if key not in player_get_model.keys():\n # unexpected column\n return {\"message\": \"Property {} is invalid\".format(key)}, 400\n player_df.loc[id, key] = player[key]\n\n\n for season_stats_dict in season_stats_list:\n for key in season_stats_dict:\n if key not in player_stats_model.keys():\n return {\"message\": \"Property {} is invalid\".format(key)}, 400\n year = season_stats_dict.pop(\"year\")\n format_dict = {str(year)+'_'+ key: value for key, value in season_stats_dict.items()}\n for key in format_dict:\n player_df.loc[id, key] = format_dict[key]\n\n return {\"message\": \"Player {} has been successfully updated\".format(id)}, 200\n\n# #########################################\n#market value\n#################################################\n@api.route('/markets/<int:id>')\n@api.param('id', 'The Player id')\nclass Markets(Resource):\n @api.response(404, 'Market value for player was not found')\n @api.response(200, 'Successful')\n @api.marshal_with(market_model)\n def get(self, id):\n '''\n Get market value for a player\n\n To get market value for a player by its ID(unit: million euro)\n '''\n route = '/markets/id'\n total_df.loc[route, 'get_num'] += 1\n cur_df.loc[route, 'get_num'] += 1\n\n if id not in market_df.index:\n api.abort(404, \"Market value for player {} doesn't exist\".format(id))\n\n market_value = dict(market_df.loc[id])\n\n return market_value\n\n @api.response(404, 'Market value for player was not found')\n @api.response(200, 'Successful')\n @requires_auth\n def delete(self, id):\n '''\n Delete market value for a player\n\n To delete market value for a player by its ID\n '''\n route = '/markets/id'\n total_df.loc[route, 'delete_num'] += 1\n cur_df.loc[route, 'delete_num'] += 1\n if id not in market_df.index:\n api.abort(404, \"Market value for player {} doesn't exist\".format(id))\n\n market_df.drop(id, inplace=True)\n\n return {\"message\": \"Market value for player {} is removed.\".format(id)}, 200\n\n @api.response(404, 'Market value for player was not found')\n @api.response(400, 'Format validation Error')\n @api.response(200, 'Successful')\n @api.expect(market_model, validate=True)\n @requires_auth\n def put(self, id):\n '''\n Update market value for a player\n\n To update market value for a player by its ID\n '''\n if id not in 
market_df.index:\n            api.abort(404, \"Market value for player {} doesn't exist\".format(id))\n\n        # get the payload and convert it to a JSON\n        market_values = request.json\n\n        # Update the values\n        for key in market_values:\n            if key not in market_model.keys():\n                # unexpected column\n                return {\"message\": \"Property {} is invalid\".format(key)}, 400\n            market_df.loc[id, key] = market_values[key]\n\n        route = '/markets/id'\n        total_df.loc[route, 'put_num'] += 1\n        cur_df.loc[route, 'put_num'] += 1\n        return {\"message\": \"Market value for player {} has been successfully updated\".format(id)}, 200\n\n#-------------------------------------------\n@api.route('/markets')\nclass MarketsList(Resource):\n    @api.expect(markets_parser)\n    @api.response(200, 'Successful')\n    def get(self):\n        '''\n        Get market value for all players\n\n        (unit: million euro)\n        '''\n        route = '/markets'\n        total_df.loc[route, 'get_num'] += 1\n        cur_df.loc[route, 'get_num'] += 1\n\n        # get market values as a JSON string\n        args = markets_parser.parse_args()\n\n        # retrieve the query parameters\n        order_by = args.get('order')\n        ascending = args.get('ascending', True)\n\n        if order_by:\n            market_df.sort_values(by=order_by, inplace=True, ascending=ascending)\n\n        json_str = market_df.to_json(orient='index')\n\n        # convert the string JSON to a real JSON\n        market_ds = json.loads(json_str)\n        ret = []\n\n        for idx in market_ds:\n            values = market_ds[idx]\n            values['player_id'] = int(idx)\n            ret.append(values)\n\n        return ret\n\n    @api.response(201, 'Market value record created successfully')\n    @api.response(400, 'Format validation Error')\n    @api.expect(market_post_model, validate=True)\n    @requires_auth\n    def post(self):\n        '''\n        Create a new market value record for a player\n\n        Please check if the player has been created in the player database first\n        '''\n        route = '/markets'\n        total_df.loc[route, 'post_num'] += 1\n        cur_df.loc[route, 'post_num'] += 1\n\n        market_values = request.json\n\n        if 'player_id' not in market_values:\n            return {\"message\": \"Missing player_id\"}, 400\n\n        id = market_values['player_id']\n        # check if the given player exists\n        if id not in player_df.index:\n            return {\"message\": \"Can't find the player with id={} in player database\".format(id)}, 400\n        # check if a market value record for the player already exists\n        if id in market_df.index:\n            return {\"message\": \"A market value record with player_id={} is already in the dataset\".format(id)}, 400\n\n        # Put the values into the dataframe\n        for key in market_values:\n            if key not in market_post_model.keys():\n                # unexpected column\n                return {\"message\": \"Property {} is invalid\".format(key)}, 400\n            market_df.loc[id, key] = market_values[key]\n\n        return {\"message\": \"A market value record for player id={} is created\".format(id)}, 201\n\n@api.route('/predicts/<int:id>')\n@api.param('id', 'The Player id')\nclass Predicts(Resource):\n    @api.response(404, 'Predicted value for player was not found')\n    @api.response(200, 'Successful')\n    @api.marshal_with(predict_model)\n    def get(self, id):\n        '''\n        Get predicted value for a player\n\n        To get the predicted value for a player for 2019 by its ID (unit: million euro)\n        '''\n        route = '/predicts/id'\n        total_df.loc[route, 'get_num'] += 1\n        cur_df.loc[route, 'get_num'] += 1\n\n        if id not in ml_df.index:\n            api.abort(404, \"Predicted value for player {} doesn't exist\".format(id))\n\n        predict_value = dict(ml_df.loc[id])\n\n        return predict_value\n\n@api.route('/usage')\nclass Usage(Resource):\n    @api.expect(usage_parser)\n    @api.response(200, 'Successful')\n    def get(self):\n        '''\n        Get usage statistics 
of the API\n\n        Get the total usage statistics or only one day's statistics\n        '''\n\n        args = usage_parser.parse_args()\n\n        # retrieve the query parameters\n        period = args.get('period')\n        if period == 'yesterday':\n            json_str = prev_df.to_json(orient='index')\n\n            # convert the string JSON to a real JSON\n            prev_ds = json.loads(json_str)\n            return prev_ds\n        elif period == 'today':\n            json_str = cur_df.to_json(orient='index')\n\n            # convert the string JSON to a real JSON\n            current_ds = json.loads(json_str)\n            return current_ds\n        else:\n            json_str = total_df.to_json(orient='index')\n\n            # convert the string JSON to a real JSON\n            total_ds = json.loads(json_str)\n            return total_ds\n\n\n\nif __name__ == '__main__':\n    pd.core.frame.DataFrame.drop_prefix = drop_prefix\n    csv_file = \"player_formatted.csv\"\n    player_df = pd.read_csv(csv_file)\n    csv_market = \"market_value.csv\"\n    market_df = pd.read_csv(csv_market, encoding=\"latin1\")\n    csv_user = \"userdb.csv\"\n    user_df = pd.read_csv(csv_user)\n    csv_mlresult = \"df_output.csv\"\n    ml_df = pd.read_csv(csv_mlresult)\n    player_df.set_index('player_id', inplace=True)\n    market_df.set_index('player_id', inplace=True)\n    user_df.set_index('user_id', inplace=True)\n    ml_df.set_index('player_id', inplace=True)\n    csv_totl = \"total_usage.csv\"\n    csv_prev = \"prev_usage.csv\"\n    csv_cur = \"current_usage.csv\"\n    total_df = pd.read_csv(csv_totl)\n    prev_df = pd.read_csv(csv_prev)\n    cur_df = pd.read_csv(csv_cur)\n    total_df.set_index('route_name', inplace=True)\n    prev_df.set_index('route_name', inplace=True)\n    cur_df.set_index('route_name', inplace=True)\n    global refresh\n    refresh = 0\n    # run the application\n    app.run(host='0.0.0.0', debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"168052208","text":"import pandas as pd\nimport pymysql\n\nconn = pymysql.connect('ip', 'name', 'pass', 'dbname', charset='utf8')\nsql1 = 'select * from data'\n# select * from data\ns1 = pd.read_sql(sql1, conn)\n\n# select * from data limit 10\ns2 = s1.head(10)\n\n# select id from data\ns3 = s1['id']\n\n#select count(id) from data\ns4 = s1['id'].count()\n\n# select * from data where id < 1000 and age > 30\ns5 = s1[(s1['id'] < 1000) & (s1['age'] > 30)]\n\n\nsql2 = 'select * from table1'\nt1 = pd.read_sql(sql2,conn)\n# SELECT id,COUNT(DISTINCT order_id) FROM table1 GROUP BY id;\n# I haven't fully worked this one out yet: distinct count\ns6 = t1.groupby('id').aggregate({'id':'count', 'order_id':'count'})\n\nsql3 = 'select * from table2'\nt2 = pd.read_sql(sql3,conn)\n\n#SELECT * FROM table1 t1 INNER JOIN table2 t2 ON t1.id = t2.id\ns7 = pd.merge(t1, t2, on = 'id')\n\n# SELECT * FROM table1 UNION SELECT * FROM table2\ns8 = pd.merge(t1, t2, how = 'outer')\n\n#DELETE FROM table1 WHERE id=10\nids = t1['id']\ncols = [i for i in range(ids.size) if ids.iat[i] == 10]\ns9 = t1.drop(cols)\n\n#ALTER TABLE table1 DROP COLUMN column_name\ns10 = t1.drop(columns=['column_name'])\n","sub_path":"week04/pdsql.py","file_name":"pdsql.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"25022337","text":"'''\nTry using map\n'''\n\na = [1,2,3,4,5,6]\n\n#for i in map(lambda x:x+1,a):\n#    print(i)\ntest = list(map(lambda x:x+1,a))\nprint(test)\n\ndef addOne(p):\n    '''+1'''\n    return p+1\n\n#for i in map(addOne, a):\n#    print(i)\ntest2 = list(map(addOne, a))\nprint(test2)\n\n\n\n#\n# dict\n#\nd = {'a':1,'b':2,'c':3}\n#for i in map(lambda x: x[0] + 
str(x[1]), d.items()):\n# print(i)\ntest3 = list(map(lambda x: x[0] + str(x[1]), d.items()))\nprint(test3)\n\n\n#\n# generator\n#\n\ng = list((2*i*i for i in range(9)))\nprint(g)\ntest4 = list(map(lambda x: 2*x, (i*i for i in range(9))))\nprint(test4)\n\ntest5 = list(k+str(v) for k,v in d.items())\nprint(test5)","sub_path":"mapTest.py","file_name":"mapTest.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"321235600","text":"from django.urls import path\n\nfrom ProyectoWebApp import views\n\nurlpatterns = [\n path('', views.home, name=\"Home\"),\n path('tienda', views.tienda, name=\"Tienda\"),\n path('blog', views.blog, name=\"Blog\"),\n path('contacto', views.contacto, name=\"Contacto\"),\n path('login', views.login, name=\"Login\"),\n path('registro', views.registro, name=\"Registro\"),\n]","sub_path":"ProyectoWebApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"548211980","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('happenings', '0015_add_placeholder_is_published'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='eventpackage',\n name='call_to_action_label',\n field=models.CharField(default='Buy Now', max_length=50, verbose_name='call to action'),\n ),\n ]\n","sub_path":"happenings/migrations/0016_add_package_cta_label.py","file_name":"0016_add_package_cta_label.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"54761121","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2018 Kyoto University (Hirofumi Inaguma)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Attention-based RNN sequence-to-sequence model (including CTC).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport numpy as np\nimport torch\n\nfrom neural_sp.bin.train_utils import load_checkpoint\nfrom neural_sp.models.base import ModelBase\nfrom neural_sp.models.modules.embedding import Embedding\nfrom neural_sp.models.modules.linear import LinearND\nfrom neural_sp.models.lm.rnnlm import RNNLM\nfrom neural_sp.models.seq2seq.decoders.fwd_bwd_attention import fwd_bwd_attention\nfrom neural_sp.models.seq2seq.decoders.rnn import RNNDecoder\nfrom neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder\nfrom neural_sp.models.seq2seq.encoders.rnn import RNNEncoder\nfrom neural_sp.models.seq2seq.encoders.transformer import TransformerEncoder\nfrom neural_sp.models.seq2seq.frontends.sequence_summary import SequenceSummaryNetwork\nfrom neural_sp.models.seq2seq.frontends.frame_stacking import stack_frame\nfrom neural_sp.models.seq2seq.frontends.splicing import splice\nfrom neural_sp.models.torch_utils import np2tensor\nfrom neural_sp.models.torch_utils import pad_list\n\n\nlogger = logging.getLogger(\"training\")\n\n\nclass Seq2seq(ModelBase):\n \"\"\"Attention-based RNN sequence-to-sequence model (including CTC).\"\"\"\n\n def __init__(self, args):\n\n super(ModelBase, self).__init__()\n\n # for encoder\n self.input_type = args.input_type\n self.input_dim = args.input_dim\n self.n_stacks = args.n_stacks\n self.n_skips = args.n_skips\n self.n_splices = args.n_splices\n self.enc_type = args.enc_type\n self.enc_n_units = args.enc_n_units\n if args.enc_type in ['blstm', 'bgru']:\n self.enc_n_units *= 2\n self.bridge_layer = args.bridge_layer\n\n # for OOV resolution\n self.enc_n_layers = args.enc_n_layers\n self.enc_n_layers_sub1 = args.enc_n_layers_sub1\n self.subsample = [int(s) for s in args.subsample.split('_')]\n\n # for attention layer\n self.attn_n_heads = args.attn_n_heads\n\n # for decoder\n self.vocab = args.vocab\n self.vocab_sub1 = args.vocab_sub1\n self.vocab_sub2 = args.vocab_sub2\n self.blank = 0\n self.unk = 1\n self.eos = 2\n self.pad = 3\n # NOTE: reserved in advance\n\n # for the sub tasks\n self.main_weight = 1 - args.sub1_weight - args.sub2_weight\n self.sub1_weight = args.sub1_weight\n self.sub2_weight = args.sub2_weight\n self.mtl_per_batch = args.mtl_per_batch\n self.task_specific_layer = args.task_specific_layer\n\n # for CTC\n self.ctc_weight = min(args.ctc_weight, self.main_weight)\n self.ctc_weight_sub1 = min(args.ctc_weight_sub1, self.sub1_weight)\n self.ctc_weight_sub2 = min(args.ctc_weight_sub2, self.sub2_weight)\n\n # for backward decoder\n self.bwd_weight = min(args.bwd_weight, self.main_weight)\n self.fwd_weight = self.main_weight - self.bwd_weight - self.ctc_weight\n self.fwd_weight_sub1 = self.sub1_weight - self.ctc_weight_sub1\n self.fwd_weight_sub2 = self.sub2_weight - self.ctc_weight_sub2\n\n # Feature extraction\n self.ssn = None\n if args.sequence_summary_network:\n assert args.input_type == 'speech'\n self.ssn = SequenceSummaryNetwork(args.input_dim,\n n_units=512,\n n_layers=3,\n bottleneck_dim=100,\n dropout=0)\n\n # Encoder\n if args.enc_type == 'transformer':\n self.enc = TransformerEncoder(\n input_dim=args.input_dim if args.input_type == 'speech' else args.emb_dim,\n 
attn_type=args.transformer_attn_type,\n attn_n_heads=args.transformer_attn_n_heads,\n n_layers=args.transformer_enc_n_layers,\n d_model=args.d_model,\n d_ff=args.d_ff,\n # pe_type=args.pe_type,\n pe_type=False,\n dropout_in=args.dropout_in,\n dropout=args.dropout_enc,\n dropout_att=args.dropout_att,\n layer_norm_eps=args.layer_norm_eps,\n n_stacks=args.n_stacks,\n n_splices=args.n_splices,\n conv_in_channel=args.conv_in_channel,\n conv_channels=args.conv_channels,\n conv_kernel_sizes=args.conv_kernel_sizes,\n conv_strides=args.conv_strides,\n conv_poolings=args.conv_poolings,\n conv_batch_norm=args.conv_batch_norm,\n conv_residual=args.conv_residual,\n conv_bottleneck_dim=args.conv_bottleneck_dim)\n else:\n self.enc = RNNEncoder(\n input_dim=args.input_dim if args.input_type == 'speech' else args.emb_dim,\n rnn_type=args.enc_type,\n n_units=args.enc_n_units,\n n_projs=args.enc_n_projs,\n n_layers=args.enc_n_layers,\n n_layers_sub1=args.enc_n_layers_sub1,\n n_layers_sub2=args.enc_n_layers_sub2,\n dropout_in=args.dropout_in,\n dropout=args.dropout_enc,\n subsample=list(map(int, args.subsample.split('_'))) +\n [1] * (args.enc_n_layers - len(args.subsample.split('_'))),\n subsample_type=args.subsample_type,\n n_stacks=args.n_stacks,\n n_splices=args.n_splices,\n conv_in_channel=args.conv_in_channel,\n conv_channels=args.conv_channels,\n conv_kernel_sizes=args.conv_kernel_sizes,\n conv_strides=args.conv_strides,\n conv_poolings=args.conv_poolings,\n conv_batch_norm=args.conv_batch_norm,\n conv_residual=args.conv_residual,\n conv_bottleneck_dim=args.conv_bottleneck_dim,\n residual=args.enc_residual,\n nin=args.enc_nin,\n task_specific_layer=args.task_specific_layer)\n # NOTE: pure CNN/TDS encoders are also included\n\n if args.freeze_encoder:\n for p in self.enc.parameters():\n p.requires_grad = False\n\n # Bridge layer between the encoder and decoder\n self.is_bridge = False\n if (args.enc_type in ['conv', 'tds', 'gated_conv', 'transformer'] and args.ctc_weight < 1)or args.dec_type == 'transformer' or args.bridge_layer:\n self.bridge = LinearND(self.enc.output_dim,\n args.d_model if args.dec_type == 'transformer' else args.dec_n_units,\n dropout=args.dropout_enc)\n self.is_bridge = True\n if self.sub1_weight > 0:\n self.bridge_sub1 = LinearND(self.enc.output_dim, args.dec_n_units,\n dropout=args.dropout_enc)\n if self.sub2_weight > 0:\n self.bridge_sub2 = LinearND(self.enc.output_dim, args.dec_n_units,\n dropout=args.dropout_enc)\n self.enc_n_units = args.dec_n_units\n\n # main task\n directions = []\n if self.fwd_weight > 0 or self.ctc_weight > 0:\n directions.append('fwd')\n if self.bwd_weight > 0:\n directions.append('bwd')\n for dir in directions:\n # Cold fusion\n if args.lm_fusion and dir == 'fwd':\n lm = RNNLM(args.lm_conf)\n lm, _ = load_checkpoint(lm, args.lm_fusion)\n else:\n args.lm_conf = False\n lm = None\n # TODO(hirofumi): cold fusion for backward RNNLM\n\n # Decoder\n if args.dec_type == 'transformer':\n dec = TransformerDecoder(\n eos=self.eos,\n unk=self.unk,\n pad=self.pad,\n blank=self.blank,\n enc_n_units=self.enc.output_dim,\n attn_type=args.transformer_attn_type,\n attn_n_heads=args.transformer_attn_n_heads,\n n_layers=args.transformer_dec_n_layers,\n d_model=args.d_model,\n d_ff=args.d_ff,\n pe_type=args.pe_type,\n tie_embedding=args.tie_embedding,\n vocab=self.vocab,\n dropout=args.dropout_dec,\n dropout_emb=args.dropout_emb,\n dropout_att=args.dropout_att,\n lsm_prob=args.lsm_prob,\n layer_norm_eps=args.layer_norm_eps,\n ctc_weight=self.ctc_weight if dir == 'fwd' else 
0,\n ctc_fc_list=[int(fc) for fc in args.ctc_fc_list.split(\n '_')] if args.ctc_fc_list is not None and len(args.ctc_fc_list) > 0 else [],\n backward=(dir == 'bwd'),\n global_weight=self.main_weight - self.bwd_weight if dir == 'fwd' else self.bwd_weight,\n mtl_per_batch=args.mtl_per_batch)\n else:\n dec = RNNDecoder(\n eos=self.eos,\n unk=self.unk,\n pad=self.pad,\n blank=self.blank,\n enc_n_units=self.enc.output_dim,\n attn_type=args.attn_type,\n attn_dim=args.attn_dim,\n attn_sharpening_factor=args.attn_sharpening,\n attn_sigmoid_smoothing=args.attn_sigmoid,\n attn_conv_out_channels=args.attn_conv_n_channels,\n attn_conv_kernel_size=args.attn_conv_width,\n attn_n_heads=args.attn_n_heads,\n rnn_type=args.dec_type,\n n_units=args.dec_n_units,\n n_projs=args.dec_n_projs,\n n_layers=args.dec_n_layers,\n loop_type=args.dec_loop_type,\n residual=args.dec_residual,\n bottleneck_dim=args.dec_bottleneck_dim,\n emb_dim=args.emb_dim,\n tie_embedding=args.tie_embedding,\n vocab=self.vocab,\n dropout=args.dropout_dec,\n dropout_emb=args.dropout_emb,\n dropout_att=args.dropout_att,\n ss_prob=args.ss_prob,\n ss_type=args.ss_type,\n lsm_prob=args.lsm_prob,\n fl_weight=args.focal_loss_weight,\n fl_gamma=args.focal_loss_gamma,\n ctc_weight=self.ctc_weight if dir == 'fwd' else 0,\n ctc_fc_list=[int(fc) for fc in args.ctc_fc_list.split(\n '_')] if args.ctc_fc_list is not None and len(args.ctc_fc_list) > 0 else [],\n input_feeding=args.input_feeding,\n backward=(dir == 'bwd'),\n # lm=args.lm_conf,\n lm=lm, # TODO(hirofumi): load RNNLM in the model init.\n lm_fusion_type=args.lm_fusion_type,\n contextualize=args.contextualize,\n lm_init=args.lm_init,\n lmobj_weight=args.lmobj_weight,\n share_lm_softmax=args.share_lm_softmax,\n global_weight=self.main_weight - self.bwd_weight if dir == 'fwd' else self.bwd_weight,\n mtl_per_batch=args.mtl_per_batch,\n adaptive_softmax=args.adaptive_softmax)\n setattr(self, 'dec_' + dir, dec)\n\n # sub task\n for sub in ['sub1', 'sub2']:\n if getattr(self, sub + '_weight') > 0:\n if args.dec_type == 'transformer':\n raise NotImplementedError\n else:\n dec_sub = RNNDecoder(\n eos=self.eos,\n unk=self.unk,\n pad=self.pad,\n blank=self.blank,\n enc_n_units=self.enc_n_units,\n attn_type=args.attn_type,\n attn_dim=args.attn_dim,\n attn_sharpening_factor=args.attn_sharpening,\n attn_sigmoid_smoothing=args.attn_sigmoid,\n attn_conv_out_channels=args.attn_conv_n_channels,\n attn_conv_kernel_size=args.attn_conv_width,\n attn_n_heads=1,\n rnn_type=args.dec_type,\n n_units=args.dec_n_units,\n n_projs=args.dec_n_projs,\n n_layers=args.dec_n_layers,\n loop_type=args.dec_loop_type,\n residual=args.dec_residual,\n bottleneck_dim=args.dec_bottleneck_dim,\n emb_dim=args.emb_dim,\n tie_embedding=args.tie_embedding,\n vocab=getattr(self, 'vocab_' + sub),\n dropout=args.dropout_dec,\n dropout_emb=args.dropout_emb,\n dropout_att=args.dropout_att,\n ss_prob=args.ss_prob,\n ss_type=args.ss_type,\n lsm_prob=args.lsm_prob,\n fl_weight=args.focal_loss_weight,\n fl_gamma=args.focal_loss_gamma,\n ctc_weight=getattr(self, 'ctc_weight_' + sub),\n ctc_fc_list=[int(fc) for fc in getattr(args, 'ctc_fc_list_' + sub).split('_')\n ] if getattr(args, 'ctc_fc_list_' + sub) is not None and len(getattr(args, 'ctc_fc_list_' + sub)) > 0 else [],\n input_feeding=args.input_feeding,\n global_weight=getattr(self, sub + '_weight'),\n mtl_per_batch=args.mtl_per_batch)\n setattr(self, 'dec_fwd_' + sub, dec_sub)\n\n if args.input_type == 'text':\n if args.vocab == args.vocab_sub1:\n # Share the embedding layer between input 
and output\n self.embed_in = dec.embed\n else:\n self.embed_in = Embedding(vocab=args.vocab_sub1,\n emb_dim=args.emb_dim,\n dropout=args.dropout_emb,\n ignore_index=self.pad)\n\n # Initialize parameters in CNN layers\n self.reset_parameters(args.param_init,\n # dist='xavier_uniform',\n # dist='kaiming_uniform',\n dist='lecun',\n keys=['conv'], ignore_keys=['score'])\n\n # Initialize parameters in the encoder\n if args.enc_type == 'transformer':\n self.reset_parameters(args.param_init, dist='xavier_uniform',\n keys=['enc'], ignore_keys=['embed_in'])\n self.reset_parameters(args.d_model**-0.5, dist='normal',\n keys=['embed_in'])\n else:\n self.reset_parameters(args.param_init, dist=args.param_init_dist,\n keys=['enc'], ignore_keys=['conv'])\n\n # Initialize parameters in the decoder\n if args.dec_type == 'transformer':\n self.reset_parameters(args.param_init, dist='xavier_uniform',\n keys=['dec'], ignore_keys=['embed'])\n self.reset_parameters(args.d_model**-0.5, dist='normal',\n keys=['embed'])\n else:\n self.reset_parameters(args.param_init, dist=args.param_init_dist,\n keys=['dec'])\n\n # Initialize bias vectors with zero\n self.reset_parameters(0, dist='constant', keys=['bias'])\n\n # Recurrent weights are orthogonalized\n if args.rec_weight_orthogonal:\n self.reset_parameters(args.param_init, dist='orthogonal',\n keys=['rnn', 'weight'])\n\n # Initialize bias in forget gate with 1\n # self.init_forget_gate_bias_with_one()\n\n # Initialize bias in gating with -1 for cold fusion\n if args.lm_fusion:\n self.reset_parameters(-1, dist='constant', keys=['linear_lm_gate.fc.bias'])\n\n if args.lm_fusion_type == 'deep' and args.lm_fusion:\n for n, p in self.named_parameters():\n if 'output' in n or 'output_bn' in n or 'linear' in n:\n p.requires_grad = True\n else:\n p.requires_grad = False\n\n def scheduled_sampling_trigger(self):\n # main task\n directions = []\n if self.fwd_weight > 0:\n directions.append('fwd')\n if self.bwd_weight > 0:\n directions.append('bwd')\n for dir in directions:\n getattr(self, 'dec_' + dir).start_scheduled_sampling()\n\n # sub task\n for sub in ['sub1', 'sub2']:\n if getattr(self, sub + '_weight') > 0:\n directions = []\n if getattr(self, 'fwd_weight_' + sub) > 0:\n directions.append('fwd')\n for dir_sub in directions:\n getattr(self, 'dec_' + dir_sub + '_' + sub).start_scheduled_sampling()\n\n def forward(self, batch, reporter=None, task='all', is_eval=False):\n \"\"\"Forward computation.\n\n Args:\n batch (dict):\n xs (list): input data of size `[T, input_dim]`\n xlens (list): lengths of each element in xs\n ys (list): reference labels in the main task of size `[L]`\n ys_sub1 (list): reference labels in the 1st auxiliary task of size `[L_sub1]`\n ys_sub2 (list): reference labels in the 2nd auxiliary task of size `[L_sub2]`\n utt_ids (list): name of utterances\n speakers (list): name of speakers\n reporter ():\n task (str): all or ys* or ys_sub*\n is_eval (bool): the history will not be saved.\n This should be used in inference model for memory efficiency.\n Returns:\n loss (FloatTensor): `[1]`\n reporter ():\n\n \"\"\"\n if is_eval:\n self.eval()\n with torch.no_grad():\n loss, reporter = self._forward(batch, task, reporter)\n else:\n self.train()\n loss, reporter = self._forward(batch, task, reporter)\n\n return loss, reporter\n\n def _forward(self, batch, task, reporter):\n # Encode input features\n if self.input_type == 'speech':\n if self.mtl_per_batch:\n flip = True if 'bwd' in task else False\n enc_outs = self.encode(batch['xs'], task, flip=flip)\n else:\n 
flip = True if self.bwd_weight == 1 else False\n enc_outs = self.encode(batch['xs'], 'all', flip=flip)\n else:\n enc_outs = self.encode(batch['ys_sub1'])\n\n observation = {}\n loss = torch.zeros((1,), dtype=torch.float32).cuda(self.device_id)\n\n # for the forward decoder in the main task\n if (self.fwd_weight > 0 or self.ctc_weight > 0) and task in ['all', 'ys', 'ys.ctc', 'ys.lmobj']:\n loss_fwd, obs_fwd = self.dec_fwd(enc_outs['ys']['xs'], enc_outs['ys']\n ['xlens'], batch['ys'], task, batch['ys_hist'])\n loss += loss_fwd\n observation['loss.att'] = obs_fwd['loss_att']\n observation['loss.ctc'] = obs_fwd['loss_ctc']\n observation['loss.lmobj'] = obs_fwd['loss_lmobj']\n observation['acc.att'] = obs_fwd['acc_att']\n observation['acc.lmobj'] = obs_fwd['acc_lmobj']\n observation['ppl.att'] = obs_fwd['ppl_att']\n observation['ppl.lmobj'] = obs_fwd['ppl_lmobj']\n\n # for the backward decoder in the main task\n if self.bwd_weight > 0 and task in ['all', 'ys.bwd']:\n loss_bwd, obs_bwd = self.dec_bwd(enc_outs['ys']['xs'], enc_outs['ys']['xlens'], batch['ys'], task)\n loss += loss_bwd\n observation['loss.att-bwd'] = obs_bwd['loss_att']\n observation['loss.ctc-bwd'] = obs_bwd['loss_ctc']\n observation['loss.lmobj-bwd'] = obs_bwd['loss_lmobj']\n observation['acc.att-bwd'] = obs_bwd['acc_att']\n observation['acc.lmobj-bwd'] = obs_bwd['acc_lmobj']\n observation['ppl.att-bwd'] = obs_bwd['ppl_att']\n observation['ppl.lmobj-bwd'] = obs_bwd['ppl_lmobj']\n\n # only fwd for sub tasks\n for sub in ['sub1', 'sub2']:\n # for the forward decoder in the sub tasks\n if (getattr(self, 'fwd_weight_' + sub) > 0 or getattr(self, 'ctc_weight_' + sub) > 0) and task in ['all', 'ys_' + sub, 'ys_' + sub + '.ctc', 'ys_' + sub + '.lmobj']:\n loss_sub, obs_fwd_sub = getattr(self, 'dec_fwd_' + sub)(\n enc_outs['ys_' + sub]['xs'], enc_outs['ys_' + sub]['xlens'], batch['ys_' + sub], task)\n loss += loss_sub\n observation['loss.att-' + sub] = obs_fwd_sub['loss_att']\n observation['loss.ctc-' + sub] = obs_fwd_sub['loss_ctc']\n observation['loss.lmobj-' + sub] = obs_fwd_sub['loss_lmobj']\n observation['acc.att-' + sub] = obs_fwd_sub['acc_att']\n observation['acc.lmobj-' + sub] = obs_fwd_sub['acc_lmobj']\n observation['ppl.att-' + sub] = obs_fwd_sub['ppl_att']\n observation['ppl.lmobj-' + sub] = obs_fwd_sub['ppl_lmobj']\n\n if reporter is not None:\n is_eval = not self.training\n reporter.add(observation, is_eval)\n\n return loss, reporter\n\n def encode(self, xs, task='all', flip=False):\n \"\"\"Encode acoustic or text features.\n\n Args:\n xs (list): A list of length `[B]`, which contains Tensor of size `[T, input_dim]`\n task (str): all or ys* or ys_sub1* or ys_sub2*\n flip (bool): if True, flip acoustic features in the time-dimension\n Returns:\n enc_outs (dict):\n\n \"\"\"\n if 'lmobj' in task:\n eouts = {'ys': {'xs': None, 'xlens': None},\n 'ys_sub1': {'xs': None, 'xlens': None},\n 'ys_sub2': {'xs': None, 'xlens': None}}\n return eouts\n else:\n if self.input_type == 'speech':\n # Frame stacking\n if self.n_stacks > 1:\n xs = [stack_frame(x, self.n_stacks, self.n_skips)for x in xs]\n\n # Splicing\n if self.n_splices > 1:\n xs = [splice(x, self.n_splices, self.n_stacks) for x in xs]\n\n xlens = [len(x) for x in xs]\n # Flip acoustic features in the reverse order\n if flip:\n xs = [torch.from_numpy(np.flip(x, axis=0).copy()).float().cuda(self.device_id) for x in xs]\n else:\n xs = [np2tensor(x, self.device_id).float() for x in xs]\n xs = pad_list(xs, 0.0)\n\n elif self.input_type == 'text':\n xlens = [len(x) for x in xs]\n xs 
= [np2tensor(np.fromiter(x, dtype=np.int64), self.device_id).long() for x in xs]\n xs = pad_list(xs, self.pad)\n xs = self.embed_in(xs)\n\n # sequence summary network\n if self.ssn is not None:\n xs += self.ssn(xs, xlens)\n\n # encoder\n enc_outs = self.enc(xs, xlens, task.split('.')[0])\n\n if self.main_weight < 1 and self.enc_type in ['conv', 'tds', 'gated_conv', 'transformer']:\n for sub in ['sub1', 'sub2']:\n enc_outs['ys_' + sub]['xs'] = enc_outs['ys']['xs'].clone()\n enc_outs['ys_' + sub]['xlens'] = enc_outs['ys']['xlens'][:]\n\n # Bridge between the encoder and decoder\n if self.main_weight > 0 and self.is_bridge:\n enc_outs['ys']['xs'] = self.bridge(enc_outs['ys']['xs'])\n if self.sub1_weight > 0 and self.is_bridge:\n enc_outs['ys_sub1']['xs'] = self.bridge_sub1(enc_outs['ys_sub1']['xs'])\n if self.sub2_weight > 0 and self.is_bridge:\n enc_outs['ys_sub2']['xs'] = self.bridge_sub2(enc_outs['ys_sub2']['xs'])\n\n return enc_outs\n\n def get_ctc_probs(self, xs, task='ys', temperature=1, topk=None):\n self.eval()\n with torch.no_grad():\n enc_outs = self.encode(xs, task)\n dir = 'fwd' if self.fwd_weight >= self.bwd_weight else 'bwd'\n if task == 'ys_sub1':\n dir += '_sub1'\n elif task == 'ys_sub2':\n dir += '_sub2'\n\n if task == 'ys':\n assert self.ctc_weight > 0\n elif task == 'ys_sub1':\n assert self.ctc_weight_sub1 > 0\n elif task == 'ys_sub2':\n assert self.ctc_weight_sub2 > 0\n ctc_probs, indices_topk = getattr(self, 'dec_' + dir).ctc_probs_topk(\n enc_outs[task]['xs'], temperature, topk)\n return ctc_probs, indices_topk, enc_outs[task]['xlens']\n\n def decode(self, xs, params, idx2token, nbest=1, exclude_eos=False,\n refs_id=None, refs_text=None, utt_ids=None, speakers=None,\n task='ys', ensemble_models=[]):\n \"\"\"Decoding in the inference stage.\n\n Args:\n xs (list): A list of length `[B]`, which contains arrays of size `[T, input_dim]`\n params (dict): hyper-parameters for decoding\n beam_width (int): the size of beam\n min_len_ratio (float):\n max_len_ratio (float):\n len_penalty (float): length penalty\n cov_penalty (float): coverage penalty\n cov_threshold (float): threshold for coverage penalty\n lm_weight (float): the weight of RNNLM score\n resolving_unk (bool): not used (to make compatible)\n fwd_bwd_attention (bool):\n idx2token (): converter from index to token\n nbest (int):\n exclude_eos (bool): exclude <eos> from best_hyps_id\n refs_id (list): gold token IDs to compute log likelihood\n refs_text (list): gold transcriptions\n utt_ids (list):\n speakers (list):\n task (str): ys* or ys_sub1* or ys_sub2*\n ensemble_models (list): list of Seq2seq classes\n Returns:\n best_hyps_id (list): A list of length `[B]`, which contains arrays of size `[L]`\n aws (list): A list of length `[B]`, which contains arrays of size `[L, T, n_heads]`\n\n \"\"\"\n self.eval()\n with torch.no_grad():\n if task.split('.')[0] == 'ys':\n dir = 'bwd' if self.bwd_weight > 0 and params['recog_bwd_attention'] else 'fwd'\n elif task.split('.')[0] == 'ys_sub1':\n dir = 'fwd_sub1'\n elif task.split('.')[0] == 'ys_sub2':\n dir = 'fwd_sub2'\n else:\n raise ValueError(task)\n\n # encode\n if self.input_type == 'speech' and self.mtl_per_batch and 'bwd' in dir:\n enc_outs = self.encode(xs, task, flip=True)\n else:\n enc_outs = self.encode(xs, task, flip=False)\n\n #########################\n # CTC\n #########################\n if (self.fwd_weight == 0 and self.bwd_weight == 0) or (self.ctc_weight > 0 and params['recog_ctc_weight'] == 1):\n lm = None\n if params['recog_lm_weight'] > 0 and hasattr(self, 
'lm_fwd') and self.lm_fwd is not None:\n lm = getattr(self, 'lm_' + dir)\n\n best_hyps_id = getattr(self, 'dec_' + dir).decode_ctc(\n enc_outs[task]['xs'], enc_outs[task]['xlens'],\n params['recog_beam_width'], lm, params['recog_lm_weight'])\n return best_hyps_id, None, (None, None)\n\n #########################\n # Attention\n #########################\n else:\n cache_info = (None, None)\n\n if params['recog_beam_width'] == 1 and not params['recog_fwd_bwd_attention']:\n best_hyps_id, aws = getattr(self, 'dec_' + dir).greedy(\n enc_outs[task]['xs'], enc_outs[task]['xlens'],\n params['recog_max_len_ratio'], exclude_eos, idx2token, refs_id,\n speakers, params['recog_oracle'])\n else:\n assert params['recog_batch_size'] == 1\n\n ctc_log_probs = None\n if params['recog_ctc_weight'] > 0:\n ctc_log_probs = self.dec_fwd.ctc_log_probs(enc_outs[task]['xs'])\n\n # forward-backward decoding\n if params['recog_fwd_bwd_attention']:\n # forward decoder\n lm_fwd, lm_bwd = None, None\n if params['recog_lm_weight'] > 0 and hasattr(self, 'lm_fwd') and self.lm_fwd is not None:\n lm_fwd = self.lm_fwd\n if params['recog_reverse_lm_rescoring'] and hasattr(self, 'lm_bwd') and self.lm_bwd is not None:\n lm_bwd = self.lm_bwd\n\n # ensemble (forward)\n ensmbl_eouts_fwd = []\n ensmbl_elens_fwd = []\n ensmbl_decs_fwd = []\n if len(ensemble_models) > 0:\n for i_e, model in enumerate(ensemble_models):\n enc_outs_e_fwd = model.encode(xs, task, flip=False)\n ensmbl_eouts_fwd += [enc_outs_e_fwd[task]['xs']]\n ensmbl_elens_fwd += [enc_outs_e_fwd[task]['xlens']]\n ensmbl_decs_fwd += [model.dec_fwd]\n # NOTE: only support for the main task now\n\n nbest_hyps_id_fwd, aws_fwd, scores_fwd, cache_info = self.dec_fwd.beam_search(\n enc_outs[task]['xs'], enc_outs[task]['xlens'],\n params, idx2token, lm_fwd, lm_bwd, ctc_log_probs,\n params['recog_beam_width'], False, refs_id, utt_ids, speakers,\n ensmbl_eouts_fwd, ensmbl_elens_fwd, ensmbl_decs_fwd)\n\n # backward decoder\n lm_bwd, lm_fwd = None, None\n if params['recog_lm_weight'] > 0 and hasattr(self, 'lm_bwd') and self.lm_bwd is not None:\n lm_bwd = self.lm_bwd\n if params['recog_reverse_lm_rescoring'] and hasattr(self, 'lm_fwd') and self.lm_fwd is not None:\n lm_fwd = self.lm_fwd\n\n # ensemble (backward)\n ensmbl_eouts_bwd = []\n ensmbl_elens_bwd = []\n ensmbl_decs_bwd = []\n if len(ensemble_models) > 0:\n for i_e, model in enumerate(ensemble_models):\n if self.input_type == 'speech' and self.mtl_per_batch:\n enc_outs_e_bwd = model.encode(xs, task, flip=True)\n else:\n enc_outs_e_bwd = model.encode(xs, task, flip=False)\n ensmbl_eouts_bwd += [enc_outs_e_bwd[task]['xs']]\n ensmbl_elens_bwd += [enc_outs_e_bwd[task]['xlens']]\n ensmbl_decs_bwd += [model.dec_bwd]\n # NOTE: only support for the main task now\n # TODO(hirofumi): merge with the forward for the efficiency\n\n flip = False\n if self.input_type == 'speech' and self.mtl_per_batch:\n flip = True\n enc_outs_bwd = self.encode(xs, task, flip=True)\n else:\n enc_outs_bwd = enc_outs\n nbest_hyps_id_bwd, aws_bwd, scores_bwd, _ = self.dec_bwd.beam_search(\n enc_outs_bwd[task]['xs'], enc_outs[task]['xlens'],\n params, idx2token, lm_bwd, lm_fwd, ctc_log_probs,\n params['recog_beam_width'], False, refs_id, utt_ids, speakers,\n ensmbl_eouts_bwd, ensmbl_elens_bwd, ensmbl_decs_bwd)\n\n # forward-backward attention\n best_hyps_id = fwd_bwd_attention(\n nbest_hyps_id_fwd, aws_fwd, scores_fwd,\n nbest_hyps_id_bwd, aws_bwd, scores_bwd,\n flip, self.eos, params['recog_gnmt_decoding'], params['recog_length_penalty'],\n idx2token, refs_id)\n 
aws = None\n else:\n # ensemble\n ensmbl_eouts = []\n ensmbl_elens = []\n ensmbl_decs = []\n if len(ensemble_models) > 0:\n for i_e, model in enumerate(ensemble_models):\n if model.input_type == 'speech' and model.mtl_per_batch and 'bwd' in dir:\n enc_outs_e = model.encode(xs, task, flip=True)\n else:\n enc_outs_e = model.encode(xs, task, flip=False)\n ensmbl_eouts += [enc_outs_e[task]['xs']]\n ensmbl_elens += [enc_outs_e[task]['xlens']]\n ensmbl_decs += [getattr(model, 'dec_' + dir)]\n # NOTE: only support for the main task now\n\n lm, lm_rev = None, None\n if params['recog_lm_weight'] > 0 and hasattr(self, 'lm_' + dir) and getattr(self, 'lm_' + dir) is not None:\n lm = getattr(self, 'lm_' + dir)\n if params['recog_reverse_lm_rescoring']:\n if dir == 'fwd':\n lm_rev = self.lm_bwd\n else:\n raise NotImplementedError\n\n nbest_hyps_id, aws, scores, cache_info = getattr(self, 'dec_' + dir).beam_search(\n enc_outs[task]['xs'], enc_outs[task]['xlens'],\n params, idx2token, lm, lm_rev, ctc_log_probs,\n nbest, exclude_eos, refs_id, utt_ids, speakers,\n ensmbl_eouts, ensmbl_elens, ensmbl_decs)\n\n if nbest == 1:\n best_hyps_id = [hyp[0] for hyp in nbest_hyps_id]\n aws = [aw[0] for aw in aws]\n else:\n return nbest_hyps_id, aws, scores, cache_info\n # NOTE: nbest >= 2 is used for MWER training only\n\n return best_hyps_id, aws, cache_info\n","sub_path":"neural_sp/models/seq2seq/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":35874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"5364788","text":"def json_value(json_dict, search_string):\n \"\"\" (dict, str) -> obj\n Return the value in the json given a formatted json dictionary and a search\n string given in dot notation. Returns None if search_string does not exist\n in json dictionary.\n \"\"\"\n result = json_dict\n search_tokens = search_string.split(\".\")\n # loop through all tokens\n for token in search_tokens:\n # try accessing the element\n try:\n result = result[token]\n # a TypeError means the current value is a list, not a dict\n except TypeError:\n # try accessing the list that wraps around the dictionary\n try:\n result = result[0]\n # then access the element\n result = result[token]\n except (IndexError, KeyError):\n return None\n return result\n","sub_path":"engine/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
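+{"seq_id":"0000000001","text":"# A minimal usage sketch for json_value from engine/helper.py above; the payload\n# is hypothetical and only illustrates the dict/list shapes the helper handles.\n# doc = {\"user\": {\"emails\": [{\"address\": \"a@b.c\"}]}}\n# json_value(doc, \"user.emails.address\") # -> \"a@b.c\": indexing the list with a str key\n# # raises TypeError, so the list is unwrapped via result[0]\n# json_value(doc, \"user.emails.missing\") # -> None: the KeyError after unwrapping is caught\n","sub_path":"engine/helper_usage_sketch.py","file_name":"helper_usage_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}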
+{"seq_id":"290782332","text":"'''\nClassify a triangle by its side lengths: right (rectangular), acute, or obtuse\n'''\n\nimport math\n\na = int(input())\nb = int(input())\nc = int(input())\nif a + b <= c or a + c <= b or b + c <= a or a + b + c <= 0:\n print('impossible')\nelse:\n # order the sides so that a is the longest\n a, b, c = sorted((a, b, c), reverse=True)\n if a ** 2 == b ** 2 + c ** 2:\n print('rectangular')\n else:\n alpha = math.acos((b * b + c * c - a * a) / (2 * b * c))\n if alpha < math.pi / 2:\n print('acute')\n else:\n print('obtuse')\n","sub_path":"Coursera/Week.2/Task.13.py","file_name":"Task.13.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"100801432","text":"\"\"\"\nBackward and forward\n====================\n\nThe sign outside reads: Name no one man.\n\n\"Escape. We must escape.\" Staring at the locked door of his cage, Beta Rabbit,\nspy and brilliant mathematician, has a revelation. \"Of course! Name no one\nman - it's a palindrome! Palindromes are the key to opening this lock!\"\n\nTo help Beta Rabbit crack the lock, write a function answer(n) which returns\nthe smallest positive integer base b, at least 2, in which the integer n is a\npalindrome. The input n will satisfy \"0 <= n <= 1000.\"\n\nLanguages\n=========\n\nTo provide a Python solution, edit solution.py\nTo provide a Java solution, edit solution.java\n\nTest cases\n==========\n\nInputs:\n (int) n = 0\nOutput:\n (int) 2\n\nInputs:\n (int) n = 42\nOutput:\n (int) 4\n\nUse verify [file] to test your solution and see how it does. When you are finished editing your code, use submit [file] to submit your answer. If your solution passes the test cases, it will be removed from your home folder.\n\"\"\"\n\ndef change_base(dec, base):\n digits = []\n while dec > 0:\n rem = dec % base\n dec //= base # floor division: plain / would yield a float on Python 3\n digits.append(rem)\n return digits\n\ndef is_palindrome(nums):\n palindrome = True\n L = len(nums)\n for i in range(0, L // 2): # integer division: range() rejects floats on Python 3\n if nums[i] != nums[L-1-i]:\n palindrome = False\n break\n return palindrome\n\ndef answer(n):\n for b in range(2, 1002):\n digits = change_base(n, b)\n palindrome = is_palindrome(digits)\n if palindrome:\n return b\n return -1\n\nif __name__ == \"__main__\":\n print(answer(42)) # answer() requires an argument; exercise the documented test case\n","sub_path":"Google_Foobar/L1_backward_and_forward.py","file_name":"L1_backward_and_forward.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
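+{"seq_id":"0000000002","text":"# Worked trace of answer(42) from L1_backward_and_forward.py above, matching its\n# documented test case (change_base returns digits least-significant first):\n# change_base(42, 2) -> [0, 1, 0, 1, 0, 1] -> not a palindrome\n# change_base(42, 3) -> [0, 2, 1, 1] -> not a palindrome\n# change_base(42, 4) -> [2, 2, 2] -> palindrome, so answer(42) == 4\n# Edge case: change_base(0, b) -> [], and is_palindrome([]) is True, so answer(0) == 2.\n","sub_path":"Google_Foobar/L1_worked_trace.py","file_name":"L1_worked_trace.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}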
+{"seq_id":"130560942","text":"def answer(increase, decrease):\n ans = []\n for inc, dec in zip(increase, decrease):\n ans.append(inc + dec)\n return max(ans) - 1\n\nN = int(input())\nsequence = [int(n) for n in input().split()]\n\nincrease = [0] * N\ndecrease = [0] * N\nincrease[0] = 1\ndecrease[0] = 1\n\nfor i in range(1, N):\n dvalue = 0\n ivalue = 0\n for j in range(i):\n if(sequence[i] > sequence[j]):\n dvalue = max(decrease[j], dvalue)\n for j in range(N - i, N):\n if(sequence[N - i] > sequence[j]):\n ivalue = max(increase[j], ivalue)\n decrease[i] = dvalue + 1\n increase[N - i] = ivalue + 1\n\nprint(answer(decrease, increase))\n","sub_path":"python/backjoon/Q11054/Q11054.py","file_name":"Q11054.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"589749096","text":"import baostock as bs\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef refresh_all_stock(current_date = \"2020-03-27\"):\n db_conn = create_engine('sqlite:///mystock.db')\n lg = bs.login()\n rs = bs.query_all_stock(day=current_date)\n\n data_list = []\n while (rs.error_code == '0') & rs.next():\n data_list.append(rs.get_row_data())\n db_conn.execute(r'''\n INSERT OR REPLACE INTO allstock VALUES (?, ?, ?)\n ''', data_list)\n\ndef refresh_stock_day_k(code=\"sh.000001\",start_date=\"2020-03-27\",current_date = \"2020-03-30\"):\n bs.login()\n\n data_list = []\n k_rs = bs.query_history_k_data_plus(code,\n \"date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,pctChg,peTTM,pbMRQ,psTTM,pcfNcfTTM,isST\",\n start_date=start_date, end_date='', \n frequency=\"d\", adjustflag=\"2\")\n while (k_rs.error_code == '0') & k_rs.next():\n data_list.append(k_rs.get_row_data())\n print('query_history_k_data_plus code:'+code)\n print(len(data_list))\n bs.logout()\n db_conn = create_engine('sqlite:///mystock.db')\n\n db_conn.execute(r'''\n INSERT OR REPLACE INTO stock_day_k VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\n ''', data_list)\n\ndef refresh_all_stock_day_k(start_date=\"2020-03-27\",current_date = \"2020-03-30\"):\n bs.login()\n\n stock_rs = bs.query_all_stock(day=current_date)\n stock_df = stock_rs.get_data()\n data_list = []\n for code in stock_df[\"code\"]:\n k_rs = bs.query_history_k_data_plus(code,\n \"date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,pctChg,peTTM,pbMRQ,psTTM,pcfNcfTTM,isST\",\n start_date=start_date, end_date='', \n frequency=\"d\", adjustflag=\"2\")\n while (k_rs.error_code == '0') & k_rs.next():\n data_list.append(k_rs.get_row_data())\n print('query_history_k_data_plus code:'+code)\n bs.logout()\n db_conn = create_engine('sqlite:///mystock.db')\n\n db_conn.execute(r'''\n INSERT OR REPLACE INTO stock_day_k VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\n ''', data_list)\n\nif __name__ == '__main__':\n #refresh_all_stock(\"2020-04-20\")\n refresh_stock_day_k(\"sh.000001\", \"2020-04-20\", \"2020-04-27\")\n #refresh_all_stock_day_k()","sub_path":"parse_stock_data.py","file_name":"parse_stock_data.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"207208690","text":"\"\"\"\nleetcode 93 -- Restore IP Addresses\n\"\"\"\n\nclass Solution(object):\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n depth = 1\n result_list = []\n self.dfs(s, 0, depth, [], result_list)\n IP_list = self.transformToIP(result_list)\n return IP_list\n\n def dfs(self, s, pointer,depth, temp_list, results):\n if pointer >= len(s):\n return\n result = []\n if depth == 4:\n if int(s[pointer:]) <= 255 and (pointer == len(s)-1 or s[pointer] != \"0\"):\n results.append(temp_list+[s[pointer:]])\n else: \n for length in range(1, 4):\n if int(s[pointer:pointer+length]) <= 255 and (length == 1 or s[pointer] != \"0\"):\n self.dfs(s, pointer+length, depth+1, temp_list+[s[pointer:pointer+length]], results)\n #result.append([s[pointer:pointer+length]]+ x for x in self.dfs(s, pointer+length, depth+1) )\n \n \n def transformToIP(self, result_list):\n IP_list = []\n for x in result_list:\n IP_list.append('.'.join(x))\n return IP_list \n\ndef test_case():\n\ts = \"0000\"\n\tprint(Solution().restoreIpAddresses(s)) # -> ['0.0.0.0']\n","sub_path":"python/restore_ip_address.py","file_name":"restore_ip_address.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"360081797","text":"# Scraping electricity interchange between balancing authorities\n\n# Hello! 
The purpose of this script is to scrape interchange data between balancing authorities.\n# Find out more about the interchange data here: https://www.eia.gov/opendata/qb.php?category=2123637\n# As I understand it, the API is updated regularly, so depending on when you run this script, your exported data file might have more data than an earlier version.\n\n# Please note that this script was not created with the intention of being applicable to all datasets in the EIA Open Data API.\n# You might be able to use it as a basis, but I really can't say it this will be useful for downloading any of the other datasets.\n\n# To run the script, you'll need to have an API key from the EIA.\n# If you don't have one, request it here: https://www.eia.gov/opendata/register.php\n\n# Change the other inputs (save_path, save_file) as well!\n\n# This might take a while to run, so be patient and don't panic if you don't get your CSV file right away.\n# For reference, it takes my laptop anywhere between 436 seconds to 654 seconds to finish running the script.\n\n# import libraries\nimport pandas as pd\nimport requests\nimport re\nimport os\n\n# input variables\napikey = '' # put your api key here\nsave_path = '' # directory where you want the csv file with interchange data to be saved\nsave_file = '' # name for csv file with interchange data\n\n# get interchange data\ncat_id = '2123637'\ncat_url = 'http://api.eia.gov/category/?api_key=' + apikey + '&category_id=' + cat_id\ncat_json = requests.get(cat_url).json()\n\n# get balancing authority children categories\nba_cat = pd.DataFrame(cat_json['category']['childcategories'][14:80]).category_id.unique()\n\n# loop to read in each balancing authority child category as separate dataframe\n# create list of dataframes\napp_df = []\nfor j in ba_cat:\n ba_url = 'http://api.eia.gov/category/?api_key=' + apikey + '&category_id=' + str(j)\n ba_json = requests.get(ba_url).json()\n res_id = pd.DataFrame(ba_json['category']['childseries']).series_id.unique()\n res_id = [item for item in res_id if item.endswith('.H')]\n for i in res_id:\n series_url = 'http://api.eia.gov/series/?api_key=' + apikey + '&series_id=' + str(i)\n series_res = requests.get(series_url).json()\n series_name = series_res['series'][0]['name']\n ba_from = series_name.split(' to ')[0]\n ba_to = series_name.split(' to ')[1]\n ba_from = re.search('\\(([^)]+)', ba_from).group(1)\n ba_to = re.search('\\(([^)]+)', ba_to).group(1)\n df = pd.DataFrame(series_res['series'][0]['data'], columns = ['datetime', 'mwh'])\n df['ba_from'] = ba_from\n df['ba_to'] = ba_to\n app_df.append(df) # append new dataframe to list of dataframes\n\n# concatenate the list of dataframes together as single dataframe\ndf_all = pd.concat(app_df)\n\n# split the EIA-formated datatime column into two separate columns (one for date, one for time)\ndf_all[['date','time']] = df_all['datetime'].str.split('T',expand = True)\n\n# remove the Z at the end of the time column (replace it with a blank)\ndf_all['time'] = df_all['time'].str.replace('Z','',regex=True)\n\n# extract year from the date column into new column\ndf_all['year'] = df_all['date'].str[:4]\n\n# select columns\ndf_all = df_all[['datetime', 'year', 'date', 'time', 'ba_from', 'ba_to', 'mwh']]\n\n# export dataframe to csv file\nos.chdir(save_path)\ndf_all.to_csv(save_file, 
index=False)\n","sub_path":"scrape-interchange.py","file_name":"scrape-interchange.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
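+{"seq_id":"0000000003","text":"# Sketch of the timestamp handling in scrape-interchange.py above. The sample\n# value is an assumption about EIA's hourly format; the code itself only relies\n# on a 'T' separator and a trailing 'Z'.\n# import pandas as pd\n# s = pd.Series([\"20200101T08Z\"])\n# s.str.split(\"T\", expand=True) # -> two columns: \"20200101\" and \"08Z\"\n# stripping the 'Z' leaves the hour \"08\"; the year is then \"20200101\"[:4] == \"2020\"\n","sub_path":"notes/scrape_interchange_datetime_sketch.py","file_name":"scrape_interchange_datetime_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}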
+{"seq_id":"75702867","text":"import functools\nfrom typing import Dict, List, Set, Union, cast\n\nimport pampy\nimport pandas as pd\n\nfrom gazel.core import assign_tokens_to_gazes, make_versions\nfrom gazel.core_types import GazeChange, Snapshot, SnapshotDiff, TokenChange\n\n\ndef get_edit_time(edit: Dict) -> float:\n if edit[\"type\"] == \"aggregated\":\n return edit[\"edits\"][-1][\"timestamp\"]\n else:\n return edit[\"timestamp\"]\n\n\ndef _merge_edits(*edits: Dict) -> Dict:\n assert len(edits) > 0\n\n first_edit = edits[0]\n\n if first_edit[\"type\"] == \"aggregated\":\n first_edit[\"edits\"].extend(edits[1:])\n return first_edit\n else:\n return {\"type\": \"aggregated\", \"edits\": list(edits)}\n\n\ndef _aggregate_edits(edits: List[Dict], aggregation_window=3):\n def _reducer(accumulated_edits: List[Dict], next_edit: Dict) -> List[Dict]:\n if not accumulated_edits:\n return [next_edit]\n else:\n last_edit = accumulated_edits[-1]\n last_time = get_edit_time(last_edit)\n curr_time = get_edit_time(next_edit)\n if curr_time - last_time < aggregation_window:\n return accumulated_edits[:-1] + [_merge_edits(last_edit, next_edit)]\n\n return accumulated_edits + [next_edit]\n\n return functools.reduce(_reducer, edits, cast(List[Dict], []))\n\n\nclass Tracker:\n def __init__(\n self,\n source: str,\n gazes: Union[List[dict], pd.DataFrame],\n changelog: List[dict],\n source_language: str,\n edit_aggregation_window: float = 3.0,\n ):\n changelog = _aggregate_edits(changelog, edit_aggregation_window)\n self.changelog = changelog\n\n self.edit_aggregation_window = edit_aggregation_window\n self.snapshots = make_versions(source, source_language, self.changelog)\n if isinstance(gazes, pd.DataFrame):\n # honour the Union annotation: accept a DataFrame or a list of dicts\n gazes = gazes.to_dict(\"records\")\n gazes = assign_tokens_to_gazes(gazes, self.snapshots)\n self.gazes: pd.DataFrame = pd.DataFrame(gazes)\n\n def diff(self, start: int = 0, end: int = -1) -> SnapshotDiff:\n \"\"\"Returns a diff between the snapshot versions\n `start` and `end`. The diff includes all changes to\n tokens and gazes that occur between `start` and `end`.\n\n Parameters\n ----------\n start : int\n the index of the starting snapshot\n end : int\n the index of the modified snapshot\n\n Returns\n -------\n SnapshotDiff\n token and gaze changes that occur for this diff.\n \"\"\"\n changes: List[TokenChange] = []\n\n for i in range(start, end):\n snapshot = self.snapshots[i]\n changes.extend(snapshot.changes)\n\n gaze_changes: List[GazeChange] = []\n\n last_snapshot_time = self.snapshots[end - 1].time\n gazes: pd.DataFrame = self.gazes\n potentially_changed_gazes = gazes[gazes[\"system_time\"] < last_snapshot_time]\n\n # TODO: Add gaze changes\n for token_change in changes:\n if token_change.type == \"moved\":\n assert (\n token_change.old and token_change.new\n ), \"TokenChange must have `old` and `new` tokens if change type is `moved`\"\n affected_gazes = potentially_changed_gazes[\n potentially_changed_gazes.syntax_node_id == token_change.old.id\n ]\n affected_gazes = affected_gazes.to_dict(\"records\")\n\n for gaze in affected_gazes:\n new_col = (\n token_change.new.range.start.point.col\n + gaze[\"syntax_node_offset\"]\n )\n new_gaze = {\n **gaze,\n \"source_file_line\": token_change.new.range.start.point.line,\n \"source_file_col\": new_col,\n }\n gaze_changes.append(\n GazeChange(type=\"moved\", old=gaze, new=new_gaze)\n )\n\n return SnapshotDiff(\n old=self.snapshots[start],\n new=self.snapshots[end - 1],\n token_changes=changes,\n gaze_changes=gaze_changes,\n )\n\n def fixations_for_edit_window(\n self, index: int, snapshot_only=False\n ) -> pd.DataFrame:\n \"\"\"Returns all the gazes for the snapshot at index `index`.\n By default, it will provide all gazes from the start of the experiment,\n until the snapshot `i`. If you want gazes only for the duration of time\n that this snapshot exists, use `snapshot_only=True`.\n\n Parameters\n ----------\n index : int\n snapshot index\n snapshot_only : bool, optional\n if false, returns all gazes until snapshot `i`, else\n returns only gazes within the time window of snapshot `i`, by default False\n\n Returns\n -------\n pd.DataFrame\n A dataframe containing the requested gazes.\n \"\"\"\n assert index < len(self.snapshots)\n\n start_time = pampy.match(\n # fmt: off\n index, \n 0, 0, \n default=self.changelog[index][\"timestamp\"]\n # fmt: on\n )\n end_time = pampy.match(\n index,\n len(self.snapshots) - 1,\n float(\"inf\"),\n default=self.changelog[index + 1][\"timestamp\"],\n )\n\n filtered_index = (self.gazes[\"system_time\"] >= start_time) & (\n self.gazes[\"system_time\"] < end_time\n )\n filtered_gazes = self.gazes[filtered_index]\n\n return filtered_gazes\n\n def snapshot(self, index: int) -> Snapshot:\n \"\"\"Returns the snapshot at index `index`\n\n Parameters\n ----------\n index : int\n the snapshot index.\n This is `0` for the original source version,\n `1` for the first edit, `2` for the second edit,\n and so on.\n\n Returns\n -------\n Snapshot\n The snapshot at index `index`\n \"\"\"\n return self.snapshots[index]\n\n def snapshot_at_time(self, t: float) -> Snapshot:\n \"\"\"Returns the snapshot that exists at time `t`\n\n Parameters\n ----------\n t : float\n timestamp for which you need the snapshot\n\n Returns\n -------\n Snapshot\n The snapshot at time `t`\n \"\"\"\n # find the consecutive pair of snapshots whose time window contains t\n for i in range(1, len(self.snapshots)):\n if self.snapshots[i - 1].time <= t < self.snapshots[i].time:\n return self.snapshots[i - 1]\n\n return self.snapshots[-1]\n\n def get_fixations(self):\n return self.gazes\n\n def get_fixations_for_snapshot(self, i: int) -> pd.DataFrame:\n assert i >= 0, \"index must be >= 0\"\n assert i < len(self.snapshots), \"invalid snapshot id\"\n\n start_time = self.snapshots[i].time\n end_time = (\n self.snapshots[i + 1].time if i + 1 < len(self.snapshots) else float(\"inf\")\n )\n gazes = self.gazes\n return gazes[(gazes.system_time >= start_time) & (gazes.system_time < end_time)]\n\n def get_token_history(\n self, id_or_ids: Union[Set[int], int], start_snapshot=0\n ) -> List[TokenChange]:\n if isinstance(id_or_ids, int):\n id_or_ids = {id_or_ids}\n\n diff = self.diff(start_snapshot, len(self.snapshots))\n token_changes = diff.token_changes\n return list(\n filter(\n lambda change: _get_token_id_for_change(change)\n in cast(Set[int], id_or_ids),\n token_changes,\n )\n )\n\n def get_first_token_by_text(self, text):\n for snapshot in self.snapshots:\n for token in snapshot.tokens:\n start, end = token.range.start.index, token.range.end.index\n token_text = token.source[start:end]\n if token_text == text:\n return token\n\n\ndef _get_token_id_for_change(change: TokenChange) -> int:\n if change.old:\n return change.old.id\n if change.new:\n return change.new.id\n\n raise Exception(\"Either old or new token need to exist in TokenChange\")\n\n","sub_path":"build/lib/gazel/Tracker.py","file_name":"Tracker.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
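+{"seq_id":"0000000004","text":"# A hypothetical construction sketch for gazel's Tracker above. The variable\n# shapes (source text, gaze records, changelog) are assumptions; only the method\n# names and signatures come from Tracker.py itself.\n# tracker = Tracker(source_text, gazes_df, changelog, \"python\")\n# d = tracker.diff(0, len(tracker.snapshots)) # token and gaze changes, first to last version\n# fx = tracker.get_fixations_for_snapshot(0) # gazes recorded while snapshot 0 was current\n# hist = tracker.get_token_history({3, 7}) # changes touching token ids 3 and 7\n","sub_path":"notes/tracker_usage_sketch.py","file_name":"tracker_usage_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"325216221","text":"\"\"\"\n This spider is a SFExpressJobs spider created on top of the ATSSpider\n scrapy crawl sfexpressjobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://hr.sf-express.com/index.php/position/list.html\"\n\n sample job url:\n http://hr.sf-express.com/index.php/position/detail/job/138316.html\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\nRef_Num = compile(r\"job\\/(\\d+)\\.htm\")\n\n\nclass SFExpressJobs(ATSSpider):\n\n name = \"sfexpressjobs\"\n\n def parse(self, response):\n selector = Selector(response)\n if not self.expected_job_count_set:\n self.expected_job_count = selector.xpath('//span[@class=\"deepRed total\"]//text()').extract()\n\n jobs = selector.xpath('//table[@class=\"tablelist\"]/tbody/tr[@class=\"odd\" or @class=\"even\"]')\n for job in jobs:\n url = job.xpath('./td[@class=\"l square\"]/a/@href').extract()\n if url:\n yield Request(\n callback=self.parse_job_callback(),\n meta={\n 'date': job.xpath('./td[5]//text()').extract(),\n 'location': job.xpath('./td[4]//text()').extract(),\n 'jobcategory': job.xpath('./td[2]//text()').extract(),\n 'title': job.xpath('./td[@class=\"l square\"]/a/text()').extract(),\n },\n url=urljoin(response.url, url[0])\n )\n\n next_page_url = selector.xpath('//ul[@class=\"yiiPager\"]/li[@class=\"next\"]/a/@href').extract()\n if next_page_url:\n yield Request(url=urljoin(response.url, next_page_url[0]), callback=self.parse)\n\n def parse_job(self, response):\n selector = Selector(response)\n loader = BrightcorpItemLoader(selector=selector)\n\n loader.add_xpath(\n 'description',\n '//table[@class=\"tablelist textl\"]//tr[@class=\"c\"][not(descendant-or-self::a or descendant-or-self::button)]'\n )\n\n loader.add_value('apply_url', response.url)\n loader.add_value('date', response.meta.get('date'))\n loader.add_value('jobcategory', response.meta.get('jobcategory'))\n loader.add_value('location', response.meta.get('location'))\n loader.add_value('referencenumber', response.url, Prefix('%s-' % self.name), re=Ref_Num)\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/sfexpressjobs.py","file_name":"sfexpressjobs.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}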
+{"seq_id":"212171897","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport ee\nimport sys\n\n\"\"\"Module for creating map access tokens\n\n>>> ee_map_py(eeobject, vizparams)\n\nThese functions are used in R/ee_map.R\n\"\"\"\n\n#EE_TILES = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'\n# eeobject = r['eeobject']\n# vizparams = r['vizparams']\ndef ee_map_py(eeobject, vizparams):\n \"\"\"Fetch and return a map id and token\n\n Args:\n eeobject (ee): Earth Engine object of class: Geometry, Feature, FeatureCollection or Image.\n vizparams (dict): The visualization parameters. See ee.data.getMapId.\n\n Returns:\n An object containing a mapid and access token, or an error message.\n\n Examples:\n >>> eeobject = ee.Image()\n >>> vizparams = {}\n >>> ee_map_py(eeobject, vizparams)\n \"\"\"\n # compare class names with == (identity checks against string literals are unreliable)\n if eeobject.name() == 'Geometry':\n try:\n EE_TILES = ee.FeatureCollection(ee.Feature(eeobject))\\\n .draw(**vizparams)\\\n .getMapId()[\"tile_fetcher\"].url_format\n except Exception:\n print('Error: The Earth Engine Geometry object is malformed')\n elif eeobject.name() == 'Feature':\n try:\n EE_TILES = ee.FeatureCollection(ee.Feature(eeobject))\\\n .draw(**vizparams)\\\n .getMapId()[\"tile_fetcher\"].url_format\n except Exception:\n print('Error: The Earth Engine Feature object is malformed')\n elif eeobject.name() == 'FeatureCollection':\n try:\n EE_TILES = eeobject.draw(**vizparams).getMapId()[\"tile_fetcher\"].url_format\n except Exception:\n print('Error: The Earth Engine FeatureCollection object is malformed')\n elif eeobject.name() == 'Image':\n try:\n EE_TILES = eeobject.visualize(**vizparams).getMapId()[\"tile_fetcher\"].url_format\n except Exception:\n print('Error: The Earth Engine Image object is malformed')\n else:\n sys.exit('ee_map only supports Geometry, Image, Feature and FeatureCollection')\n return EE_TILES\n","sub_path":"inst/python/ee_map.py","file_name":"ee_map.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"158416120","text":"\"\"\"\n43. Extract noun clauses that modify verb clauses\nWhen a clause containing a noun depends on a clause containing a verb,\nextract the pair in tab-separated format. Do not output symbols such as punctuation.\n\"\"\"\nimport os\nimport sys\n\nfrom knock41 import Chunk, cabocha_into_chunks\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))\nfrom kiyuna.utils.message import message # noqa: E402 isort:skip\n\n\nclass ChunkNormalized(Chunk):\n def __init__(self, chunk):\n self.morphs, self.dst, self.srcs = (*chunk,)\n self.norm = self.get_norm()\n\n def get_norm(self):\n # drop symbol morphemes (POS \"記号\") such as punctuation\n clause = \"\".join(m.surface for m in self.morphs if m.pos != \"記号\")\n return clause\n\n def has_pos(self, pos):\n return any(m.pos == pos for m in self.morphs)\n\n\nif __name__ == \"__main__\":\n res = []\n for chunks in cabocha_into_chunks():\n chunks = {k: ChunkNormalized(v) for k, v in chunks.items()}\n for c in chunks.values():\n if c.dst == -1:\n continue\n if c.dst not in chunks:\n continue\n # keep pairs where a noun (名詞) clause depends on a verb (動詞) clause\n if c.has_pos(\"名詞\") and chunks[c.dst].has_pos(\"動詞\"):\n res.append(f\"{c.norm}\\t{chunks[c.dst].norm}\\n\")\n sys.stdout.writelines(res)\n message(f\"write {len(res)} lines\", type=\"success\")\n","sub_path":"kiyuna/chapter05/knock43.py","file_name":"knock43.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"515422578","text":"import xml.etree.ElementTree as ET\n\ntree = ET.parse('kjv.xml')\n\nroot = tree.getroot()\n\nfor bn in root.findall('BIBLEBOOK'):\n\n\tbname=bn.attrib['bname']\n\n\tfor cn in bn.findall('CHAPTER'):\n\n\t\tcnumber=cn.attrib['cnumber']\n\n\t\tfor vn in cn.findall('VERS'):\n\n\t\t\tvnumber=vn.attrib['vnumber']\n\t\t\tvers=vn.text\n\n\t\t\tprint(bname+\"|\"+cnumber+\":\"+vnumber+\"|\"+vers)\n\n\n","sub_path":"bibly-00/CODES/archive/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
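+{"seq_id":"0000000005","text":"# Quick check of the Ref_Num pattern in sfexpressjobs.py above against the sample\n# job URL from its own docstring (a pure-regex sketch, no crawling involved):\n# Ref_Num.search(\"http://hr.sf-express.com/index.php/position/detail/job/138316.html\").group(1)\n# -> \"138316\", which the loader prefixes as \"sfexpressjobs-138316\" for referencenumber\n","sub_path":"notes/sfexpress_refnum_sketch.py","file_name":"sfexpress_refnum_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"0000000006","text":"# The element and attribute names read by convert.py above imply a kjv.xml shaped\n# roughly like the sketch below. This is inferred from the code, not from the\n# actual file (the code never inspects the root tag name):\n# <XMLBIBLE>\n# <BIBLEBOOK bname=\"Genesis\">\n# <CHAPTER cnumber=\"1\">\n# <VERS vnumber=\"1\">In the beginning ...</VERS>\n# </CHAPTER>\n# </BIBLEBOOK>\n# </XMLBIBLE>\n# Each verse would then print as: Genesis|1:1|In the beginning ...\n","sub_path":"notes/convert_xml_sketch.py","file_name":"convert_xml_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}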
+{"seq_id":"38591825","text":"# coding: utf-8\n\nimport logging\nimport datetime\nimport re\n\nfrom scrapy import Request\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor as le\n\nfrom fang.items import World_agents\n\n\nclass WorldAgentsSpider(CrawlSpider):\n logger = logging.getLogger()\n name = 'world_agents'\n allowed_domains = ['world.fang.com']\n start_url = 'http://world.fang.com/agent/all/'\n count = 0\n rules = [\n Rule(\n le(allow=('http://world.fang.com/agent/all/page[0-9]*/')),\n follow=True\n ),\n Rule(\n le(allow=('http://world.fang.com/agentshop/\\d+/$')),\n follow=False, callback='parse_shop'\n ),\n Rule(\n le(allow=('http://world.fang.com/agent/\\d+/')),\n follow=False, callback='parse_agent'\n )\n ]\n\n def start_requests(self):\n yield Request(self.start_url)\n\n def parse_shop(self, response):\n res = response\n url = res.url\n name = res.xpath(\"//div[@class='Company clearfix']/div[@class='CompanyName']/h2/text()\").extract()[0]\n region = res.xpath(\"//div[@class='CompanyName']/div[@class='Add']/a/text()\").extract()\n contact_xpath = res.xpath(\"//div[@class='SidebarBox']/ul[@class='Contact']/li\")\n contact = contact_xpath.xpath(\"string(.)\").extract()\n item = World_agents()\n item['from_site'] = 'world_fang'\n item['name'] = name\n item['world_type'] = 'shop'\n item['region'] = region\n item['contact'] = contact\n item['url'] = url\n for i in contact:\n print(i)\n self.count += 1\n self.logger.info('-----------------%s-----------------' % self.count)\n return item\n\n def parse_agent(self, response):\n res = response\n url 
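+{"seq_id":"0000000007","text":"# How the three crawl rules in fang_world_agents.py above dispatch. The URLs are\n# illustrative instances of the rules' own regexes, not real listings:\n# http://world.fang.com/agent/all/page2/ -> followed, no callback (listing pages)\n# http://world.fang.com/agentshop/12345/ -> parse_shop\n# http://world.fang.com/agent/67890/ -> parse_agent\n","sub_path":"notes/fang_rules_sketch.py","file_name":"fang_rules_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}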
= res.url\n name = res.xpath(\"//div[@class='introbox']/h2/text()\").extract()[0]\n name = name.strip()\n contact_xpath = res.xpath(\"//ul[@class='userIntro']/li\")\n contact = contact_xpath.xpath(\"string(.)\").extract()\n for i in contact:\n print(i)\n item = World_agents()\n item['from_site'] = 'world_fang'\n item['world_type'] = 'agent'\n item['name'] = name\n item['contact'] = contact\n item['url'] = url\n self.count += 1\n self.logger.info('+++++++++++++++++%s+++++++++++++++++' % self.count)\n self.logger.info(url)\n return item\n","sub_path":"fang/fang/spiders/fang_world_agents.py","file_name":"fang_world_agents.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"122955431","text":"import util\nimport tensorflow as tf\n\ndef classifier(x):\n\t#parameters for convolution kernels\n\tIMG_DEPTH=1\n\tC1_KERNEL_SIZE, C2_KERNEL_SIZE, C3_KERNEL_SIZE=5,5,5\n\tC1_OUT_CHANNELS,C2_OUT_CHANNELS,C3_OUT_CHANNELS=6,16,120\n\tC1_STRIDES,C2_STRIDES,C3_STRIDES=1,1,1\n\n\tP1_SIZE,P2_SIZE=2,2\n\tP1_STRIDE,P2_STRIDE=2,2\n\n\tF4_SIZE,F5_SIZE=84,10\n\n\tC1_kernel=util.weights([C1_KERNEL_SIZE,C1_KERNEL_SIZE,IMG_DEPTH, C1_OUT_CHANNELS],0.1,'C1_kernel')\n\tC2_kernel=util.weights([C2_KERNEL_SIZE,C2_KERNEL_SIZE, C1_OUT_CHANNELS, C2_OUT_CHANNELS],0.1,'C2_kernel')\n\tC3_kernel=util.weights([C3_KERNEL_SIZE,C3_KERNEL_SIZE, C2_OUT_CHANNELS, C3_OUT_CHANNELS],0.1,'C3_kernel')\n\n\tC1_bias=util.bias([C1_OUT_CHANNELS], 'C1_bias')\n\tC2_bias=util.bias([C2_OUT_CHANNELS], 'C2_bias')\n\tC3_bias=util.bias([C3_OUT_CHANNELS], 'C3_bias')\n\n\t#LeNet-5 structure\n\tC1=util.convLayer(x, C1_kernel, C1_STRIDES, 'SAME')\n\tReLU1=tf.nn.relu(C1+C1_bias)\n\tP1=util.max_pool(ReLU1,P1_SIZE, P1_STRIDE)\n\n\tC2=util.convLayer(P1, C2_kernel, C2_STRIDES, 'SAME')\n\tReLU2=tf.nn.relu(C2+C2_bias)\n\tP2=util.max_pool(ReLU2,P2_SIZE, P2_STRIDE)\n\n\tC3=util.convLayer(P2, C3_kernel, C3_STRIDES, 'SAME')\n\tReLU3=tf.nn.relu(C3+C3_bias)\n\t\n\tnum_F4_in=(int)(ReLU3.shape[1]*ReLU3.shape[2]*ReLU3.shape[3])\n\tF4_in=tf.reshape(ReLU3,[-1,num_F4_in])\n\n\tF4_weights=util.weights([num_F4_in, F4_SIZE],0.1,'F4_weights')\n\tF4_bias=util.bias([F4_SIZE],'F4_bias')\n\tF4=tf.matmul(F4_in, F4_weights)\n\tReLU4=tf.nn.relu(F4+F4_bias)\n\n\tF5_weights=util.weights([F4_SIZE, F5_SIZE],0.1,'F5_weights')\n\tF5_bias=util.bias([F5_SIZE],'F5_bias')\n\tF5=tf.matmul(ReLU4, F5_weights)+F5_bias\n\n\treturn F5\n","sub_path":"LeNet_5.py","file_name":"LeNet_5.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
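+{"seq_id":"0000000008","text":"# Shape trace for the LeNet-5 classifier above, assuming a 28x28x1 MNIST-style\n# input (the input size is an assumption; kernels, strides and paddings come from the code):\n# x: [N, 28, 28, 1]\n# C1 (5x5, SAME, 6 ch) + ReLU -> [N, 28, 28, 6]; P1 (2x2, stride 2) -> [N, 14, 14, 6]\n# C2 (5x5, SAME, 16 ch) + ReLU -> [N, 14, 14, 16]; P2 (2x2, stride 2) -> [N, 7, 7, 16]\n# C3 (5x5, SAME, 120 ch) + ReLU -> [N, 7, 7, 120]; flatten -> [N, 5880]\n# F4 -> [N, 84]; F5 (logits) -> [N, 10]\n","sub_path":"notes/lenet5_shape_trace.py","file_name":"lenet5_shape_trace.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}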
+{"seq_id":"650104952","text":"from django.test import TestCase\nfrom .models import Friend\nfrom .models import Profile\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\ntiff = User.objects.get(username = 'Tiffany')\nmark = User.objects.get(username = 'Mark')\nollie = User.objects.get(username = 'Fuck')\n\n# tiffProfile = Profile(user = tiff, bio = 'NY')\n# markProfile = Profile(user = mark, bio = 'CA')\n# ollieProfile = Profile(user = ollie, bio = 'WI')\n\n# tiffProfile.save()\n# markProfile.save()\n# ollieProfile.save()\n\n# tiffFr = Friend(current_user=tiff)\n# markFr = Friend(current_user=mark)\n# ollieFr = Friend(current_user=ollie)\n\n# tiffFr.save()\n# markFr.save()\n# ollieFr.save()\n\ntiffFr = Friend.objects.get(current_user = tiff)\nmarkFr = Friend.objects.get(current_user = mark)\nollieFr = Friend.objects.get(current_user = ollie)\n\ntiffFr.make_friend(tiff, mark)\ntiffFr.make_friend(tiff, ollie)\nmarkFr.make_friend(mark, tiff)\n\ntiffFr.save()\nprint(tiffFr.users.all())\nprint(markFr.users.all())\nprint(ollieFr.users.all())","sub_path":"mysite/accounts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"363045772","text":"from enum import Enum\nfrom typing import List, Optional\n\nimport pyshark\n\n\nclass Direction(Enum):\n ALL = 0\n FROM_CLIENT = 1\n FROM_SERVER = 2\n\n\nclass TraceAnalyzer:\n _filename = \"\"\n\n def __init__(self, filename: str, keylog_file: Optional[str] = None):\n self._filename = filename\n self._keylog_file = keylog_file\n\n def _get_direction_filter(self, d: Direction) -> str:\n f = \"(quic && !icmp) && \"\n if d == Direction.FROM_CLIENT:\n return f + \"ip.src==193.167.0.100 && \"\n elif d == Direction.FROM_SERVER:\n return f + \"ip.src==193.167.100.100 && \"\n else:\n return f\n\n def _get_packets(self, f: str) -> List:\n override_prefs = {}\n if self._keylog_file is not None:\n override_prefs[\"ssl.keylog_file\"] = self._keylog_file\n cap = pyshark.FileCapture(\n self._filename,\n display_filter=f,\n override_prefs=override_prefs,\n decode_as={\"udp.port==443\": \"quic\"},\n )\n packets = []\n # If the pcap has been cut short in the middle of the packet, pyshark will crash.\n # See https://github.com/KimiNewt/pyshark/issues/390.\n try:\n for p in cap:\n packets.append(p)\n cap.close()\n except Exception as e:\n print(e)\n return packets\n\n def get_1rtt(self, direction: Direction = Direction.ALL) -> List:\n \"\"\" Get all 1-RTT (short header) packets, one or both directions. \"\"\"\n packets = []\n for packet in self._get_packets(\n self._get_direction_filter(direction) + \"quic.header_form==0\"\n ):\n for layer in packet.layers:\n if layer.layer_name == \"quic\" and not hasattr(\n layer, \"long_packet_type\"\n ):\n layer.sniff_time = packet.sniff_time\n packets.append(layer)\n return packets\n\n def get_vnp(self, direction: Direction = Direction.ALL) -> List:\n return self._get_packets(\n self._get_direction_filter(direction) + \"quic.version==0\"\n )\n\n def get_retry(self, direction: Direction = Direction.ALL) -> List:\n packets = []\n for packet in self._get_packets(\n self._get_direction_filter(direction) + \"quic.long.packet_type==Retry\"\n ):\n for layer in packet.layers:\n if layer.layer_name == \"quic\":\n packets.append(layer)\n return packets\n\n def get_initial(self, direction: Direction = Direction.ALL) -> List:\n \"\"\" Get all Initial packets. \"\"\"\n packets = []\n for packet in self._get_packets(\n self._get_direction_filter(direction) + \"quic.long.packet_type\"\n ):\n for layer in packet.layers:\n if (\n layer.layer_name == \"quic\"\n and hasattr(layer, \"long_packet_type\")\n and layer.long_packet_type == \"0\"\n ):\n packets.append(layer)\n return packets\n\n def get_handshake(self, direction: Direction = Direction.ALL) -> List:\n \"\"\" Get all Handshake packets. \"\"\"\n packets = []\n for packet in self._get_packets(\n self._get_direction_filter(direction) + \"quic.long.packet_type\"\n ):\n for layer in packet.layers:\n if (\n layer.layer_name == \"quic\"\n and hasattr(layer, \"long_packet_type\")\n and layer.long_packet_type == \"2\"\n ):\n packets.append(layer)\n return packets\n\n def get_0rtt(self) -> List:\n \"\"\" Get all 0-RTT packets. 
\"\"\"\n packets = []\n for packet in self._get_packets(\n self._get_direction_filter(Direction.FROM_CLIENT) + \"quic.long.packet_type\"\n ):\n for layer in packet.layers:\n if (\n layer.layer_name == \"quic\"\n and hasattr(layer, \"long_packet_type\")\n and layer.long_packet_type == \"1\"\n ):\n packets.append(layer)\n return packets\n","sub_path":"trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203685134","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 4 09:56:02 2019\r\n\r\n@author: rkrishnan\r\n\"\"\"\r\nimport tweepy as tw\r\nimport json\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nimport re\r\nimport pandas as pd\r\nimport os\r\n\r\ncwd = os.getcwd()\r\n\r\n# Load credentials from json file\r\nwith open(\"twitter_credentials.json\", \"r\") as file: \r\n creds = json.load(file)\r\n\r\n# Instantiate an object\r\nauth = tw.OAuthHandler(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])\r\nauth.set_access_token(creds['ACCESS_TOKEN'], creds['ACCESS_SECRET'])\r\napi = tw.API(auth, wait_on_rate_limit=True)\r\n\r\n\r\nsearch_str = \"artificial+intelligence -filter:retweets\"\r\n\r\ntweets = tw.Cursor(api.search,\r\n q=search_str,\r\n lang=\"en\",\r\n since='2018-04-23',tweet_mode='extended').items(1000)\r\n\r\nall_tweets = [tweet.full_text for tweet in tweets]\r\nall_tweets[:5]\r\n\r\n#Convert list of tuples to dataframe and set column names and indexes\r\ntweet_df = pd.DataFrame(all_tweets, columns = ['tweet']) \r\n\r\ntweet_df['new_tweet']=tweet_df['tweet']\r\n\r\nfor eachrow in tweet_df.index:\r\n tweet_df['new_tweet'][eachrow] = re.sub(r\"http\\S+\", \"\", tweet_df['new_tweet'][eachrow])\r\n tweet_df['new_tweet'][eachrow] = re.sub('RT @[^\\s]+','', tweet_df['new_tweet'][eachrow])\r\n tweet_df['new_tweet'][eachrow] = re.sub('#[^\\s]+','', tweet_df['new_tweet'][eachrow])\r\n tweet_df['new_tweet'][eachrow] = re.sub('@[^\\s]+','', tweet_df['new_tweet'][eachrow])\r\n tweet_df['new_tweet'][eachrow] = ''.join([c for c in tweet_df['new_tweet'][eachrow] if ord(c) < 128])\r\n\r\ntweet_df['new_tweet'].to_csv(r'ai_tweets_tw.txt', header=['new_tweet'], index=None, sep=' ', mode='w')\r\n\r\nai_tweets = pd.read_csv('ai_tweets_tw.txt' )\r\n\r\nsid = SentimentIntensityAnalyzer()\r\nfor sentence in ai_tweets['new_tweet']:\r\n print(sentence)\r\n ss = sid.polarity_scores(sentence)\r\n for k in sorted(ss):\r\n print('{0}: {1}, '.format(k, ss[k]), end='')\r\n print()\r\n\r\n","sub_path":"06-IST736-TextMining/Assignments/Week1/AI_tweets_tw.py","file_name":"AI_tweets_tw.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412477783","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n# Theano\nimport theano.tensor as T\n# Hyperopt\nfrom hyperopt import hp\nfrom math import log\n#\nimport numpy as np\nimport sys\n\nfrom Rnnrbm_original import RnnRbm\n\n\ndef get_header():\n return ['temporal_order', 'n_hidden', 'n_hidden_recurrent', 'batch_size', 'accuracy']\n\n\ndef get_hp_space():\n space = (hp.qloguniform('temporal_order', log(1), log(30), 1),\n hp.qloguniform('n_hidden', log(100), log(5000), 10),\n hp.qloguniform('n_hidden_recurrent', log(100), log(5000), 10),\n # hp.loguniform('learning_rate', log(0.0001), log(1)),\n hp.quniform('batch_size', 10, 500, 10)\n # hp.choice('activation_func', ['tanh', 'sigmoid']),\n # hp.choice('sampling_positive', 
['true', 'false'])\n # gibbs_sampling_step_test ???\n )\n return space\n\n\ndef train(params, dataset, temporal_granularity, max_iter, log_file_path):\n # Hyperparams\n # n_hidden, temporal_order, learning_rate, \\\n # batch_size, activation_func, sampling_positive = params\n temporal_order, n_hidden, n_hidden_recurrent, batch_size = params\n\n # Cast the hp\n n_hidden = int(n_hidden)\n temporal_order = int(temporal_order)\n batch_size = int(batch_size)\n\n # Log them\n with open(log_file_path, 'ab') as log_file:\n log_file.write((u'# n_hidden : {}'.format(n_hidden)).encode('utf8'))\n log_file.write((u'# temporal_order : {}'.format(temporal_order)).encode('utf8'))\n # log_file.write((u'# learning_rate : {}'.format(learning_rate)).encode('utf8'))\n log_file.write((u'# batch_size : {}'.format(batch_size)).encode('utf8'))\n # Print\n print((u'# n_hidden : {}'.format(n_hidden)).encode('utf8'))\n print((u'# temporal_order : {}'.format(temporal_order)).encode('utf8'))\n # print((u'# learning_rate : {}'.format(learning_rate)).encode('utf8'))\n print((u'# batch_size : {}'.format(batch_size)).encode('utf8'))\n\n # Some hp not optimized\n gibbs_sampling_step_test = 40\n\n # Load data\n # Dimension : time * pitch\n ################################################\n ################################################\n ################################################\n orch, orch_mapping, piano, piano_mapping, train_index, val_index, _ \\\n = load_data_tvt(data_path=dataset,\n log_file_path='bullshit.txt',\n temporal_granularity=temporal_granularity,\n temporal_order=temporal_order,\n shared_bool=True,\n bin_unit_bool=True,\n minibatch_size=batch_size,\n split=(0.7, 0.1, 0.2))\n ################################################\n ################################################\n ################################################\n\n n_train_batches = train_index.shape[0]\n last_batch_size = (train_index[-1]).shape[0]\n\n n_val_batches = len(val_index)\n val_size = 0\n for i in xrange(n_val_batches):\n val_size += val_index[i].size\n\n # construct the RBM class\n model = RnnRbm(\n n_hidden=n_hidden,\n n_hidden_recurrent=n_hidden_recurrent,\n lr=0.001,\n r=(0, ),\n )\n\n # Train\n costs = []\n for epoch in range(max_iter):\n for batch_index in xrange(n_train_batches - 1):\n cost = model.train_function(data_train[train_index[batch_index]])\n costs.append(cost)\n\n print('Epoch %i/%i' % (epoch + 1, max_iter))\n print(np.mean(costs))\n sys.stdout.flush()\n\n # Accuracy ??\n\n best_accuracy = np.amax(val_tab)\n dico_res = {'n_hidden': n_hidden,\n 'n_hidden_recurrent': n_hidden_recurrent,\n 'temporal_order': temporal_order,\n # 'learning_rate': learning_rate,\n 'batch_size': batch_size,\n 'accuracy': best_accuracy}\n\n return best_accuracy, dico_res\n\n\ndef create_past_vector(piano, orch, batch_size, delay, orch_dim):\n # Piano is a matrix : num_batch x piano_dim\n # Orch a matrix : num_batch x ()\n orch_reshape = T.reshape(orch, (batch_size, delay * orch_dim))\n past = T.concatenate((piano, orch_reshape), axis=1)\n return past\n\nif __name__ == '__main__':\n # Main can't be used because of relative import\n # Just here for an example of the hyperparameters structure\n # Hyper-parameter\n hyper_parameter = {}\n hyper_parameter['n_hidden'] = 500\n hyper_parameter['temporal_order'] = 10\n hyper_parameter['training_epochs'] = 1000\n hyper_parameter['batch_size'] = 100\n # File\n dataset = 
'../../../Data/data.p'\n","sub_path":"Source/RnnRbm.py","file_name":"RnnRbm.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"520033345","text":"from typing import Union, List, Dict\n\nfrom rest_framework import serializers\n\n\nclass SerializerSaver(serializers.BaseSerializer):\n @classmethod\n def save_data(Serializer, data: Union[List, Dict]):\n if isinstance(data, list):\n serializer = Serializer(data=data, many=True)\n elif isinstance(data, dict):\n serializer = Serializer(data=data)\n else:\n raise serializers.ValidationError(\n f\"Type of data is not List nor Dict.\"\n )\n if serializer.is_valid():\n serializer.save()\n return serializer.instance, None\n else:\n print(\n f\"serializer {Serializer.__name__} is not valid\\n\" \n f\"reasons: {serializer.errors}\"\n )\n return None, {\"errors\": serializer.errors}\n","sub_path":"utils/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"62089352","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/opengen/main.py\n# Compiled at: 2020-04-13 16:50:17\n# Size of source mod 2**32: 2070 bytes\nimport casadi.casadi as cs, opengen as og\nu = cs.SX.sym('u', 5)\np = cs.SX.sym('p', 2)\nphi = og.functions.rosenbrock(u, p)\nf2 = cs.fmax(0.0, u[2] - u[3] + 0.1)\nf1 = cs.vertcat(1.5 * u[0] - u[1], cs.sin(u[2] + cs.pi / 5) - 0.2)\nC = og.constraints.Ball2(None, 1.0)\nUA = og.constraints.FiniteSet([[1, 2, 3], [1, 2, 2], [1, 2, 4], [0, 5, -1]])\nUB = og.constraints.Ball2(None, 1.0)\nU = og.constraints.CartesianProduct(5, [2, 4], [UA, UB])\nproblem = og.builder.Problem(u, p, phi).with_constraints(U).with_aug_lagrangian_constraints(f1, C)\nmeta = og.config.OptimizerMeta().with_version('0.0.0').with_authors([\n 'P. Sopasakis', 'E. 
Fresk']).with_licence('CC4.0-By').with_optimizer_name('the_optimizer')\nbuild_config = og.config.BuildConfiguration().with_build_directory('my_optimizers').with_build_mode('debug').with_tcp_interface_config()\nsolver_config = og.config.SolverConfiguration().with_tolerance(1e-05).with_initial_penalty(1000).with_initial_tolerance(1e-05).with_max_outer_iterations(30).with_delta_tolerance(0.0001).with_penalty_weight_update_factor(2).with_sufficient_decrease_coefficient(0.5)\nbuilder = og.builder.OpEnOptimizerBuilder(problem, metadata=meta,\n build_configuration=build_config,\n solver_configuration=solver_config)\nbuilder.build()\nmng = og.tcp.OptimizerTcpManager('my_optimizers/the_optimizer')\nmng.start()\npong = mng.ping()\nprint(pong)\nsolution = mng.call([1.0, 50.0])\nprint(solution.get().solution)\nmng.kill()","sub_path":"pycfiles/opengen-0.4.1-py3.6/main.cpython-36.py","file_name":"main.cpython-36.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"262967309","text":"import tkinter as tk\nfrom tkinter import ttk\n\nroot = tk.Tk()\nroot.title(\"Distance Converter\")\n\nmetres_value = tk.StringVar()\n\ndef calculate_feet(*args):\n try:\n metres = float(metres_value.get())\n feet = metres * 3.28084\n print(f\"{metres} metres is equal to {feet:.3f} feet.\")\n except ValueError:\n pass\n\n\nmain = ttk.Frame(root, padding=(30, 15))\nmain.grid()\n\nroot.columnconfigure(0, weight=1)\n\n# -- Widgets --\n\nmetres_label = ttk.Label(main, text=\"metres\")\nmetres_input = ttk.Entry(main, width=10, textvariable=metres_value)\nfeet_label = ttk.Label(main, text=\"feet\")\nfeet_display = ttk.Label(main, text=\"Feet shown here\")\ncalc_button = ttk.Button(main, text=\"Calculate\", command=calculate_feet)\n\n# -- Layout --\n\nmetres_label.grid(column=0, row=0, sticky=\"W\", padx=5, pady=5)\nmetres_input.grid(column=1, row=0, sticky=\"EW\", padx=5, pady=5)\nmetres_input.focus()\n\nfeet_label.grid(column=0, row=1, sticky=\"W\", padx=5, pady=5)\nfeet_display.grid(column=1, row=1, sticky=\"EW\", padx=5, pady=5)\n\ncalc_button.grid(column=0, row=2, columnspan=2, sticky=\"EW\", padx=5, pady=5)\n\nroot.mainloop()","sub_path":"App Distance converter/2. calculating feet.py","file_name":"2. 
calculating feet.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209833397","text":"import time\r\n\r\n\r\ndef insertion_sort(data, drawData, timeTick):\r\n #for iter in range(len(data)-1):\r\n for i in range(1,len(data)):\r\n key = data[i]\r\n last = i-1\r\n while last>=0 and key<data[last]:\r\n data[last+1]=data[last]\r\n last = last-1\r\n data[last+1]=key\r\n #if data[index]>data[index+1]:\r\n #data[index],data[index+1]=data[index+1],data[last]\r\n\r\n drawData(data, ['green' if x == i or x == i+1 else 'red' for x in range(len(data))] )\r\n time.sleep(timeTick)\r\n drawData(data, ['green' for x in range(len(data))])\r\n","sub_path":"insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"524350840","text":"#import dependency modles.\nimport socket\nimport time\nimport requests\nimport platform\nimport re\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom smtplib import SMTP_SSL\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nfrom aip import AipFace\nimport cv2\nimport base64\n\n# Get PCname local time and device platform information&take photo.\npcname = socket.gethostname()\nlocaltime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\nPCplatform = platform.platform()\ndef get_host_ip():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n return ip\n\nlocalip = get_host_ip()\n\ndef getip():\n year = time.strftime(\"%Y\",time.localtime())\n IpGeturl = str('http://'+year+'.ip138.com/ic.asp')\n response = requests.get(IpGeturl)\n ip = re.search(r\"\\[\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\]\",response.content.decode(errors='ignore')).group(0)\n return ip\n\nIPaddress = getip()\n\ndef send_email():\n host_server = 'smtp.163.com'\n\n sender = 'ly9460730472580800@163.com'\n\n pwd = 'w696919696w'\n\n sender_mail = 'ly9460730472580800@163.com'\n\n receiver = open(\"account.txt\", 'r').read()\n\n mail_content = '您好,您的移动硬盘在陌生设备接入,设备信息如下: ' + '\\n' + '接入设备名: '+pcname + '\\n' + '设备系统版本: ' + PCplatform + '\\n' + '设备公网IP地址: ' + IPaddress + '\\n' + '内网IP地址: ' + localip + '\\n' + \"接入时间: \" + localtime + '\\n' + '使用者未通过人脸识别验证'+'\\n'+'请注意您的数据安全!'\n mail_title = '硬盘接入未知设备预警'\n\n msg = MIMEMultipart()\n # msg = MIMEText(mail_content, \"plain\", 'utf-8')\n msg[\"Subject\"] = Header(mail_title, 'utf-8')\n msg[\"From\"] = sender_mail\n msg[\"To\"] = Header('usr', 'utf-8')\n\n # 邮件正文内容\n msg.attach(MIMEText(mail_content, 'plain', 'utf-8'))\n\n fp = open('NowUserFace.jpg', 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n msgImage.add_header('invader', '<image1>')\n msg.attach(msgImage)\n\n smtp = SMTP_SSL(host_server)\n\n smtp.set_debuglevel(0)\n smtp.ehlo(host_server)\n smtp.login(sender, pwd)\n\n smtp.sendmail(sender_mail, receiver, msg.as_string())\n smtp.quit()\n\ndef Face_Compared():\n APP_ID = '15033725'\n API_KEY = 'he0CtkD7MQUImD8UxVX4Zr4F'\n SECRET_KEY = 'sK97hzEpoRHNlnWNIqa0tHHC8RyNKEj1'\n client = AipFace(APP_ID, API_KEY, SECRET_KEY)\n\n IMAGE_TYPE = 'BASE64'\n\n f1 = open('orginal.jpg', 'rb')\n f2 = open('NowUserFace.jpg', 'rb')\n #image parameters\n img1 = base64.b64encode(f1.read())\n img2 = base64.b64encode(f2.read())\n image_1 = str(img1, 'utf-8')\n image_2 = str(img2, 'utf-8')\n\n ptr = 
client.match([{'image': image_1, 'image_type': 'BASE64', }, {'image': image_2, 'image_type': 'BASE64', }])\n    ptr = ptr['result']\n\n    if ptr['score'] <= 50:\n        send_email()\n\n    else:\n        pass\n\ndef Camera_control():\n    if cv2.VideoCapture(0).isOpened():\n        cap1 = cv2.VideoCapture(0)\n        while True:\n            ret, frame = cap1.read()\n            if ret:\n                file_name = \"NowUserFace.jpg\"\n                cv2.imwrite(file_name, frame)\n                break\n            else:\n                break\n        Face_Compared()\n\n    elif cv2.VideoCapture(1).isOpened():\n        cap2 = cv2.VideoCapture(1)\n        while True:\n            ret, frame = cap2.read()\n            if ret:\n                file_name = \"NowUserFace.jpg\"\n                cv2.imwrite(file_name, frame)\n                break\n            else:\n                break\n        Face_Compared()\n    else:\n        pass\n\ndef write_data():\n    IPaddress = getip()\n    pcinfo = str(pcname + ' ' + IPaddress + ' ' + localtime + '\n')\n    # Write to data.txt file.\n    Datafile = open('data.txt', 'a')\n    Datafile.write(pcinfo)\n    Datafile.close()\n\n# Check whether there is already a record in the data.txt file.\n\ndef Judge():\n    f = ''.join(open('data.txt').readlines())\n    if pcname not in f:\n        Camera_control()  # Begin to take photo and identify user.\n        send_email()\n    else:\n        pass\nJudge()\n\nwrite_data()\n\n'''Completed on Sunday, 7 October 2018, 1:50 pm (the wei hour of the 28th day of the 8th lunar month, year Wuxu).\nAuthor: Wu Yurong, Class 5, Grade 10, Minqing No. 1 Middle School'''\n\n#2018.12.06.23:04 update to V6.0\n#2018.12.08,20:52 update to V7.0\n","sub_path":"DiskWarnerFinalQQ.py","file_name":"DiskWarnerFinalQQ.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"466718493","text":"import requests\n\nurl = 'https://httpbin.org/post'\nfiles = {'file': open('report.xls', 'rb')}\n\nresponse = requests.post(url, files=files)\n\n###### Need to look into the below code ##########\n# response.text\n# {\n#     ...\n#     \"files\": {\n#         \"file\": \"<censored...binary...data>\"\n#     },\n#     ...\n# }","sub_path":"pythonBackend/APITesting/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"607410867","text":"from tkinter import *\r\nimport os\r\nimport random\r\nclass RockPaperScissors(Toplevel):\r\n    def __init__(self):\r\n        super(RockPaperScissors,self).__init__()\r\n\r\n        self.THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\r\n\r\n        self.canvasl=Canvas(self,width=200,height=200, bg=\"red\")\r\n        self.canvasl.grid(row=0,column=0)\r\n\r\n        self.label=Label(self,text=\"\")\r\n        self.label.grid(row=0, column=1,columnspan=2)\r\n\r\n        self.canvasr=Canvas(self,width=200,height=200, bg=\"blue\")\r\n        self.canvasr.grid(row=0,column=3)\r\n\r\n\r\n        self.my_filer = os.path.join(self.THIS_FOLDER, 'img\\-tas.png')\r\n        self.photor = PhotoImage(file=self.my_filer)\r\n        self.photoimager = self.photor.subsample(3, 3)\r\n        self.rockButton=Button(self,text=\"Rock\",image=self.photoimager,command=lambda :self.oyna('img\\-tas.png',1))\r\n        self.rockButton.grid(row=1,column=0)\r\n\r\n        self.my_filep = os.path.join(self.THIS_FOLDER, 'img\\-kagit.png')\r\n        self.photop = PhotoImage(file=self.my_filep)\r\n        self.photoimagep = self.photop.subsample(3, 3)\r\n        self.paperButton = Button(self, text=\"Paper\",image=self.photoimagep,command=lambda :self.oyna('img\\-kagit.png',2))\r\n        self.paperButton.grid(row=1,column=1,columnspan=2)\r\n\r\n        self.my_files = os.path.join(self.THIS_FOLDER, 'img\\-makas.png')\r\n        self.photos = PhotoImage(file=self.my_files)\r\n        self.photoimages = self.photos.subsample(3, 3)\r\n        self.scissorsButton = Button(self, text=\"Scissors\",image=self.photoimages, command=lambda 
:self.oyna('img\\-makas.png',3))\r\n        self.scissorsButton.grid(row=1,column=3)\r\n\r\n    def oyna(self,index,t):\r\n        self.canvasr.delete(\"all\")\r\n        self.canvasl.delete(\"all\")\r\n        self.my_filelc = os.path.join(self.THIS_FOLDER, index)\r\n        self.photolc = PhotoImage(file=self.my_filelc)\r\n        self.photoimagelc = self.photolc.subsample(3, 3)\r\n        self.canvasl.create_image(100,100,image=self.photoimagelc)\r\n\r\n        self.random=round(random.uniform(1,3))\r\n        if self.random==1:\r\n            self.my_file1 = os.path.join(self.THIS_FOLDER, 'img\\-tas.png')\r\n            self.photo1 = PhotoImage(file=self.my_file1)\r\n            self.photoimage1 = self.photo1.subsample(3, 3)\r\n            self.canvasr.create_image(100, 100, image=self.photoimage1)\r\n            if t==1:\r\n                self.label.config(text=\"Draw\")\r\n            elif t==2:\r\n                self.label.config(text=\"You win\")\r\n            elif t==3:\r\n                self.label.config(text=\"You lose\")\r\n        elif self.random==2:\r\n            self.my_file2 = os.path.join(self.THIS_FOLDER, 'img\\-makas.png')\r\n            self.photo2 = PhotoImage(file=self.my_file2)\r\n            self.photoimage2 = self.photo2.subsample(3, 3)\r\n            self.canvasr.create_image(100, 100, image=self.photoimage2)\r\n            if t==1:\r\n                self.label.config(text=\"You win\")\r\n            elif t==2:\r\n                self.label.config(text=\"You lose\")\r\n            elif t==3:\r\n                self.label.config(text=\"Draw\")\r\n        elif self.random==3:\r\n            self.my_file3 = os.path.join(self.THIS_FOLDER, 'img\\-kagit.png')\r\n            self.photo3 = PhotoImage(file=self.my_file3)\r\n            self.photoimage3 = self.photo3.subsample(3, 3)\r\n            self.canvasr.create_image(100, 100, image=self.photoimage3)\r\n            if t==1:\r\n                self.label.config(text=\"You lose\")\r\n            elif t==2:\r\n                self.label.config(text=\"Draw\")\r\n            elif t==3:\r\n                self.label.config(text=\"You win\")\r\n\r\nif __name__ == '__main__':\r\n    app=RockPaperScissors()\r\n    app.mainloop()","sub_path":"src/GameRockPaperScissors.py","file_name":"GameRockPaperScissors.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"423749470","text":"from __future__ import print_function, division\n\nimport csv\nimport pickle\nimport os\nimport warnings\nimport tqdm\nfrom progressbar import *\n\nimport numpy as np\nimport skimage\nimport skimage.color\nimport skimage.io\nimport skimage.transform\nimport pandas as pd\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport shelve\nimport itertools\nimport scipy\n\nimport pdb\n\ndef get_labels(metadata_dir, version='v5'):\n    if version == 'v4' or version == 'v5' or version == 'challenge2018':\n        csv_file = 'class-descriptions-boxable.csv' if version == 'v4' or version == 'v5' else 'challenge-2018-class-descriptions-500.csv'\n\n        boxable_classes_descriptions = os.path.join(metadata_dir, csv_file)\n        id_to_labels = {}\n        id_to_labels_idx = {}\n        cls_index = {}\n\n        i = 1\n        with open(boxable_classes_descriptions) as f:\n            for row in csv.reader(f):\n                # make sure the csv row is not empty (usually the last one)\n                if len(row):\n                    label = row[0]\n                    description = row[1].replace(\"\\\"\", \"\").replace(\"'\", \"\").replace('`', '')\n\n                    id_to_labels_idx[i] = label\n                    id_to_labels[i] = description\n                    cls_index[label] = i\n\n                    i += 1\n\n        # Add background class\n        id_to_labels[0] = '__background__'\n        id_to_labels_idx[0] = '/m/back'\n        cls_index['/m/back'] = 0\n\n    else:\n        raise NotImplementedError()\n\n    return id_to_labels, id_to_labels_idx, cls_index\n\n\ndef get_attribute_relationships_labels(metadata_dir, version='v5'):\n    if version == 'v4' or version == 'v5' or version == 'challenge2018':\n        
attributes_csv_file = 'challenge-2018-attributes-description.csv'\n relationships_csv_file = 'challenge-2018-relationships-description.csv'\n\n attribute_descriptions = os.path.join(metadata_dir, attributes_csv_file)\n relationship_descriptions = os.path.join(metadata_dir, relationships_csv_file)\n attr_id_to_labels = {}\n attr_id_to_labels_idx = {}\n attr_index = {}\n rel_id_to_labels = {}\n rel_id_to_labels_idx = {}\n rel_index = {}\n\n # Handle attributes\n i = 1\n with open(attribute_descriptions) as f:\n for row in csv.reader(f):\n # make sure the csv row is not empty (usually the last one)\n if len(row):\n label = row[0]\n description = row[1].replace(\"\\\"\", \"\").replace(\"'\", \"\").replace('`', '')\n\n attr_id_to_labels_idx[i] = label\n attr_id_to_labels[i] = description\n attr_index[label] = i\n\n i += 1\n\n # Add non class to attributes\n attr_id_to_labels[0] = '__none__'\n attr_id_to_labels_idx[0] = '/m/none'\n attr_index['/m/none'] = 0\n\n # Handle relationships\n i = 1\n with open(relationship_descriptions) as f:\n for row in csv.reader(f):\n # make sure the csv row is not empty (usually the last one)\n if len(row):\n label = row[0]\n description = row[1].replace(\"\\\"\", \"\").replace(\"'\", \"\").replace('`', '')\n if description == 'is':\n # 'is' is not a real relationship\n continue\n\n rel_id_to_labels_idx[i] = label\n rel_id_to_labels[i] = description\n rel_index[label] = i\n\n i += 1\n\n # Add non class to relationships\n rel_id_to_labels[0] = '__none__'\n rel_id_to_labels_idx[0] = '/m/none'\n rel_index['/m/none'] = 0\n\n else:\n raise NotImplementedError()\n\n return attr_id_to_labels, attr_id_to_labels_idx, attr_index, rel_id_to_labels, rel_id_to_labels_idx, rel_index\n\n\ndef generate_images_annotations_json(main_dir, metadata_dir, subset, cls_index, version='v5'):\n validation_image_ids = {}\n\n if version == 'v4' or version == 'v5':\n annotations_path = os.path.join(metadata_dir, '{}-annotations-bbox.csv'.format(subset))\n elif version == 'challenge2018':\n validation_image_ids_path = os.path.join(metadata_dir, 'challenge-2018-image-ids-valset-od.csv')\n\n with open(validation_image_ids_path, 'r') as csv_file:\n reader = csv.DictReader(csv_file, fieldnames=['ImageID'])\n reader.next()\n for line, row in enumerate(reader):\n image_id = row['ImageID']\n validation_image_ids[image_id] = True\n\n annotations_path = os.path.join(metadata_dir, 'challenge-2018-train-annotations-bbox.csv')\n else:\n annotations_path = os.path.join(metadata_dir, subset, 'annotations-human-bbox.csv')\n\n fieldnames = ['ImageID', 'Source', 'LabelName', 'Confidence',\n 'XMin', 'XMax', 'YMin', 'YMax',\n 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside']\n\n id_annotations = dict()\n if os.path.exists(annotations_path):\n with open(annotations_path, 'r') as csv_file:\n reader = csv.DictReader(csv_file, fieldnames=fieldnames)\n next(reader)\n\n images_sizes = {}\n for line, row in enumerate(tqdm.tqdm(reader)):\n frame = row['ImageID']\n\n if version == 'challenge2018':\n if subset == 'train':\n if frame in validation_image_ids:\n continue\n elif subset == 'validation':\n if frame not in validation_image_ids:\n continue\n else:\n raise NotImplementedError('This generator handles only the train and validation subsets')\n\n class_name = row['LabelName']\n\n if class_name not in cls_index:\n continue\n\n cls_id = cls_index[class_name]\n\n if version == 'challenge2018':\n # We recommend participants to use the provided subset of the training set as a validation set.\n # This is 
preferable over using the V4 val/test sets, as the training set is more densely annotated.\n img_path = os.path.join(main_dir, 'train', frame + '.jpg')\n else:\n img_path = os.path.join(main_dir, subset, frame + '.jpg')\n\n if frame in images_sizes:\n width, height = images_sizes[frame]\n else:\n try:\n with Image.open(img_path) as img:\n width, height = img.width, img.height\n images_sizes[frame] = (width, height)\n except Exception as ex:\n if version == 'challenge2018':\n raise ex\n continue\n\n x1 = float(row['XMin'])\n x2 = float(row['XMax'])\n y1 = float(row['YMin'])\n y2 = float(row['YMax'])\n\n x1_int = int(round(x1 * width))\n x2_int = int(round(x2 * width))\n y1_int = int(round(y1 * height))\n y2_int = int(round(y2 * height))\n\n # Check that the bounding box is valid.\n if x2 <= x1:\n raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))\n if y2 <= y1:\n raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))\n\n if y2_int == y1_int:\n warnings.warn('filtering line {}: rounding y2 ({}) and y1 ({}) makes them equal'.format(line, y2, y1))\n continue\n\n if x2_int == x1_int:\n warnings.warn('filtering line {}: rounding x2 ({}) and x1 ({}) makes them equal'.format(line, x2, x1))\n continue\n\n img_id = row['ImageID']\n annotation = {'cls_id': cls_id, 'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2}\n\n if img_id in id_annotations:\n annotations = id_annotations[img_id]\n annotations['boxes'].append(annotation)\n else:\n id_annotations[img_id] = {'w': width, 'h': height, 'boxes': [annotation]}\n else:\n # simply cache image informations from the image folder.\n # This is needed for test detections for challenge submission\n print('WARNING: annotation file not present! Supposing test dataset without annotations')\n images_fld = os.path.join(main_dir, subset)\n for image in tqdm.tqdm(os.listdir(images_fld)):\n img_id = os.path.splitext(image)[0]\n img_path = os.path.join(images_fld, image)\n with Image.open(img_path) as img:\n width, height = img.width, img.height\n\n # dummy annotation\n annotation = {'cls_id': 0, 'x1': 0, 'x2': 0, 'y1': 0, 'y2': 0}\n id_annotations[img_id] = {'w': width, 'h': height, 'boxes': [annotation]}\n\n return id_annotations\n\n\ndef generate_images_annotations_vrd_json(main_dir, metadata_dir, subset, cls_index, attr_index, rel_index, version='v5'):\n\n if version == 'v4' or version == 'v5' or version == 'challenge2018':\n filename = 'challenge-2018-train-vrd.csv' if subset == 'train' else '{}-annotations-vrd.csv'.format(subset)\n annotations_path = os.path.join(metadata_dir, filename)\n\n fieldnames = ['ImageID', 'LabelName1', 'LabelName2', 'XMin1', 'XMax1', 'YMin1', 'YMax1', 'XMin2', 'XMax2', 'YMin2',\n 'YMax2', 'RelationshipLabel']\n\n # load the box annotations\n box_annotations = cache_annotations(subset=subset, annotation_fn=generate_images_annotations_json,\n annotation_cache_dir='annotations_cache',\n kwargs=dict(main_dir=main_dir, metadata_dir=metadata_dir,\n cls_index=cls_index, version=version)\n )\n\n id_annotations = dict()\n if os.path.exists(annotations_path):\n with open(annotations_path, 'r') as csv_file:\n vrd_annotations = pd.read_csv(csv_file)\n maxval = len((vrd_annotations['ImageID'].unique()))\n pbar = ProgressBar(widgets=[Percentage(), Bar(), AdaptiveETA()], maxval=maxval).start()\n now = 0\n\n for frame, group in vrd_annotations.groupby('ImageID'):\n if frame not in box_annotations:\n print('Warning! 
Frame {} not in the object detection dictionary'.format(frame))\n continue\n # search for all the bounding boxes in the image\n num_boxes = len(box_annotations[frame]['boxes'])\n all_frame_boxes = np.zeros((num_boxes, 5))\n all_frame_labels = np.zeros(num_boxes)\n for idx, f in enumerate(box_annotations[frame]['boxes']):\n all_frame_boxes[idx, 0] = f['x1']\n all_frame_boxes[idx, 1] = f['y1']\n all_frame_boxes[idx, 2] = f['x2']\n all_frame_boxes[idx, 3] = f['y2']\n all_frame_boxes[idx, 4] = f['cls_id']\n\n rel_boxes = group.loc[:, [\"XMin1\", \"YMin1\", \"XMax1\", \"YMax1\", \"XMin2\", \"YMin2\", \"XMax2\", \"YMax2\"]].values.astype(np.float32)\n rel_obj_labels = group.loc[:, [\"LabelName1\", \"LabelName2\"]]\n rel_labels = group.loc[:, [\"RelationshipLabel\"]]\n # insert also class information, otherwise there are problems in case of overlapping boxes\n cls_and_attr_index = {**cls_index, **attr_index}\n rel_boxes = np.insert(rel_boxes, 4, [cls_and_attr_index[i] for i in rel_obj_labels.values[:, 0]], axis=1)\n rel_boxes = np.insert(rel_boxes, 9, [cls_and_attr_index[i] for i in rel_obj_labels.values[:, 1]], axis=1)\n rel_boxes_int = (rel_boxes * 100).astype(np.int)\n\n # Handle relationships\n relationships = np.zeros((num_boxes, num_boxes))\n\n for (idx1, box1), (idx2, box2) in itertools.permutations(enumerate(all_frame_boxes), 2):\n actual_couple_int = (np.concatenate((box1, box2), axis=None) * 100).astype(np.int)\n\n found_idxs = (rel_boxes_int == actual_couple_int).all(axis=1).nonzero()[0]\n # found_idx = rel_boxes_int_list.index(list(actual_couple_int))\n if len(found_idxs) > 0:\n # this is a relationship match\n actual_rel_labels = rel_labels.values[found_idxs, 0]\n '''# it is possible that two different boxes have the same coordinates.\n # Ensure that the 'is' relationship, in this case, is being excluded\n actual_rel_labels = actual_rel_labels[actual_rel_labels != 'is']\n if len(actual_rel_labels) > 0:\n pdb.set_trace()\n # get the relation that is not the 'is' one\n actual_rel_label = actual_rel_labels[0]\n actual_rel_id = rel_index[actual_rel_label]\n else:\n actual_rel_id = 0\n '''\n # There should be no 'is' relationships at this point, so the first relation that matches\n # should be the right one\n actual_rel_label = actual_rel_labels[0]\n assert actual_rel_label != 'is', \"Should NOT be an is relationship!\"\n actual_rel_id = rel_index[actual_rel_label]\n else:\n # this couple is not linked\n actual_rel_id = 0\n\n relationships[idx1, idx2] = actual_rel_id\n\n # Handle attributes\n # Create a one-hot coding for each object in the frame.\n attributes = np.zeros((num_boxes, len(attr_index)))\n for idx, box in enumerate(all_frame_boxes):\n actual_couple_int = (np.concatenate((box, box), axis=None) * 100).astype(np.int)\n\n # find all the indexes of the objects having at least one attribute\n found_idxs = (rel_boxes_int[:, :9] == actual_couple_int[:9]).all(axis=1).nonzero()[0]\n if len(found_idxs) > 0:\n # this object has possibly many attributes\n actual_rel_labels = rel_labels.values[found_idxs, 0]\n # filter spurious entries that are not 'is'\n filtered = actual_rel_labels == 'is'\n attributes_labels = rel_obj_labels.values[found_idxs[filtered], 1]\n attributes_id = [attr_index[i] for i in attributes_labels]\n\n attributes[idx, attributes_id] = 1\n\n # else:\n # do nothing, this attributes row is already full of zeros\n\n # sparsify relationships and attributes for space efficiency\n relationships = scipy.sparse.csr_matrix(relationships)\n attributes = 
scipy.sparse.csr_matrix(attributes)\n\n id_annotations[frame] = {'w': box_annotations[frame]['w'], 'h': box_annotations[frame]['h'],\n 'boxes': box_annotations[frame]['boxes'],\n 'relationships': relationships, 'attributes': attributes}\n now += 1\n pbar.update(now)\n else:\n # simply cache image informations from the image folder.\n # This is needed for test detections for challenge submission\n print('WARNING: annotation file not present! Supposing test dataset without annotations')\n images_fld = os.path.join(main_dir, subset)\n for image in tqdm.tqdm(os.listdir(images_fld)):\n img_id = os.path.splitext(image)[0]\n img_path = os.path.join(images_fld, image)\n with Image.open(img_path) as img:\n width, height = img.width, img.height\n\n # dummy annotations\n boxes = {'cls_id': 0, 'x1': 0, 'x2': 0, 'y1': 0, 'y2': 0}\n attributes = scipy.sparse.csr_matrix(np.zeros((1,1)))\n relationships = scipy.sparse.csr_matrix(np.zeros((1,1)))\n id_annotations[img_id] = {'w': width, 'h': height, 'boxes': [boxes], 'attributes': attributes,\n 'relationships': relationships}\n\n return id_annotations\n\n\ndef cache_annotations(subset, annotation_fn, all_in_memory=True, annotation_cache_dir='annotations_cache', kwargs={}):\n ext = '.pkl' if all_in_memory else '.db'\n annotation_cache_filename = os.path.join(annotation_cache_dir, subset + ext)\n if os.path.exists(annotation_cache_filename + '.dat') or os.path.exists(annotation_cache_filename):\n print('Loading cached annotations: {}'.format(annotation_cache_filename))\n if all_in_memory:\n with open(annotation_cache_filename, 'rb') as f:\n annotations = pickle.load(f)\n else:\n annotations = shelve.open(annotation_cache_filename)\n\n else:\n print('Caching annotations to file: {}'.format(annotation_cache_filename))\n annotations = annotation_fn(subset=subset, **kwargs)\n if all_in_memory:\n with open(annotation_cache_filename, \"wb\") as f:\n pickle.dump(annotations, f)\n else:\n with shelve.open(annotation_cache_filename) as f:\n for id, ann in annotations.items():\n f[id] = ann\n annotations = shelve.open(annotation_cache_filename)\n\n return annotations\n\n\nclass OidDataset(Dataset):\n \"\"\"Oid dataset.\"\"\"\n\n def __init__(self, main_dir, subset, version='v5', annotation_cache_dir='annotations_cache', transform=None, all_in_memory=True):\n if version == 'v4':\n metadata = '2018_04'\n elif version == 'challenge2018':\n metadata = 'challenge2018'\n elif version == 'v3':\n metadata = '2017_11'\n elif version == 'v5':\n metadata = 'metadata'\n else:\n raise NotImplementedError('There is currently no implementation for versions older than v3')\n\n self.transform = transform\n\n if version == 'challenge2018':\n self.base_dir = os.path.join(main_dir, 'images', 'train')\n else:\n self.base_dir = os.path.join(main_dir, subset)\n\n metadata_dir = os.path.join(main_dir, metadata)\n ext = '.pkl' if all_in_memory else '.db'\n annotation_cache_filename = os.path.join(annotation_cache_dir, subset + ext)\n\n self.id_to_labels, self.id_to_labels_idx, cls_index = get_labels(metadata_dir, version=version)\n\n self.annotations = cache_annotations(subset=subset, annotation_fn=generate_images_annotations_json,\n annotation_cache_dir='annotations_cache',\n kwargs=dict(main_dir=main_dir, metadata_dir=metadata_dir,\n cls_index=cls_index, version=version)\n )\n\n self.id_to_image_id = dict([(i, k) for i, k in enumerate(self.annotations)])\n\n # (label -> name)\n self.labels = self.id_to_labels\n\n def __len__(self):\n return len(self.annotations)\n\n def __getitem__(self, 
idx):\n\n img = self.load_image(idx)\n annot = self.load_annotations(idx)\n # sample = {'img': img, 'annot': annot}\n target = {}\n target['boxes'] = annot[:, :4]\n target['labels'] = annot[:, 4]\n if self.transform:\n img, target = self.transform(img, target)\n\n return img, target\n\n def image_path(self, image_index):\n path = os.path.join(self.base_dir, self.id_to_image_id[image_index] + '.jpg')\n return path\n\n def load_image(self, image_index):\n path = self.image_path(image_index)\n img = skimage.io.imread(path)\n\n if len(img.shape) == 1:\n img = img[0]\n\n if len(img.shape) == 2:\n img = skimage.color.gray2rgb(img)\n\n try:\n return img.astype(np.float32) / 255.0\n except Exception:\n print(path)\n exit(0)\n\n def load_annotations(self, image_index):\n # get ground truth annotations\n image_annotations = self.annotations[self.id_to_image_id[image_index]]\n\n labels = image_annotations['boxes']\n\n height, width = image_annotations['h'], image_annotations['w']\n\n boxes = np.zeros((len(labels), 5))\n for idx, ann in enumerate(labels):\n cls_id = ann['cls_id']\n x1 = ann['x1'] * width\n x2 = ann['x2'] * width\n y1 = ann['y1'] * height\n y2 = ann['y2'] * height\n\n boxes[idx, 0] = x1\n boxes[idx, 1] = y1\n boxes[idx, 2] = x2\n boxes[idx, 3] = y2\n boxes[idx, 4] = cls_id\n\n return boxes\n\n # used for aspect ratio sampler\n def image_aspect_ratio(self, image_index):\n img_annotations = self.annotations[self.id_to_image_id[image_index]]\n height, width = img_annotations['h'], img_annotations['w']\n return float(width) / float(height)\n\n # used for balanced sampler\n def build_class_frequencies(self):\n freq = {}\n idxs = list(range(len(self)))\n for idx in tqdm.tqdm(idxs):\n ann = self.annotations[self.id_to_image_id[idx]]\n classes = [v['cls_id'] for v in ann['boxes']]\n for c in classes:\n if c not in freq:\n freq[c] = set()\n freq[c].add(idx)\n return freq\n\n def num_classes(self):\n return len(self.id_to_labels)\n\n def evaluate(self, all_detections, output_dir, file_identifier=\"\"):\n \"\"\"\n Evaluates detections and put the results in a file into outdir\n :param all_detections: list[image_index, list[boxes], list[labels]]\n :param output_dir: file where detection results will be stored\n :param file_identifier: optionally, a identifier for the file\n :return: optionally, a dictionary of metrics\n \"\"\"\n\n # MODE 1 (python evaluation)\n det_dict = {\n 'ImageID': [],\n 'XMin': [],\n 'XMax': [],\n 'YMin': [],\n 'YMax': [],\n 'Score': [],\n 'LabelName': []\n }\n\n for image_index, boxes, labels, scores in all_detections:\n img_annotations = self.annotations[self.id_to_image_id[image_index]]\n for box, label, score in zip(boxes, labels, scores):\n # add this detection to the dict\n det_dict['ImageID'].append(self.id_to_image_id[image_index])\n det_dict['XMin'].append(np.clip(box[0] / img_annotations['w'], 0, 1))\n det_dict['YMin'].append(np.clip(box[1] / img_annotations['h'], 0, 1))\n det_dict['XMax'].append(np.clip(box[2] / img_annotations['w'], 0, 1))\n det_dict['YMax'].append(np.clip(box[3] / img_annotations['h'], 0, 1))\n det_dict['Score'].append(score)\n det_dict['LabelName'].append(self.id_to_labels_idx[label])\n\n # dump dict on a csv file\n df = pd.DataFrame(det_dict)\n out_filename = os.path.join(output_dir, 'detections_{}.csv'.format(file_identifier))\n df.to_csv(out_filename, index=False, float_format='%.6f')\n\n # MODE 2 (challenge)\n\n predictions = []\n\n for image_index, boxes, labels, scores in all_detections:\n detections = []\n img_annotations = 
self.annotations[self.id_to_image_id[image_index]]\n for box, label, score in zip(boxes, labels, scores):\n # add this detection to the dict\n det_str = \"{} {:f} {:f} {:f} {:f} {:f}\".format(\n self.id_to_labels_idx[label],\n score,\n np.clip(box[0] / img_annotations['w'], 0, 1),\n np.clip(box[1] / img_annotations['h'], 0, 1),\n np.clip(box[2] / img_annotations['w'], 0, 1),\n np.clip(box[3] / img_annotations['h'], 0, 1)\n )\n detections.append(det_str)\n\n predictions.append(\n {'ImageID': self.id_to_image_id[image_index],\n 'PredictionString': \" \".join(detections)}\n )\n\n # dump dict on a csv file\n df = pd.DataFrame(predictions)\n out_filename = os.path.join(output_dir, 'detections_{}_competitionformat.csv'.format(file_identifier))\n df.to_csv(out_filename, index=False, float_format='%.6f')\n\n\nclass OidDatasetVRD(Dataset):\n \"\"\"Oid dataset.\"\"\"\n\n def __init__(self, main_dir, subset, version='v5', transform=None):\n if version == 'v4':\n metadata = '2018_04'\n elif version == 'challenge2018':\n metadata = 'challenge2018'\n elif version == 'v3':\n metadata = '2017_11'\n elif version == 'v5':\n metadata = 'metadata'\n else:\n raise NotImplementedError('There is currently no implementation for versions older than v3')\n\n self.transform = transform\n\n if version == 'challenge2018':\n self.base_dir = os.path.join(main_dir, 'images', 'train')\n else:\n self.base_dir = os.path.join(main_dir, subset)\n\n metadata_dir = os.path.join(main_dir, metadata)\n\n self.id_to_labels, self.id_to_labels_idx, cls_index = get_labels(metadata_dir, version=version)\n self.attr_id_to_labels, \\\n self.attr_id_to_labels_idx, \\\n attr_index, \\\n self.rel_id_to_labels, \\\n self.rel_id_to_labels_idx, \\\n rel_index = get_attribute_relationships_labels(metadata_dir, version=version)\n\n self.annotations = cache_annotations(subset=subset, annotation_fn=generate_images_annotations_vrd_json,\n annotation_cache_dir='annotations_cache_vrd',\n kwargs=dict(main_dir=main_dir, metadata_dir=metadata_dir,\n cls_index=cls_index, attr_index=attr_index, rel_index=rel_index,\n version=version)\n )\n\n self.id_to_image_id = dict([(i, k) for i, k in enumerate(self.annotations)])\n\n # (label -> name)\n self.labels = self.id_to_labels\n\n def __len__(self):\n return len(self.annotations)\n\n def __getitem__(self, idx):\n\n img = self.load_image(idx)\n boxes, attributes, relationships = self.load_annotations(idx)\n # sample = {'img': img, 'annot': annot}\n target = {}\n target['boxes'] = boxes[:, :4]\n target['labels'] = boxes[:, 4]\n\n target['attributes'] = attributes\n target['relationships'] = relationships\n\n if self.transform:\n img, target = self.transform(img, target)\n return img, target\n\n def image_path(self, image_index):\n path = os.path.join(self.base_dir, self.id_to_image_id[image_index] + '.jpg')\n return path\n\n def load_image(self, image_index):\n path = self.image_path(image_index)\n img = skimage.io.imread(path)\n\n if len(img.shape) == 1:\n img = img[0]\n\n if len(img.shape) == 2:\n img = skimage.color.gray2rgb(img)\n\n try:\n return img.astype(np.float32) / 255.0\n except Exception:\n print(path)\n exit(0)\n\n def load_annotations(self, image_index):\n # get ground truth annotations\n image_annotations = self.annotations[self.id_to_image_id[image_index]]\n\n labels = image_annotations['boxes']\n attributes = image_annotations['attributes']\n relationships = image_annotations['relationships']\n\n height, width = image_annotations['h'], image_annotations['w']\n\n boxes = np.zeros((len(labels), 
5))\n for idx, ann in enumerate(labels):\n cls_id = ann['cls_id']\n x1 = ann['x1'] * width\n x2 = ann['x2'] * width\n y1 = ann['y1'] * height\n y2 = ann['y2'] * height\n\n boxes[idx, 0] = x1\n boxes[idx, 1] = y1\n boxes[idx, 2] = x2\n boxes[idx, 3] = y2\n boxes[idx, 4] = cls_id\n\n return boxes, attributes, relationships\n\n # used for aspect ratio sampler\n def image_aspect_ratio(self, image_index):\n img_annotations = self.annotations[self.id_to_image_id[image_index]]\n height, width = img_annotations['h'], img_annotations['w']\n return float(width) / float(height)\n\n def build_relationships_frequencies(self):\n freq = {}\n idxs = list(range(len(self)))\n for idx in tqdm.tqdm(idxs):\n ann = self.annotations[self.id_to_image_id[idx]]\n relationships = ann['relationships'].toarray().reshape(-1)\n for r in relationships:\n if r not in freq:\n freq[r] = set()\n freq[r].add(idx)\n # the 0 index does not care at this point, delete\n del freq[0]\n return freq\n\n def build_attributes_frequencies(self):\n freq = {}\n idxs = list(range(len(self)))\n for idx in tqdm.tqdm(idxs):\n ann = self.annotations[self.id_to_image_id[idx]]\n _, attributes = np.nonzero(ann['attributes'].toarray())\n for a in attributes:\n if a not in freq:\n freq[a] = set()\n freq[a].add(idx)\n return freq\n\n def num_classes(self):\n return len(self.id_to_labels)\n\n def num_attributes(self):\n return len(self.attr_id_to_labels)\n\n def num_relationships(self):\n return len(self.rel_id_to_labels)\n\n def evaluate(self, all_detections, output_dir, file_identifier=\"\"):\n \"\"\"\n Evaluates detections and put the results in a file into outdir\n\n :param all_detections: list[image_index, list[subj_boxes], list[subj_labels], list[obj_boxes], list[obj_labels], list[rel_labels], list[rel_scores]]\n :param output_dir: file where detection results will be stored\n :param file_identifier: optionally, a identifier for the file\n :return: optionally, a dictionary of metrics\n\n \"\"\"\n\n # MODE 1 (python evaluation)\n '''\n det_dict = {\n 'ImageID': [],\n 'XMin': [],\n 'XMax': [],\n 'YMin': [],\n 'YMax': [],\n 'Score': [],\n 'LabelName': []\n }\n\n for image_index, boxes, labels, scores in all_detections:\n img_annotations = self.annotations[self.id_to_image_id[image_index]]\n for box, label, score in zip(boxes, labels, scores):\n # add this detection to the dict\n det_dict['ImageID'].append(self.id_to_image_id[image_index])\n det_dict['XMin'].append(np.clip(box[0] / img_annotations['w'], 0, 1))\n det_dict['YMin'].append(np.clip(box[1] / img_annotations['h'], 0, 1))\n det_dict['XMax'].append(np.clip(box[2] / img_annotations['w'], 0, 1))\n det_dict['YMax'].append(np.clip(box[3] / img_annotations['h'], 0, 1))\n det_dict['Score'].append(score)\n det_dict['LabelName'].append(self.id_to_labels_idx[label])\n\n # dump dict on a csv file\n df = pd.DataFrame(det_dict)\n out_filename = os.path.join(output_dir, 'detections_{}.csv'.format(file_identifier))\n df.to_csv(out_filename, index=False, float_format='%.6f')\n\n '''\n # MODE 2 (challenge)\n\n predictions = []\n # handle attributes\n for image_index, s_boxes, s_labels, o_boxes, o_labels, r_labels, r_scores in all_detections:\n detections = []\n img_annotations = self.annotations[self.id_to_image_id[image_index]]\n\n for s_box, s_label, o_box, o_label, r_label, r_score in zip(s_boxes, s_labels, o_boxes, o_labels, r_labels, r_scores):\n # add this detection to the dict\n det_str = \"{:f} {} {:f} {:f} {:f} {:f} {} {:f} {:f} {:f} {:f} {}\".format(\n r_score,\n self.id_to_labels_idx[s_label],\n 
np.clip(s_box[0] / img_annotations['w'], 0, 1),\n np.clip(s_box[1] / img_annotations['h'], 0, 1),\n np.clip(s_box[2] / img_annotations['w'], 0, 1),\n np.clip(s_box[3] / img_annotations['h'], 0, 1),\n self.id_to_labels_idx[o_label] if r_label is not -1 else self.attr_id_to_labels_idx[o_label],\n np.clip(o_box[0] / img_annotations['w'], 0, 1),\n np.clip(o_box[1] / img_annotations['h'], 0, 1),\n np.clip(o_box[2] / img_annotations['w'], 0, 1),\n np.clip(o_box[3] / img_annotations['h'], 0, 1),\n self.rel_id_to_labels_idx[r_label] if r_label is not -1 else 'is'\n )\n detections.append(det_str)\n\n predictions.append(\n {'ImageID': self.id_to_image_id[image_index],\n 'PredictionString': \" \".join(detections)}\n )\n\n # dump dict on a csv file\n df = pd.DataFrame(predictions)\n out_filename = os.path.join(output_dir, 'detections_{}_competitionformat.csv'.format(file_identifier))\n df.to_csv(out_filename, index=False, float_format='%.6f')\n\n\nif __name__ == '__main__':\n #from dataloader import BalancedSampler\n #import sys\n #sys.path.append('..')\n from dataloader import collate_fn\n from torch.utils.data import DataLoader\n from collections import Counter\n\n dataset_train = OidDatasetVRD('/media/nicola/SSD/Datasets/OpenImages', subset='train')\n # sampler = BalancedSampler(dataset_train, batch_size=4, drop_last=False)\n dataloader_train = DataLoader(dataset_train, num_workers=8, batch_size=1, collate_fn=collate_fn)\n\n attr_list = []\n rel_list = []\n imgs_with_no_rels = 0\n imgs_with_no_attrs = 0\n imgs_with_no_attrs_rels = 0\n\n ### Calculate some stats\n '''for idx, (_, target) in enumerate(tqdm.tqdm(dataloader_train)):\n # calculate some vrd stat\n na = False\n nr = False\n target = target[0]\n attributes = np.asarray(target['attributes'].todense(), dtype=np.int).reshape(-1)\n relationships = np.asarray(target['relationships'].todense(), dtype=np.int).reshape(-1)\n\n attr_list.extend(list(attributes))\n rel_list.extend(list(relationships))\n\n if np.all(attributes == 0):\n imgs_with_no_attrs += 1\n na = True\n if np.all(relationships == 0):\n imgs_with_no_rels += 1\n nr = True\n if na and nr:\n imgs_with_no_attrs_rels += 1\n\n print('Relations frequencies: {}'.format(Counter(rel_list)))\n print('Attributes frequencies: {}'.format(Counter(attr_list)))\n print('Num images with no attributes: {}'.format(imgs_with_no_attrs))\n print('Num images with no relationships: {}'.format(imgs_with_no_rels))\n print('Num images with both no attributes and relationships: {}'.format(imgs_with_no_attrs_rels))'''\n\n ### Test frequencies methods\n print('Attributes Frequencies')\n freq = dataset_train.build_attributes_frequencies()\n for k,v in freq.items():\n print('{}: {}'.format(k, len(v)))\n\n print('Relationships Frequencies')\n freq = dataset_train.build_relationships_frequencies()\n for k, v in freq.items():\n print('{}: {}'.format(k, len(v)))\n\n\n\n","sub_path":"datasets/oid_dataset.py","file_name":"oid_dataset.py","file_ext":"py","file_size_in_byte":36162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"500638421","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, request, redirect, url_for, render_template\nimport os\nimport json\nimport glob\nfrom uuid import uuid4\nimport app.project as project\nimport sys\n\n#reload(sys)\n#sys.setdefaultencoding(\"utf-8\")\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/index_contacts')\ndef contacts():\n return 
render_template(\"index_contacts.html\")\n\n@app.route('/index_blog')\ndef blog():\n return render_template(\"index_blog.html\")\n\n@app.route('/index_about')\ndef about():\n return render_template(\"index_about.html\")\n\n@app.route(\"/upload\", methods=[\"POST\"])\ndef upload():\n \"\"\"Handle the upload of a file.\"\"\"\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n upload_key = str(uuid4())\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = \"app/static/uploads/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n print(\"=== Form Data ===\")\n for key, value in form.items():\n print(key, \"=>\", value)\n\n # upload = request.files['file_ukr']\n # filename = 'file_ukr'\n # destination = \"/\".join([target, filename])\n # print \"Accept incoming file:\", filename\n # print \"Save it to:\", destination\n # upload.save(destination)\n #\n # upload = request.files['file_source']\n # filename = 'file_source'\n # destination = \"/\".join([target, filename])\n # print \"Accept incoming file:\", filename\n # print \"Save it to:\", destination\n # upload.save(destination)\n for upload in request.files.getlist(\"file\"):\n filename = upload.filename.rsplit(\"/\")[0]\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", destination)\n upload.save(destination)\n \n if is_ajax:\n return ajax_response(True, upload_key)\n else:\n return redirect(url_for(\"upload_complete\", uuid=upload_key))\n\n@app.route(\"/files/<uuid>\")\ndef upload_complete(uuid):\n \"\"\"The location we send them to at the end of the upload.\"\"\"\n\n # Get their files.\n root = \"app/static/uploads/{}\".format(uuid)\n if not os.path.isdir(root):\n return \"Error: UUID not found!\"\n\n files = []\n for file in glob.glob(\"{}/*.*\".format(root)):\n fname = file.split(os.sep)[-1]\n files.append(fname)\n\n #try:\n return render_template(\"files.html\",\n uuid=uuid,\n files=project.project(os.path.join(root, files[0]), os.path.join(root, files[1]))\n )\n #except:\n #return redirect(url_for(\"index\"))\n\ndef ajax_response(status, msg):\n status_code = \"ok\" if status else \"error\"\n return json.dumps(dict(\n status=status_code,\n msg=msg,\n ))\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"568363378","text":"\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome('C:/Users/Rikman/Documents/Devops/chromedriver.exe')\n\n\ndomain_file = open(\"C:\\\\Users\\\\Rikman\\\\Documents\\\\Devops\\\\domain_list.txt\" , 'r', encoding='utf-8')\n\nweb_address = domain_file.read()\n\ndriver.get(web_address)\n\nprint\n\ngg\n\n\n#driver.implicitly_wait(5)\n\n#Show the list of Dropdown Price Point. Make is visible\nprice_point = driver.find_elements_by_class_name(\"chosen-single\")\nprice_point[0].click()\n\n#Choose a price for gift\nprice_point2 = driver.find_elements_by_class_name(\"active-result\")\nprice_point2[3].click()\n\n#Show the list of Area. 
Make it visible\r\nprice_point = driver.find_elements_by_class_name(\"chosen-single\")\r\nprice_point[1].click()\r\n\r\n#Choose an area from the list\r\nprice_point2 = driver.find_elements_by_class_name(\"active-result\")\r\nprice_point2[3].click()\r\n\r\n#Show category list - make it visible\r\nprice_point = driver.find_elements_by_class_name(\"chosen-single\")\r\nprice_point[2].click()\r\n\r\n#Choose category from the list\r\nprice_point2 = driver.find_elements_by_class_name(\"active-result\")\r\nprice_point2[2].click()\r\n\r\n\r\n\r\n#Search gifts\r\nmy_submit2 = driver.find_element_by_class_name(\"ui-btn\")\r\nmy_submit2.click()\r\n\r\n#Choosing the business\r\nbusiness1 = driver.find_elements_by_class_name(\"supplier-logo\")\r\nbusiness1[1].click()\r\n\r\n#Enter price for Gift card\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nmoney1 = driver.find_element_by_xpath(\"//input[@type='tel']\")\r\nmoney1.send_keys(\"200\")\r\n\r\nmoney1 = driver.find_element_by_xpath(\"//input[@type='tel']\")\r\nmoney1.send_keys(Keys.ENTER)\r\n\r\n#Press Radio button for someone else\r\n\r\n\r\n# Enter Receiver name\r\nwho = driver.find_elements_by_class_name(\"ui-input\")\r\nwho[0].send_keys(\"Dad\")\r\n# Enter sender name\r\nwho[2].send_keys(\"Einat\")\r\n\r\n#Enter Blessings\r\nblessings = driver.find_element_by_class_name(\"ui-textarea\")\r\nblessings.send_keys(\"Congratulations Dad! Enjoy the gift :)\")\r\n\r\n# Pick the event\r\nprice_point = driver.find_elements_by_class_name(\"chosen-single\")\r\nprice_point[10].click()\r\n\r\nprice_point2 = driver.find_elements_by_class_name(\"active-result\")\r\nprice_point2[3].click()\r\n\r\n#Upload a picture\r\ndriver.find_element_by_name(\"fileUpload\").send_keys('C://Users/Einat/Pictures/2.jpg')\r\n\r\n#Send by mail\r\ndriver.find_element_by_class_name('icon-envelope').click()\r\n\r\n#Email info\r\ndriver.find_element_by_class_name('inpnut-mail').send_keys(\"einat@gmail.com\")\r\ndriver.find_element_by_class_name('input-mail').send_keys(Keys.ENTER)\r\n\r\n#Final gift Submit\r\ndriver.find_element_by_class_name('shubmit-wrapper').click()\r\n\r\n#Close Window\r\n#driver.close()","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"203685134","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 25 02:43:47 2019\r\n\r\n@author: Salim\r\n\"\"\"\r\nfrom a import *\r\n\r\nprint(\"\"\" dogs \r\n      1=add\r\n      2=show\r\n      \"\"\") \r\nwhile True:\r\n    işlem=input("Enter an operation::::")\r\n    if(işlem=="q"):\r\n        break\r\n    elif(işlem=="1"):\r\n        ad=input("Enter the dog's name::")\r\n        cinsi=input("Enter the dog's breed::")\r\n        renk=input("Enter the dog's color::::")\r\n        #köpekler.adı1=ad\r\n        #köpekler.cinsi1=cinsi\r\n        #köpekler.renk1=renk\r\n        köpekler.köpekekle(ad,cinsi,renk)\r\n    elif(işlem=="2"):\r\n        print("Dog's name:{}\ncinsi:{}\nrenk:{}".format(köpekler.adı1,köpekler.cinsi1,köpekler.renk1))\r\n    else:\r\n        print("Wrong input")\r\n    \r\n    \r\n\r\n","sub_path":"Hayvanlar.py","file_name":"Hayvanlar.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"639327062","text":"from sys import argv\nfrom math import log2\n\ndef entropia(s):\n    d = {}\n    T = len(s)\n\n    for i in s:\n        if i in d: d[i] += 1/T\n        else: d[i] = 1/T\n\n    return -sum(map(lambda c: c*log2(c), d.values()))\n\nfor a in argv[1:]:\n    f = open(a)\n    print(a+": 
\"+str(entropia(f.read())))\n","sub_path":"entropia/entropia.py","file_name":"entropia.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"218515002","text":"\nfrom subprocess import check_output, CalledProcessError\nimport re\nimport time\nimport threading\nimport curses\nfrom link_hub_gui import screen_lock\nimport http.client\nimport logging\nimport requests\n\npending_requests = {}\nrequest_lock = threading.Lock()\ninterface = None\n\ndef set_interface(interface_name):\n\tlogger = logging.getLogger('wifi ')\n\tlogger.debug(\"setting interface=%s\", interface_name)\n\tglobal interface\n\tinterface = interface_name\n\ndef disconnect(ssid):\n\tlogger = logging.getLogger('wifi ')\n\t\n\tlogger.debug('disconnect %s', ssid)\n\tdone_condition = threading.Condition()\n\trequest = {'ssid':ssid, 'command':'disconnect', 'done_condition':done_condition}\n\twith request_lock:\n\t\tcommand_queue = [ request ]\n\t\tif (ssid in pending_requests):\n\t\t\tlogger.debug('pending_requests[ssid]=%s', str(pending_requests[ssid]))\n\t\t\tlogger.debug('appending pending request')\n\t\t\tcommand_queue = pending_requests[ssid].extend(command_queue)\n\t\t\tlogger.debug('pending_requests[ssid]=%s', str(pending_requests[ssid]))\n\t\telse:\n\t\t\tlogger.debug('new http request list')\n\t\t\tpending_requests[ssid] = command_queue\n\t\t\tlogger.debug('pending_requests[ssid]=%s', str(pending_requests[ssid]))\n\ndef scan():\n\tlogger = logging.getLogger('wifi ')\n\n\tlogger.debug('start scan')\n\tdone_condition = threading.Condition()\n\trequest = {'ssid':'scan', 'done_condition':done_condition}\n\n\twith request_lock:\n\t\tcommand_queue = [ request ]\n\t\tif ('scan' in pending_requests):\n\t\t\tlogger.debug('appending to pending requests')\n\t\t\tcommand_queue = pending_requests['scan'].extend(command_queue)\n\t\telse:\n\t\t\tlogger.debug('new scan request list')\n\t\t\tpending_requests['scan'] = command_queue\n\n\tlogger.debug('released lock')\n\twith done_condition:\n\t\tdone_condition.wait();\n\n\tlogger.debug('finished scan request')\n\treturn request['ssids']\n\ndef queue_http_request_async(ssid, url, command, params, headers):\n\tlogger = logging.getLogger('wifi ')\n\t\n\trequest = {\n\t\t'ssid':ssid, \n\t\t'url':url, \n\t\t'command':command, \n\t\t'params':params, \n\t\t'headers':headers, \n\t\t'done_condition':threading.Condition()\n\t}\n\n\tif (not url):\n\t\trequest['url'] = r'/'\n\n\tlogger.debug('request=%s', request)\n\n\twith request_lock:\n\t\tcommand_queue = [ request ]\n\t\tif (ssid in pending_requests):\n\t\t\tlogger.debug('appending pending request')\n\t\t\tcommand_queue = pending_requests[ssid].extend(command_queue)\n\t\telse:\n\t\t\tlogger.debug('new http request list')\n\t\t\tpending_requests[ssid] = command_queue\n\treturn request\n\ndef queue_http_request_blocking(ssid, url, command, params, headers):\n\tlogger = logging.getLogger('wifi ')\n\tlogger.debug('requesting async')\n\trequest = queue_http_request_async(ssid, url, command, params, headers)\n\tlogger.debug('waiting')\n\tdone_condition = request['done_condition']\n\twith done_condition:\n\t\tdone_condition.wait()\n\tlogger.debug(\"http request done\")\n\treturn request\n\ndef thread(stop_event):\n\tlogger = logging.getLogger('wifi ')\n\n\twith screen_lock:\n\t\twindow = curses.newwin(3, 40, 3, 0)\n\t\twindow.clear()\n\t\t#window.addstr(2, 0, \"thread top \")\n\tconnected_ssid = None\n\tcount = 0\n\n\tlogger.debug('starting thread 
loop')\n\twhile (not stop_event.is_set()):\n\t\tssid = None\n\t\trequest = None\n\t\t\n\t\tlogger.debug('starting request poll loop')\n\t\twhile not request:\n\t\t\twith request_lock:\n\t\t\t\tif (connected_ssid and connected_ssid in pending_requests):\n\t\t\t\t\tlogger.debug('connected ssid %s has pending', connected_ssid)\n\t\t\t\t\tssid = connected_ssid\n\t\t\t\telif (len(pending_requests.keys()) > 0):\n\t\t\t\t\tlogger.debug('no pending requests for %s, but pending requests for someone else', connected_ssid)\n\t\t\t\t\tssid = list(pending_requests.keys())[0]\n\t\t\t\t\tlogger.debug('found request for ssid %s', ssid)\n\n\t\t\t\tif (ssid):\n\t\t\t\t\tlogger.debug('getting next request from %s pending queue', ssid)\n\t\t\t\t\tssid_requests = pending_requests[ssid]\n\t\t\t\t\tlogger.debug('new request: %s', ssid_requests)\n\t\t\t\t\trequest = ssid_requests.pop(0)\n\t\t\t\t\tlogger.debug('request: %s', request)\n\t\t\t\t\tlogger.debug('requests len=%d, %s', len(pending_requests.keys()), ssid_requests)\n\t\t\t\t\tlogger.debug('pending_requests=%s', pending_requests)\n\t\t\t\t\tif (len(ssid_requests) == 0):\n\t\t\t\t\t\tlogger.debug('last request, removing entry')\n\t\t\t\t\t\tdel pending_requests[ssid]\n\n\t\t\tif (not request):\n\t\t\t\ttime.sleep(1)\n\n\t\twith screen_lock:\n\t\t\twindow.clear()\n\n\t\tlogger.debug('starting request for %s', ssid)\n\t\tif (ssid == \"scan\"):\n\t\t\tlogger.debug('scan request')\n\t\t\tcount += 1\n\t\t\tlogger.debug('scan start for ssid %s iface %s', ssid, interface)\n#\t\t\toutput = check_output(\"iwlist \" + interface +\" scan\", shell=True).decode(\"utf-8\")\n#\t\t\tpattern=\"ESSID:\\\"(QuirkySetup-[0-9a-fA-F]{4})\"\n\t\t\toutput = check_output(\"nmcli dev wifi list\", shell=True).decode(\"utf-8\")\n\t\t\tlogger.debug('output= %s', output)\n\t\t\tpattern=\"\\'(QuirkySetup-[0-9a-fA-F]{4})\\'\"\n\t\t\tssids = set(re.findall(pattern, output, re.M))\n\t\t\tlogger.debug('ssids= %s', ssids)\n\t\t\trequest['ssids'] = ssids\n\t\t\t\n\t\t\twith screen_lock:\n\t\t\t\twindow.addstr(1, 0, str(len(output)))\n\t\t\t\twindow.addstr(1, 6, output[:34])\n\t\t\tlogger.debug('done scanning')\n\t\telse:\n\t\t\tlogger.debug(\"ssid=%s\", ssid)\n\t\t\tcount += 1\n\t\t\twith screen_lock:\n\t\t\t\twindow.addstr(0, 2, ssid)\n\t\t\t\twindow.refresh()\n\t\t\tlogger.debug('request=%s', str(request))\n\t\t\tcommand = request['command']\n\t\t\tlogger.debug('command=%s', command)\n\n\t\t\tif (command == 'disconnect'):\n\t\t\t\tconnected_ssid = None\n\t\t\t\toutput = check_output(\"nmcli device disconnect iface \" + interface, shell=True).decode(\"utf-8\")\n\t\t\telse:\n\t\t\t\tif (not ssid == connected_ssid):\n\t\t\t\t\tlogger.debug(\"new wifi\")\n\t\t\t\t\twith screen_lock:\n\t\t\t\t\t\twindow.addstr(2,0,\"connecting wifi to \" + ssid)\n\t\t\t\t\t\twindow.refresh()\n\t\t\t\t\tlogger.debug(\"connecting wifi %s\", ssid)\n\t\t\t\t\ttries = 3\n\t\t\t\t\tok = False\n\t\t\t\t\twhile (ok == False and tries > 0):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\toutput = check_output(\"nmcli device wifi connect \" + ssid, shell=True).decode(\"utf-8\")\n\t\t\t\t\t\t\tok = True\n\t\t\t\t\t\texcept CalledProcessError as err:\n\t\t\t\t\t\t\tlogger.error('non-zero return value %d from %s', err.returncode, err.cmd)\n\t\t\t\t\t\t\ttries -= 1\n\n\t\t\t\t\t# need error handling\n\t\t\t\t\t\t\n\t\t\t\t\tlogger.debug(\"connected wifi %s: %s\", ssid, output)\n\t\t\t\t\twith screen_lock:\n\t\t\t\t\t\twindow.addstr(2,0,\"connected wifi to \" + ssid)\n\t\t\t\t\t\twindow.refresh()\n\t\t\t\t\tconnected_ssid = ssid\n\n\t\t\t\turl = 
request['url']\n\t\t\t\tparams = request['params']\n\t\t\t\theaders = request['headers']\n\n\t\t\t\tlogger.debug(\"request %s %s %s %s\", command, url, params, headers)\n\n\t\t\t\twith screen_lock:\n\t\t\t\t\twindow.addstr(0, 20, url)\n\t\t\t\t\twindow.addstr(1, 0, command)\n\t\n\t\t\t\tresponse = None \n\t\t\t\tdata = None\n\t\t\t\twith screen_lock:\n\t\t\t\t\twindow.addstr(2, 0, \"issuing request \");\n\t\t\t\tif (command == 'GET'):\n\t\t\t\t\tlogger.debug(\"requesting GET url=%s, params=%s, header=%s\", url, params, headers)\n\t\t\t\t\tresponse = requests.get(url, params=params, headers=headers)\n\t\t\t\telif (command == 'POST'):\n\t\t\t\t\tlogger.debug(\"requesting POST url=%s, params=%s, header=%s\", url, params, headers)\n\t\t\t\t\tresponse = requests.post(url, data=params, headers=headers)\n\n\t\t\t\tlogger.debug(\"response=%s\", response)\n\t\t\t\trequest['response'] = response\n\n\t\tlogger.debug('finished request')\n\t\tif ('done_condition' in request):\n\t\t\tlogger.debug('signalling done condition')\n\t\t\trequest_done_condition = request['done_condition']\n\t\t\twith request_done_condition:\n\t\t\t\trequest_done_condition.notify_all();\n\t\t\tlogger.debug('signalled')\n\t\twith screen_lock:\n\t\t\twindow.refresh()\n\n\t\tlogger.debug('end of thread loop')\n\n\twith screen_lock:\n\t\twindow.addstr(2, 0, \"done!\")\n\t\twindow.refresh()\n\tlogger.debug('exiting thread')\n\ndef run():\n\tstop_event = threading.Event()\n\tt = threading.Thread(target=thread, args=[stop_event])\n\tt.daemon = True\n\tt.start()\n\treturn stop_event\n\t\n\n\t\t\t\t\n\t\t\t\t\n","sub_path":"wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"176292420","text":"\"\"\"\"\nSimplified patients graph\n\"\"\"\n\nfrom plotly.graph_objs import *\nimport plotly.io as pio\nimport pandas as pd\nfrom plotly import graph_objs as go\nfrom plotly.graph_objs import *\nimport time\nimport re\nimport datetime\nfrom django_plotly_dash import DjangoDash\n\n# General notes: I've only checked a few points to see if this works. One issue is that dots in the same year for the same patient overlap each othe\n# this can be solved by offsetting them slightly or a better way would be for the Y axis to show the full date rather than just the year\n# since every swab will have a diff day and none should overlap\n# will upload dataset with full dates soon. 
If you need help with filtering dataframes or whatever check the other visualisations \n# minor things: more space between patients, order patients numerically \n\napp = DjangoDash('patients_simple')\ndf = pd.DataFrame(pd.read_csv('static/data/patientData.csv'))\ndf2 = pd.DataFrame(pd.read_csv('static/data/patientData.csv'))\n\nfor i in range(len(df)):\n if (df.loc[i, \"HIVexposed\"]==True):\n df.loc[i, \"participant_id\"]=df.loc[i, \"participant_id\"]+\" \"+'\U0001f397'\n\nfor i in range(len(df)):\n if (df.loc[i, \"sex\"]==\"Male\"):\n df.loc[i, \"participant_id\"]=df.loc[i, \"participant_id\"]+\" \"+'\u2642'\n\nfor i in range(len(df)):\n if (df.loc[i, \"sex\"]==\"Female\"):\n df.loc[i, \"participant_id\"]=df.loc[i, \"participant_id\"]+\" \"+'\u2640'\n\nfor i in range(len(df)):\n if (df.loc[i, \"DateVaccinated\"]!=\" \"):\n df.loc[i, \"participant_id\"]=df.loc[i, \"participant_id\"]+\" \"+'\U0001f489'\n\nfor i in range(len(df)):\n if (df.loc[i, \"SmokingExposed\"]==\"True\"):\n df.loc[i, \"participant_id\"]=df.loc[i, \"participant_id\"]+\" \"+'\U0001f6ac'\n\n\n\ntrace1 = {\n \"mode\": \"markers\", \n \"type\": \"scatter\", \n \"x\": df[\"participant_id\"], \n \"y\": df[\"swabDate\"],\n \"hoverinfo\": \"text\",\n \"hovertext\": [[\"participant_id: {}<br>Serotype: {}<br>Date: {}\".format(i,j,k)]\n for i,j,k in zip(df2[\"participant_id\"],df[\"serotype\"],df[\"swabDate\"])],\n \"name\": \"Date of Serotype Colonization\",\n \n} #you would need to do this for every serotype present in the file using a loop. Make sure the x and y vals still correspond though \ntrace2 = {\n \"mode\": \"markers\", \n \"type\": \"scatter\", \n \"x\": df[\"participant_id\"], \n \"y\": df[\"DateVaccinated\"],\n \"name\": \"Vaccination Date\"\n}\ndata = Data([trace1, trace2])\nlayout = dict(\n title = 'Patient Timelines',\n yaxis = go.layout.YAxis(title = 'Year',type='date',range=(2011,2017)),\n xaxis = go.layout.XAxis(title = 'Patient',type='category',\n rangeslider=dict(\n visible=True\n )\n ),\n yaxis_range=[datetime.datetime(2012,1,1),\n datetime.datetime(2017,1,1)]\n)\nfig = Figure(data=data, layout=layout)\n\n\npio.show(fig, validate=False)","sub_path":"pneumovis/pages/dash_app_dir/patientGraph.py","file_name":"patientGraph.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"315831066","text":"\"\"\"\nThis is to calculate the Q and error statistics directly using:\noutlet discharge from SAC-SMA output\nthe RTI discharge data\n\n\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom plot_SAC_utility import *\nimport os\n\n\n# user inputs\nworkdir = '' # workdir of the time series files\nif workdir:\n os.chdir(workdir)\n\nsac_discharge_file = 'DRGC2_discharge_outlet_22yr.ts'\nrti_discharge_file = 'DRGC2H_F.QME.txt'\n\nwatershed = 'Animas'\nyear = '1988_2010' # can be single or multiple year used for title info\n\nstart_date = ''\nend_date = ''\n\n# SAC-SMA output plots #############################################################\n# import SAC-SMA model data\n\nraw_discharge = pd.read_csv(sac_discharge_file, skiprows=250, header=None, names=['raw'])\ndischarge = raw_discharge['raw'].str.split('\\s+', expand=True)\ndischarge_flow = [float(x) for x in discharge[3].tolist()]\n\n# get time obj\ntime_str = (discharge[1]+discharge[2]).tolist()\ntime_obj = []\nfor x in time_str:\n if x[-2:] == '25':\n x = x[:-2] + x[-2:].replace('25', '23')\n time = datetime.strptime(x, '%d%m%y%H')\n time += 
timedelta(hours=2)\n else:\n time = datetime.strptime(x, '%d%m%y%H')\n\n time_obj.append(time)\n\n# import rti discharge data\nrti_discharge = pd.read_csv(rti_discharge_file, skiprows=3, header=None, names=['raw']) # time column is used as index in dataframe\nif not (start_date and end_date):\n start_date = datetime.strftime(time_obj[0], '%Y-%m-%d')\n end_date = datetime.strftime(time_obj[-1]+ timedelta(days=1), '%Y-%m-%d')\n\nobs_discharge = rti_discharge.ix[start_date:end_date]['raw'].apply(lambda x : x*0.0283168) # daily discharge in cms with start and end time\n\n\n# get the sac daily mean data\nsac_df = pd.DataFrame(data={'time': time_obj, 'discharge': discharge_flow}, columns=['time', 'discharge'])\nsac_discharge_outlet = sac_df.set_index('time').groupby(pd.TimeGrouper(freq='D'))['discharge'].mean()\n\n# make obs vs simulation plot\nplot_obs_vs_sim(time=sac_discharge_outlet.index.values,\n sim=sac_discharge_outlet.tolist(),\n obs=obs_discharge.tolist(),\n month_interval=12,\n format='%Y/%m',\n save_name='obs_sim_discharge_{}_{}.png'.format(watershed, year))","sub_path":"22yr_Animas_sac_Q_outlet_stat.py","file_name":"22yr_Animas_sac_Q_outlet_stat.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"102613401","text":"#Embedded file name: /Applications/Ableton Live 9 Suite.app/Contents/App-Resources/MIDI Remote Scripts/LiveControl_2_0/LC2ClipSlotComponent.py\nfrom _Framework.ClipSlotComponent import ClipSlotComponent\nfrom LC2Sysex import LC2Sysex\n\nclass LC2ClipSlotComponent(ClipSlotComponent):\n\n def set_get_offsets(func):\n LC2ClipSlotComponent._get_offset = func\n\n set_get_offsets = staticmethod(set_get_offsets)\n\n def release_attributes():\n LC2ClipSlotComponent._get_offset = None\n\n release_attributes = staticmethod(release_attributes)\n\n def __init__(self, tid, sid):\n ClipSlotComponent.__init__(self)\n self._tid = tid\n self._sid = sid\n\n def set_clip_slot(self, clip_slot):\n if clip_slot != self._clip_slot:\n if self._clip_slot is not None:\n try:\n self._clip_slot.remove_has_stop_button_listener(self._send_state)\n if self.has_clip():\n self._clip_slot.clip.remove_color_listener(self._on_color_changed)\n self._clip_slot.clip.remove_name_listener(self._on_name_changed)\n except:\n pass\n\n ClipSlotComponent.set_clip_slot(self, clip_slot)\n if self._clip_slot is not None:\n self._clip_slot.add_has_stop_button_listener(self._send_state)\n if self.has_clip():\n self._clip_slot.clip.add_color_listener(self._on_color_changed)\n self._clip_slot.clip.add_name_listener(self._on_name_changed)\n\n def _on_clip_state_changed(self):\n ClipSlotComponent._on_clip_state_changed(self)\n if self.has_clip():\n if not self._clip_slot.clip.color_has_listener(self._on_color_changed):\n self._clip_slot.clip.add_color_listener(self._on_color_changed)\n if not self._clip_slot.clip.name_has_listener(self._on_name_changed):\n self._clip_slot.clip.add_name_listener(self._on_name_changed)\n self._send_state()\n\n def _on_color_changed(self):\n self._send_state()\n\n def _on_name_changed(self):\n self._send_state()\n\n def _on_playing_state_changed(self):\n self._send_state()\n\n def _send_state(self):\n if self._clip_slot is not None:\n if hasattr(self, '_get_offset'):\n if self._get_offset is not None:\n offsets = self._get_offset()\n if self._tid < offsets[2] and self._sid < offsets[3]:\n sysex = LC2Sysex('CLIP')\n sysex.byte(self._tid)\n sysex.byte(self._sid)\n sysex.trim(self.get_name(), 40)\n 
sysex.rgb(self.color())\n sysex.byte(self.state())\n sysex.send()\n\n def get_name(self):\n if self._clip_slot is not None:\n if self._has_clip():\n name = unicode(self._clip_slot.clip.name)\n elif self._clip_slot.controls_other_clips:\n name = '>'\n elif self._clip_slot.has_stop_button:\n name = '[]'\n else:\n name = ''\n else:\n name = ''\n return name\n\n def state(self):\n playing = 0\n if self._has_clip():\n playing = 1\n if self._clip_slot.clip.is_playing:\n playing = 2\n elif self._clip_slot.clip.is_triggered:\n playing = 3\n elif self._clip_slot is not None:\n if self._clip_slot.is_playing:\n playing = 2\n elif self._clip_slot.is_triggered:\n playing = 3\n return playing\n\n def color(self):\n if self._clip_slot is not None:\n if self._has_clip():\n rgb = self._clip_slot.clip.color\n elif self._clip_slot.has_stop_button:\n rgb = 3289650\n else:\n rgb = 0\n else:\n rgb = 0\n return rgb\n\n def _has_clip(self):\n if self._clip_slot is not None:\n return self._clip_slot.has_clip\n else:\n return 0\n\n def launch(self):\n if self._clip_slot is not None:\n if self._has_clip():\n self._clip_slot.clip.fire()\n if self.song().select_on_launch:\n self.select()\n else:\n self._clip_slot.fire()\n\n def select(self):\n if self._clip_slot is not None:\n if self._has_clip():\n self.song().view.selected_track = self._clip_slot.canonical_parent\n self.song().view.selected_scene = self.song().scenes[list(self._clip_slot.canonical_parent.clip_slots).index(self._clip_slot)]\n\n def update(self):\n if self._allow_updates:\n if self.is_enabled():\n self._send_state()","sub_path":"AbletonLive9_RemoteScripts/LiveControl_2_0/LC2ClipSlotComponent.py","file_name":"LC2ClipSlotComponent.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"215868957","text":"import os, django, sys\nsys.path.append('../')\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"app.settings\")\ndjango.setup()\n\nfrom weather_api.models import Weather \nfrom stock_price_api.models import Stock, Price \nimport csv\n\n\n\ndef generate_chart_data():\n\ttarget_stock = ['AAPL', 'IBM', 'JBLU', 'DAL', 'LUV', 'MON', 'DE', 'ADM']\n\tny_weather = Weather.objects.filter(name='NY')\n\tstocks = Price.objects.all()\n\tdata_per_stock = []\n\tfor target in target_stock:\n\t\tprint(target)\n\t\tdates = []\n\t\tweather_data = []\n\t\tprice_close_data = [0]\n\t\ta = 0\n\t\t# print('hello')\n\t\twhile a < len(ny_weather):\n\t\t\tif a > 0:\n\t\t\t\tnull = price_close_data[a-1]\n\t\t\telse:\n\t\t\t\tnull = 0\n\t\t\tdate = str(ny_weather[a].timestamp)[:10]\n\t\t\t# print(date)\n\t\t\tprices = Price.objects.filter(date=date)\n\t\t\tif len(prices) == 0:\n\t\t\t\tprice_close_data.append(null)\n\t\t\telse:\n\t\t\t\t# print(prices)\n\t\t\t\tstock = Stock.objects.filter(symbol=target)\n\t\t\t\t# print(stock)\n\t\t\t\tif len(stock) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tfor p in prices:\n\t\t\t\t\tif p.stock_id == stock[0].id:\n\t\t\t\t\t\tif len(stocks) != 0: ## for first testing, using only one value\n\t\t\t\t\t\t\tprice_close_data.append(round(float(p.close),2))\n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\tprice_close_data.append(null)\n\t\t\tdates.append(date)\n\t\t\tweather_data.append(ny_weather[a].temp)\n\t\t\ta += 1\n\t\tdata = [dates,weather_data,price_close_data]\n\t\tdata_per_stock.append(data)\n\treturn data_per_stock\n\n\n## Calc Averages ##\n\ndef calculate_average():\n\tall_data = generate_chart_data()\n\tav_data = []\n\tfor data in all_data:\n\t\ttotal_price = 
0\n\t\tfor price in data[2]:\n\t\t\ttotal_price += round(float(price),2)\n\t\tav_price = total_price / len(data[2])\n\n\t\ttotal_temp = 0\n\t\tfor temp in data[1]:\n\t\t\ttotal_temp += round(float(temp),2)\n\t\tav_temp = total_temp / len(data[1])\n\n\t\tav_data_point = (round(av_price,2),round(av_temp,2))\n\t\tav_data.append(av_data_point)\n\treturn av_data\n\ndef difference_from_average():\n\tpure_data = generate_chart_data()\n\taverages = calculate_average()\n\tdata = []\n\ta = 0\n\tprint('length',len(pure_data))\n\tprint('length of list',len(pure_data[0][0]))\n\twhile a < len(pure_data):\n\t\t# print(pure_data[a][0])\n\t\tdata_points = []\n\t\tb = 0\n\t\taverage_temps = []\n\t\taverage_prices = []\n\t\tdates = []\n\t\twhile b < len(pure_data[a][0]):\n\t\t\t# print(pure_data[a][1],averages[a])\n\t\t\tdiff_temp = round(((float(pure_data[a][1][b]) - float(averages[a][1])) / float(averages[a][1])),2)\n\t\t\tdiff_price = round(((float(pure_data[a][2][b]) - float(averages[a][0])) / float(averages[a][0])),2)\n\t\t\taverage_temps.append(diff_temp)\n\t\t\taverage_prices.append(diff_price)\n\t\t\tdates.append(pure_data[a][0][b])\n\t\t\t# data_points.append([pure_data[a][0][b],diff_temp,diff_price])\n\t\t\tb += 1\n\t\tdata.append([dates,average_temps,average_prices])\n\t\ta += 1\n\treturn data\n\n\ndef make_histogram():\n\tstock = csv.reader(open('stocks.csv', newline=''))\n\tall_prices = []\n\tfor row in stock:\n\t\tall_prices.append(row[2])\n\treturn all_prices\n\ndef steam_graph():\n\tstock = csv.reader(open('stocks.csv', newline=''))\n\tgraphable_json = []\n\tprice_per_stock = []\n\tcurrent_stock = 'AAPL'\n\tcount = 0\n\tfor row in stock:\n\t\tprice = round(float(row[2]),2)\n\t\tif row[0] == current_stock and count < 200 and price<100:\n\t\t\tprice_per_stock.append({'x': count, 'y': round(float(row[2]),2)})\n\t\t\tcount += 1\n\t\telse: ## Switch to next stock\n\t\t\tgraphable_json.append({\"name\": current_stock,\"values\": price_per_stock})\n\t\t\t## re-define values\n\t\t\tcurrent_stock = row[0]\n\t\t\tprice_per_stock = []\n\t\t\tcount = 0\n\tjson = []\n\tfor item in graphable_json:\n\t\tif len(item['values']) == 200:\n\t\t\tjson.append(item)\n\treturn json\n\n\n\n\n","sub_path":"make_chart.py","file_name":"make_chart.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"581489517","text":"from datetime import datetime\nimport re\nimport logging\n\nimport config\nimport slack.client as slackclient\n\nbitbucket_messages_regex = re.compile(r\"(?P<action>\\w+)\\spull\\srequest\\s\\<(?P<url>.+)\\>\")\n\ndef process_event_message(msg, cfg=config):\n if msg:\n msg['username'] = cfg.BOT_USER_NAME + ': BitBucket'\n if 'icon_emoji' in msg:\n del msg['icon_emoji']\n msg['icon_url'] = cfg.ICON_URL\n return msg\n\ndef process_history_messages(messages, cfg=config):\n try:\n bitbucket_events = {}\n for m in messages:\n if m.get('type', None) == 'message' and m.get('subtype', None) == 'bot_message':\n msg_username = m.get('username', m.get('user', m.get('bot_id', '')))\n if msg_username == cfg.BOT_USER_NAME:\n for a in m.get('attachments', []):\n text = a.get('text', '')\n bitbucket_pull_event_match = bitbucket_messages_regex.match(text)\n if bitbucket_pull_event_match:\n bitbucket_pull_event = bitbucket_pull_event_match.groupdict()\n bitbucket_pull_event['ts'] = datetime.fromtimestamp(float(m.get('ts', None)))\n bitbucket_pull_event['ts_str'] = m.get('ts', None)\n url = bitbucket_pull_event.get('url')\n if url 
not in bitbucket_events:\n bitbucket_events[url] = list()\n bitbucket_events[url].append(bitbucket_pull_event)\n\n for url, events in bitbucket_events.items():\n sorted_events = sorted(events, key=lambda e: e['ts'])\n last_event = sorted_events[-1]\n if last_event['action'] in cfg.BITBUCKET_FINAL_ACTIONS:\n to_delete_events = sorted_events\n if cfg.BITBUCKET_LEAVE_MESSAGES_WITH_FINAL_ACTIONS:\n to_delete_events = sorted_events[:-1]\n for e in to_delete_events:\n slackclient.delete_message(cfg.CHANNEL, e['ts_str'], cfg)\n except Exception as inst:\n logging.error(str(inst))\n","sub_path":"handler/bitbucket.py","file_name":"bitbucket.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"237245778","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom .models import MiniURL\nfrom . import views\n\ndef creer_url():\n mini = MiniURL(url=\"http://foo.bar\", pseudo=\"Maxime\")\n mini.code = mini.generer(6)\n mini.save()\n return mini\n\nclass MiniURLTests(TestCase):\n def test_liste(self):\n \"\"\" Vérifie si une URL sauvegardée est bien affichée \"\"\"\n\n mini = creer_url()\n reponse = self.client.get(reverse('url_liste'))\n\n self.assertEqual(reponse.status_code, 200)\n self.assertContains(reponse, mini.url)\n self.assertQuerysetEqual(reponse.context['minis'], [repr(mini)])\n\n def test_nouveau_redirection(self):\n \"\"\" Vérifie la redirection d'un ajout d'URL \"\"\"\n data = {\n 'url': 'http://www.djangoproject.com',\n 'pseudo': 'Jean Dupont',\n }\n\n reponse = self.client.post(reverse('url_nouveau'), data)\n self.assertEqual(reponse.status_code, 302)\n self.assertRedirects(reponse, reverse('url_liste'))","sub_path":"mini_url/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"568723413","text":"# Insolation experiment - Max Trostel and Bruce Duffy - May 16, 2018\n\n# This program gets and writes data from the insolation experiment, a set of three insulated boxes, each\n# containing a 60 W incandescent lightbulb which heats the box when the temperature falls below\n# 20 C. Box 1 has an insulation value of R=3, Box 2 R=5 with window, Box 3 R=5\n# w/o window. The boxes are placed outside in winter to test these insulation\n# values The data collected by the exeriment and retrieved by this code comes\n# in as follows:\n# Time/Date, Epoch Time (s), Box 1 Lightbulb (V), Box 2 Lightbulb (V), Box\n# 3 Lightbulb (V), Box 1 Temp (C), Box 2 Temp (C), Box 3 Temp (C), Outside Temp\n# (C), and Insolation/910 (W/m^2).\n\n# Further details can be found in documents accompanying this code, and in the\n# --help ussage.\n\n\nfrom lxml import html\nimport requests\nimport xml.etree.ElementTree as et\nfrom datetime import datetime\nfrom time import time, sleep\nimport schedule\nimport csv\nimport sys\nimport readchar\n\n# Usage statement:\nuse =\"Usage: insolation.py --interval=<int, seconds> --duration=<real,\\\n hours>\\nRuns insolation experiment with parameters; writes to current\\\n directory.\\n\\\n\\n -h, --help display this help and exit\\\n\\n -i, --interval takes =<int, seconds>, i.e. an interger number of\\\n seconds for the sampling interval\\\n\\n -d, --duration takes =<real, hours>, i.e. 
a double precision float of\\\n the total time of the observing run in hours\\\n\\n\\nBy default, when no arguments are given, the code runs with --interval=5\\\n and --duration = 23.98. Interval cannot exceed 4000 and duration 72.\"\n\n# Get parameters from command line arguments\nif len(sys.argv) == 1:\n interval = 5\n dur = 23.98\n duration = int(dur*3600)\nelse:\n for i in range(1,len(sys.argv)):\n arg = sys.argv[i]\n try:\n pi = arg.index('=')\n except:\n pi = len(arg)\n param = arg[:pi]\n if param == '--help' or param == '-h':\n print(use)\n quit(1)\n elif param == '--interval' or param == '-i':\n try:\n interval = int(arg[pi+1:])\n except:\n print(\"Error: invalid interval - NaN\")\n quit(1)\n if interval > 4000:\n print(\"Error: invalid interval - too long (>4000)\")\n quit(1)\n elif param == '--duration' or param == '-d':\n try:\n dur = float(arg[pi+1:])\n except:\n print(\"Error: invalid duration - NaN\")\n quit(1)\n if dur > 72:\n print(\"Error: invalid duration - too long (>72)\")\n quit(1)\n else:\n print(\"Error: invalid argument - \" + str(param) + \" - see --help for usage\")\n quit(1)\n try:\n interval\n except:\n print(\"Error: interval not defined - see --help for usage\")\n quit(1)\n try:\n dur\n except:\n print(\"Error: duration not defined - see --help for usage\")\n quit(1)\n duration = int(dur*3600)\n\n# Function to get data from insolation server, returns time and state data\ndef getstate():\n page = requests.get('http://insolation.physics.carleton.edu/stateFull.xml')\n root = et.fromstring(page.content)\n state = [datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), str(int(time())),\n str(float(root[0].text)), str(float(root[1].text)), str(float(root[2].text)),\n str(float(root[3].text)), str(float(root[4].text)), str(float(root[5].text)),\n str(float(root[6].text)), str(float(root[7].text))]\n return state\n\n# Record start time of experiment run\ntry:\n starttime = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n startepoch = int(time())\nexcept:\n print(\"Error: cannot get system time.\")\n quit(1)\n\n# Header to be written to the CSV file\nheader = [[\"Date/Time\",\"Epoch\",\"V1\",\"V2\",\"V3\",\"T1\",\"T2\",\"T3\",\"T4\",\"Sol\",\"FailureCount\"]]\n\n# Create and open new csv, named by argument parameters and start date/time\ntry:\n myFile = open('Insolation.i_' + str(interval) + '.d_' + str(dur) + '.' + starttime + '.csv', 'w')\nexcept:\n print(\"Error: cannot open data file.\")\n quit(1)\n\n# Begin error count at 0\necount = 0\n\n# Begin writing to CSV file, starting with header, followed by state data\nwith myFile:\n writer = csv.writer(myFile)\n writer.writerows(header)\n # Get data, write to csv row\n def job():\n global ecount\n t = round((time() - startepoch)/3600, 4)\n try:\n dat = getstate()\n newData = [dat + [str(ecount)]]\n ecount = 0\n print(\"State retrieval successful... \" + str(t) + \"/\" + str(dur) + \"hours\")\n try:\n writer.writerows(newData)\n except:\n print(\"Error: cannot write to data file.\")\n quit(1)\n except:\n ecount = ecount + 1\n print(\"Error: state retrieval failed... 
\" + str(t) + \"/\" + str(dur) +\n \"hours\")\n if (int(time()) > int(startepoch + duration)):\n print(\"Observing run complete.\")\n return quit()\n\n # Schedule new data retrieval/writing at specified interval\n schedule.every(interval).seconds.do(job)\n\n while True:\n schedule.run_pending()\n sleep(1)\n","sub_path":"code+docs/insolation.py","file_name":"insolation.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287935564","text":"from django.conf.urls import url, re_path\n\nfrom administrator import views\nfrom administrator.views import standard, timeliner, choice_add, questionaire_manage, blank_add, answer_add, matrix_add, \\\n form_add, accumulation, export_answer, import_answer, questionaire_submit, questionaire_delete, question_delete, \\\n scheme_show, calculate\n\nurlpatterns = [\n url('/standard', standard, name='standard'),\n url(r'/delete', name='delete', view=views.delete),\n url(r'/edit', name='edit', view=views.edit),\n re_path(r'/indicator_export$', views.indicator_export, name='indicator_export'),\n re_path(r'/upload_indicator$', views.upload_indicator, name='upload_indicator'),\n url(r'^download_indicator/', views.download_indicator, name=\"download_indicator\"),\n # 下载问卷测试/问卷状态改变测试\n url(r'^export_questionaire/', views.export_questionaire, name='export_questionaire'),\n url(r'^question_status/', views.questionaire_status, name='question_status'),\n #\n re_path(r'/timeliner_create$', views.timeliner_create, name='timeliner_create'),\n re_path(r'/timeliner_edit$', views.timeliner_edit, name='timeliner_edit'),\n re_path(r'/timeliner_delete$', views.timeliner_delete, name='timeliner_delete'),\n url(r'/questionaire_submit$', questionaire_submit, name='questionaire_submit'),\n url(r'/questionaire_delete$', questionaire_delete, name='questionaire_delete'),\n url(r'/question_delete$', question_delete, name='question_delete'),\n url(r'/choice_add$', choice_add, name='choice_add'),\n url(r'/blank_add$', blank_add, name='blank_add'),\n url(r'/answer_add$', answer_add, name='answer_add'),\n url(r'/matrix_add$', matrix_add, name='matrix_add'),\n url(r'/form_add$', form_add, name='form_add'),\n url(r'/questionaire$', views.questionaire, name='questionaire'),\n url(r'/timeliner$', timeliner, name='timeliner'),\n url(r'/questionaire_manage$', questionaire_manage, name='questionaire_manage'),\n url(r'/export_answer$', export_answer, name='export_answer'),\n url(r'/import_answer$', import_answer, name='import_answer'),\n url(r'/accumulation$', accumulation, name='accumulation'),\n url(r'scheme_show$', scheme_show, name='scheme_show'),\n url(r'/calculate', calculate, name='calculate'),\n]\n","sub_path":"administrator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380541487","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom .base import *\n\n__docformat__ = 'restructuredtext en'\n\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n\nSOUTH_TESTS_MIGRATE = True\nDATABASES = {\n 'default': {\n\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': root('db', 'dev.sqlite3')\n },\n # 'postgres': {\n # 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n # 'NAME': 'coxtactoe',\n # 'USER': 'coxtactoe',\n # 'PASSWORD': 'c0Xtactoe',\n # 'HOST': '127.0.0.1'\n # }\n}\n\n# INSTALLED_APPS += (\"debug_toolbar\", )\n#\n# MIDDLEWARE_CLASSES += 
(\"debug_toolbar.middleware.DebugToolbarMiddleware\", )\n","sub_path":"tictac/tictac/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"592213087","text":"# -*- coding: utf-8 -*-\n\n\n'''\n'.' Matches any single character.\n'*' Matches zero or more of the preceding element.\n\nThe matching should cover the entire input string (not partial).\n\nThe function prototype should be:\nbool isMatch(const char *s, const char *p)\n\nSome examples:\nisMatch(\"aa\",\"a\") → false\nisMatch(\"aa\",\"aa\") → true\nisMatch(\"aaa\",\"aa\") → false\nisMatch(\"aa\", \"a*\") → true\nisMatch(\"aa\", \".*\") → true\nisMatch(\"ab\", \".*\") → true\nisMatch(\"aab\", \"c*a*b\") → true\n'''\n\nclass Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n #O(NM) time, O(NM) space\n n, m = len(s), len(p)\n match = [[False for j in xrange(m + 1)] for i in xrange(n + 1)]\n\n #Base cases - empty string matches w/ empty pattern\n match[0][0] = True\n\n #empty string to nonempty pattern\n for i in xrange(1, m + 1):\n if i > 1 and p[i - 1] == '*':\n match[0][i] = match[0][i - 2]\n\n #DP cases\n for i in xrange(1, n + 1):\n for j in xrange(1, m + 1):\n #If the string char equals the pattern char or the pattern is a dot, \n #there is a match if everything before matches\n if p[j - 1] == '.' or p[j - 1] == s[i - 1]:\n match[i][j] = match[i - 1][j - 1]\n #Star can only match if there is something before the star to match\n elif j > 1 and p[j - 1] == '*':\n #If the character before the star matches char in the string,\n #the star can match 0 of the char before it, or at least 1.\n #match[i][j-2] is for matching 0, cause we need to skip the last 2\n #characters in the pattern. 
match[i - 1][j] is for matching 1, cause\n #we need to skip 1 character in the string, but keep the entire pattern\n if p[j - 2] == s[i - 1] or p[j - 2] == '.':\n match[i][j] = match[i][j - 2] or match[i - 1][j]\n #If the character before the star does not match the char in the string,\n #then the only way to get a match is if the star matches 0 characters\n else:\n match[i][j] = match[i][j - 2]\n\n return match[n][m]\n","sub_path":"python/regular_express_matching.py","file_name":"regular_express_matching.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"610505376","text":"import sys, os\nsys.path.append('/Users/manuel/OneDrive/Chinese_Vocab_Trainer')\nfrom tkinter import *\nfrom random import shuffle\nfrom chinese_vocab_trainer.vocab import Vocab\n# from PIL import ImageTk, Image\n\n\nclass Application:\n def __init__(self):\n self.vocab = None\n self.direction = \"ec\"\n self.box = 0\n # stack created after pressing start button\n self.word_list = None\n # list of words generated from word_list but\n self.rand_word_list = None\n # word currently being asked to translate\n self.current_word = None\n # accessors to labels in window that display vocabulary\n self.chin = None\n self.pin = None\n self.eng = None\n # number of words that have already been tested\n self.words_tested = None\n # number of words that should be tested\n self.num_words = None\n # upper and lower limiting threshold for deciding whether vocab is move or not\n self.low_limit = None\n self.up_limit = None\n\n self.display = None\n\n self.buttons = {\"start\": None, \"finish\": None, \"next\": None, \"reveal\": None, \"correct\": None, \"wrong\": None}\n self.activity = {\"start\": False, \"finish\": False, \"next\": False, \"reveal\": False, \"correct\": True,\n \"wrong\": True}\n self.but_active = {\"start\": \"black\", \"finish\": \"black\", \"next\": \"black\", \"reveal\": \"black\",\n \"correct\": \"green\", \"wrong\": \"#F36B60\"}\n self.but_inactive = {\"start\": \"grey\", \"finish\": \"grey\", \"next\": \"grey\", \"reveal\": \"grey\",\n \"correct\": \"grey\", \"wrong\": \"grey\"}\n\n def start_gui(self):\n root = Tk()\n root.title(\"Chinese Vocabulary Trainer\")\n root.resizable(0, 0)\n\n # it works!!!!!!\n img = PhotoImage(file='chin.png')\n root.tk.call('wm', 'iconphoto', root._w, img)\n # -------------------------\n\n mainframe = LabelFrame(root, text='', padx=0, pady=0, borderwidth=0)\n mainframe.pack(padx=20, pady=0)\n\n loadframe = LabelFrame(mainframe, text='', padx=10, pady=0, borderwidth=0)\n loadframe.grid(row=0, column=0, padx=0, pady=0)\n\n trainingframe = LabelFrame(mainframe, text='', padx=0, pady=20, borderwidth=0)\n trainingframe.grid(row=1, column=0, padx=0, pady=0)\n\n # ----------\n # LOAD FRAME\n\n # Entry frame\n entryframe = LabelFrame(loadframe, text='', padx=0, pady=0, borderwidth=0)\n entryframe.grid(row=0, column=0, padx=0, pady=0, rowspan=2)\n\n no_words = Label(entryframe, text=\"number of words\")\n no_words.grid(row=0, column=0)\n\n number_words_entry = Entry(entryframe, width=5, borderwidth=5)\n number_words_entry.grid(row=0, column=1, columnspan=1, padx=5, pady=5)\n number_words_entry.insert(0, \"0\")\n\n low_limit = Label(entryframe, text=\"lower limit\")\n low_limit.grid(row=1, column=0)\n\n low_limit_entry = Entry(entryframe, width=5, borderwidth=5)\n low_limit_entry.grid(row=1, column=1, columnspan=1, padx=5, pady=5)\n low_limit_entry.insert(0, \"-2\")\n\n up_limit = 
Label(entryframe, text=\"upper limit\")\n up_limit.grid(row=2, column=0)\n\n up_limit_entry = Entry(entryframe, width=5, borderwidth=5)\n up_limit_entry.grid(row=2, column=1, columnspan=1, padx=5, pady=5)\n up_limit_entry.insert(0, \"+5\")\n\n # Drop Frame\n dropframe = LabelFrame(loadframe, text='', padx=0, pady=0, borderwidth=0)\n dropframe.grid(row=0, column=1, padx=0, pady=0)\n\n chapter = StringVar()\n chapter.set(\"Box 1\")\n chap = OptionMenu(dropframe, chapter, \"Box 1\", \"Box 2\", \"Box 3\", \"Box 4\", \"Box 5\")\n chap.grid(row=0, column=0)\n\n direction = StringVar()\n direction.set(\"English -> Chinese\")\n dir_drop = OptionMenu(dropframe, direction, \"English -> Chinese\", \"Chinese -> English\")\n dir_drop.grid(row=0, column=1)\n\n filepath_entry = Entry(dropframe, width=30, borderwidth=5)\n filepath_entry.grid(row=0, column=2, columnspan=1, padx=10, pady=10)\n filepath_entry.insert(0, \"/Users/manuel/Desktop/chinese.txt\")\n\n # button frame\n buttonframe = LabelFrame(loadframe, text='', padx=0, pady=0, borderwidth=0)\n buttonframe.grid(row=1, column=1, padx=0, pady=0)\n\n self.buttons[\"start\"] = Button(buttonframe, text=\"Start\", fg=\"black\", padx=40, pady=20, font=(\"Helvetica\", 15),\n width=5, command=lambda: self.start_learning(filepath_entry.get(), chapter.get(),\n direction.get(), number_words_entry.get(), low_limit_entry.get(), up_limit_entry.get()))\n self.buttons[\"start\"].grid(row=0, column=0)\n\n self.buttons[\"finish\"] = Button(buttonframe, text=\"Finish\", fg=\"grey\", padx=40, pady=20, font=(\"Helvetica\", 15),\n width=5, command=lambda: self.finish(filepath_entry.get()))\n self.buttons[\"finish\"].grid(row=0, column=1, padx=20)\n\n # ----------\n # TRAINING FRAME\n\n # VOCAB FRame\n vocabframe = LabelFrame(trainingframe, text='', padx=0, pady=0, borderwidth=0)\n vocabframe.grid(row=0, column=1, padx=0, pady=20)\n\n self.chin = StringVar()\n chin_label = Label(vocabframe, textvariable=self.chin, bg=\"#33455c\", fg=\"white\", width=20, font=(\"Helvetica\", 35))\n chin_label.grid(row=1, column=0, padx=5)\n\n self.pin = StringVar()\n pin_label = Label(vocabframe, textvariable=self.pin, bg=\"#33455c\", fg=\"white\", width=20, font=(\"Helvetica\", 35))\n pin_label.grid(row=1, column=1, padx=5)\n\n self.eng = StringVar()\n eng_label = Label(vocabframe, textvariable=self.eng, bg=\"#33455c\", fg=\"white\", width=20, font=(\"Helvetica\", 35))\n eng_label.grid(row=1, column=2, padx=5)\n\n self.display = StringVar()\n disp = Label(vocabframe, textvariable=self.display, bg=\"#f0e8eb\", width=20, font=(\"Helvetica\", 20))\n disp.grid(row=0, column=1, pady=15)\n\n # ACTION FRAME\n actionframe = LabelFrame(trainingframe, text='', padx=0, pady=20, borderwidth=0)\n actionframe.grid(row=1, column=1, padx=0, pady=0)\n\n self.buttons[\"next\"] = Button(actionframe, text=\"Next\", fg=\"grey\", padx=40, pady=20, font=(\"Helvetica\", 15),\n width=5, command=lambda: self.next())\n self.buttons[\"next\"].grid(row=3, column=0, padx=20)\n\n self.buttons[\"reveal\"] = Button(actionframe, text=\"Reveal\", fg=\"grey\", padx=40, pady=20, font=(\"Helvetica\", 15),\n width=5, command=lambda: self.reveal())\n self.buttons[\"reveal\"].grid(row=3, column=1, padx=20)\n\n self.buttons[\"correct\"] = Button(actionframe, text=\"correct\", fg=\"grey\", padx=40, pady=20, font=(\"Helvetica\", 15),\n width=5, command=lambda: self.correct(chapter.get()))\n self.buttons[\"correct\"].grid(row=3, column=2, padx=20)\n\n self.buttons[\"wrong\"] = Button(actionframe, text=\"wrong\", fg=\"grey\", 
padx=40, pady=20, font=(\"Helvetica\", 15),\n width=5, command=lambda: self.wrong(chapter.get()))\n self.buttons[\"wrong\"].grid(row=3, column=3, padx=20)\n\n root.mainloop()\n\n def start_app(self):\n self.start_gui()\n\n def correct(self, chapter):\n if self.activity[\"reveal\"]:\n print(\"correct\")\n if self.current_word != None:\n self.current_word[0].score += 1\n\n self.buttons[\"correct\"].config(fg=self.but_inactive['correct'])\n self.buttons[\"wrong\"].config(fg=self.but_inactive['wrong'])\n self.activity[\"correct\"] = True\n self.activity[\"wrong\"] = True\n self.activity[\"reveal\"] = False\n self.buttons[\"next\"].config(fg=self.but_active['next'])\n\n def wrong(self, chapter):\n if self.activity[\"reveal\"]:\n print(\"wrong\")\n if self.current_word != None:\n self.current_word[0].score -= 1\n\n self.buttons[\"correct\"].config(fg=self.but_inactive['correct'])\n self.buttons[\"wrong\"].config(fg=self.but_inactive['wrong'])\n self.activity[\"correct\"] = True\n self.activity[\"wrong\"] = True\n self.activity[\"reveal\"] = False\n self.buttons[\"next\"].config(fg=self.but_active['next'])\n\n def next(self):\n\n if (self.activity[\"correct\"] or self.activity[\"wrong\"]) and self.activity[\"start\"]:\n # update display\n if self.words_tested == self.num_words:\n self.words_tested = 0\n display_text = str(self.words_tested+1) + \" / \" + str(self.num_words) + \" of words\"\n self.set_display(display_text)\n\n print(\"Next\")\n # changes self.current_word to a new word\n self.get_next_word()\n if self.current_word != None:\n if self.direction == \"ec\":\n self.eng.set(self.current_word[0].english)\n self.chin.set(\"\")\n self.pin.set(\"\")\n else:\n self.chin.set(self.current_word[0].character)\n self.eng.set(\"\")\n self.pin.set(\"\")\n\n self.buttons[\"next\"].config(fg=self.but_inactive['next'])\n self.activity[\"next\"] = True\n self.buttons[\"reveal\"].config(fg=self.but_active['reveal'])\n self.activity[\"correct\"] = False\n self.activity[\"wrong\"] = False\n\n else:\n print(\"This box is empty, Choose another box\")\n self.buttons[\"next\"].config(fg=self.but_inactive['next'])\n self.activity[\"correct\"] = False\n self.activity[\"wrong\"] = False\n else:\n self.buttons[\"next\"].config(fg=self.but_inactive['next'])\n print(\"This box is empty, Choose another box\")\n\n def reveal(self):\n if self.activity[\"next\"]:\n print(\"Reveal\")\n if self.current_word != None:\n if self.direction == \"ec\":\n self.chin.set(self.current_word[0].character)\n self.pin.set(self.current_word[0].pinyin)\n else:\n self.eng.set(self.current_word[0].english)\n self.pin.set(self.current_word[0].pinyin)\n\n self.buttons[\"reveal\"].config(fg=self.but_inactive['reveal'])\n self.activity[\"reveal\"] = True\n self.activity[\"next\"] = False\n self.buttons[\"correct\"].config(fg=self.but_active['correct'])\n self.buttons[\"wrong\"].config(fg=self.but_active['wrong'])\n\n def get_next_word(self):\n if self.words_tested >= self.num_words:\n self.words_tested = 0\n if self.rand_word_list != []:\n self.current_word = self.rand_word_list[self.words_tested]\n self.words_tested += 1\n ###############################\n else:\n self.current_word = None\n\n def shuffle_words(self, number):\n # determine number of words to be asked\n if number == 0 or number > len(self.word_list):\n number = len(self.word_list)\n self.num_words = number\n\n shuffle(self.word_list)\n self.rand_word_list = self.word_list[0:number]\n\n def start_learning(self, file_path, box, direction, number, low_limit, up_limit):\n 
print(\"Start\")\n \"\"\" This should initiate the underlying program \"\"\"\n\n self.vocab = Vocab(file_path)\n self.direction = self.get_direction(direction)\n self.box = int(box.split(\" \")[1])\n self.num_words = int(number)\n self.words_tested = 0\n self.word_list = self.vocab.get_words(box=self.box - 1)\n self.shuffle_words(self.num_words)\n\n self.chin.set(\"\")\n self.pin.set(\"\")\n self.eng.set(\"\")\n\n self.low_limit = int(low_limit)\n self.up_limit = int(up_limit)\n\n # change coloring of buttons\n self.activity[\"start\"] = True\n self.buttons[\"finish\"].config(fg=self.but_active['finish'])\n self.buttons[\"next\"].config(fg=self.but_active['next'])\n\n self.activity[\"correct\"] = True\n self.activity[\"wrong\"] = True\n\n # set the display\n display_text = str(self.words_tested) + \" / \" + str(self.num_words) + \" of words\"\n self.set_display(display_text)\n\n def finish(self, filepath):\n # Ensure that you can only finish the session and save it if the Start button has been pressed at least once\n if self.activity[\"start\"]:\n print(\"Finish\")\n # 1) put the learned and classified words from a session back into\n # the original Vocab object.\n old_vocab = self.vocab.get_vocab_list()\n box = self.box - 1\n for word in self.word_list:\n orig_pos = word[1]\n old_vocab[box][orig_pos] = word[0]\n\n # 2) update the box that a word belongs to based on its score\n self.vocab.update_words(self.low_limit, self.up_limit)\n\n # 3) write update vocab object to file\n self.vocab.write_vocab(file_path=filepath)\n\n self.activity[\"correct\"] = False\n self.activity[\"wrong\"] = False\n self.activity[\"next\"] = False\n self.activity[\"reveal\"] = False\n self.activity[\"start\"] = False\n self.buttons[\"next\"].config(fg=self.but_inactive['next'])\n self.buttons[\"reveal\"].config(fg=self.but_inactive['next'])\n self.buttons[\"correct\"].config(fg=self.but_inactive['next'])\n self.buttons[\"wrong\"].config(fg=self.but_inactive['next'])\n self.buttons[\"finish\"].config(fg=self.but_inactive['finish'])\n\n self.chin.set(\"\")\n self.pin.set(\"\")\n self.eng.set(\"\")\n\n def get_direction(self, direction):\n if direction == \"English -> Chinese\":\n return \"ec\"\n elif direction == \"Chinese -> English\":\n return \"ce\"\n\n def set_display(self, text):\n \"\"\" sets the display to the text specifiedin text\"\"\"\n self.display.set(text)\n\n\nif __name__ == \"__main__\":\n app = Application()\n app.start_app()\n\n\n","sub_path":"chinese_vocab_trainer/vocab_trainer.py","file_name":"vocab_trainer.py","file_ext":"py","file_size_in_byte":14273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"70035235","text":"import scrabble\n\n# What is the longest palindrome in the English language?\n\n\ndef is_a_palindrome(word):\n # here I want integer division because the middle letter will not have a pair\n # so the check is the same if the word has an odd or an even number of letters\n half_length = len(word)//2\n\n for i in range(half_length):\n if word[i] != word[-i-1]:\n return False\n break\n return True\n\npalindromes = []\n#longest_word = ''\nfor word in scrabble.wordlist:\n if is_a_palindrome(word) == True:\n \n palindromes.append(word)\n\nfor word in palindromes:\n print(word,len(word))\n\n","sub_path":"int-python-programming/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"242530303","text":"# 
Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE file in the project root for\n# full license information.\nimport connexion\n# changed from from swagger_server import encoder\nfrom . import encoder\n# added logging config in merge\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"azure.iot.device\").setLevel(level=logging.DEBUG)\nlogging.getLogger(\"paho\").setLevel(level=logging.DEBUG)\nlogging.getLogger(\"werkzeug\").setLevel(level=logging.WARNING) # info level can leak credentials into the log\n\n\ndef main():\n app = connexion.App(__name__, specification_dir='./swagger/')\n app.app.json_encoder = encoder.JSONEncoder\n app.add_api('swagger.yaml', arguments={'title': 'Azure IOT End-to-End Test Wrapper Rest Api'})\n # changed from app.run(port=8080)\n app.run(port=8080, debug=True, use_reloader=False, threaded=True)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docker_images/pythonv2/wrapper/swagger_server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"284678675","text":"import random\nclass Character (object):\n MAXDEXTERITY = 100\n def __init__ (self, name, hit_points, strength, dexterity, shield):\n self.name = name\n self.hit_points = int(hit_points)\n self.strength = int(strength)\n self.dexterity = int(dexterity)\n self.shield = int(shield)\n def attack (self, enemy):\n total = self.dexterity + enemy.dexterity\n ranint = random.randint(0, total)\n if ranint < self.dexterity:\n drandom = random.randint(0, self.strength)\n if enemy.shield > 0:\n enemy.shield-=1\n print(self.name + \" hits \" + enemy.name + \" causing \" + \"0\" + \" damage.\")\n print(enemy.name + \" used a shield! 
\" + str(enemy.shield) + \" shield(s) remaining for \" + enemy.name + \".\")\n else:\n print(self.name + \" hits \" + enemy.name + \" causing \" + str(drandom) + \" damage.\")\n enemy.hit_points -= drandom\n else:\n print(self.name + \" misses \" + enemy.name + \".\")\n def die (self):\n print(self.name + \" is dead.\")\n def __str__ (self):\n return self.name + \": \" + str(self.hit_points) + \" \" + str(self.strength) + \" \" + str(self.dexterity) + \" \" + str(self.shield)\nclass CharacterList (object):\n def __init__ (self, file_name):\n file_name = open(file_name, \"r\")\n self.clist = []\n for line in file_name:\n line = line.strip()\n linelist = line.split(\",\")\n self.clist.append(Character(linelist[0], linelist[1], linelist[2], linelist[3], linelist[4])) \n def print_list (self):\n index = 1\n for c in self.clist:\n print(str(index) + \": \", end=\"\")\n print(c)\n index += 1\n def get_and_remove_character (self, i):\n first = self.clist[(i-1)]\n self.clist.remove(first)\n return first\n def get_random_character (self):\n return random.choice(self.clist)\n def get_number_of_characters (self):\n return len(self.clist)","sub_path":"Python/Unit4/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"428972848","text":"#https://www.hackerrank.com/challenges/validate-list-of-email-address-with-filter/problem\ndef fun(s):\n # return True if s is a valid email, else return False\n try:\n username, url = s.split(\"@\")\n website, extension = url.split(\".\")\n except ValueError:\n return False\n \n if not username.replace(\"-\", \"\").replace(\"_\", \"\").isalnum():\n return False\n if not website.isalnum():\n return False\n if len(extension) > 3:\n return False\n return True","sub_path":"hackerrank/Python-Functionals/validatingEmailAddressesWithFilter.py","file_name":"validatingEmailAddressesWithFilter.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537183266","text":"import sys\nimport os\nimport math\nimport random\n\n\"\"\"\n Design an algorithm for computing the k-th largest element in an array. 
Assume\n entries are distinct.\n\n ===========\n\n Use quick select\n\"\"\"\ndef find_kth_largest_element(arr, k):\n return _select(arr, 0, len(arr) - 1, k - 1)\n\ndef _select(arr, left, right, k):\n while True:\n pivot_index = random.randint(left, right)\n pivot_new_index = partition(arr, left, right, pivot_index)\n pivot_dist = pivot_new_index - left\n if pivot_dist == k:\n return arr[pivot_new_index]\n elif pivot_dist > k:\n right = pivot_new_index - 1\n else:\n left = pivot_new_index + 1\n k -= pivot_dist + 1\n\ndef partition(arr, left, right, pivot_index):\n pivot_value = arr[pivot_index]\n arr[right], arr[pivot_index] = arr[pivot_index], arr[right]\n stored_index = left\n\n for i in range(left, right):\n if arr[i] < pivot_value:\n arr[i], arr[stored_index] = arr[stored_index], arr[i]\n stored_index += 1\n arr[right], arr[stored_index] = arr[stored_index], arr[right]\n return stored_index\n\n#v = [9, 8, 7, 6, 5, 0, 1, 2, 3, 4]\n#print([find_kth_largest_element(v, i) for i in range(1, len(v) + 1)])\n\n","sub_path":"EPI/Python/Searching/12_11_find_kth_largest_element.py","file_name":"12_11_find_kth_largest_element.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"177435448","text":"import matplotlib.pyplot as plt\n\nimport constants\nimport equation\n\nxValues = [x/100 for x in range(50, 300)]\nyValues = [equation.electrostatic(constants.ElementaryCharge,\n constants.ElementaryCharge,\n x * constants.Femtometre) for x in xValues]\n\nplt.plot(xValues, yValues)\n\nplt.xlabel('Range (fm)')\nplt.ylabel('Force (N)')\n\nplt.title('Electrostatic Force')\n\nplt.show()\n","sub_path":"equationTester2.py","file_name":"equationTester2.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"512813025","text":"from django.shortcuts import render\nfrom django.views import View\nfrom django.http import JsonResponse\nfrom .models import *\nimport json\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .forms import OrderForm, RegisterForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.urls import reverse\nfrom . 
utils import cookieCart, cartData, guestOrder\n\n\nclass StoreView(View):\n def get(self, request):\n\n if request.user.is_authenticated:\n order, created = Order.objects.get_or_create(customer=request.user, complete=False)\n items = order.orderitem_set.all()\n cartItems = order.all_cart_quantity\n else:\n items = []\n order = {'all_cart_value':0,'all_cart_quantity':0, 'shipping':False}\n cartItems = order['all_cart_quantity']\n products=Product.objects.all()\n context={'products':products, 'cartItems':cartItems}\n return render(request, 'store/store.html', context)\n\n\nclass About(View):\n def get(self,request):\n context={}\n return render(request, 'store/about.html', context)\n\nclass CartView(View):\n def get(self, request):\n\n data = cartData(request)\n cartItems = data['cartItems']\n order = data['order']\n items = data['items']\n context = {'items': items, 'order':order, 'cartItems':cartItems}\n return render(request, 'store/cart.html', context)\n\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n@csrf_exempt\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItems = order.all_cart_quantity\n else:\n cookieData = cookieCart(request)\n cartItems = cookieData['cartItems']\n order = cookieData['order']\n items = cookieData['items']\n context = {'items': items, 'order':order, 'cartItems':cartItems}\n return render(request, 'store/checkout.html', context)\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n\n print('Action:', action)\n print('Product:', productId)\n\n customer=request.user\n product=Product.objects.get(id=productId)\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n\n orderItem, created= OrderItem.objects.get_or_create(order=order, product=product)\n\n if action =='add':\n orderItem.quantity=(orderItem.quantity+1)\n\n elif action =='remove':\n orderItem.quantity=(orderItem.quantity-1)\n\n orderItem.save()\n\n if orderItem.quantity<=0:\n orderItem.delete()\n\n return JsonResponse('Item was added', safe=False)\n\n\ndef registerPage(request):\n if request.user.is_authenticated:\n return redirect('store')\n else:\n form = RegisterForm()\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if request.POST[\"password\"] == request.POST[\"password_confirmation\"]:\n\n # user = User.objects.get(id=request.user.id)\n print(form.is_valid())\n if form.is_valid():\n # user.username = form.cleaned_data.get('username')\n # user.email = form.cleaned_data.get('email')\n # user.save()\n user = User.objects.create_user(\n username=form.cleaned_data['username'],\n email=form.cleaned_data['email'],\n password=form.cleaned_data['password']\n )\n print(\"Here\")\n messages.success(request, 'Account was created for {}'.format(user))\n return redirect('store')\n else:\n messages.error(request, 'Data is invalid')\n return redirect('store')\n\n else:\n messages.error(request, 'Passwords are not the same.')\n return redirect('store')\n\n context = {'form': form}\n return render(request, 'store/register.html', context)\n\n\ndef loginPage(request):\n page = 'login'\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('store')\n 
return render(request, 'store/login.html', {'page':page})\n\n\ndef processOrder(request):\n transaction_id = datetime.datetime.now().timestamp()\n data = json.loads(request.body)\n\n if request.user.is_authenticated:\n customer = request.user\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n total = float(data['form']['total'])\n order.transaction_id = transaction_id\n\n if total == order.all_cart_value:\n order.complete = True\n order.save()\n\n\n else:\n customer, order = guestOrder(request, data)\n\n total = float(data['form']['total'])\n order.transaction_id = transaction_id\n\n if total == order.all_cart_value:\n order.complete = True\n order.save()\n\n ShippingAddress.objects.create(\n customer=customer,\n order=order,\n address=data['shipping']['address'],\n city=data['shipping']['city'],\n postcode=data['shipping']['postcode'],\n )\n return JsonResponse('Payment submitted..', safe=False)\n\n# Create your views here.\n\n","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"585209283","text":"#!/usr/bin/env python\n\n'''finalSteps.py - OpenFOAM working panel'''\n\nimport wx\nfrom createRun import CreateRunFile\nfrom definedWxClasses import RadioBtnsSizer, LabelFormat, GaugeFrame\nimport const\nfrom jobsStatus import HandleJobsFrame\n\nclass TestFrame(wx.Frame):\n def __init__(self, loginInfo, *args, **kwargs):\n wx.Frame.__init__(self, None, *args, **kwargs)\n\n self.Center()\n self.SetSizerAndFit(FinalStepsSizer(self, loginInfo=loginInfo))\nclass FinalStepsSizer(wx.BoxSizer):\n '''Sizer to define the final steps in'''\n def __init__(self, parent, foamDataSizer=None, loginInfo=None, *args, **kwargs):\n wx.BoxSizer.__init__(self, wx.VERTICAL)\n\n '''some default values for bindings'''\n self.foamDataSizer = foamDataSizer\n self.loginInfo = loginInfo\n self.parent = parent\n\n '''CLUSTER SPECIFIC WIDGETS'''\n '''path to remote directory'''\n remLbl = LabelFormat(parent, label=\"Workspace folder:\")\n remPath = LabelFormat(parent, label=loginInfo['remotePath'], size=(500,-1))\n remSizer = wx.BoxSizer(wx.HORIZONTAL)\n remSizer.AddMany([remLbl, remPath])\n\n \"\"\"generate run\"\"\"\n genRunSizer = RadioBtnsSizer(parent, check=self.GenerateRun(),\n label=\"Use available 'run.pbs'?\", disable=True, yesbtnName=\"genRun\")\n\n '''queue sizer'''\n queueLbl = LabelFormat(parent, label='Insert cluster queue name:')\n self.queueName = wx.TextCtrl(parent, size=const.textBox, value='user')\n queueSizer = wx.BoxSizer(wx.HORIZONTAL)\n queueSizer.AddMany([queueLbl, self.queueName])\n\n\n '''BUTTONS'''\n space = (20, -1)\n sendBtn = wx.Button(parent, -1, \"Send job to cluster\", size=const.btnSize)\n loadBtn = wx.Button(parent, -1, \"Jobs Management\", size=const.btnSize)\n sendBtn.Bind(wx.EVT_BUTTON, self.SendJob)\n loadBtn.Bind(wx.EVT_BUTTON, self.LoadCase)\n btnSizer = wx.BoxSizer(wx.HORIZONTAL)\n btnSizer.AddMany([sendBtn, space, loadBtn])\n\n\n '''final sizer (self)'''\n self.AddMany([const.hdelimit, const.hdelimit,\n (remSizer, 0, wx.LEFT|wx.EXPAND|wx.ALIGN_LEFT, 15), const.vdelimit,\n (genRunSizer, 0, wx.LEFT|wx.EXPAND|wx.ALIGN_LEFT, 15), const.vdelimit,\n (queueSizer, 0, wx.LEFT|wx.EXPAND|wx.ALIGN_LEFT, 15), const.vdelimit,\n (btnSizer, 0, wx.LEFT|wx.EXPAND|wx.ALIGN_LEFT, 15)\n ])\n\n def GenerateRun(self):\n '''runBtn event'''\n import os.path\n return os.path.exists(os.path.join(self.loginInfo['casePath'], 'run.pbs'))\n\n 
def SendJob(self, event, data={}):\n '''gather and submit all the inputed data to cluster'''\n self.loginInfo['queue'] = self.queueName.GetValue()\n '''submit and run the job on the cluster:'''\n if not self.parent.FindWindowByName(\"genRun\").GetValue():\n \"\"\"if a new run.pbs file has to be created:\"\"\"\n CreateRunFile(self.parent, self.foamDataSizer, self.loginInfo)\n GaugeFrame(action=\"submit\", loginInfo=self.loginInfo).Show()\n\n def LoadCase(self, event):\n '''download case data from the cluster'''\n HandleJobsFrame(self.loginInfo).Show()\n\nif __name__ == '__main__':\n app = wx.App(False)\n from remote_data import loginInfo\n TestFrame(loginInfo=loginInfo).Show()\n app.MainLoop()\n","sub_path":"finalSteps.py","file_name":"finalSteps.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"479224689","text":"import math\n\nweight = input('Enter your weight in pounds: ')\nheight = input('Enter your height in inches: ')\n\nweight_in_kg = float(weight) * 0.45359237\nheight_in_met = float(height) * 0.0254\n\nbmi = (weight_in_kg / height_in_met**2)\n\nif bmi < 18.5:\n print('You\\'re underweight')\nelif bmi >= 18.5 and bmi < 25.0:\n print(\"You're in normal condition\")\nelif bmi >= 25.0 and bmi < 30:\n print(\"You're overweight\")\nelse:\n print(\"You're obese\")\n\n","sub_path":"basic/if_else/bmi_index.py","file_name":"bmi_index.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"13353566","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$','pages.views.index',),\n url(r'^about/', 'pages.views.about'),\n url(r'^beats/', 'pages.views.beats'),\n url(r'^code/', 'pages.views.code'),\n url(r'^s3direct/', include('s3direct.urls')),\n url(r'^portfolio/', 'pages.views.portfolio'),\n url(r'^client/', 'client.views.login'),\n\turl(r'^accounts/', include('registration.backends.default.urls')),\n)\n","sub_path":"atronandbeyond/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103659001","text":"metadata = {\n 'protocolName': 'Zymo Extraction/BP Detection - 24 Samples',\n 'author': 'Chaz <chaz@opentrons.com>; Anton <acjs@stanford.edu>',\n 'source': 'COVID-19 Project',\n 'apiLevel': '2.2'\n}\n\n# Protocol constants\nQPCR_LABWARE = 'ab_96_aluminumblock'\n\nMASTER_MIX_MAP = '''\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\t\t\t\t\tReaction\tReaction\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\t\t\t\t\tReaction\tEndogenous\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\nReaction\tReaction\tReaction\tEndogenous\tEndogenous\tEndogenous\n'''\n\nSAMPLE_MAP = '''\nSample 1\tSample 9\tSample 17\tSample 1\tSample 9\tSample 17\t\t\t\t\tPCD 4\tPCD 4\nSample 2\tSample 10\tSample 18\tSample 2\tSample 10\tSample 18\t\t\t\t\tNegative\tNegative\nSample 3\tSample 11\tSample 19\tSample 3\tSample 11\tSample 19\nSample 4\tSample 12\tSample 20\tSample 
4\tSample 12\tSample 20\nSample 5\tSample 13\tSample 21\tSample 5\tSample 13\tSample 21\nSample 6\tSample 14\tSample 22\tSample 6\tSample 14\tSample 22\nSample 7\tSample 15\tSample 23\tSample 7\tSample 15\tSample 23\nSample 8\tSample 16\tSample 24\tSample 8\tSample 16\tSample 24\n'''\n\n# Master mix locations on the eppendorf tube holder\nREAGENT_LOCATIONS = {\n 'Reaction': 'A1',\n 'Endogenous': 'B1',\n 'Standard': 'B2',\n 'Water': 'B3',\n 'Negative': 'B3', # Synonym for water\n 'IEC RNA': 'C1',\n 'PCD 8': 'C5',\n 'PCD 7': 'C6',\n 'PCD 6': 'D1',\n 'PCD 5': 'D2',\n 'PCD 4': 'D3',\n 'PCD 3': 'D4',\n 'PCD 2': 'D5',\n 'PCD 1': 'D6'\n}\n\n# Transfer volumes\nMIX_VOLUME = 15\nSAMPLE_VOLUME = 5\n\n# Tip locations\nSAMPLE_TIP_LOCATIONS = ['2', '3']\n\nimport re\nimport itertools\n\ndef transfer_with_primitives(p, source, dest, volume=SAMPLE_VOLUME, mix=19):\n p.pick_up_tip()\n\n p.aspirate(1, source)\n for _ in range(2):\n p.aspirate(mix, source)\n p.dispense(mix, source)\n\n p.aspirate(volume - 1, source)\n p.dispense(volume - 1, dest)\n\n for _ in range(2):\n p.aspirate(mix, dest)\n p.dispense(mix, dest)\n\n p.dispense(1, dest)\n p.blow_out(dest.top())\n p.air_gap(3, -1)\n p.drop_tip()\n\ndef run(protocol):\n sample_tip_racks = [\n protocol.load_labware(\n 'opentrons_96_filtertiprack_20ul', s) for s in SAMPLE_TIP_LOCATIONS\n ]\n p20 = protocol.load_instrument('p20_single_gen2', 'right', tip_racks=sample_tip_racks)\n\n tempdeck = protocol.load_module('tempdeck', '4')\n tempdeck.set_temperature(4)\n\n tempplate = tempdeck.load_labware(\n QPCR_LABWARE)\n\n tempplate_wells_by_row = list(itertools.chain(*tempplate.rows()))\n\n reagent_rack = protocol.load_labware(\n 'opentrons_24_tuberack_nest_1.5ml_snapcap', 5)\n\n elution_plate = protocol.load_labware(\n 'opentrons_96_aluminumblock_nest_wellplate_100ul', '1',\n 'Station B Plate on Al Block')\n\n p20.flow_rate.aspirate = 10\n p20.flow_rate.dispense = 15\n p20.flow_rate.blow_out = 50\n\n # Distribute Master Mixes\n # Split up the master mix map into a list\n master_mix_labels = [\"\"] * 12*8\n for i, row in enumerate(MASTER_MIX_MAP.strip('\\n ').split('\\n')):\n for j, label in enumerate(row.split('\\t')):\n master_mix_labels[i*12+j] = label\n\n master_mix_wells = dict()\n\n # Figure out unique master mixes on the map, so we can use a single\n # tip for each\n for mix in set(master_mix_labels):\n if mix == '':\n continue\n master_mix_wells[mix] = list()\n\n for mix, well in zip(master_mix_labels, tempplate_wells_by_row):\n if mix == '':\n continue\n master_mix_wells[mix].append(well)\n\n # Do the master mix transfer\n for mix, wells in master_mix_wells.items():\n p20.pick_up_tip()\n for well in wells:\n p20.transfer(MIX_VOLUME, reagent_rack[REAGENT_LOCATIONS[mix]], well, new_tip='never')\n p20.blow_out(well.top())\n p20.drop_tip()\n\n # Transfer the samples and controls\n # Same deal as above, except every transfer gets its own tip,\n # which makes it easier because we don't bother optimizing for tip use.\n # Transfers are made row by row, left to right.\n\n sample_labels = [\"\"] * 12*8\n for i, row in enumerate(SAMPLE_MAP.strip('\\n ').split('\\n')):\n for j, label in enumerate(row.split('\\t')):\n sample_labels[i*12+j] = label\n\n sample_wells = zip(sample_labels, tempplate_wells_by_row)\n for sample, dest_well in sample_wells:\n # Determine whether we are dealing with an actual sample, which we\n # will take from the input sample plate; or a control, which we will\n # take from a location on the reagent rack. 
We expect samples to be\n # numbered, and will take the sample from the well matching the sample\n # number (eg Sample 1 = well 1 = plate.wells()[0])\n\n if sample == '':\n continue\n\n sample_match = re.match(r'Sample ([0-9]+)', sample)\n if sample_match:\n source_well = elution_plate.wells()[int(sample_match.groups()[0]) - 1]\n else:\n source_well = reagent_rack[REAGENT_LOCATIONS[sample]]\n\n transfer_with_primitives(p20, source_well, dest_well)\n","sub_path":"protocols/OMI_Clinical/StationC-Clinical-2020-04-16/station-C-24clinical-2020-04-16.py","file_name":"station-C-24clinical-2020-04-16.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165673700","text":"# https://atcoder.jp/contests/nikkei2019-ex/tasks/nikkei2019ex_e\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda:sys.stdin.readline().rstrip()\ndef resolve():\n n=1000\n x=1789997546303\n ans=[1]*1001\n while(n):\n ans[n]=x\n x=3*x+1 if(x&1) else x//2\n n-=1\n print(ans[int(input())])\nresolve()\n","sub_path":"全国統一プログラミング王決定戦_エキシビジョン/f_コラッツ問題.py","file_name":"f_コラッツ問題.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"178932677","text":"\"\"\"\r\n@file\r\n@brief Optimisation of :epkg:`ONNX` graphs.\r\n\"\"\"\r\nfrom onnx.helper import make_graph\r\nfrom ._onnx_optimisation_common import ( # pylint: disable=E0611\r\n _apply_optimisation_on_graph, _apply_remove_node_fct_node)\r\n\r\n\r\ndef onnx_remove_node_unused(onnx_model, recursive=True, debug_info=None, **options):\r\n \"\"\"\r\n Removes unused nodes of the graph. An unused node\r\n is not involved in the output computation.\r\n\r\n @param onnx_model onnx model\r\n @param recursive looks into subgraphs\r\n @param debug_info debug information (private)\r\n @param options unused\r\n @return new onnx _model\r\n \"\"\"\r\n if debug_info is None:\r\n debug_info = [str(type(onnx_model)).rsplit(\r\n '.', maxsplit=1)[-1].strip(\"'>\")]\r\n else:\r\n debug_info = (debug_info +\r\n [str(type(onnx_model)).rsplit('.', maxsplit=1)[-1].strip(\"'>\")])\r\n\r\n if hasattr(onnx_model, 'graph'):\r\n return _apply_optimisation_on_graph(\r\n onnx_remove_node_unused, onnx_model,\r\n recursive=recursive, debug_info=debug_info,\r\n **options)\r\n\r\n graph = onnx_model\r\n data = {}\r\n valid = {}\r\n edges = {}\r\n\r\n for init in graph.initializer:\r\n data[init.name, 0] = init\r\n\r\n for node in graph.node:\r\n data[node.name, 1] = node\r\n for inp in node.input:\r\n data[inp, 0] = node\r\n edges[(inp, 0), (node.name, 1)] = node\r\n for out in node.output:\r\n data[out, 0] = node\r\n edges[(node.name, 1), (out, 0)] = node\r\n\r\n for out in graph.output:\r\n valid[out.name, 0] = True\r\n\r\n modif = 1\r\n while modif > 0:\r\n modif = 0\r\n for e1, e2 in edges: # pylint: disable=E1141\r\n if valid.get(e2, False) and not valid.get(e1, False):\r\n valid[e1] = True\r\n modif += 1\r\n\r\n new_nodes = [n for n in graph.node if (n.name, 1) in valid]\r\n new_inits = [n for n in graph.initializer if (n.name, 0) in valid]\r\n\r\n if recursive:\r\n # Handles subgraphs.\r\n for i in range(len(new_nodes)): # pylint: disable=C0200\r\n node = new_nodes[i]\r\n if node is None or not (node.attribute): # pylint: disable=C0325\r\n continue\r\n new_nodes[i] = _apply_remove_node_fct_node(\r\n onnx_remove_node_unused,\r\n node, recursive=True, debug_info=debug_info + 
[node.name])\r\n\r\n # Finally create the new graph.\r\n nodes = list(filter(lambda n: n is not None, new_nodes))\r\n graph = make_graph(nodes, onnx_model.name,\r\n onnx_model.input, onnx_model.output,\r\n new_inits)\r\n\r\n graph.value_info.extend(onnx_model.value_info) # pylint: disable=E1101\r\n return graph\r\n","sub_path":"mlprodict/onnx_tools/optim/onnx_optimisation_unused.py","file_name":"onnx_optimisation_unused.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"455834461","text":"import MySQLdb.cursors\n\nconf = {\n \"host\": \"localhost\",\n \"port\": 3306,\n \"user\": \"root\",\n \"db\": \"isuconp\",\n \"charset\": \"utf8mb4\",\n \"cursorclass\": MySQLdb.cursors.DictCursor,\n \"autocommit\": True\n}\n\ndb = MySQLdb.connect(**conf)\ncur = db.cursor()\n\nfor i in range(1, 10001):\n cur.execute(\"SELECT * FROM `posts` WHERE id = %s\", (i,))\n post = cur.fetchone()\n\n post_id = str(post['id'])\n mime = post['mime']\n if (mime == 'image/jpeg'):\n ext = 'jpg'\n elif (mime == 'image/png'):\n ext = 'png'\n elif (mime == 'image/gif'):\n ext = 'gif'\n print(post_id + '.' + ext)\n with open('/home/isucon/private_isu/webapp/public/image/' + post_id + '.' + ext, 'wb') as f:\n f.write(post['imgdata'])\n","sub_path":"write_image.py","file_name":"write_image.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"319714315","text":"# Problem 4 / Largest palindrome product\n#\n# A palindromic number reads the same both ways. The largest palindrome made from the product\n# of two 2-digit numbers is 9009 = 91 × 99.\n#\n# Find the largest palindrome made from the product of two 3-digit numbers.\n\nmaximum = 0\n# range's upper bound is exclusive, so 1000 is needed for 999 to be included\nfor i in range(100, 1000):\n for j in range(100, 1000):\n product = i * j\n if str(product) == str(product)[::-1] and product > maximum:\n maximum = product\nprint(maximum)\n","sub_path":"problem004.py","file_name":"problem004.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"243982093","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"home/\", views.home_page, name=\"home_page\"),\n path(\"home/<int:habitpk>/delete/\", views.delete_habit, name=\"delete_habit\"),\n path(\"create_profile/\", views.create_profile, name=\"create_profile\"),\n path(\"add_habit/\", views.add_habit, name=\"add_habit\"),\n path(\"home/habit_info/<int:habitpk>/\", views.habit_info, name=\"habit_info\"),\n path(\"home/habit_info/<int:habitpk>/add_log/\", views.add_log, name=\"add_log\"),\n path(\n \"home/habit_info/<int:habitpk>/delete_log/<int:logpk>/\",\n views.delete_log,\n name=\"delete_log\",\n ),\n path(\"home/<int:habitpk>/edit/\", views.edit_habit, name=\"edit_habit\"),\n]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"403609769","text":"\r\nimport sys\r\nimport boto3\r\nimport mysql.connector\r\nimport os\r\nimport pymysql\r\nimport time\r\n\r\n\r\ndef convert_domain_to_database(domain_obj, isp_name):\r\n print(\"Converting Data to Database\")\r\n print(\"connecting to online mysql\")\r\n ENDPOINT=\"database-2.cuzgntwsj1dy.us-east-2.rds.amazonaws.com\"\r\n PORT=\"3306\"\r\n USR=\"admin\"\r\n PW = \"Cyberhub\"\r\n REGION=\"us-east-2a\"\r\n DBNAME=\"database-2\"\r\n\r\n\r\n\r\n db = pymysql.connect(host = ENDPOINT, user = USR, password = PW)\r\n\r\n cursor = db.cursor()\r\n\r\n cursor.execute(\"select version()\")\r\n print(\"cursor: \"+str(cursor))\r\n\r\n data = cursor.fetchone()\r\n print(str(data))\r\n\r\n sql = '''use ISP'''\r\n cursor.execute(sql)\r\n\r\n sql = '''show tables'''\r\n print(cursor.execute(sql))\r\n\r\n tables = cursor.fetchall()\r\n\r\n print(tables)\r\n\r\n time_now = time.strftime('%Y-%m-%d %H:%M:%S')\r\n print(time_now)\r\n\r\n\r\n #MAKE an ISP Object in the DB - Done\r\n sql = '''\r\n insert into ISP(isp_name, time_date) values('%s', '%s')''' % (isp_name,time_now)\r\n cursor.execute(sql)\r\n db.commit()\r\n\r\n #MAKE A DNS Object in the DB - nearly done, need to change one of the columns to point to ISP id, not domain\r\n sql = '''\r\n insert into DNS(dns_address, dns_name, dns_public) values('%s', '%s', '%s')''' % (\"test 3\",\"test should be 1 here ->\", 1)\r\n cursor.execute(sql)\r\n db.commit()\r\n\r\n #MAKE A Domain Object in the DB\r\n\r\n\r\n\r\ndef show_ips():\r\n print(\"Showing IPs\")\r\n print(\"connecting to online mysql\")\r\n ENDPOINT=\"database-2.cuzgntwsj1dy.us-east-2.rds.amazonaws.com\"\r\n PORT=\"3306\"\r\n USR=\"admin\"\r\n PW = \"Cyberhub\"\r\n REGION=\"us-east-2a\"\r\n DBNAME=\"database-2\"\r\n\r\n db = pymysql.connect(host = ENDPOINT, user = USR, password = PW)\r\n\r\n cursor = db.cursor()\r\n\r\n cursor.execute(\"select version()\")\r\n print(\"cursor: \"+str(cursor))\r\n\r\n data = cursor.fetchone()\r\n print(str(data))\r\n\r\n\r\n sql = '''use ISP'''\r\n print(cursor.execute(sql))\r\n\r\n sql = '''show tables'''\r\n print(cursor.execute(sql))\r\n\r\n sql = '''select * from Domain'''\r\n cursor.execute(sql)\r\n print(cursor.fetchall())\r\n\r\ndef create_table():\r\n print(\"CREATING TABLE\")\r\n\r\n\r\ndef connect_AWS():\r\n print(\"connecting to online mysql\")\r\n ENDPOINT=\"database-2.cuzgntwsj1dy.us-east-2.rds.amazonaws.com\"\r\n PORT=\"3306\"\r\n USR=\"admin\"\r\n PW = \"Cyberhub\"\r\n REGION=\"us-east-2a\"\r\n DBNAME=\"database-2\"\r\n\r\n db = pymysql.connect(host = ENDPOINT, user = USR, password = PW)\r\n\r\n cursor = db.cursor()\r\n\r\n cursor.execute(\"select version()\")\r\n print(\"cursor: \"+str(cursor))\r\n\r\n data = cursor.fetchone()\r\n print(str(data))\r\n\r\n\r\n sql = '''drop database kgptalkie'''\r\n cursor.execute(sql)\r\n\r\n sql = '''create database kgptalkie'''\r\n cursor.execute(sql)\r\n\r\n cursor.connection.commit()\r\n\r\n sql = '''use kgptalkie'''\r\n cursor.execute(sql)\r\n\r\n sql = '''\r\n create table person (\r\n id int not null auto_increment,\r\n fname text,\r\n lname text,\r\n primary key (id)\r\n )\r\n '''\r\n cursor.execute(sql)\r\n\r\n sql = '''\r\n create table test (\r\n id int not null auto_increment,\r\n test_one text,\r\n test_two text,\r\n primary key (id)\r\n )\r\n '''\r\n cursor.execute(sql)\r\n\r\n sql = '''show tables'''\r\n print(cursor.execute(sql))\r\n print(cursor.fetchall())\r\n\r\n sql = '''\r\n insert into person(fname, lname) values('%s', '%s')''' % ('laxmi', 'kant')\r\n cursor.execute(sql)\r\n db.commit()\r\n\r\n sql = '''select * from person'''\r\n cursor.execute(sql)\r\n print(cursor.fetchall())\r\n\r\n print(\"END!\")\r\n\r\n\r\ndef main():\r\n\r\n\r\n\r\n #connect_AWS()\r\n convert_domain_to_database(domain_obj = None, isp_name = \"TEST_20_May\")\r\n #show_ips()\r\n\r\n #connect_AWS()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n main()\r\n","sub_path":"Make Results/AWS_MySQL.py","file_name":"AWS_MySQL.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"202741639","text":"# Uses Python3\n'''The last digit of the sum of Fib(0) to Fib(n) numbers.\n We need to only sum the last digits of Fib numbers.\n But, if n is too large, this takes time. We can make\n use of Pisano period of 10: \n Fib(n) mod m = Fib(n mod p) mod m\n because Fib(n) mod m is a periodic sequence with period p.\n That is, if g(n) = Fib(n) mod m, g(k + p) = g(k)\n The periodic sequence is called Pisano period.\n \n To compute the last digit of sums, merely compute Pisano period \n when m = 10 (Ans. p = 60) and compute only first p Fibonacci numbers.'''\n\ndef pisano_period(m):\n '''Return the Pisano period for m along with the Fibonacci series over that period'''\n fib_series = [0,1]\n i = 2\n while True:\n new = fib_series[i-1] + fib_series[i-2]\n fib_series.append(new)\n i += 1\n if new % m == 0:\n new = fib_series[i-1] + fib_series[i-2]\n fib_series.append(new)\n i += 1\n if new % m == 1:\n break\n return i-2, fib_series[:-2]\n\ndef fibonacci_last_digit_of_sum(n):\n # Compute Pisano period of Fib(n) modulo m as p\n ## Not required to compute p for this problem. 
Pisano period of 10 = 60\n p,fib_series = pisano_period(10)\n fib_series2 = list(map(lambda x: (x*x) % 10, fib_series))\n \n k = n // p\n r = n % p\n\n return (sum(fib_series2) * k + sum(fib_series2[:r+1])) % 10\n\ndef main():\n n = int(input())\n c = fibonacci_last_digit_of_sum(n)\n print(c)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Programming_Challenges_Solutions/week2_algorithmic_warmup/8_last_digit_of_the_sum_of_squares_of_fibonacci_numbers/fibonacci_sum_of_squares_last_digit.py","file_name":"fibonacci_sum_of_squares_last_digit.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"212015896","text":"\n### one code one day\n### 2020/03/12\n### leetcode 523 Continuous Subarray Sum\n### dynamic programming\n### dp keeps the running sum from the start of the array to the current index, which makes every interval sum easy to compute\n\nfrom typing import List\n\ndef checkSubarraySum(self, nums: List[int], k: int) -> bool:\n\t### boundary check\n if(len(nums) < 2):\n return False\n ### when k is 0, handle it separately\n if(k == 0):\n for i in range(len(nums)-1):\n if(nums[i] == 0 and nums[i+1] == 0):\n return True\n return False\n ### if k is negative, convert it to positive\n k = max(k, -k)\n ### start the dp\n dp = [0] * len(nums)\n dp[0] = nums[0]\n for i in range(1,len(nums)):\n dp[i] = dp[i-1] + nums[i]\n if(dp[i] % k == 0):\n return True\n for j in range(0,i-1):\n if((dp[i] - dp[j]) % k == 0):\n return True\n return False\t","sub_path":"动态规划/checkSubArraySum.py","file_name":"checkSubArraySum.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"25374572","text":"import pytest\nimport numpy as np\n\nimport warnings\nfrom kmapper import KeplerMapper\n\n\nclass TestLogging():\n \"\"\" Simple tests that confirm map completes at each logging level\n \"\"\"\n\n def test_runs_with_logging_0(self):\n mapper = KeplerMapper(verbose=0)\n data = np.random.rand(100, 2)\n graph = mapper.map(data)\n\n def test_runs_with_logging_1(self):\n mapper = KeplerMapper(verbose=1)\n data = np.random.rand(100, 2)\n graph = mapper.map(data)\n\n def test_runs_with_logging_2(self):\n mapper = KeplerMapper(verbose=2)\n data = np.random.rand(100, 2)\n graph = mapper.map(data)\n\n\nclass TestLens():\n # TODO: most of these tests only accommodate the default option. 
They need to be extended to incorporate all possible transforms.\n\n # one test for each option supported\n def test_str_options(self):\n mapper = KeplerMapper()\n\n data = np.random.rand(100, 10)\n\n options = [\n ['sum', np.sum],\n ['mean', np.mean],\n ['median', np.median],\n ['max', np.max],\n ['min', np.min],\n ['std', np.std],\n ['l2norm', np.linalg.norm]\n ]\n\n first_point = data[0]\n last_point = data[-1]\n for tag, func in options:\n lens = mapper.fit_transform(data, projection=tag, scaler=None)\n np.testing.assert_almost_equal(lens[0][0], func(first_point))\n np.testing.assert_almost_equal(lens[-1][0], func(last_point))\n\n def test_lens_size(self):\n mapper = KeplerMapper()\n\n data = np.random.rand(100, 10)\n lens = mapper.fit_transform(data)\n\n assert lens.shape[0] == data.shape[0]\n\n def test_map_custom_lens(self):\n # I think that map currently requires fit_transform to be called first\n mapper = KeplerMapper()\n data = np.random.rand(100, 2)\n graph = mapper.map(data)\n assert graph[\"meta_data\"][\"projection\"] == \"custom\"\n assert graph[\"meta_data\"][\"scaler\"] == \"None\"\n\n def test_projection(self):\n # accomodate scaling, values are in (0,1), but will be scaled slightly\n atol = 0.1\n\n mapper = KeplerMapper()\n data = np.random.rand(100, 5)\n lens = mapper.fit_transform(data, projection=[0, 1])\n np.testing.assert_allclose(lens, data[:, :2], atol=atol)\n\n lens = mapper.fit_transform(data, projection=[0])\n np.testing.assert_allclose(lens, data[:, :1], atol=atol)\n\n\nclass TestAPIMaintenance():\n \"\"\" These tests just confirm that new api changes are backwards compatible\"\"\"\n\n def test_warn_old_api(self):\n \"\"\" Confirm old api works but throws warning \"\"\"\n\n mapper = KeplerMapper()\n data = np.random.rand(100, 10)\n lens = mapper.fit_transform(data)\n\n with pytest.deprecated_call():\n graph = mapper.map(lens, data, nr_cubes=10)\n\n with pytest.deprecated_call():\n graph = mapper.map(lens, data, overlap_perc=10)\n\n with pytest.deprecated_call():\n graph = mapper.map(lens, data, nr_cubes=10, overlap_perc=0.1)\n\n def test_new_api_old_defaults(self):\n mapper = KeplerMapper()\n data = np.random.rand(100, 10)\n lens = mapper.fit_transform(data)\n\n _ = mapper.map(lens, data, nr_cubes=10)\n c2 = mapper.coverer\n\n assert c2.overlap_perc == 0.1\n\n _ = mapper.map(lens, data, overlap_perc=0.1)\n c2 = mapper.coverer\n\n assert c2.nr_cubes == 10\n\n def test_no_warn_normally(self, recwarn):\n \"\"\" Confirm that deprecation warnings behave as expected\"\"\"\n mapper = KeplerMapper()\n data = np.random.rand(100, 10)\n lens = mapper.fit_transform(data)\n\n warnings.simplefilter('always')\n graph = mapper.map(lens, data)\n\n assert len(recwarn) == 0\n assert DeprecationWarning not in recwarn\n","sub_path":"test/test_mapper.py","file_name":"test_mapper.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"196504229","text":"# -*- coding: utf-8 -*-\nimport base64\nimport logging\nimport os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom gcloud.conf import settings\nfrom pipeline_plugins.components.utils import cc_get_ips_info_by_str\n\nget_client_by_user = settings.ESB_GET_CLIENT_BY_USER\nlogger = logging.getLogger(\"root\")\n\nSCRIPT_CONTENT = \"\"\"\nfile_path=$1\n\nif [ ! 
-f \"${file_path}\" ]; then\n exit 1\nfi\n\nbase64 ${file_path}\n\"\"\"\nSCRIPT_TYPE = \"1\"\nJOB_WAIT_TIME_OUT = 60\n\n\ndef get_job_content(remote_files, operator, biz_cc_id):\n \"\"\"\n Fetch base64-encoded file contents from remote servers, given IPs and file paths\n @param remote_files: file list [{\"file_path\":\"\", \"ip\":\"a single IP only\", \"job_account\":\"\"}]\n @param operator: operator username\n @param biz_cc_id: business ID\n @return: {\n \"success\": [\n {\"file_name\": \"file_name\", \"content\": \"content\", \"ip\": \"1.1.1.2\"}\n ],\n \"failure\": [\n {\"file_name\": \"file_name\", \"ip\": \"1.1.1.1\", \"message\": \"error\"}\n ]\n }\n \"\"\"\n client = get_client_by_user(operator)\n job_execute_suc_records = []\n job_execute_fail_records = []\n\n ip_str = \",\".join([remote_file[\"ip\"] for remote_file in remote_files])\n ip_info = cc_get_ips_info_by_str(\n username=operator,\n biz_cc_id=biz_cc_id,\n ip_str=ip_str,\n use_cache=False,\n )\n ip_list_result = [{\"ip\": _ip[\"InnerIP\"], \"bk_cloud_id\": _ip[\"Source\"]} for _ip in ip_info[\"ip_result\"]]\n _ip_list_result = [_ip[\"InnerIP\"] for _ip in ip_info[\"ip_result\"]]\n\n for remote_file in remote_files:\n script_param = remote_file[\"file_path\"]\n _, file_name = os.path.split(script_param)\n job_account = remote_file[\"job_account\"]\n\n if remote_file[\"ip\"] not in _ip_list_result:\n job_execute_fail_records.append(\n {\n \"file_name\": file_name,\n \"ip\": remote_file[\"ip\"],\n \"message\": \"IP information does not exist in cmdb\",\n \"ip_list\": remote_file[\"ip\"],\n }\n )\n continue\n for ip_list in ip_list_result:\n if remote_file[\"ip\"] != ip_list[\"ip\"]:\n continue\n job_kwargs = {\n \"bk_biz_id\": biz_cc_id,\n \"account\": job_account,\n \"ip_list\": [ip_list],\n \"script_param\": base64.b64encode(script_param.encode(\"utf-8\")).decode(\"utf-8\"),\n \"script_type\": SCRIPT_TYPE,\n \"script_content\": base64.b64encode(SCRIPT_CONTENT.encode(\"utf-8\")).decode(\"utf-8\"),\n }\n job_result = client.job.fast_execute_script(job_kwargs)\n logger.info(\"job_result: {result}, job_kwargs: {kwargs}\".format(result=job_result, kwargs=job_kwargs))\n if job_result[\"result\"]:\n job_instance_id = job_result[\"data\"][\"job_instance_id\"]\n job_execute_suc_records.append(({\"file_name\": file_name, \"ip\": remote_file[\"ip\"]}, job_instance_id))\n else:\n job_execute_fail_records.append(\n {\n \"file_name\": file_name,\n \"ip\": remote_file[\"ip\"],\n \"message\": job_result[\"message\"],\n \"ip_list\": ip_list,\n }\n )\n\n polling_job_results = []\n with ThreadPoolExecutor(max_workers=10) as t:\n all_task = [t.submit(get_job_instance_log, task, operator, biz_cc_id) for task in job_execute_suc_records]\n wait(all_task, return_when=ALL_COMPLETED)\n for job_result in all_task:\n polling_job_results.append(job_result.result())\n # collect the polling results\n result_success = []\n for polling_job_result in polling_job_results:\n if polling_job_result[\"result\"]:\n result_success.append(\n {\n \"file_name\": polling_job_result[\"key\"][\"file_name\"],\n \"content\": polling_job_result[\"log_content\"],\n \"ip\": polling_job_result[\"key\"][\"ip\"],\n }\n )\n else:\n job_execute_fail_records.append(\n {\n \"file_name\": polling_job_result[\"key\"][\"file_name\"],\n \"ip\": polling_job_result[\"key\"][\"ip\"],\n \"message\": polling_job_result[\"message\"],\n }\n )\n result = {\"failure\": job_execute_fail_records, \"success\": result_success}\n\n return result\n\n\ndef get_job_instance_log(job_instance_record, operator, bk_biz_id):\n \"\"\"\n Poll for the job execution result\n @param job_instance_record: [({\"file_name\": file_name, \"ip\": remote_file[\"ip\"]}, job_instance_id)]\n @param operator: admin\n @param bk_biz_id: 123\n @return:\n \"\"\"\n client = get_client_by_user(operator)\n get_job_instance_log_kwargs = {\"job_instance_id\": job_instance_record[1], \"bk_biz_id\": bk_biz_id}\n get_job_instance_log_return = client.job.get_job_instance_log(get_job_instance_log_kwargs)\n if not get_job_instance_log_return[\"result\"]:\n return {\"result\": False, \"message\": get_job_instance_log_return[\"message\"], \"key\": job_instance_record[0]}\n else:\n start_time = time.time()\n while time.time() < start_time + JOB_WAIT_TIME_OUT:\n job_status = get_job_instance_log_return[\"data\"][0][\"status\"]\n if job_status == 3:\n return {\n \"result\": True,\n \"key\": job_instance_record[0],\n \"log_content\": \"\\n\".join(\n [\n job_log[\"step_results\"][0][\"ip_logs\"][0][\"log_content\"]\n for job_log in get_job_instance_log_return[\"data\"]\n ]\n ),\n }\n elif job_status > 3:\n return {\n \"result\": False,\n \"message\": get_job_instance_log_return[\"message\"],\n \"key\": job_instance_record[0],\n }\n # sleep 1s before querying the API again\n time.sleep(1)\n get_job_instance_log_return = client.job.get_job_instance_log(get_job_instance_log_kwargs)\n return {\"result\": False, \"message\": _(\"Timed out requesting the job execution result\"), \"key\": job_instance_record[0]}\n","sub_path":"pipeline_plugins/components/utils/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166090932","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nQuick-n-dirty loading of UCSF ecog data.\n\"\"\"\n\n# Authors: Ronald L. Sprouse (ronald@berkeley.edu)\n# \n# Copyright (c) 2015, The Regents of the University of California\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# * Neither the name of the University of California nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import division\nimport numpy as np\nimport scipy.signal\nimport htkmfc\nimport os\n\ndef int2wavname(n):\n '''Convert an integer in the range 1 to 256 to the ECOG file naming\nconvention where channel 1 is '11' and Channel 256 is '464'.'''\n return \"Wav{:d}{:d}.htk\".format(\n int(np.ceil(n/64)),\n int(np.mod(n-1, 64) + 1)\n )\n\ndef get_bad_channels(ddir, subdir='Artifacts', fname='badChannels.txt'):\n '''Return an array of bad channel numbers in ddir.'''\n with open(os.path.join(ddir, subdir, fname)) as f:\n return [int(n) for n in f.readline().strip().split()]\n \ndef load_block(ddir, subdir):\n '''Load all the Wav*.htk channel data in a block subdir into an ndarray.\nReturn the data, sample rate, and bad channels.'''\n # Electrodes (channels) are numbered starting with 1.\n badchan = get_bad_channels(ddir)\n htk = htkmfc.open(os.path.join(ddir, subdir, int2wavname(1)))\n rate = htk.sampPeriod * 1E-3\n c1 = np.squeeze(htk.getall())\n dc1 = scipy.signal.decimate(c1, 10, axis=0)\n cdata = np.empty((256, dc1.shape[0], dc1.shape[1])) * np.nan\n if 1 not in badchan:\n cdata[0,:,:] = dc1\n for idx in range(2, 257):\n if idx not in badchan:\n htk = htkmfc.open(os.path.join(ddir, subdir, int2wavname(idx)))\n cdata[idx-1,:,:] = scipy.signal.decimate(np.squeeze(htk.getall()), 10, axis=0)\n return (cdata, rate, badchan)\n","sub_path":"python/ecog_ucsf.py","file_name":"ecog_ucsf.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60196398","text":"# coding: utf-8\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom io import BytesIO\nimport os\nimport sys\n\nglobal PORT\nPORT = int(sys.argv[1])\n\n\n# use the camera to capture images\nclass MyCamera:\n bPiCamera = None\n engine = None\n camera = None\n\n def __init__(self):\n try:\n from picamera import PiCamera\n except ImportError:\n import cv2 as cv\n\n if 'PiCamera' in dir():\n self.engine = PiCamera\n self.camera = PiCamera()\n self.camera.rotation = 180\n self.camera.resolution = (360, 240)\n self.bPiCamera = True\n else:\n self.engine = cv\n self.camera = cv.VideoCapture(0)\n self.bPiCamera = False\n\n def capture(self, num):\n if self.bPiCamera:\n frame = BytesIO()\n self.camera.annotate_text = \"frame:{}/100\".format(num)\n self.camera.capture(frame, 'jpeg')\n return frame.getvalue()\n else:\n ret, frame = self.camera.read()\n _, jpeg = self.engine.imencode('.jpg', frame)\n return jpeg.tobytes()\n\n\nclass MyRequestHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n # template string for the page output\n self.protocol_version = 'HTTP/1.1' # set the protocol version\n self.send_response(200) # set the response status code\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace;boundary=frame') # set the response headers\n self.end_headers()\n\n camera = MyCamera()\n for num in range(1, 101):\n frameBytes = camera.capture(num)\n self.wfile.write(b'--frame\r\n'\n b'Content-Type: image/jpeg\r\n\r\n' + frameBytes +\n b'\\r\\n')\n os._exit(0)\n\n\ndef run():\n global PORT\n httpd = HTTPServer(('', PORT), MyRequestHandler)\n httpd.serve_forever()\n\n\ndef createDaemon():\n # fork the process\n try:\n if os.fork() > 0:\n os._exit(0)\n except OSError as error:\n print('fork #1 failed: %d (%s)', error.errno, error.strerror)\n os._exit(1)\n os.chdir('/')\n os.setsid()\n os.umask(0)\n try:\n pid = os.fork()\n if pid > 0:\n print('Daemon PID %d', pid)\n os._exit(0)\n except OSError as error:\n print('fork #2 failed: %d (%s)', error.errno, error.strerror)\n os._exit(1)\n # redirect standard IO\n sys.stdout.flush()\n sys.stderr.flush()\n si = open(\"/dev/null\", 'r')\n so = open(\"/dev/null\", 'a+')\n se = open(\"/dev/null\", 'a+')\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n run()\n\n\ncreateDaemon()\n","sub_path":"back/models/py/camera/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"455280244","text":"from time import time\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport mlrose_hiive as mlrose\r\nfrom mlrose_hiive import neural\r\nfrom mlrose_hiive import runners\r\nfrom mlrose_hiive import algorithms\r\nfrom sklearn.preprocessing import StandardScaler, RobustScaler, OneHotEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import learning_curve\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\n# For part 2, neural network, freeze your architecture from ass1\r\n# - Can be tiny differences, but make sure activation and hyperparameters\r\n# - mlrose is bad at this, they didn't implement hyperparameters well or completely\r\n# - If performance is significantly worse, retune or swap APIs\r\n# - Can use new data sets, retune same as ass1 but don't talk about what you tuned\r\n# - mlrose will almost always need retuning for any data set, retune with mlrose\r\n# - You need LCA from Ass1 and then LCAs for different optimizers to compare them\r\n# - Compare each optimizer vs each other and backprop results (assignment 1)\r\n# - Compare performance with time and accuracy\r\n\r\ndef main(task):\r\n\r\n # Smart Grid\r\n data = pd.read_csv('smart_grid_2.csv')\r\n\r\n data = data.drop('stab', axis=1)\r\n\r\n y = data['stabf'].copy()\r\n y = y.astype('category')\r\n y = y.cat.codes\r\n X = data.drop('stabf', axis=1).copy()\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=27, stratify=y)\r\n\r\n\r\n train_scaler = RobustScaler()\r\n X_train_scaler = train_scaler.fit(X_train)\r\n X_train = X_train_scaler.transform(X_train)\r\n\r\n X_test = X_train_scaler.transform(X_test)\r\n\r\n if 'tuning_plots' in task:\r\n # # Tune RHC\r\n # restart_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n # # restart_list = [0]\r\n # rhc_accuracy_tuning_list = []\r\n # rhc_accuracy_test_list = []\r\n # for restarts in restart_list:\r\n # rhc_nn = neural.NeuralNetwork( hidden_nodes=[30, 30],\r\n # activation='relu',\r\n # algorithm='random_hill_climb',\r\n # max_iters=10000,\r\n # bias=True,\r\n # is_classifier=True,\r\n # learning_rate=0.1,\r\n # early_stopping=True,\r\n # clip_max=1e+10,\r\n # restarts=restarts,\r\n # schedule=mlrose.GeomDecay(1),\r\n # pop_size=200,\r\n # mutation_prob=0.1,\r\n 
# max_attempts=100,\r\n # random_state=27,\r\n # curve=True)\r\n # # print(y_train)\r\n # rhc_nn.fit(X_train, y_train)\r\n\r\n # y_train_pred = rhc_nn.predict(X_train)\r\n # # print(y_train_pred)\r\n # y_train_accuracy = accuracy_score(y_train, y_train_pred)\r\n # print(y_train_accuracy)\r\n # rhc_accuracy_tuning_list.append(y_train_accuracy)\r\n # y_test_pred = rhc_nn.predict(X_test)\r\n # y_test_accuracy = accuracy_score(y_test, y_test_pred)\r\n # rhc_accuracy_test_list.append(y_test_accuracy)\r\n # print(rhc_accuracy_tuning_list)\r\n\r\n # temperature_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n # #temperature_list = [1, 5, 10]\r\n # sa_accuracy_tuning_list = []\r\n # sa_accuracy_test_list = []\r\n # for temp in temperature_list:\r\n # sa_nn = neural.NeuralNetwork( hidden_nodes=[30, 30],\r\n # activation='relu',\r\n # algorithm='simulated_annealing',\r\n # max_iters=10000,\r\n # bias=True,\r\n # is_classifier=True,\r\n # learning_rate=0.1,\r\n # early_stopping=True,\r\n # clip_max=1e+10,\r\n # restarts=1,\r\n # schedule=mlrose.GeomDecay(temp),\r\n # pop_size=200,\r\n # mutation_prob=0.1,\r\n # max_attempts=500,\r\n # random_state=27,\r\n # curve=True)\r\n # sa_nn.fit(X_train, y_train)\r\n\r\n # y_train_pred = sa_nn.predict(X_train)\r\n # y_train_accuracy = accuracy_score(y_train, y_train_pred)\r\n # print(y_train_accuracy)\r\n # sa_accuracy_tuning_list.append(y_train_accuracy)\r\n # y_test_pred = sa_nn.predict(X_test)\r\n # y_test_accuracy = accuracy_score(y_test, y_test_pred)\r\n # sa_accuracy_test_list.append(y_test_accuracy)\r\n # print(sa_accuracy_tuning_list)\r\n\r\n # population_size_list = [25, 50, 75, 100, 125, 150, 175, 200]\r\n population_size_list = [100]\r\n ga_accuracy_tuning_list = []\r\n ga_accuracy_test_list = []\r\n for population in population_size_list:\r\n ga_nn = neural.NeuralNetwork( hidden_nodes=[30, 30],\r\n activation='relu',\r\n algorithm='genetic_alg',\r\n max_iters=5000,\r\n bias=True,\r\n is_classifier=True,\r\n learning_rate=0.001,\r\n early_stopping=True,\r\n clip_max=1e+10,\r\n restarts=0,\r\n schedule=mlrose.GeomDecay(1),\r\n pop_size=population,\r\n mutation_prob=0.1,\r\n max_attempts=200,\r\n random_state=27,\r\n curve=True)\r\n ga_nn.fit(X_train, y_train)\r\n\r\n y_train_pred = ga_nn.predict(X_train)\r\n y_train_accuracy = accuracy_score(y_train, y_train_pred)\r\n print(y_train_accuracy)\r\n ga_accuracy_tuning_list.append(y_train_accuracy)\r\n y_test_pred = ga_nn.predict(X_test)\r\n y_test_accuracy = accuracy_score(y_test, y_test_pred)\r\n ga_accuracy_test_list.append(y_test_accuracy)\r\n print(ga_accuracy_tuning_list)\r\n\r\n plt.rc(\"font\", size=8)\r\n plt.rc(\"axes\", titlesize=10)\r\n plt.rc(\"axes\", labelsize=10)\r\n plt.rc(\"xtick\", labelsize=8)\r\n plt.rc(\"ytick\", labelsize=8)\r\n plt.rc(\"legend\", fontsize=8)\r\n plt.rc(\"figure\", titlesize=11)\r\n fig, ax = plt.subplots(1, 3, figsize=(12, 3.5), sharey=True)\r\n plt.ylim([0.7, 1.0])\r\n fig.suptitle('Neural Networks Optimizer Tuning', fontsize=14)\r\n\r\n ax[0].scatter(restart_list, rhc_accuracy_tuning_list, label='Training', c='r', marker='x', s=10)\r\n ax[0].scatter(restart_list, rhc_accuracy_test_list, label='Test', c='g', marker='o', s=10)\r\n ax[0].set(xlabel='Restarts', ylabel = 'Accuracy', title='RHC Restarts')\r\n ax[0].legend()\r\n\r\n ax[1].scatter(temperature_list, sa_accuracy_tuning_list, label='Training', c='r', marker='x', s=10)\r\n ax[1].scatter(temperature_list, sa_accuracy_test_list, label='Test', c='g', marker='o', s=10)\r\n ax[1].set(xlabel='Temperature', ylabel = 
'Accuracy', title='SA Temperature')\r\n ax[1].legend()\r\n\r\n ax[2].scatter(population_size_list, ga_accuracy_tuning_list, label='Training', c='r', marker='x', s=10)\r\n ax[2].scatter(population_size_list, ga_accuracy_test_list, label='Test', c='g', marker='o', s=10)\r\n ax[2].set(xlabel='Population Size', ylabel = 'Accuracy', title='GA Population Size')\r\n ax[2].legend()\r\n ax[2].yaxis.tick_right()\r\n\r\n\r\n plt.show()\r\n\r\n if 'performance_graph' in task:\r\n cv=5\r\n n_jobs=1\r\n train_sizes=np.linspace(.2, 1.0, 5)\r\n\r\n # Assignment 1 learner (back propagation)\r\n bp_nn = MLPClassifier(hidden_layer_sizes=[30, 30], max_iter=100, random_state=27)\r\n bp_fit_t = time()\r\n bp_nn.fit(X_train, y_train)\r\n bp_fit_time = time() - bp_fit_t\r\n print('Back Propagation fit time: ' + str(bp_fit_time))\r\n bp_pred_t = time()\r\n bp_nn_pred = bp_nn.predict(X_test)\r\n bp_pred_time = time() - bp_pred_t\r\n print('Back Propagation predict time: ' + str(bp_pred_time))\r\n bp_nn_loss = bp_nn.loss_\r\n print('Back Propagation Loss: ' + str(bp_nn_loss))\r\n bp_loss_curve = bp_nn.loss_curve_\r\n\r\n print('Generating learning curve...')\r\n bp_train_sizes, bp_train_scores, bp_test_scores, bp_fit_times, _ = \\\r\n learning_curve(bp_nn, X_train, y_train, cv=cv, n_jobs=n_jobs,\r\n train_sizes=train_sizes,\r\n return_times=True)\r\n bp_train_scores_mean = np.mean(bp_train_scores, axis=1)\r\n bp_train_scores_std = np.std(bp_train_scores, axis=1)\r\n bp_test_scores_mean = np.mean(bp_test_scores, axis=1)\r\n bp_test_scores_std = np.std(bp_test_scores, axis=1)\r\n bp_fit_times_mean = np.mean(bp_fit_times, axis=1)\r\n bp_fit_times_std = np.std(bp_fit_times, axis=1)\r\n\r\n # Random Hill Climb\r\n print('Random Hill Climb...')\r\n rhc_nn = neural.NeuralNetwork( hidden_nodes=[30, 30],\r\n activation='relu',\r\n algorithm='random_hill_climb',\r\n max_iters=10000,\r\n bias=True,\r\n is_classifier=True,\r\n learning_rate=0.5,\r\n early_stopping=True,\r\n clip_max=1e+10,\r\n restarts=2,\r\n schedule=mlrose.GeomDecay(1),\r\n pop_size=200,\r\n mutation_prob=0.1,\r\n max_attempts=200,\r\n random_state=27,\r\n curve=True)\r\n rhc_fit_t = time()\r\n rhc_nn.fit(X_train, y_train)\r\n rhc_fit_time = time() - rhc_fit_t\r\n print('Random Hill Climb fit time: ' + str(rhc_fit_time))\r\n rhc_pred_t = time()\r\n rhc_nn_pred = rhc_nn.predict(X_test)\r\n rhc_pred_time = time() - rhc_pred_t\r\n print('Random Hill Climb predict time: ' + str(rhc_pred_time))\r\n rhc_nn_loss = rhc_nn.loss\r\n print('Random Hill Climb Loss: ' + str(rhc_nn_loss))\r\n\r\n rhc_loss_curve = rhc_nn.fitness_curve\r\n\r\n print('Generating learning curve...')\r\n rhc_train_sizes, rhc_train_scores, rhc_test_scores, rhc_fit_times, _ = \\\r\n learning_curve(rhc_nn, X_train, y_train, cv=cv, n_jobs=n_jobs,\r\n train_sizes=train_sizes,\r\n return_times=True)\r\n rhc_train_scores_mean = np.mean(rhc_train_scores, axis=1)\r\n rhc_train_scores_std = np.std(rhc_train_scores, axis=1)\r\n rhc_test_scores_mean = np.mean(rhc_test_scores, axis=1)\r\n rhc_test_scores_std = np.std(rhc_test_scores, axis=1)\r\n rhc_fit_times_mean = np.mean(rhc_fit_times, axis=1)\r\n rhc_fit_times_std = np.std(rhc_fit_times, axis=1)\r\n\r\n print('Simulated Annealing...')\r\n # Simluated Annealing\r\n sa_nn = neural.NeuralNetwork( hidden_nodes=[30, 30],\r\n activation='relu',\r\n algorithm='simulated_annealing',\r\n max_iters=15000,\r\n bias=True,\r\n is_classifier=True,\r\n learning_rate=0.5,\r\n early_stopping=True,\r\n clip_max=1e+10,\r\n restarts=0,\r\n schedule=mlrose.GeomDecay(2),\r\n 
pop_size=200,\r\n mutation_prob=0.1,\r\n max_attempts=200,\r\n random_state=27,\r\n curve=True)\r\n sa_fit_t = time()\r\n sa_nn.fit(X_train, y_train)\r\n sa_fit_time = time() - sa_fit_t\r\n print('Simulated Annealing fit time: ' + str(sa_fit_time))\r\n sa_pred_t = time()\r\n sa_nn_pred = sa_nn.predict(X_test)\r\n sa_pred_time = time() - sa_pred_t\r\n print('Simulated Annealing predict time: ' + str(sa_pred_time))\r\n sa_nn_loss = sa_nn.loss\r\n print('Simulated Annealing Loss: ' + str(sa_nn_loss))\r\n sa_loss_curve = sa_nn.fitness_curve\r\n\r\n print('Generating learning curve...')\r\n sa_train_sizes, sa_train_scores, sa_test_scores, sa_fit_times, _ = \\\r\n learning_curve(sa_nn, X_train, y_train, cv=cv, n_jobs=n_jobs,\r\n train_sizes=train_sizes,\r\n return_times=True)\r\n sa_train_scores_mean = np.mean(sa_train_scores, axis=1)\r\n sa_train_scores_std = np.std(sa_train_scores, axis=1)\r\n sa_test_scores_mean = np.mean(sa_test_scores, axis=1)\r\n sa_test_scores_std = np.std(sa_test_scores, axis=1)\r\n sa_fit_times_mean = np.mean(sa_fit_times, axis=1)\r\n sa_fit_times_std = np.std(sa_fit_times, axis=1)\r\n\r\n # Genetic Algorithm\r\n print('Genetic Algorithm')\r\n ga_nn = neural.NeuralNetwork( hidden_nodes=[30, 30],\r\n activation='relu',\r\n algorithm='genetic_alg',\r\n max_iters=10000,\r\n bias=True,\r\n is_classifier=True,\r\n learning_rate=0.05,\r\n early_stopping=True,\r\n clip_max=1e+10,\r\n restarts=0,\r\n schedule=mlrose.GeomDecay(1),\r\n pop_size=100,\r\n mutation_prob=0.1,\r\n max_attempts=200,\r\n random_state=27,\r\n curve=True)\r\n ga_fit_t = time()\r\n ga_nn.fit(X_train, y_train)\r\n ga_fit_time = time() - ga_fit_t\r\n print('Genetic Algorithm fit time: ' + str(ga_fit_time))\r\n ga_pred_t = time()\r\n ga_nn_pred = ga_nn.predict(X_test)\r\n ga_pred_time = time() - ga_pred_t\r\n print('Genetic Algorithm predict time: ' + str(ga_pred_time))\r\n ga_nn_loss = ga_nn.loss\r\n print('Genetic Algorithm Loss: ' + str(ga_nn_loss))\r\n ga_loss_curve = ga_nn.fitness_curve\r\n\r\n print('Generating learning curve...')\r\n ga_train_sizes, ga_train_scores, ga_test_scores, ga_fit_times, _ = \\\r\n learning_curve(ga_nn, X_train, y_train, cv=cv, n_jobs=n_jobs,\r\n train_sizes=train_sizes,\r\n return_times=True)\r\n ga_train_scores_mean = np.mean(ga_train_scores, axis=1)\r\n ga_train_scores_std = np.std(ga_train_scores, axis=1)\r\n ga_test_scores_mean = np.mean(ga_test_scores, axis=1)\r\n ga_test_scores_std = np.std(ga_test_scores, axis=1)\r\n ga_fit_times_mean = np.mean(ga_fit_times, axis=1)\r\n ga_fit_times_std = np.std(ga_fit_times, axis=1)\r\n\r\n\r\n # Plot learning curve\r\n plt.rc(\"font\", size=8)\r\n plt.rc(\"axes\", titlesize=10)\r\n plt.rc(\"axes\", labelsize=10)\r\n plt.rc(\"xtick\", labelsize=8)\r\n plt.rc(\"ytick\", labelsize=8)\r\n plt.rc(\"legend\", fontsize=8)\r\n plt.rc(\"figure\", titlesize=11)\r\n\r\n fig, axes = plt.subplots(1, 4, figsize=(12, 3.5))\r\n fig.suptitle('Neural Network Learning Curves', fontsize=14)\r\n\r\n axes[0].set(xlabel='Training Examples', ylabel = 'Accuracy', title='Back Propagation')\r\n axes[0].grid()\r\n axes[0].fill_between(bp_train_sizes, bp_train_scores_mean - bp_train_scores_std,\r\n bp_train_scores_mean + bp_train_scores_std, alpha=0.1,\r\n color=\"r\")\r\n axes[0].fill_between(bp_train_sizes, bp_test_scores_mean - bp_test_scores_std,\r\n bp_test_scores_mean + bp_test_scores_std, alpha=0.1,\r\n color=\"g\")\r\n axes[0].plot(bp_train_sizes, bp_train_scores_mean, 'o-', color=\"r\",\r\n label=\"Training\")\r\n axes[0].plot(bp_train_sizes, 
bp_test_scores_mean, 'o-', color=\"g\",\r\n label=\"Cross-validation\")\r\n axes[0].legend(loc=\"best\")\r\n\r\n axes[1].set(xlabel='Training Examples', title='Random Hill Climb')\r\n axes[1].grid()\r\n axes[1].fill_between(rhc_train_sizes, rhc_train_scores_mean - rhc_train_scores_std,\r\n rhc_train_scores_mean + rhc_train_scores_std, alpha=0.1,\r\n color=\"r\")\r\n axes[1].fill_between(rhc_train_sizes, rhc_test_scores_mean - rhc_test_scores_std,\r\n rhc_test_scores_mean + rhc_test_scores_std, alpha=0.1,\r\n color=\"g\")\r\n axes[1].plot(rhc_train_sizes, rhc_train_scores_mean, 'o-', color=\"r\",\r\n label=\"Training\")\r\n axes[1].plot(rhc_train_sizes, rhc_test_scores_mean, 'o-', color=\"g\",\r\n label=\"Cross-validation\")\r\n axes[1].legend(loc=\"best\")\r\n\r\n axes[2].set(xlabel='Training Examples', title='Simulated Annealing')\r\n axes[2].grid()\r\n axes[2].fill_between(sa_train_sizes, sa_train_scores_mean - sa_train_scores_std,\r\n sa_train_scores_mean + sa_train_scores_std, alpha=0.1,\r\n color=\"r\")\r\n axes[2].fill_between(sa_train_sizes, sa_test_scores_mean - sa_test_scores_std,\r\n sa_test_scores_mean + sa_test_scores_std, alpha=0.1,\r\n color=\"g\")\r\n axes[2].plot(sa_train_sizes, sa_train_scores_mean, 'o-', color=\"r\",\r\n label=\"Training\")\r\n axes[2].plot(sa_train_sizes, sa_test_scores_mean, 'o-', color=\"g\",\r\n label=\"Cross-validation\")\r\n axes[2].legend(loc=\"best\")\r\n\r\n axes[3].set(xlabel='Training Examples', title='Genetic Algorithm')\r\n axes[3].grid()\r\n axes[3].fill_between(ga_train_sizes, ga_train_scores_mean - ga_train_scores_std,\r\n ga_train_scores_mean + ga_train_scores_std, alpha=0.1,\r\n color=\"r\")\r\n axes[3].fill_between(ga_train_sizes, ga_test_scores_mean - ga_test_scores_std,\r\n ga_test_scores_mean + ga_test_scores_std, alpha=0.1,\r\n color=\"g\")\r\n axes[3].plot(ga_train_sizes, ga_train_scores_mean, 'o-', color=\"r\",\r\n label=\"Training\")\r\n axes[3].plot(ga_train_sizes, ga_test_scores_mean, 'o-', color=\"g\",\r\n label=\"Cross-validation\")\r\n axes[3].legend(loc=\"best\")\r\n\r\n\r\n plt.show()\r\n plt.savefig('nn_performance_graph', format='png')\r\n\r\n fig, axes = plt.subplots(1, 4, figsize=(12, 3.5))\r\n fig.suptitle('Neural Network Loss Curves', fontsize=14)\r\n\r\n axes[0].plot(bp_loss_curve)\r\n axes[0].set(xlabel='Iteration', ylabel = 'Loss', title='Back Propagation')\r\n\r\n axes[1].plot(rhc_loss_curve)\r\n axes[1].set(xlabel='Iteration', title='Random Hill Climb')\r\n\r\n axes[2].plot(sa_loss_curve)\r\n axes[2].set(xlabel='Iteration', title='Simulated Annealing')\r\n\r\n axes[3].plot(ga_loss_curve)\r\n axes[3].set(xlabel='Iteration', title='Genetic Algorithm')\r\n\r\n plt.show()\r\n # save under a distinct name so the performance graph is not overwritten\r\n plt.savefig('nn_loss_graph', format='png')\r\n return\r\n\r\n","sub_path":"assignment_2/optimization/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":19343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"389731166","text":"n=int(input())\nline=[[0]*n]*n\nfor i in range(n):\n line[i]=input().split(' ')\n\nresult=1\n# iterate over row/column indices; range(line) on a list raises TypeError\nfor i in range(n):\n for j in range(n):\n if i==j or i+j==n-1:\n if line[i][j]!=line[0][0]:\n result=0\n break\n else:\n if line[i][j]!=line[0][1]:\n result=0\n break\nif result==0:\n print(\"NO\")\nelse:\n print(\"YES\")\n 
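\n# (added descriptive comment) The checks above accept the grid only when both diagonals\n# hold one shared symbol and every remaining cell holds a different shared symbol.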
\n","sub_path":"Code/CodeRecords/2859/60618/236933.py","file_name":"236933.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"606303359","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport argparse\nimport pickle\n\nfrom fuzzywuzzy import fuzz\nfrom util import www2fb, clean_uri\n\ndef get_index(index_path):\n print(\"loading index from: {}\".format(index_path))\n with open(index_path, 'rb') as f:\n index = pickle.load(f)\n return index\n\ndef pick_best_name(question, names_list):\n best_score = None\n best_name = None\n for name in names_list:\n score = fuzz.ratio(name, question)\n if best_score == None or score > best_score:\n best_score = score\n best_name = name\n\n return best_name\n\ndef augment_dataset(datadir, index_namespath, outdir):\n names_map = get_index(index_namespath)\n skipped = 0\n allpath = os.path.join(outdir, \"all.txt\")\n outallfile = open(allpath, 'w')\n print(\"creating new datasets...\")\n files = [(\"annotated_fb_data_train\", \"train\"), (\"annotated_fb_data_valid\", \"valid\"), (\"annotated_fb_data_test\", \"test\")]\n for f_tuple in files:\n f = f_tuple[0]\n fname = f_tuple[1]\n fpath = os.path.join(datadir, f + \".txt\")\n fpath_numbered = os.path.join(outdir, fname + \".txt\")\n\n outfile = open(fpath_numbered, 'w')\n print(\"reading from {}\".format(fpath))\n with open(fpath, 'r') as f:\n for i, line in enumerate(f):\n if i % 1000000 == 0:\n print(\"line: {}\".format(i))\n\n items = line.strip().split(\"\\t\")\n if len(items) != 4:\n print(\"ERROR: line - {}\".format(line))\n sys.exit(0)\n\n lineid = \"{}-{}\".format(fname, (i + 1))\n subject = www2fb(items[0])\n predicate = www2fb(items[1])\n object = www2fb(items[2])\n question = items[3]\n\n if subject not in names_map.keys():\n skipped += 1\n print(\"lineid {} - name not found. 
skipping question.\".format(lineid))\n continue\n\n cand_entity_names = names_map[subject]\n entity_name = pick_best_name(question, cand_entity_names)\n\n line_to_print = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(lineid, subject, entity_name, predicate, object, question)\n # print(line_to_print)\n outfile.write(line_to_print + \"\\n\")\n outallfile.write(line_to_print + \"\\n\")\n\n print(\"wrote to {}\".format(fpath_numbered))\n outfile.close()\n\n print(\"wrote to {}\".format(allpath))\n print(\"skipped # questions: {}\".format(skipped))\n outallfile.close()\n print(\"DONE!\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Augment dataset with line ids, shorted names, entity names')\n parser.add_argument('-d', '--dataset', dest='dataset', action='store', required = True,\n help='path to the dataset directory - contains train, valid, test files')\n parser.add_argument('-i', '--index_names', dest='index_names', action='store', required=True,\n help='path to the pickle for the names index')\n parser.add_argument('-o', '--output', dest='output', action='store', required=True,\n help='output directory for new dataset')\n\n args = parser.parse_args()\n print(\"Dataset: {}\".format(args.dataset))\n print(\"Index - Names: {}\".format(args.index_names))\n print(\"Output: {}\".format(args.output))\n\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n\n augment_dataset(args.dataset, args.index_names, args.output)\n print(\"Augmented dataset with line ids, shorted names, entity names.\")\n","sub_path":"ferhan_simple_qa_rnn/scripts/augment_dataset.py","file_name":"augment_dataset.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"394106554","text":"import sys, string, os \r\n\r\n\r\n\"\"\"Crawl_space = r\"E:\\2016_ClarkCounty_imageryclassification\\classifiedImagery\\corrected_input\\Tiff_Output\"\r\nImagery_List = []\r\n\r\nfor root, dirs, files in os.walk(Crawl_space):\r\n for filename in files:\r\n #print filename[:-12]\r\n Imagery_List.append(filename[:-12])\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ninputPath =r\"E:\\LiDAR_factor\\Classified_output\"\r\n\r\n\r\noutFile = open(inputPath + '\\\\' + 'Image_convert.bls', \"w\")\r\noutFile.write(\"PortInput1\\tPortInput2\\n\")\r\n\r\nOutputImages = r'E:/LiDAR_factor/Classified_output'\r\nresrepo_county_images = r'R:/Image_ClarkCounty/2016/ClarkCounty_Collection'\r\n\r\n\r\nOutputImages_List = []\r\nfor root, dirs, files in os.walk(OutputImages):\r\n for filename in files:\r\n if filename[-4:] == '.tif':\r\n \r\n real_root = OutputImages\r\n \r\n line = '\"{wsPace}/{Imagefiles}\"'.format(wsPace = real_root, Imagefiles = filename)\r\n lineComplete = line\r\n \r\n OutputImages_List.append(lineComplete)\r\n \r\n #outFile.write(lineComplete)\r\n#outFile.close()\r\nResrepo_list = []\r\nfor root, dirs, files in os.walk(resrepo_county_images):\r\n for filename in files:\r\n if filename[-4:] == \".tif\" and 'jpg' not in root:\r\n Last_root = root[-3:]\r\n First_root = root[:-4]\r\n realRoot = First_root + '/' + Last_root\r\n resroot_line = '\"{root}/{file}\"'.format(root = realRoot, file = filename)\r\n Resrepo_list.append(resroot_line)\r\n\r\n\r\nFinalList = []\r\n\r\nfor i in range(0, len(Resrepo_list)):\r\n for x in range(0, len(OutputImages_List)):\r\n if Resrepo_list[i].split('/')[-1][:6] == OutputImages_List[x].split('/')[-1][:6]:\r\n FinalList.append(Resrepo_list[i])\r\nFinalList.sort()\r\nOutputImages_List.sort()\r\nfor i 
in range(0, len(FinalList)):\r\n    line = FinalList[i] + '\t' + OutputImages_List[i] +'\n'\r\n    outFile.write(line)\r\noutFile.close()\r\n","sub_path":"LiDAR_Factor_Full_valley/LiDAR_factor/Scripts/ScriptThatGeneratesBLSFile.py","file_name":"ScriptThatGeneratesBLSFile.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"339496306","text":"import math\n# The set [1,2,3,...,n] contains a total of n! unique permutations.\n#\n# By listing and labeling all of the permutations in order,\n# We get the following sequence (ie, for n = 3):\n#\n# \"123\"\n# \"132\"\n# \"213\"\n# \"231\"\n# \"312\"\n# \"321\"\n#\n# Given n and k, return the kth permutation sequence.\n#\n# Note: Given n will be between 1 and 9 inclusive.\n\n\nclass Solution:\n    # @param {integer} n\n    # @param {integer} k\n    # @return {string}\n    def getPermutation(self, n, k):\n        array = range(1, n + 1)\n        k = (k % math.factorial(n)) - 1\n        permutation = []\n        for i in xrange(n - 1, -1, -1):\n            idx, k = divmod(k, math.factorial(i))\n            permutation.append(array.pop(idx))\n        return \"\".join(map(str, permutation))\n","sub_path":"060_Permutation_Sequence.py","file_name":"060_Permutation_Sequence.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142011429","text":"from typing import List\nclass Solution:\n    def rotate(self, matrix: List[List[int]]) -> None:\n        length = len(matrix)\n        if length <= 1:\n            return\n        for i in range(length//2):\n            if i < length-i-1:\n                for j in range(length-2*i-1):\n                    start = i\n                    end = length-i-1\n                    tmp = matrix[start][start+j]\n                    matrix[start][start+j] = matrix[end-j][start]\n                    matrix[end-j][start] = matrix[end][end-j]\n                    matrix[end][end-j] = matrix[start+j][end]\n                    matrix[start+j][end] = tmp\n        return \n\nif __name__ == "__main__":\n    s = Solution()\n    matrix = [[15,13, 2, 5],[14, 3, 4, 1],[12, 6, 8, 9],[16, 7,10,11]]\n\n    s.rotate(matrix)\n    ","sub_path":"normal/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"292198382","text":"#stock.py\nfrom urllib.request import urlopen as req\nfrom bs4 import BeautifulSoup as soup\nfrom songline import Sendline\n\ntoken = '492THKHmNUvuBEcbSjfmdsgUm6I3NGk4atQRr8HIVuH'\n\nmessenger = Sendline(token) #messenger is the sender object\n\ndef Checkprice(CODE,check=100):\n    url = 'https://www.settrade.com/C04_02_stock_historical_p1.jsp?txtSymbol={}&ssoPageId=10&selectPage=2'.format(CODE)\n\n    webopen = req(url) #req opens the page without launching a browser\n    page_html = webopen.read() #raw page data\n    webopen.close() #close the request\n\n    data = soup(page_html,'html.parser')\n    result = data.find_all('div','col-xs-6')\n\n    #print([result[3].text])\n    price = float(result[2].text)\n    change = result[3].text\n    change = change.replace('\\n','') #replace substitutes x with y\n    change = change.replace('\\t','')\n    change = change.replace('\\r','')\n    change = change.replace(' ','')\n    change = change.replace('เปลี่ยนแปลง','')\n\n\n    pchange = result[4].text\n    pchange = pchange.replace('\\n','')\n    pchange = pchange.replace('\\r','')\n    pchange = pchange.replace(' ','')\n    pchange = pchange.replace('%เปลี่ยนแปลง','')\n\n    update = data.find_all('span','stt-remark')\n    update = update[0].text.replace('ข้อมูลล่าสุด','')[1:]\n\n    print(CODE)\n    print(price)\n    print(change)\n    print(pchange)\n    print(update)\n    if float(price) < check:\n        messenger.sticker(5,1)\n        messenger.sendimage('https://i.imgflip.com/2o9lpv.jpg')\n    \n    textline = 'CODE: {}\\nPrice: {}\\nChange: {}\\n'.format(CODE,price,change)\n    textline2 = '%Change: {}\\nUpdate: {}'.format(pchange,update)\n    messenger.sendtext(textline + textline2)\n    print('------')\n\nCheckprice('PTT',40)\nCheckprice('SCB',1)\n\n#print(len(update)) #counts how many items are in the list\n#float() converts text to a decimal number\n#int() converts text to an integer\n\n#print(type(price)) #command to check a value's type\n#print(price * 10)\n\n\n\n","sub_path":"18-19/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145200594","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom .services import get_coloring_book\nfrom .services import get_books\nfrom .services import like_coloring_book\nfrom .services import insert_coloring_book_comment\nfrom .services import get_coloring_book_comments\nfrom .services import change_coloring_book_price\n\ndef coloring_books(request, book_id):\n    coloring_book = get_coloring_book(book_id)\n    comments = get_coloring_book_comments(book_id)\n    context = {\n        'book': coloring_book,\n        'comments': comments\n    }\n    return render(request, 'coloring_book_view.html', context)\n\n\ndef home(request):\n    book = get_coloring_book(3)\n    context = {\n        'book': book\n    }\n\n    return render(request, 'home.html', context)\n\ndef purchase(request, book_id):\n    book = get_coloring_book(book_id)\n    context = {\n        'book': book\n    }\n    return render(request, 'purchase.html', context)\n\ndef browse(request):\n    books = get_books()\n    context = {\n        'books': books\n    }\n    return render(request, 'browse.html', context)\n\ndef likeBook(request):\n    print(request)\n    bookid = request.POST.get('bookId')\n    likes = like_coloring_book(request.POST.get('bookId'))\n    '''change_coloring_book_price(request.POST.get('bookId'), 0.25)'''\n    print(\"hellohellohleooasfsfdasf\")\n    return JsonResponse({\n        'likes': likes\n    })\n\ndef commentBook(request):\n    comment = insert_coloring_book_comment(request.POST.get('name'), request.POST.get('comment'), request.POST.get('bookId'))\n    '''change_coloring_book_price(request.POST.get('bookId'), 0.5)'''\n    return JsonResponse({\n        'comment': comment\n    })\n","sub_path":"my_lil_jstor/my_lil_jstor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"571685970","text":"import os\nimport unittest\n\nfrom numpy import array_equal, isclose, matrix, random\nfrom numpy.linalg import norm\n\nfrom curves import (bezier, bezier_from_hermite, bezier_from_polynomial, cubic_hermite_spline, curve_constraints,\n                    exact_cubic, hermite_from_bezier, hermite_from_polynomial, piecewise_bezier_curve,\n                    piecewise_cubic_hermite_curve, piecewise_polynomial_curve, polynomial, polynomial_from_bezier,\n                    polynomial_from_hermite)\n\n\nclass TestCurves(unittest.TestCase):\n    # def print_str(self, inStr):\n    #     print inStr\n    #     return\n\n    def test_bezier(self):\n        print(\"test_bezier\")\n        # To test :\n        # - Functions : constructor, min, max, derivate,compute_derivate, compute_primitive\n        # - Variables : degree, nbWayPoints\n        __EPS = 1e-6\n        waypoints = matrix([[1., 2., 3.]]).T\n        a = bezier(waypoints, 0., 2.)\n        t = 0.\n        while t < 2.:\n            self.assertTrue(norm(a(t) - matrix([1., 2., 3.]).T) < __EPS)\n            t += 0.1\n        waypoints = matrix([[1., 2., 3.], [4., 5., 
6.]]).transpose()\n # time_waypoints = matrix([0., 1.]).transpose()\n # Create bezier6 and bezier\n a = bezier(waypoints, 0., 3.)\n # Test waypoints\n self.assertTrue(a.nbWaypoints == 2)\n for i in range(0, a.nbWaypoints):\n if i == 0:\n self.assertTrue((a.waypointAtIndex(0) == matrix([1., 2., 3.]).transpose()).all())\n elif i == 1:\n self.assertTrue((a.waypointAtIndex(1) == matrix([4., 5., 6.]).transpose()).all())\n # self.assertTrue((a.waypoints == waypoints).all())\n # Test : Degree, min, max, derivate\n # self.print_str((\"test 1\")\n self.assertEqual(a.degree, a.nbWaypoints - 1)\n a.min()\n a.max()\n a(0.4)\n self.assertTrue((a(a.min()) == matrix([1., 2., 3.]).transpose()).all())\n self.assertTrue((a.derivate(0.4, 0) == a(0.4)).all())\n a.derivate(0.4, 2)\n a = a.compute_derivate(100)\n prim = a.compute_primitive(1)\n # Check primitive and derivate - order 1\n for i in range(10):\n t = float(i) / 10.\n self.assertTrue((a(t) == prim.derivate(t, 1)).all())\n self.assertTrue((prim(0) == matrix([0., 0., 0.])).all())\n # Check primitive and derivate - order 2\n prim = a.compute_primitive(2)\n for i in range(10):\n t = float(i) / 10.\n self.assertTrue((a(t) == prim.derivate(t, 2)).all())\n self.assertTrue((prim(0) == matrix([0., 0., 0.])).all())\n # Create new bezier curve\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.], [4., 5., 6.], [4., 5., 6.], [4., 5., 6.]]).transpose()\n a0 = bezier(waypoints)\n a1 = bezier(waypoints, 0., 3.)\n prim0 = a0.compute_primitive(1)\n prim1 = a1.compute_primitive(1)\n # Check change in argument time_t of bezier\n for i in range(10):\n t = float(i) / 10.\n self.assertTrue(norm(a0(t) - a1(3 * t)) < __EPS)\n self.assertTrue(norm(a0.derivate(t, 1) - a1.derivate(3 * t, 1) * 3.) < __EPS)\n self.assertTrue(norm(a0.derivate(t, 2) - a1.derivate(3 * t, 2) * 9.) < __EPS)\n self.assertTrue(norm(prim0(t) - prim1(t * 3) / 3.) < __EPS)\n self.assertTrue((prim(0) == matrix([0., 0., 0.])).all())\n # testing bezier with constraints\n c = curve_constraints()\n c.init_vel = matrix([0., 1., 1.]).transpose()\n c.end_vel = matrix([0., 1., 1.]).transpose()\n c.init_acc = matrix([0., 1., -1.]).transpose()\n c.end_acc = matrix([0., 100., 1.]).transpose()\n # Check derivate with constraints\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n a = bezier(waypoints, c)\n self.assertTrue(norm(a.derivate(0, 1) - c.init_vel) < 1e-10)\n self.assertTrue(norm(a.derivate(1, 2) - c.end_acc) < 1e-10)\n\n # Test serialization : bezier 3\n a.saveAsText(\"serialization_curve.test\")\n # waypoints = matrix([[0,0,0,], [0,0,0,]]).transpose()\n b = bezier()\n b.loadFromText(\"serialization_curve.test\")\n self.assertTrue((a(0.4) == b(0.4)).all())\n os.remove(\"serialization_curve.test\")\n\n # Bezier dim 4\n waypoints = matrix([[1., 2., 3., 4.]]).T\n a = bezier(waypoints, 0., 2.)\n # Test serialization : bezier of dim 4\n a.saveAsText(\"serialization_curve.test\")\n # waypoints = matrix([[0,0,0,], [0,0,0,]]).transpose()\n b = bezier()\n b.loadFromText(\"serialization_curve.test\")\n self.assertTrue((a(0.4) == b(0.4)).all())\n os.remove(\"serialization_curve.test\")\n return\n\n def test_polynomial(self):\n print(\"test_polynomial\")\n # To test :\n # - Functions : constructor, min, max, derivate, serialize, deserialize\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n a = polynomial(waypoints) # Defined on [0.,1.]\n a = polynomial(waypoints, -1., 3.) 
# Defined on [-1.,3.]\n a.min()\n a.max()\n a(0.4)\n # Test get coefficient at degree\n self.assertTrue((a.coeff() == waypoints).all())\n self.assertTrue((a.coeffAtDegree(0) == matrix([1., 2., 3.]).transpose()).all())\n self.assertTrue((a.coeffAtDegree(1) == matrix([4., 5., 6.]).transpose()).all())\n # Other tests\n self.assertTrue((a(a.min()) == matrix([1., 2., 3.]).transpose()).all())\n self.assertTrue((a.derivate(0.4, 0) == a(0.4)).all())\n a.derivate(0.4, 2)\n a_derivated = a.compute_derivate(1)\n self.assertTrue((a.derivate(0.4, 1) == a_derivated(0.4)).all())\n # Test serialization\n a.saveAsText(\"serialization_curve.test\")\n b = polynomial()\n b.loadFromText(\"serialization_curve.test\")\n self.assertTrue((a(0.4) == b(0.4)).all())\n os.remove(\"serialization_curve.test\")\n return\n\n def test_polynomial_from_boundary_condition(self):\n p0 = matrix([1., 3., -2.]).T\n p1 = matrix([0.6, 2., 2.5]).T\n dp0 = matrix([-6., 2., -1.]).T\n dp1 = matrix([10., 10., 10.]).T\n ddp0 = matrix([1., -7., 4.5]).T\n ddp1 = matrix([6., -1., -4]).T\n min = 1.\n max = 2.5\n polC0 = polynomial(p0, p1, min, max)\n self.assertEqual(polC0.min(), min)\n self.assertEqual(polC0.max(), max)\n self.assertTrue(array_equal(polC0(min), p0))\n self.assertTrue(array_equal(polC0(max), p1))\n self.assertTrue(array_equal(polC0((min + max) / 2.), 0.5 * p0 + 0.5 * p1))\n polC1 = polynomial(p0, dp0, p1, dp1, min, max)\n self.assertEqual(polC1.min(), min)\n self.assertEqual(polC1.max(), max)\n self.assertTrue(isclose(polC1(min), p0).all())\n self.assertTrue(isclose(polC1(max), p1).all())\n self.assertTrue(isclose(polC1.derivate(min, 1), dp0).all())\n self.assertTrue(isclose(polC1.derivate(max, 1), dp1).all())\n polC2 = polynomial(p0, dp0, ddp0, p1, dp1, ddp1, min, max)\n self.assertEqual(polC2.min(), min)\n self.assertEqual(polC2.max(), max)\n self.assertTrue(isclose(polC2(min), p0).all())\n self.assertTrue(isclose(polC2(max), p1).all())\n self.assertTrue(isclose(polC2.derivate(min, 1), dp0).all())\n self.assertTrue(isclose(polC2.derivate(max, 1), dp1).all())\n self.assertTrue(isclose(polC2.derivate(min, 2), ddp0).all())\n self.assertTrue(isclose(polC2.derivate(max, 2), ddp1).all())\n # check that the exception are correctly raised :\n try:\n polC0 = polynomial(p0, p1, max, min)\n self.assertTrue(False) # should never get there\n except ValueError:\n pass\n\n try:\n polC1 = polynomial(p0, dp0, p1, dp1, max, min)\n self.assertTrue(False) # should never get there\n except ValueError:\n pass\n\n try:\n polC2 = polynomial(p0, dp0, ddp0, p1, dp1, ddp1, max, min)\n self.assertTrue(False) # should never get there\n except ValueError:\n pass\n\n return\n\n def test_cubic_hermite_spline(self):\n print(\"test_cubic_hermite_spline\")\n points = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n tangents = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n time_points = matrix([0., 1.]).transpose()\n a = cubic_hermite_spline(points, tangents, time_points)\n a.min()\n a.max()\n a(0.4)\n self.assertTrue((a.derivate(0.4, 0) == a(0.4)).all())\n a.derivate(0.4, 2)\n # Test serialization\n a.saveAsText(\"serialization_curve.test\")\n b = cubic_hermite_spline()\n b.loadFromText(\"serialization_curve.test\")\n self.assertTrue((a(0.4) == b(0.4)).all())\n os.remove(\"serialization_curve.test\")\n # test dim 4\n points = matrix([[1., 2., 3., 4.], [4., 5., 6., 7.]]).transpose()\n tangents = matrix([[1., 2., 3., 4.], [4., 5., 6., 7.]]).transpose()\n time_points = matrix([0., 1.]).transpose()\n a = cubic_hermite_spline(points, tangents, 
time_points)\n a.min()\n a.max()\n a(0.4)\n self.assertTrue((a.derivate(0.4, 0) == a(0.4)).all())\n a.derivate(0.4, 2)\n # Test serialization\n a.saveAsText(\"serialization_curve.test\")\n b = cubic_hermite_spline()\n b.loadFromText(\"serialization_curve.test\")\n self.assertTrue((a(0.4) == b(0.4)).all())\n os.remove(\"serialization_curve.test\")\n return\n\n def test_piecewise_polynomial_curve(self):\n print(\"test_piecewise_polynomial_curve\")\n # To test :\n # - Functions : constructor, min, max, derivate, add_curve, is_continuous, serialize, deserialize\n waypoints0 = matrix([[0., 0., 0.]]).transpose()\n waypoints1 = matrix([[1., 1., 1.]]).transpose()\n waypoints2 = matrix([[1., 1., 1.], [1., 1., 1.]]).transpose()\n polynomial(waypoints0, 0., 0.1)\n a = polynomial(waypoints1, 0., 1.)\n b = polynomial(waypoints2, 1., 3.)\n pc = piecewise_polynomial_curve(a)\n pc.append(b)\n pc.min()\n pc.max()\n pc(0.4)\n self.assertTrue((pc(pc.min()) == matrix([1., 1., 1.]).transpose()).all())\n self.assertTrue((pc.derivate(0.4, 0) == pc(0.4)).all())\n pc.derivate(0.4, 2)\n pc.is_continuous(0)\n pc.is_continuous(1)\n # Test serialization\n pc.saveAsText(\"serialization_pc.test\")\n pc_test = piecewise_polynomial_curve()\n pc_test.loadFromText(\"serialization_pc.test\")\n self.assertTrue((pc(0.4) == pc_test(0.4)).all())\n os.remove(\"serialization_pc.test\")\n return\n\n def test_piecewise_from_points_list(self):\n N = 7\n points = matrix(random.rand(3, N))\n points_derivative = matrix(random.rand(3, N))\n points_second_derivative = matrix(random.rand(3, N))\n time_points = matrix(random.rand(N)).T\n time_points.sort(0)\n polC0 = piecewise_polynomial_curve.FromPointsList(points, time_points)\n self.assertEqual(polC0.min(), time_points[0, 0])\n self.assertEqual(polC0.max(), time_points[-1, 0])\n self.assertTrue(polC0.is_continuous(0))\n self.assertTrue(not polC0.is_continuous(1))\n for i in range(N):\n self.assertTrue(isclose(polC0(time_points[i, 0]), points[:, i]).all())\n\n polC1 = piecewise_polynomial_curve.FromPointsList(points, points_derivative, time_points)\n self.assertEqual(polC1.min(), time_points[0, 0])\n self.assertEqual(polC1.max(), time_points[-1, 0])\n self.assertTrue(polC1.is_continuous(0))\n self.assertTrue(polC1.is_continuous(1))\n self.assertTrue(not polC1.is_continuous(2))\n for i in range(N):\n self.assertTrue(isclose(polC1(time_points[i, 0]), points[:, i]).all())\n self.assertTrue(isclose(polC1.derivate(time_points[i, 0], 1), points_derivative[:, i]).all())\n\n polC2 = piecewise_polynomial_curve.FromPointsList(points, points_derivative, points_second_derivative,\n time_points)\n self.assertEqual(polC2.min(), time_points[0, 0])\n self.assertEqual(polC2.max(), time_points[-1, 0])\n self.assertTrue(polC2.is_continuous(0))\n self.assertTrue(polC2.is_continuous(1))\n self.assertTrue(polC2.is_continuous(2))\n self.assertTrue(not polC2.is_continuous(3))\n for i in range(N):\n self.assertTrue(isclose(polC2(time_points[i, 0]), points[:, i]).all())\n self.assertTrue(isclose(polC2.derivate(time_points[i, 0], 1), points_derivative[:, i]).all())\n self.assertTrue(isclose(polC2.derivate(time_points[i, 0], 2), points_second_derivative[:, i]).all())\n\n # check if exepetion are corectly raised when time_points are not in ascending values\n time_points[0, 0] = 1\n time_points[1, 0] = 0.5\n try:\n polC0 = piecewise_polynomial_curve.FromPointsList(points, time_points)\n self.assertTrue(False) # should not get here\n except ValueError:\n pass\n try:\n polC1 = 
piecewise_polynomial_curve.FromPointsList(points, points_derivative, time_points)\n self.assertTrue(False) # should not get here\n except ValueError:\n pass\n try:\n polC2 = piecewise_polynomial_curve.FromPointsList(points, points_derivative, points_second_derivative,\n time_points)\n self.assertTrue(False) # should not get here\n except ValueError:\n pass\n return\n\n def test_piecewise_bezier3_curve(self):\n # To test :\n # - Functions : constructor, min, max, derivate, add_curve, is_continuous\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n a = bezier(waypoints, 0., 1.)\n b = bezier(waypoints, 1., 2.)\n pc = piecewise_bezier_curve(a)\n pc.add_curve(b)\n pc.min()\n pc.max()\n pc(0.4)\n self.assertTrue((pc(pc.min()) == matrix([1., 2., 3.]).transpose()).all())\n self.assertTrue((pc.derivate(0.4, 0) == pc(0.4)).all())\n pc.derivate(0.4, 2)\n pc.is_continuous(0)\n pc.is_continuous(1)\n # Test serialization\n pc.saveAsText(\"serialization_pc.test\")\n pc_test = piecewise_bezier_curve()\n pc_test.loadFromText(\"serialization_pc.test\")\n self.assertTrue((pc(0.4) == pc_test(0.4)).all())\n os.remove(\"serialization_pc.test\")\n return\n\n def test_piecewise_cubic_hermite_curve(self):\n print(\"test_piecewise_cubic_hermite_curve\")\n # To test :\n # - Functions : constructor, min, max, derivate, add_curve, is_continuous\n points = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n tangents = matrix([[2., 2., 2.], [4., 4., 4.]]).transpose()\n time_points0 = matrix([0., 1.]).transpose()\n time_points1 = matrix([1., 2.]).transpose()\n a = cubic_hermite_spline(points, tangents, time_points0)\n b = cubic_hermite_spline(points, tangents, time_points1)\n pc = piecewise_cubic_hermite_curve(a)\n pc.add_curve(b)\n pc.min()\n pc.max()\n pc(0.4)\n self.assertTrue((pc(0.) 
== matrix([1., 2., 3.]).transpose()).all())\n self.assertTrue((pc.derivate(0.4, 0) == pc(0.4)).all())\n pc.derivate(0.4, 2)\n pc.is_continuous(0)\n pc.is_continuous(1)\n # Test serialization\n pc.saveAsText(\"serialization_pc.test\")\n pc_test = piecewise_cubic_hermite_curve()\n pc_test.loadFromText(\"serialization_pc.test\")\n self.assertTrue((pc(0.4) == pc_test(0.4)).all())\n os.remove(\"serialization_pc.test\")\n return\n\n def test_exact_cubic(self):\n print(\"test_exact_cubic\")\n # To test :\n # - Functions : constructor, min, max, derivate, getNumberSplines, getSplineAt\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n time_waypoints = matrix([0., 1.]).transpose()\n a = exact_cubic(waypoints, time_waypoints)\n a.min()\n a.max()\n a(0.4)\n self.assertTrue((a.derivate(0.4, 0) == a(0.4)).all())\n a.derivate(0.4, 2)\n a.getNumberSplines()\n a.getSplineAt(0)\n # Test serialization\n a.saveAsText(\"serialization_pc.test\")\n b = exact_cubic()\n b.loadFromText(\"serialization_pc.test\")\n self.assertTrue((a(0.4) == b(0.4)).all())\n os.remove(\"serialization_pc.test\")\n return\n\n def test_exact_cubic_constraint(self):\n print(\"test_exact_cubic_constraint\")\n # To test :\n # - Functions : constructor, min, max, derivate\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n time_waypoints = matrix([0., 1.]).transpose()\n c = curve_constraints()\n c.init_vel = matrix([0., 1., 1.]).transpose()\n c.end_vel = matrix([0., 1., 1.]).transpose()\n c.init_acc = matrix([0., 1., 1.]).transpose()\n c.end_acc = matrix([0., 1., 1.]).transpose()\n c.init_vel\n c.end_vel\n c.init_acc\n c.end_acc\n exact_cubic(waypoints, time_waypoints)\n exact_cubic(waypoints, time_waypoints, c)\n return\n\n def test_cubic_hermite_spline_2(self):\n points = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n tangents = matrix([[2., 2., 2.], [4., 4., 4.]]).transpose()\n time_points = matrix([0., 1.]).transpose()\n a = cubic_hermite_spline(points, tangents, time_points)\n a.min()\n a.max()\n a(0.4)\n self.assertTrue((a(0.) 
== matrix([1., 2., 3.]).transpose()).all())\n self.assertTrue((a.derivate(0., 1) == matrix([2., 2., 2.]).transpose()).all())\n self.assertTrue((a.derivate(0.4, 0) == a(0.4)).all())\n a.derivate(0.4, 2)\n return\n\n def test_conversion_curves(self):\n print(\"test_conversion_curves\")\n __EPS = 1e-6\n waypoints = matrix([[1., 2., 3.], [4., 5., 6.]]).transpose()\n a = bezier(waypoints)\n # converting bezier to polynomial\n a_pol = polynomial_from_bezier(a)\n self.assertTrue(norm(a(0.3) - a_pol(0.3)) < __EPS)\n # converting polynomial to hermite\n a_chs = hermite_from_polynomial(a_pol)\n self.assertTrue(norm(a_chs(0.3) - a_pol(0.3)) < __EPS)\n # converting hermite to bezier\n a_bc = bezier_from_hermite(a_chs)\n self.assertTrue(norm(a_chs(0.3) - a_bc(0.3)) < __EPS)\n self.assertTrue(norm(a(0.3) - a_bc(0.3)) < __EPS)\n # converting bezier to hermite\n a_chs = hermite_from_bezier(a)\n self.assertTrue(norm(a(0.3) - a_chs(0.3)) < __EPS)\n # converting hermite to polynomial\n a_pol = polynomial_from_hermite(a_chs)\n self.assertTrue(norm(a_pol(0.3) - a_chs(0.3)) < __EPS)\n # converting polynomial to bezier\n a_bc = bezier_from_polynomial(a_pol)\n self.assertTrue(norm(a_bc(0.3) - a_pol(0.3)) < __EPS)\n self.assertTrue(norm(a(0.3) - a_bc(0.3)) < __EPS)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":19252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"274085445","text":"class ListNode:\n def __init__(self, val=None, next=None):\n self.val = val\n self.next = next\n\n\nclass MyLinkedList:\n def __init__(self):\n self.head = None\n\n def get(self, index: int) -> int:\n if index < 0:\n return -1\n else:\n if self.head == None:\n return -1\n else:\n cur = self.head\n idx = 0\n while idx < index:\n if cur.next == None:\n return -1\n idx += 1\n cur = cur.next\n return cur.val\n\n def addAtHead(self, val: int) -> None:\n self.head = ListNode(val, self.head)\n\n def addAtTail(self, val: int) -> None:\n new = ListNode(val)\n if self.head == None:\n self.head = new\n else:\n cur = self.head\n while True:\n if cur.next == None:\n break\n cur = cur.next\n cur.next = new\n\n def addAtIndex(self, index: int, val: int) -> None:\n if index < 0:\n return -1\n elif self.head == None and index != 0:\n return\n elif index == 0:\n self.addAtHead(val)\n else:\n pre = self.head\n idx = 1\n while idx < index:\n if pre.next == None:\n return -1\n idx += 1\n pre = pre.next\n pre.next = ListNode(val, pre.next)\n\n def deleteAtIndex(self, index: int) -> None:\n if index < 0:\n return -1\n elif self.head == None:\n return -1\n elif index == 0:\n self.head = self.head.next\n else:\n pre = self.head\n idx = 1\n while idx < index:\n if pre.next == None or pre.next.next == None:\n return -1\n idx += 1\n pre = pre.next\n if pre.next == None:\n return\n pre.next = pre.next.next\n\n def get_list(self):\n l = '['\n if self.head == None:\n return l + ']'\n cur = self.head\n while cur.next != None:\n l += f'{str(cur.val)}, '\n cur = cur.next\n l += str(cur.val)\n return l + ']'\n\n\ndef main_auto():\n cmds = [\"MyLinkedList\",\"addAtHead\",\"deleteAtIndex\",\"addAtHead\",\"addAtHead\",\"addAtHead\",\"addAtHead\",\"addAtHead\",\"addAtTail\",\"get\",\"deleteAtIndex\",\"deleteAtIndex\"]\n para = [[],[2],[1],[2],[7],[3],[2],[5],[5],[5],[6],[4]]\n for i in range(len(cmds)):\n print(f'Step {i}: {cmds[i]}, {para[i]}:')\n\n if cmds[i] == 'MyLinkedList':\n lk_list = MyLinkedList()\n elif 
cmds[i] == 'addAtHead':\n lk_list.addAtHead(para[i][0])\n elif cmds[i] == 'deleteAtIndex':\n lk_list.deleteAtIndex(para[i][0])\n elif cmds[i] == 'addAtTail':\n lk_list.addAtTail(para[i][0])\n elif cmds[i] == 'get':\n print(lk_list.get(para[i][0]))\n\n print(lk_list.get_list())\n\n\ndef main():\n print('Step 0: MyLinkedList, []:')\n lk_list = MyLinkedList()\n print(lk_list.get_list())\n\n print('Step 1: addAtHead, [2]:')\n lk_list.addAtHead(2)\n print(lk_list.get_list())\n\n\nif __name__ == '__main__':\n main_auto()\n","sub_path":"py_ex/leetcode/algorithm/easy/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15324675","text":"import numpy as np\n\nimport re\n\nimport nltk\nnltk.download('wordnet')\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\nclass Preprocessor:\n\n def __init__(self):\n # Determinants\n self.det_regex = re.compile(r\"the|of|and|my|yours|to|your|his|its|\\\n our|their|these|this|those|what|which|whose|her\")\n # Hashtags\n self.hash_regex = re.compile(r\"#(\\w+)\")\n # URLs\n self.url_regex = re.compile(r\"(http|https|ftp)://[a-zA-Z0-9\\./]+|\\\n www.[a-zA-Z0-9\\./]+\")\n # Repeating words like hurrrryyyyyy\n self.rpt_regex = re.compile(r\"(.)\\1{1,}\", re.IGNORECASE);\n # Retweet\n self.rt_regex = re.compile(r\".*\\sRT\\b\")\n # Paranthesis\n self.openning = re.compile(r\"\\[|\\{|\\(\")\n self.closing = re.compile(r\"\\]|\\}|\\)\")\n # Emoticons\n self.emoticons_regex = self.__init_emoticons_regex()\n\n def __init_emoticons_regex(self):\n emoticons = [ \\\n ('__EMOT_SMILEY', [':-)', ':)', '(:', '(-:']), \\\n ('__EMOT_LAUGH', [':-D', ':D', 'X-D', 'XD', 'xD']), \\\n ('__EMOT_LOVE', ['<3', ':\\*']), \\\n ('__EMOT_WINK', [';-)', ';)', ';-D', ';D', '(;', '(-;']), \\\n ('__EMOT_FROWN', [':-(', ':(', '(:', '(-:']), \\\n ('__EMOT_CRY', [':,(', ':\\'(', ':\"(', ':((']), \\\n ]\n n = len(emoticons)\n escaped_emoticons = [None] * n\n for i in range(n):\n escaped_emoticons[i] = (emoticons[i][0],\n [emoji.replace(')', '\\)').replace('(', '\\(') for emoji in emoticons[i][1]])\n\n emoticons_regex = [ \\\n (repl, re.compile(r\"(\" + \"|\".join(regx) + \")\")) \\\n for (repl, regx) in escaped_emoticons \\\n ]\n return emoticons_regex\n\n\n def uniform_parenthesis(self, tweet, openning = \"(\", closing = \")\"):\n tweet = re.sub(self.openning, openning, tweet)\n tweet = re.sub(self.closing, closing, tweet)\n return tweet\n\n def uniform_emoticons(self, tweet):\n for (repl, regx) in self.emoticons_regex:\n tweet = re.sub(regx, ' ' + repl + ' ', tweet)\n return tweet\n\n def replace_hashtags(self, tweet, tag = \"__HASH__\"):\n return re.sub(self.hash_regex, tag, tweet)\n\n def replace_urls(self, tweet, tag = \"__URL__\"):\n return re.sub(self.url_regex, tag, tweet)\n\n def is_retweet(self, tweet):\n if self.rt_regex.match(tweet):\n return True\n return False\n\n def remove_determinants(self, tweet):\n return re.sub(self.det_regex, \"\", tweet)\n\n def remove_repeated_letters(self, tweet):\n def __rptd_replace(match):\n return match.group(1) + match.group(1)\n return re.sub(self.rpt_regex, __rptd_replace, tweet)\n\n def remove_emoticons(self, tweet):\n for (repl, regx) in self.emoticons_regex:\n tweet = re.sub(regx, '', tweet)\n return tweet\n\n def remove_ponctuation(self, tweet):\n ponctuation = \"\\n\\t!?<>:,;.()[]{}\\\\/_&\\-=+*\"\n for p in ponctuation:\n tweet = tweet.replace(p, ' ')\n return tweet\n\n def lemmatize(self, tweet):\n lem = 
WordNetLemmatizer()\n words = tweet.split(\" \")\n words = np.array([lem.lemmatize(word) for word in words])\n tweet = \" \".join(words)\n return tweet\n\n def default_processing(self, tweet):\n tweet = self.remove_repeated_letters(tweet)\n tweet = self.replace_urls(tweet)\n tweet = self.uniform_emoticons(tweet)\n tweet = self.uniform_parenthesis(tweet)\n tweet = self.remove_determinants(tweet)\n tweet = self.lemmatize(tweet)\n tweet = self.remove_ponctuation(tweet)\n return tweet\n","sub_path":"notebooks/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"263785310","text":"\"\"\"Data Weaver Wizard\n\nMain entry for CLI\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom pydataweaver.engines import choose_engine\nfrom pydataweaver.lib.datasets import datasets, dataset_names, license\nfrom pydataweaver.lib.defaults import CITATION, SCRIPT_SEARCH_PATHS\nfrom pydataweaver.lib.engine_tools import name_matches, reset_weaver\nfrom pydataweaver.lib.get_opts import parser\nfrom pydataweaver.lib.repository import check_for_updates\nfrom pydataweaver.lib.scripts import SCRIPT_LIST, reload_scripts, get_script\n\n\ndef main():\n \"\"\"This function launches the pydataweaver.\"\"\"\n if len(sys.argv) == 1:\n # If no command line Args are passed, show the help options\n parser.parse_args([\"-h\"])\n else:\n args = parser.parse_args()\n\n if (args.command not in [\"reset\", \"update\"] and\n not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) and not [\n f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])\n if os.path.exists(SCRIPT_SEARCH_PATHS[-1])\n ]):\n check_for_updates()\n reload_scripts()\n script_list = SCRIPT_LIST()\n\n if args.command == \"join\" and not args.engine:\n parser.parse_args([\"join\", \"-h\"])\n\n if args.quiet:\n sys.stdout = open(os.devnull, \"w\")\n\n if args.command == \"help\":\n parser.parse_args([\"-h\"])\n\n if args.command == \"update\":\n check_for_updates()\n reload_scripts()\n return\n\n if args.command == \"reset\":\n reset_weaver(args.scope)\n return\n if args.command == \"citation\":\n if args.dataset is None:\n # get the citation of pydataweaver\n print(CITATION)\n return\n else:\n scripts = name_matches(script_list, args.dataset)\n for data_set in scripts:\n print(\"\\nDataset: {}\".format(data_set.name))\n print(\"Description: {}\".format(data_set.description))\n print(\"Citations:\")\n for cite in data_set.citation:\n for key, value in cite.items():\n print(\"{k}: {v}\".format(k=key, v=value))\n return\n if args.command == \"license\":\n data_set_license = license(args.dataset)\n if data_set_license:\n print(data_set_license)\n else:\n print(\"There is no license information for {}\".format(args.dataset))\n return\n\n # list the data sets available\n if args.command == \"ls\":\n if not (args.l or args.k or isinstance(args.v, list)):\n all_scripts = dataset_names()\n print(\"Available datasets : {}\\n\".format(len(all_scripts)))\n from pydataweaver import lscolumns\n\n lscolumns.printls(all_scripts)\n\n # If pydataweaver ls -v has a list of scripts, i.e item1, item2,\n # print the items' information, else consider all scripts\"\n elif isinstance(args.v, list):\n if args.v:\n try:\n all_scripts = [get_script(dataset) for dataset in args.v]\n except KeyError:\n all_scripts = []\n print(\"Dataset(s) is not found.\")\n else:\n all_scripts = datasets()\n 
print_info(all_scripts)\n\n else:\n param_licenses = args.l if args.l else None\n keywords = args.k if args.k else None\n\n # search\n searched_scripts = datasets(keywords, param_licenses)\n if not searched_scripts:\n print(\"No available datasets found\")\n else:\n print(\"Available datasets : {}\\n\".format(len(searched_scripts)))\n print_info(searched_scripts, keywords_license=True)\n\n return\n if args.command == \"join\":\n engine = choose_engine(args.__dict__)\n\n if hasattr(args, \"debug\") and args.debug:\n debug = True\n else:\n debug = False\n sys.tracebacklimit = 0\n\n if args.dataset is not None:\n scripts = name_matches(script_list, args.dataset)\n if scripts:\n for data_set in scripts:\n print(\"=> Integrating\", data_set.name)\n try:\n data_set.integrate(engine, debug=debug)\n data_set.engine.final_cleanup()\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(e)\n if debug:\n raise\n\n\ndef print_info(all_scripts, keywords_license=False):\n count = 1\n for script in all_scripts:\n # Include a description if keywords_license are not used\n if not keywords_license:\n out_stm = (\"{count}. {title}\\n{name}\\n{keywords}\\n{description}\\n{licenses}\\n\"\n \"{citation}\\n\".format(\n count=count,\n title=script.title,\n name=script.name,\n keywords=script.keywords,\n description=script.description,\n licenses=str(script.licenses),\n citation=script.citation,\n ))\n else:\n out_stm = \"{count}. {title}\\n{name}\\n{keywords}\\n{licenses}\\n\" \"\".format(\n count=count,\n title=script.title,\n name=script.name,\n keywords=script.keywords,\n licenses=str(script.licenses),\n )\n print(out_stm)\n count += 1\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pydataweaver/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"213652252","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nimport argparse\nfrom skimage.filters import threshold_adaptive\n\nfrom .image import resize\nfrom .utils import four_point_transform\n\n\ndef remove_borders(image):\n image = cv2.imread(image)\n ratio = image.shape[0] / 500.0\n orig = image.copy()\n image = resize(image, height=500)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n edged = cv2.Canny(gray, 75, 200)\n _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)\n screenCnt = None\n for c in cnts:\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n print(len(approx) == 4)\n if len(approx) == 4:\n screenCnt = approx\n break\n cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\n if screenCnt is not None and len(screenCnt) > 0:\n return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\n cv2.imwrite('original.jpg', orig)\n return 'original.jpg'\n","sub_path":"idmatch/idcardocr/core/preprocessing/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"517069379","text":"from django.conf.urls.defaults import *\n\nurlpatterns = patterns('feedback.views',\n \n url(r'^$', 'index', name='index'),\n \n url(r'^add_topic/$', 'add_topic', name='add_topic'),\n url(r'^add_topic_term/(?P<topic_id>\\d+)/$', 'add_topic_term', name='add_topic_term'),\n \n url(r'^remove_topic/(?P<topic_id>\\d+)/$', 
'remove_topic', name='remove_topic'),\n url(r'^remove_topic_term/(?P<topic_term_id>\\d+)/$', 'remove_topic_term', name='remove_topic_term'),\n \n url(r'^entries/$', 'entries', name='entries'),\n url(r'^edit_atom/(?P<atom_id>\\d+)/$', 'edit_atom', name='edit_atom'),\n \n url(r'^submit/(?P<account_id>\\d+)/$', 'add_entry', name='add_entry'),\n)\n","sub_path":"feedback/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"650757547","text":"from flask import jsonify, abort\n\nfrom mainApp.DataBaseConnection import con\n\n\ndef displaySpecificCommonAttribute(id_attribute):\n return jsonify(checkSpecificCommonAttributeId(id_attribute)), 200\n\n\ndef checkSpecificCommonAttributeId(id_attribute):\n with con.cursor() as cur:\n cur.execute('SELECT lca.idListCommonAttribute FROM list_common_attribute AS lca '\n 'ORDER BY lca.idListCommonAttribute ASC')\n res_id_attribute = [i[0] for i in cur.fetchall()]\n if id_attribute in filter(lambda t: t == id_attribute, res_id_attribute):\n return getCommonAttributeInfo(id_attribute)\n else:\n return abort(404, description='Resource not found')\n\n\ndef getCommonAttributeInfo(id_attribute):\n try:\n with con.cursor() as cur:\n cur.execute(\n f'SELECT lca.idListCommonAttribute, lca.RepresentName, lca.Description, lca.idTableAttributes '\n f'FROM list_common_attribute AS lca '\n f'WHERE lca.idListCommonAttribute={id_attribute} ')\n result_1 = [list(i) for i in cur.fetchall()]\n except Exception as ex:\n print(ex)\n return getTableAttributeRepresentName(result_1, id_attribute)\n\n\ndef getTableAttributeRepresentName(result_1, id_attribute):\n try:\n with con.cursor() as cur:\n common_attributes = {'Id': result_1[0][0], 'Name': result_1[0][1], 'Description': result_1[0][2],\n 'AttributesContainer': {}}\n if result_1[0][3] is not None:\n cur.execute(f'SELECT taa.idTableAttributes, taa.TableAttributesRepresentName, taa.idTypeAttributes '\n f'FROM list_common_attribute AS lca '\n f'LEFT JOIN table_attributes AS taa ON lca.idTableAttributes=taa.idTableAttributes '\n f'WHERE lca.idTableAttributes={result_1[0][3]} AND lca.idListCommonAttribute={id_attribute}')\n result_2 = [list(i) for i in cur.fetchall()]\n getTypeAttribute(common_attributes, result_2, id_attribute)\n except Exception as ex:\n print(ex)\n return common_attributes\n\n\ndef getTypeAttribute(common_attributes, result_2, id_attribute):\n try:\n with con.cursor() as cur:\n cur.execute(f'SELECT tya.TypeAttributes FROM list_common_attribute AS lca '\n f'LEFT JOIN table_attributes AS taa ON lca.idTableAttributes=taa.idTableAttributes '\n f'LEFT JOIN type_attributes AS tya ON taa.idTypeAttributes=tya.idTypeAttributes '\n f'WHERE tya.idTypeAttributes={result_2[0][2]} AND lca.idListCommonAttribute={id_attribute}')\n result_3 = [list(i) for i in cur.fetchone()]\n sendTypeAttribute(common_attributes, result_3, result_2)\n except Exception as ex:\n print(ex)\n return common_attributes\n\n\ndef sendTypeAttribute(common_attributes, result_3, result_2):\n try:\n attributesContainerObject = {'Id': result_2[0][0], 'Name': result_2[0][1], 'Type': result_3[0][0]}\n common_attributes['AttributesContainer'].update(attributesContainerObject)\n except Exception as ex:\n print(ex)\n return 
common_attributes\n","sub_path":"mainApp/CommonAttributes/CommonAttributesMethods/GetSpecificCommonAttributes.py","file_name":"GetSpecificCommonAttributes.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"241437398","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='uwseds-group-zero',\n version='1.0',\n description='an object detection project for UW CSE583 17Au',\n long_description=long_description,\n url='https://github.com/UWSEDS-aut17/uwseds-group-zero',\n author='Li Junlin, Li Tianqi, Nan Qiao, Dizhi Ma, Xiaohan Wang',\n author_email='{jlli0410, tqli3, nqiao, dizhim, xhwang}@uw.edu',\n license='MIT',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='object detection',\n packages=find_packages(),\n install_requires=['tensorflow==1.4.0',\n 'Pillow>=1.0',\n 'matplotlib',\n 'opencv-python',\n 'nose',\n 'coverage',\n 'pycodestyle'],\n python_requires='>=3.5, !=3.6.*, <4',\n\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201078385","text":"import os\nimport shutil\nimport glob\nimport zipfile\nimport threading\nimport pickle\nimport time\nimport pandas as pd\nfrom datetime import datetime\nfrom xml.etree.cElementTree import XML\n\npd.options.display.max_columns = 100\n\ndef extract_docx_content():\n \"\"\"\n Extract text content from .docx file\n :return: pandas DataFrame\n \"\"\"\n\n # --- Namespace information needed to extract content\n WORD_NAMESPACE = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'\n PARA = WORD_NAMESPACE + 'p'\n TEXT = WORD_NAMESPACE + 't'\n\n # --- Store fully processed docx files\n processed_docx_files = dict()\n\n pathInput = 'W:\\\\WinRisk\\\\PC_BusinessAnalytics\\\\SA_Claims\\\\SA_2'\n os.chdir(pathInput)\n docx_files = glob.glob('*.docx')\n # --- Extract text from docx extension\n for docx in docx_files:\n try:\n document = zipfile.ZipFile(docx)\n xml_content = document.read('word/document.xml')\n document.close()\n tree = XML(xml_content)\n\n #print(\"docx file {} being processed\".format(docx))\n # --- load data into paragraphs list\n paragraphs = []\n for paragraph in tree.getiterator(PARA):\n texts = [node.text\n for node in paragraph.getiterator(TEXT)\n if node.text]\n # --- texts into a single text string\n if texts:\n paragraphs.append(' '.join(texts).lower())\n processed_docx_files.update({docx: ''.join(paragraphs)})\n except:\n print(\"An error occured trying to parse {}\".format(docx))\n\n # --- Convert python dictionary to a dataframe\n return pd.DataFrame.from_dict(processed_docx_files, orient='index')\n","sub_path":"SparkPipeline/TextProcessing/ExtractDocxText.py","file_name":"ExtractDocxText.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"459076308","text":"\nclass Book():\n def __init__(self,book_id,book_name,book_author,book_url,website_url,website_name,end,sort):\n self.book_id=book_id\n 
self.book_name=book_name\n        self.book_author=book_author\n        self.book_url=book_url\n        self.website_url=website_url\n        self.website_name=website_name\n        self.end=end\n        self.sort=sort\n\n\nclass BookContent():\n    def __init__(self,id,name,content,state,sort,query_url):\n        self.id=id\n        self.name=name\n        self.content=content\n        self.state=state\n        self.sort=sort\n        self.query_url=query_url\n\nclass UserBook():\n    id=0\n    login_name=None\n    book_id=None\n    book_name=None\n    book_author=None\n    book_read=0\n    book_update=None\n    create_time=None\n\n\nclass SysUser():\n    login_user=None\n    pwd=None","sub_path":"Mode.py","file_name":"Mode.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"361518396","text":"class Settings():\r\n    '''A class that stores all of the settings for Alien Invasion'''\r\n    \r\n    def __init__(self):\r\n        '''Initialize the game's settings'''\r\n        #Screen settings\r\n        self.screen_width=1200\r\n        self.screen_height=600\r\n        self.bg_color=(230,230,230)\r\n        #Ship settings\r\n        self.ship_speed_factor=3\r\n        #Ball settings\r\n        self.ball_speed_factor=2\r\n        self.ball_limit=3#runs normally when set to 1, but not when set to 3\r\n    \r\n    def blitme(self):\r\n        '''Draw the ship at its specified position'''\r\n        self.screen.blit(self.image,self.rect)\r\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393210853","text":"from collections import deque\n\ndef BFS(N):\n    global n\n    q = deque([N])\n    cnt = 2\n    while q and cnt:\n        cnt -= 1\n        for i in range(len(q)):\n            v = q.popleft()\n            for j in range(1, n+1):\n                if not visited[j] and field[v][j]:\n                    visited[j] = 1\n                    q.append(j)\n\n\nn = int(input())\nm = int(input())\nfield = [[0 for i in range(n+1)] for j in range(n+1)]\nvisited = [0 for i in range(n+1)]\nfor i in range(m):\n    s, e = map(int, input().split())\n    field[s][e] = 1\n    field[e][s] = 1\n\nvisited[1] = 1\nBFS(1)\nprint(sum(visited) - 1)\n\n# for _ in range(len(field)):\n#     print(field[_])","sub_path":"python/algostudy/allim/boj5567_2.py","file_name":"boj5567_2.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"436377277","text":"import requests\nfrom bs4 import BeautifulSoup\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\n\n\nCLIENT_ID = \"YOUR CLIENT ID\"\nCLIENT_SECRET = \"YOUR CLIENT SECRET\"\n\nsp = spotipy.Spotify(\n    auth_manager=SpotifyOAuth(\n        scope=\"playlist-modify-private\",\n        redirect_uri=\"http://example.com\",\n        client_id=CLIENT_ID,\n        client_secret=CLIENT_SECRET,\n        show_dialog=True,\n        cache_path=\"token.txt\"\n    )\n)\nuser_id = sp.current_user()[\"id\"]\n\n\ndate = input(\"What date would you like to travel to? Enter a date YYYY-MM-DD:\")\nresponse = requests.get(\"https://www.billboard.com/charts/hot-100/\" + date)\n\nsoup = BeautifulSoup(response.text, \"html.parser\")\nsong_titles = soup.find_all(name=\"span\", class_=\"chart-element__information__song\")\nsongs = [song.getText() for song in song_titles]\n\nsong_uris = []\nyear = date.split(\"-\")[0]\nfor title in songs:\n    result = sp.search(q=f\"track:{title} year:{year}\", type=\"track\")\n    # print(result)\n    try:\n        uri = result[\"tracks\"][\"items\"][0][\"uri\"]\n        song_uris.append(uri)\n    except IndexError:\n        print(f\"{title} doesn't exist in Spotify. 
Skipped.\")\n\nplaylist = sp.user_playlist_create(user=user_id, name=f\"{date} Billboard 100\", public=False)\n# print(playlist)\nsp.user_playlist_add_tracks(user=user_id, playlist_id=playlist[\"id\"], tracks=song_uris) # add the tracks to the playlist created above\n","sub_path":"spotify.main.py","file_name":"spotify.main.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173181482","text":"#equal reverse\ndef equal_reverse(*args): #I defined a function with args\n    words=[] #I opened a new empty list\n    for i in args: #I wanted to look at all elements \n        if i==i[::-1]: #If their reverse is equal to them\n            words.append(True) #If it is equal I added True to the words list\n        else:\n            words.append(False) # If it is not equal I added False to the words list\n    return words #I wanted to return the words list from the function\nprint(equal_reverse(\"madam\",\"tacocat\",\"utrecht\"))","sub_path":"equal_reverse.py","file_name":"equal_reverse.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"567779317","text":"import os\nimport shutil\n# import tqdm\nimport torch\nimport torchvision\nfrom dataset import RemoteSensingMap, RemoteSensingMap_aug\nfrom torch.utils.data import DataLoader, Dataset\nimport numpy as np\nimport torch.nn.functional as F\n\n\ndef split_dataset_to_different_folder(train_folder_path, new_folder_path_img, new_folder_path_mask):\n    img_lists = os.listdir(train_folder_path)\n    for idx, ele in enumerate(img_lists):\n        img_path = os.path.join(train_folder_path, ele)\n        if os.path.isfile(img_path):\n            prefix = ele.split('.')[1]\n            if prefix == 'tif':\n                new_path = os.path.join(new_folder_path_img, ele)\n            else:\n                new_path = os.path.join(new_folder_path_mask, ele)\n            shutil.copy(img_path, new_path)\n\n\ndef save_checkpoint(state, filename=\"my_checkpoint.pth.tar\"):\n    print(\"=> Saving checkpoint\")\n    torch.save(state, filename)\n\n\ndef load_checkpoint(checkpoint, model):\n    print(\"=> Loading checkpoint\")\n    model.load_state_dict(checkpoint[\"state_dict\"])\n\n\ndef get_loaders(\n        train_dir,\n        train_maskdir,\n        batch_size,\n        train_transform,\n        num_workers=4,\n        pin_memory=True,\n        fold_idx=0,\n        train_idx=None,\n        val_idx=None\n):\n    # total_train_ds = RemoteSensingMap(\n    ## image_dir=train_dir,\n    # mask_dir=train_maskdir,\n    # transform=train_transform,\n    # )\n    total_train_ds = RemoteSensingMap_aug(\n        image_dir=train_dir,\n        mask_dir=train_maskdir,\n        transform=train_transform,\n    )\n\n    for i in range(len(total_train_ds)):\n        if i % fold_idx == 0:\n            val_idx.append(i)\n        else:\n            train_idx.append(i)\n    train_ds = torch.utils.data.Subset(total_train_ds, train_idx)\n    val_ds = torch.utils.data.Subset(total_train_ds, val_idx)\n\n    train_loader = DataLoader(\n        train_ds,\n        batch_size=batch_size,\n        num_workers=num_workers,\n        pin_memory=pin_memory,\n        shuffle=True,\n    )\n    val_loader = DataLoader(\n        val_ds,\n        batch_size=batch_size,\n        num_workers=num_workers,\n        pin_memory=pin_memory,\n        shuffle=False,\n    )\n\n    return train_loader, val_loader\n\n\ndef calculate_F1(pred, mask, c=10):\n    iou_result = []\n    for idx in range(c):\n        p = (mask == idx).int().reshape(-1)\n        t = (pred == idx).int().reshape(-1)\n\n        union = p.sum() + t.sum()\n        overlap = (p * t).sum()\n        iou = 2 * overlap / (p.sum() + t.sum() + 1e-5)\n        iou_result.append(iou.abs().data.cpu().numpy())\n    return np.stack(iou_result), np.stack(iou_result).mean()\n\n\ndef calculate_miou(pred, mask, c=10):\n    iou_result = []\n    for idx in range(c):\n        t = (mask == idx).reshape(-1)\n        p = (pred == idx).reshape(-1)\n\n        overlap = t & p\n        tp = overlap.sum()\n        fp = (overlap ^ p).sum()\n        fn = (overlap ^ t).sum()\n\n        iou = (tp) / (fp + fn + tp + 1e-5)\n        iou_result.append(iou.data.cpu().numpy())\n    return np.stack(iou_result), np.stack(iou_result).mean()\n\n\ndef check_valid_metric(loader, model, device='cuda'):\n    model.eval()\n    val_dice = []\n    val_iou = []\n\n    with torch.no_grad():\n        for x, y in loader:\n            x = x[:, :3, :, :].to(device)\n            y = y.to(device)\n            preds = model(x)\n            preds = preds.argmax(1)\n            _, mean_dice = calculate_F1(preds, y)\n            val_dice.append(mean_dice)\n            _, mean_iou = calculate_miou(preds, y)\n            val_iou.append(mean_iou)\n    print(f\"Got mean_dice_score:{sum(val_dice) / len(val_dice) * 100:.4f}\")\n    print(f\"Got mean_iou:{sum(val_iou) / len(val_iou) * 100:.4f}\")\n    model.train()\n    return sum(val_dice) / len(val_dice), sum(val_iou) / len(val_iou)\n\n\ndef check_valid_metric_ocr(loader, model, device='cuda', config=None):\n    model.eval()\n    val_dice_corase = []\n    val_iou_croase = []\n    val_dice_fine = []\n    val_iou_fine = []\n\n    with torch.no_grad():\n        for x, y in loader:\n            x = x[:, :3, :, :].to(device)\n            y = y.to(device)\n            pred_corase, pred_fine = model(x)\n\n            ph, pw = pred_corase.size(2), pred_corase.size(3)\n            h, w = y.size(1), y.size(2)\n            if ph != h or pw != w:\n                pred_corase = F.interpolate(input=pred_corase, size=(h, w),\n                                            mode='bilinear',\n                                            align_corners=config.MODEL.ALIGN_CORNERS)\n                pred_fine = F.interpolate(input=pred_fine, size=(h, w),\n                                          mode='bilinear',\n                                          align_corners=config.MODEL.ALIGN_CORNERS)\n            pred_corase = pred_corase.argmax(1)\n            pred_fine = pred_fine.argmax(1)\n            _, mean_dice = calculate_F1(pred_corase, y)\n            val_dice_corase.append(mean_dice)\n            _, mean_dice = calculate_F1(pred_fine, y)\n            val_dice_fine.append(mean_dice)\n            _, mean_iou = calculate_miou(pred_corase, y)\n            val_iou_croase.append(mean_iou)\n            _, mean_iou = calculate_miou(pred_fine, y)\n            val_iou_fine.append(mean_iou)\n    print(f\"Got Coarse mean_dice_score:{sum(val_dice_corase) / len(val_dice_corase) * 100:.4f}\")\n    print(f\"Got Coarse mean_iou:{sum(val_iou_croase) / len(val_iou_croase) * 100:.4f}\")\n    print(f\"Got Fine mean_dice_score:{sum(val_dice_fine) / len(val_dice_fine) * 100:.4f}\")\n    print(f\"Got Fine mean_iou:{sum(val_iou_fine) / len(val_iou_fine) * 100:.4f}\")\n    model.train()\n    return sum(val_dice_fine) / len(val_dice_fine), sum(val_iou_fine) / len(val_iou_fine)\n\n\ndef check_accuracy(loader, model, device=\"cuda\"):\n    num_correct = 0\n    num_pixels = 0\n    dice_score = 0\n    model.eval()\n\n    with torch.no_grad():\n        for x, y in loader:\n            x = x.to(device)\n            y = y.to(device).unsqueeze(1)\n            preds = torch.sigmoid(model(x))\n            preds = (preds > 0.5).float()\n            num_correct += (preds == y).sum()\n            num_pixels += torch.numel(preds)\n            dice_score += (2 * (preds * y).sum()) / (\n                (preds + y).sum() + 1e-8\n            )\n\n    print(\n        f\"Got {num_correct}/{num_pixels} with acc {num_correct / num_pixels * 100:.2f}\"\n    )\n    print(f\"Dice score: {dice_score / len(loader)}\")\n    model.train()\n\n\ndef save_predictions_as_imgs(\n        loader, model, folder=\"saved_images/\", device=\"cuda\"\n):\n    model.eval()\n    for idx, (x, y) in enumerate(loader):\n        x = x.to(device=device)\n        with torch.no_grad():\n            preds = torch.sigmoid(model(x))\n            preds = (preds > 0.5).float()\n        torchvision.utils.save_image(\n            preds, f\"{folder}/pred_{idx}.png\"\n        )\n        torchvision.utils.save_image(y.unsqueeze(1), f\"{folder}{idx}.png\")\n\n    model.train()\n\n\nif __name__ == '__main__':\n    train_folder_old = 
'data/origins/suichang_round1_train_210120'\n new_imgs_folder = 'data/train_imgs'\n new_masks_folder = 'data/train_masks'\n\n split_dataset_to_different_folder(train_folder_old, new_imgs_folder, new_masks_folder)\n\n\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"414842651","text":"## Functions\nimport numpy as np\nfrom scipy.stats import t\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom scipy.stats import norm\nfrom sklearn.model_selection import train_test_split\nimport scipy.linalg as scl\n\ndef FrankeFunction(x, y, noise_level=0):\n term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))\n term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))\n term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))\n term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)\n noise = noise_level*np.random.randn(len(x),len(y))\n return term1 + term2 + term3 + term4 + noise\n\n\ndef OridinaryLeastSquares(design, data, test):\n inverse_term = np.linalg.inv(design.T.dot(design))\n beta = inverse_term.dot(design.T).dot(data)\n pred_test = test @ beta\n pred_train = design @ beta\n return beta, pred_test, pred_train\n\n\ndef OridinaryLeastSquares_SVD(design, data, test):\n U, s, V = np.linalg.svd(design)\n beta = V.T @ scl.pinv(scl.diagsvd(s, U.shape[0], V.shape[0])) @ U.T @ data\n pred_test = test @ beta\n pred_train = design @ beta\n return beta, pred_test, pred_train\n\n\ndef RidgeRegression(design, data, test, _lambda=0):\n inverse_term = np.linalg.inv(design.T @ design + _lambda*np.eye((design.shape[1])))\n beta = inverse_term @ (design.T) @ (data)\n pred_test = test @ beta\n pred_train = design @ beta\n return beta, pred_test, pred_train \n\ndef VarianceBeta_OLS(design, data, pred):\n N,p = np.shape(design)\n sigma = 1/(N-p-1) * np.sum((data - pred)**2)\n Bvar = np.diag(np.linalg.inv(design.T @ design)*sigma)\n conf95 = 1.96*np.sqrt(Bvar)\n return Bvar, conf95\n\ndef VarianceBeta_Ridge(design, data, pred, _lambda=0):\n N,p = np.shape(design)\n sigma = 1/(N-p-1) * np.sum((data - pred)**2)\n x = design.T @ design\n W = np.linalg.inv(x + _lambda*np.eye(x.shape[0]))@x\n Bvar = np.diag(sigma*W@np.linalg.inv(x + _lambda*np.eye(x.shape[0])).T)\n conf95 = 1.96*np.sqrt(Bvar)\n return Bvar, conf95\n\ndef MSE(y, ytilde):\n return (np.sum((y-ytilde)**2))/y.size\n\n\ndef R2Score(y, ytilde):\n return 1 - ((np.sum((y-ytilde)**2))/(np.sum((y-((np.sum(y))/y.size))**2)))\n\n\ndef MAE(y, ytilde):\n return (np.sum(np.abs(y-ytilde)))/y.size\n\n\ndef MSLE(y, ytilde):\n return (np.sum((np.log(1+y) - np.log(1+ytilde))**2))/y.size\n\n\ndef DesignDesign(x, y, power):\n '''\n This function employs the underlying pattern governing a design matrix\n on the form [1,x,y,x**2,x*y,y**2,x**3,(x**2)*y,x*(y**2),y**3 ....]\n x_power=[0,1,0,2,1,0,3,2,1,0,4,3,2,1,0,...,n,n-1,...,1,0]\n y_power=[0,0,1,0,1,2,0,1,2,3,0,1,2,3,4,...,0,1,...,n-1,n]\n '''\n\n concat_x = np.array([0,0])\n concat_y = np.array([0,0])\n\n\n for i in range(power):\n toconcat_x = np.arange(i+1,-1,-1)\n toconcat_y = np.arange(0,i+2,1)\n concat_x = np.concatenate((concat_x,toconcat_x))\n concat_y = np.concatenate((concat_y,toconcat_y))\n\n concat_x = concat_x[1:len(concat_x)]\n concat_y = concat_y[1:len(concat_y)]\n\n X,Y = np.meshgrid(x,y)\n X = np.ravel(X)\n Y = np.ravel(Y)\n DesignMatrix = np.empty((len(X),len(concat_x)))\n for i in range(len(concat_x)):\n DesignMatrix[:,i] = 
(X**concat_x[i])*(Y**concat_y[i])\n\n #DesignMatrix = np.concatenate((np.ones((len(X),1)),DesignMatrix), axis = 1)\n return DesignMatrix\n\ndef create_X(x, y, n ):\n if len(x.shape) > 1:\n x = np.ravel(x)\n y = np.ravel(y)\n\n N = len(x)\n l = int((n+1)*(n+2)/2) # Number of elements in beta\n X = np.ones((N,l))\n\n for i in range(1,n+1):\n q = int((i)*(i+1)/2)\n for k in range(i+1):\n X[:,q+k] = (x**(i-k))*(y**k)\n\n return X\n\ndef reshaper(k, data):\n output = []\n j = int(np.ceil(len(data)/k))\n for i in range(k):\n if i<k:\n output.append(data[i*j:(i+1)*j])\n else:\n output.append(data[i*j:])\n return np.asarray(output)\n\n\ndef k_fold_cv(k, indata, indesign, predictor, _lambda=0, shuffle=False):\n\n '''\n Usage: k-fold cross validation employing either RidgeRegression, OridinaryLeastSquares or ols_svd\n Input: k = number of folds\n indata = datapoints\n indesign = user defined design matrix\n predictor = RidgeRegression, OridinaryLeastSquares or ols_svd\n _lambda = hyperparameter/penalty paramter/tuning parameter for RidgeRegression\n shuffle = False, input data will not be shuffled\n True, input data will be shuffled\n output: r2_out/k = averaged out sample R2-score\n mse_out/k = averaged out sample MSE\n r2_in/k = averaged in sample R2-Score\n mse_in/k = averaged in sample MSE\n '''\n mask = np.arange(indata.shape[0])\n if shuffle:\n np.random.shuffle(mask)\n data = reshaper(k, indata[mask])\n design = reshaper(k, indesign[mask])\n r2_out = 0\n r2_in = 0\n mse_out = 0\n mse_in = 0\n\n for i in range(k):\n train_design = design[np.arange(len(design))!=i] # Featch all but the i-th element\n train_design = np.concatenate(train_design,axis=0)\n train_data = data[np.arange(len(data))!=i]\n train_data = np.concatenate(train_data,axis=0)\n test_design = design[i]\n test_data = data[i]\n\n if _lambda != 0:\n beta, pred_ts, pred_tr = predictor(train_design, train_data, test_design, _lambda)\n else:\n beta, pred_ts, pred_tr = predictor(train_design, train_data, test_design)\n\n r2_out += R2Score(test_data, pred_ts)\n r2_in += R2Score(train_data, pred_tr)\n mse_out += MSE(test_data, pred_ts)\n mse_in += MSE(train_data, pred_tr)\n\n return r2_out/k, mse_out/k, r2_in/k, mse_in/k\n","sub_path":"Project-1/Tests/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"481033516","text":"#\n# @lc app=leetcode id=40 lang=python3\n#\n# [40] Combination Sum II\n#\n\n# @lc code=start\n\nclass Solution:\n def combinationSum2(self, candidates: 'List[int]', target: int) -> 'List[List[int]]':\n ans = list()\n\n candidates.sort()\n def backtrack(path, target, start_idx):\n if target < 0:\n return\n\n if target == 0:\n ans.append(path)\n return \n\n layer_usage = set()\n for i in range(start_idx, len(candidates)):\n # if i > start_idx and candidates[i] == candidates[i-1]: # option1\n # continue\n if candidates[i] in layer_usage: # option2\n continue\n num = candidates[i]\n layer_usage.add(num)\n\n if num > target:\n break\n backtrack(path + [num], target-num, i+1)\n\n backtrack([], target, 0)\n return ans\n\nans = Solution().combinationSum2([10,1,2,7,6,1,5], 8)\nprint(ans)\n\n# @lc code=end\n\n","sub_path":"solutions/back-tracking/40.combination-sum-ii.py","file_name":"40.combination-sum-ii.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"534344301","text":"#! 
-*- coding:utf-8 -*-\n\nfrom urllib import request\n\n\"\"\"\nimport cookielib\n\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\nresponse = opener.open('http://www.baidu.com')\n\nprint response.getcode()\nfor item in cookie:\n print('name=%s value=%s'% (item.name, item.value))\n\"\"\"\n\ni_essay_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (\"\n \"KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36\",\n \"Referer\": \"http://baidu.com/\"}\nreq = request.Request(\"http://www.oschina.net/blog\", headers=i_essay_headers)\nresponse = request.urlopen(req)\n# response.read() returns bytes in Python 3, so open the file in binary mode\nf = open(\"./demo.txt\", \"wb\")\nf.write(response.read())\nf.close()\n","sub_path":"hota/basic/network/urllib2_demo.py","file_name":"urllib2_demo.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"351972060","text":"import sys\nfrom optparse import OptionParser\n\nimport awake\n\n\ndef _build_cliparser():\n usage = 'usage: %prog [options] MAC1 [MAC2 MAC3 MAC...]'\n parser = OptionParser(usage=usage,\n version='%%prog: %s' % awake.__version__)\n parser.add_option('-p', '--port', dest='port', default=9, type='int',\n help='Destination port. (Default 9)')\n bhelp = 'Broadcast ip of the network. (Default 255.255.255.255)'\n parser.add_option('-b', '--broadcast', dest='broadcast',\n default='255.255.255.255', type='string',\n help=bhelp)\n dhelp = 'Destination ip/domain to connect and send the packet, ' \\\n 'by default use broadcast.'\n parser.add_option('-d', '--destination', dest='destination', default=None,\n help=dhelp)\n fhelp = 'Use a file with the list of macs, ' \\\n 'separated with -s, by default \\\\n. ' \\\n 'If any mac (line where -s \\\\n), have the \"#\" character, ' \\\n 'any following character is considered a comment. ' \\\n 'Can be used multiple times for multiple files.'\n parser.add_option('-f', '--file', dest='file', action='append',\n default=[], help=fhelp)\n shelp = 'Pattern to be used as a separator with the -f option. 
(Default \\\\n)'\n parser.add_option('-s', '--separator', dest='separator', type='string',\n default='\\n', help=shelp)\n parser.add_option('-q', '--quiet', dest='quiet_mode', action='store_true',\n help='Do not output informative messages.',\n default=False)\n return parser\n\n\ndef _get_macs(options, args):\n macs = []\n if not options.file and len(args) < 1:\n errmsg = 'Requires at least one MAC address or a list of MAC (-f).'\n raise Exception(errmsg) \n macs += args\n try:\n for file_with_macs in options.file:\n macs += awake.utils.fetch_macs_from_file(file_with_macs,\n options.separator)\n except Exception:\n exep = awake.utils.fetch_last_exception()\n sys.stderr.write('%s\\n' % exep.args)\n return macs\n \n\ndef _notify_error_and_finish(message, cliparser):\n cliparser.print_help() \n cliparser.error(message)\n\n\ndef _send_packets(macs, broadcast, destination, port, quiet_mode):\n \"\"\"Send a magic packet to each mac in *macs*, this function tries\n to deliver even if some of the macs are faulty in anyway.\n\n Returns False in case of any error on any of the *macs*, otherwise True.\n \"\"\"\n no_errors = True\n for mac in macs:\n try:\n awake.wol.send_magic_packet(mac, broadcast, destination, port)\n except ValueError:\n exep = awake.utils.fetch_last_exception()\n sys.stderr.write('ERROR: %s\\n' % exep.args[0])\n no_errors = False\n else:\n if not quiet_mode:\n if destination is None:\n destination = broadcast\n print('Sending magic packet to %s with broadcast %s MAC %s port %d' % \\\n (destination, broadcast, mac, port))\n return no_errors\n\n\ndef main():\n cliparser = _build_cliparser()\n options, args = cliparser.parse_args()\n try:\n macs = _get_macs(options, args)\n except Exception:\n exep = awake.utils.fetch_last_exception()\n _notify_error_and_finish(exep.args[0], cliparser)\n if macs:\n if not _send_packets(macs, options.broadcast,\n options.destination,\n options.port, options.quiet_mode):\n sys.exit(1)\n else:\n _notify_error_and_finish('Unable to acquire any mac address',\n cliparser)\n","sub_path":"Panasonic Network Remote.indigoPlugin/Contents/Server Plugin/awake/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"342231120","text":"#!/usr/bin/env python\n\n\"\"\"\nCopyright (c) 2014-2015 Miroslav Stampar (@stamparm)\nSee the file 'LICENSE' for copying permission\n\"\"\"\n\nimport re\n\nfrom core.common import retrieve_content\n\n__url__ = \"http://malwared.malwaremustdie.org/rss.php\"\n__check__ = \"Malwared\"\n__info__ = \"malware\"\n__reference__ = \"malwaremustdie.org\"\n\ndef fetch():\n retval = {}\n content = retrieve_content(__url__)\n\n if __check__ in content:\n for match in re.finditer(r\"<link>http://([^<]+?)/?</link>\", content):\n retval[match.group(1)] = (__info__, __reference__)\n\n return retval\n","sub_path":"trails/feeds/malwaremustdie.py","file_name":"malwaremustdie.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"559914095","text":"from tkinter import *\nfrom tkinter import ttk \nfrom tkinter import messagebox\n\nimport edited_eq\nimport login as lg\n\nimport module\nimport sqlite3\n\nconn = sqlite3.connect('Data_base.db')\ncur = conn.cursor()\n\nroot = Tk()\nroot.geometry(\"1352x650+0+0\")\nroot.title(\"Mazda community\")\nroot.configure(background = 'black')\n\n\nTops = Frame(root, width = 1350, height = 100, bd = 4, relief = 
\"raise\")\nTops.pack(side = TOP)\n\n#HEADLIGHT \n\nlblInfo = Label(Tops, font = ('arial', 50, 'bold'), text = \" Welcome to Mazda community \", bd = 5, anchor = 'w')\nlblInfo.grid(row = 0, column = 1)\n\n\ndef login(): # use module login.py\n\n root = Tk()\n root.title(\"Login Form\")\n lg.main(root)\n root.mainloop()\n\n\n\n\n# Login Button\nLoginButton = Button(Tops, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Sign in\", bg = 'white', command = login).grid(row = 0, column = 0)\n\n\nbottom = Frame(root, width = 1350, height = 600, bd = 4, relief = \"raise\")\nbottom.pack(side = TOP)\n\nbottomLeft = Frame(bottom, width = 1000, height = 600, bd = 4, relief = \"raise\")\nbottomLeft.pack(side = LEFT)\n#==============================================================================================================\nbottomLeftTop = Frame(bottomLeft, width = 1000, height = 300, bd = 4, relief = \"raise\")\nbottomLeftTop.pack(side = TOP)\n\nbottomLeftTopL = Frame(bottomLeftTop, width = 500, height = 300, bd = 4, relief = \"raise\")\nbottomLeftTopL.pack(side = LEFT)\n\nbottomLeftTopR = Frame(bottomLeftTop, width = 500, height = 300, bd = 4, relief = \"raise\")\nbottomLeftTopR.pack(side = RIGHT)\n#==============================================================================================================\n\nbottomLeftBottom = Frame(bottomLeft, width = 1000, height = 300, bd = 3, relief = \"raise\")\nbottomLeftBottom.pack(side = BOTTOM)\n\nbottomLeftBottomL = Frame(bottomLeftBottom, width = 500, height = 400, bd = 3, relief = \"raise\")\nbottomLeftBottomL.pack(side = LEFT)\n\nbottomLeftBottomR = Frame(bottomLeftBottom, width = 500, height = 400, bd = 3, relief = \"raise\")\nbottomLeftBottomR.pack(side = RIGHT)\n#==============================================================================================================\nbottomRight = Frame(bottom, width = 350, height = 600, bd = 4, relief = \"raise\")\nbottomRight.pack(side = RIGHT)\n#==============================================================================================================\n\n# Variables for customer's information\n\nCustomerName = StringVar()\nCustomerAdress = StringVar()\nCustomerPostcode = StringVar()\nCustomerTelephone = StringVar()\n\n\nTotal = StringVar() # Variable for total price\n\n# Input name\n\nlblName = Label(bottomLeftTopL, font = ('arial', 16, 'bold'), text = \"Name\", fg = 'black', width = 15, bd = 10, anchor = 'w')\nlblName.grid(row = 0, column = 0)\ntxtName = Entry(bottomLeftTopL, font = ('arial', 16, 'bold'), bd = 2, width = 24, bg = 'white', justify = 'left', textvariable = CustomerName)\ntxtName.grid(row = 0, column = 1)\n\ndef savetodb(): # Insert customer's information into database\n \n txtName = Entry(root)\n txtAdress = Entry(root)\n txtPostcode = Entry(root)\n txtTelephone = Entry(root)\n \n entry_name = CustomerName.get()\n entry_adress = CustomerAdress.get()\n entry_postcode = CustomerPostcode.get()\n entry_telephone = CustomerTelephone.get()\n entry_navi = var01.get()\n entry_interior = var02.get()\n entry_wheels = var03.get()\n entry_audio = var04.get()\n entry_upholstery = var05.get()\n entry_sensors = var05.get()\n entry_model = var1.get()\n entry_packet = var2.get()\n entry_total = Total.get()\n\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute('insert into customers (name, adress, postcode, phone_number) values (?,?,?,?)', ((entry_name), (entry_adress), (entry_telephone), (entry_postcode))) \n \n\n 
txtName.pack()\n txtAdress.pack()\n txtPostcode.pack()\n txtTelephone.pack()\n\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute('insert into sold_cars (name, adress, postcode, phone_number, navigation, interior, wheels, audio, upholstery, sensors, model, packet, total_amount) values (?,?,?,?,?,?,?,?,?,?,?,?,?)',\\\n ((entry_name), (entry_adress), (entry_telephone), (entry_postcode), (entry_navi), \\\n (entry_interior), (entry_wheels), (entry_audio), (entry_upholstery), (entry_sensors), (entry_model), (entry_packet), (entry_total)))\n\n\n# Input Adress\n\nlblAdress = Label(bottomLeftTopL, font = ('arial', 16, 'bold'), text = \"Adress\", fg = 'black', width = 15, bd = 10, anchor = 'w')\nlblAdress.grid(row = 1, column = 0)\ntxtAdress = Entry(bottomLeftTopL, font = ('arial', 16, 'bold'), bd = 2, width = 24, bg = 'white', justify = 'left', textvariable = CustomerAdress)\ntxtAdress.grid(row = 1, column = 1)\n\n# Input Postcode\n\nlblPostcode = Label(bottomLeftTopL, font = ('arial', 16, 'bold'), text = \"Postcode\", fg = 'black', width = 15, bd = 10, anchor = 'w')\nlblPostcode.grid(row = 2, column = 0)\ntxtPostcode = Entry(bottomLeftTopL, font = ('arial', 16, 'bold'), bd = 2, width = 24, bg = 'white', justify = 'left', textvariable = CustomerPostcode)\ntxtPostcode.grid(row = 2, column = 1) \n\n# Input Telephone Number\n\nlblTelephone = Label(bottomLeftTopL, font = ('arial', 16, 'bold'), text = \"Telephone\", fg = 'black', width = 15, bd = 10, anchor = 'w')\nlblTelephone.grid(row = 3, column = 0)\ntxtTelephone = Entry(bottomLeftTopL, font = ('arial', 16, 'bold'), bd = 2, width = 24, bg = 'white', justify = 'left', textvariable = CustomerTelephone)\ntxtTelephone.grid(row = 3, column = 1) \n\n# ************************************ TOTAL PRICE LABEL *************************\n\nlblTotal = Label(bottomLeftBottomR, font = ('arial', 16, 'bold'), text = \"Total\", fg = \"black\", width = 10 , bd = 12, anchor = 'w')\nlblTotal.grid(row = 0, column = 0)\ntxtTotal = Entry(bottomLeftBottomR, font = ('arial', 16, 'bold'), bd = 2, width = 17, bg = \"white\", justify = 'left', textvariable = Total)\ntxtTotal.grid(row = 0, column = 1)\n\n#==============================================================================================================\n\n# VARIABLES FOR EQUIPMENT COMBOBOXES\n\nvar01 = StringVar()\nvar02 = StringVar()\nvar03 = StringVar()\nvar04 = StringVar()\nvar05 = StringVar()\nvar06 = StringVar()\n\n#Navigation\n\nlblChooseaModel = Label(bottomLeftBottomL, font = ('arial', 12, 'bold'), text = \"Navigation\", fg = \"black\", width = 10, bd = 12, anchor = 'w')\nlblChooseaModel.grid(row = 0, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftBottomL, textvariable = var01, state = 'readonly', font = ('arial', 20, 'bold'), width = 10)\n# Selecting records from database and inserting into tkinter's combobox\nnavi_query = cur.execute('SELECT navigation_name FROM navigation')\ndata_navi = ['']\n\nfor row in navi_query:\n data_navi.append(row)\n\ncboChooseaModel['value'] = data_navi\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 0, column = 1)\n\ndef delete_navi(): # function for deleting selected element\n item = var01.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM navigation WHERE navigation_name = ?\", (item,))\n conn.commit()\nbtnNavi = Button(bottomLeftBottomL, text = \"Remove\", anchor = E, command = delete_navi).grid(row = 0, column = 2)\n\n#Car interior\n\nlblChooseaModel = 
Label(bottomLeftBottomL, font = ('arial', 12, 'bold'), text = \"Car interior\", fg = \"black\", width = 10, bd = 12, anchor = 'w')\nlblChooseaModel.grid(row = 1, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftBottomL, textvariable = var02, state = 'readonly', font = ('arial', 20, 'bold'), width = 10)\ninterior_query = cur.execute('SELECT interior_name FROM car_interior')\ndata_interior = ['']\n\nfor row in interior_query:\n data_interior.append(row)\n\ncboChooseaModel['value'] = data_interior\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 1, column = 1)\n\ndef delete_interior(): # function for deleting selected element\n item = var02.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM car_interior WHERE interior_name = ?\", (item,))\n conn.commit()\n\nbtnInterior = Button(bottomLeftBottomL, text = \"Remove\", anchor = E, command = delete_interior).grid(row = 1, column = 2)\n\n# Wheels\nlblChooseaModel = Label(bottomLeftBottomL, font = ('arial', 12, 'bold'), text = \"Wheels\", fg = \"black\", width = 10, bd = 12, anchor = 'w')\nlblChooseaModel.grid(row = 2, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftBottomL, textvariable = var03, state = 'readonly', font = ('arial', 20, 'bold'), width = 10)\nwheels_query = cur.execute('SELECT wheel_name FROM wheel')\ndata_wheels = ['']\n\nfor row in wheels_query:\n data_wheels.append(row)\n\ncboChooseaModel['value'] = data_wheels\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 2, column = 1)\n\ndef delete_wheel(): # function for deleting selected element\n item = var03.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM wheel WHERE wheel_name = ?\", (item,))\n conn.commit()\n\nbtnWheels = Button(bottomLeftBottomL, text = \"Remove\", anchor = E, command = delete_wheel).grid(row = 2, column = 2)\n\n# Audio\nlblChooseaModel = Label(bottomLeftBottomL, font = ('arial', 12, 'bold'), text = \"Audio\", fg = \"black\", width = 10, bd = 12, anchor = 'w')\nlblChooseaModel.grid(row = 3, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftBottomL, textvariable = var04, state = 'readonly', font = ('arial', 20, 'bold'), width = 10)\naudio_query = cur.execute('SELECT audio_name FROM audio')\ndata_audio = ['']\n\nfor row in audio_query:\n data_audio.append(row)\n\ncboChooseaModel['value'] = data_audio\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 3, column = 1)\n\ndef delete_audio(): # function for deleting selected element\n item = var04.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM audio WHERE audio_name = ?\", (item,))\n conn.commit()\nbtnAudio = Button(bottomLeftBottomL, text = \"Remove\", anchor = E, command = delete_audio).grid(row = 3, column = 2)\n\n# Upholstery\nlblChooseaModel = Label(bottomLeftBottomL, font = ('arial', 12, 'bold'), text = \"Upholstery\", fg = \"black\", width = 10, bd = 12, anchor = 'w')\nlblChooseaModel.grid(row = 4, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftBottomL, textvariable = var05, state = 'readonly', font = ('arial', 20, 'bold'), width = 10)\nupholstery_query = cur.execute('SELECT upholstery_name FROM upholstery')\ndata_upholstery = ['']\n\nfor row in upholstery_query:\n data_upholstery.append(row)\n\ncboChooseaModel['value'] = data_upholstery\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 4, column = 1)\n\ndef delete_upholstery(): # function for deleting selected element\n item = var05.get()\n 
conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM upholstery WHERE upholstery_name = ?\", (item,))\n conn.commit()\nbtnUpholstery = Button(bottomLeftBottomL, text = \"Remove\", anchor = E, command = delete_upholstery).grid(row = 4, column = 2)\n\n# Sensors\nlblChooseaModel = Label(bottomLeftBottomL, font = ('arial', 12, 'bold'), text = \"Sensors\", fg = \"black\", width = 10, bd = 12, anchor = 'w')\nlblChooseaModel.grid(row = 5, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftBottomL, textvariable = var06, state = 'readonly', font = ('arial', 20, 'bold'), width = 10)\nsensor_query = cur.execute('SELECT sensor_name FROM sensor')\ndata_sensor = ['']\n\nfor row in sensor_query:\n data_sensor.append(row)\n\ncboChooseaModel['value'] = data_sensor\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 5, column = 1)\ndef delete_sensor(): # function for deleting selected element\n item = var06.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM sensor WHERE sensor_name = ?\", (item,))\n conn.commit()\n\nbtnSensor = Button(bottomLeftBottomL, text = \"Remove\", anchor = E, command = delete_sensor).grid(row = 5, column = 2)\n\nedited_equipment = StringVar()\nedited_price = IntVar()\n\n\n\ndef alter_equipment():\n root = Tk()\n root.geometry(\"400x400\")\n root.title(\"Add new equipment\")\n edited_eq.Alter_eq(root)\n root.mainloop()\n\n\n# Add new equipment button\nbtnAlter_equipment = Button(bottomLeftBottomL, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Add item\", bg = 'white', command = alter_equipment).grid(row = 7, column = 0)\n\n\ndef Receipt(): # print receipt \n\n txtReceipt.delete(\"1.0\", END)\n txtReceipt.insert(END, \"\\t Confirmation of the transaction\" + \"\\n\" + \"\\n\" + \"\\n\")\n txtReceipt.insert(END, \"Name: \\t \\t\" + CustomerName.get() + \"\\n\")\n txtReceipt.insert(END, \"Adress: \\t \\t\" + CustomerAdress.get() + \"\\n\")\n txtReceipt.insert(END, \"Postcode: \\t \\t\" + CustomerPostcode.get() + \"\\n\")\n txtReceipt.insert(END, \"Telephone: \\t \\t\" + CustomerTelephone.get() + \"\\n\" + \"\\n\" + \"\\n\")\n txtReceipt.insert(END, \"Selected car and package: \" + \"\\n\" + \"\\n\")\n txtReceipt.insert(END, \"Car: \\t \\t\" + var1.get() + \"\\n\") \n txtReceipt.insert(END, \"Package: \\t \\t\" + var2.get() + \"\\n\" + \"\\n\")\n txtReceipt.insert(END, \"Selected equipment :\" + \"\\n\" + \"\\n\")\n txtReceipt.insert(END, \"Navigation: \\t \\t\" + var01.get() + \"\\n\" )\n txtReceipt.insert(END, \"Car interior: \\t \\t\" + var02.get() + \"\\n\")\n txtReceipt.insert(END, \"Wheels: \\t \\t\" + var03.get() + \"\\n\")\n txtReceipt.insert(END, \"Audio: \\t \\t\" + var04.get() + \"\\n\") \n txtReceipt.insert(END, \"Upholstery: \\t \\t\" + var05.get() + \"\\n\")\n txtReceipt.insert(END, \"Sensors: \\t \\t\" + var06.get() + \"\\n\" + \"\\n\")\n \n \n\n# Button for printing the receipt\nbtnReceipt = Button(bottomLeftBottomL, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Print receipt\", bg = 'white', command = Receipt ).grid(row = 7, column = 1)\n\n\ndef Reset(): # Delete all typed data\n CustomerName.set(\"\")\n CustomerAdress.set(\"\")\n CustomerPostcode.set(\"\")\n CustomerTelephone.set(\"\")\n var01.set(\"\")\n var02.set(\"\")\n var03.set(\"\")\n var04.set(\"\")\n var05.set(\"\")\n var06.set(\"\")\n var1.set(\"\")\n var2.set(\"\")\n txtReceipt.delete(\"1.0\", END) # clear the receipt text area\n \n\n\n# Button to clear all typed 
data\nbtnReset = Button(bottomLeftBottomR, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Reset \", bg = 'white', command = Reset).grid(row = 2, column = 0) \n\ndef iExit():\n iExit = messagebox.askyesno(\"Mazda community\", \"Confirm, if you want to exit\")\n if iExit:\n root.destroy()\n return\n \n# Button for exit the application\nbtnExit = Button(bottomLeftBottomR, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Exit \", bg = 'white', command = iExit).grid(row = 2, column = 1) \n\n# Help for exit\ndef callback():\n callback = messagebox.showinfo(\"Help\", \" To quit application please click on 'Exit' button and confirm \\n\\\n To delete all typed data please click on 'Reset' button\")\n return\n\n# Button for save customer's data and full order information into database (relatievly into customer table, and sold_cars table)\nbtnSave = Button(bottomLeftBottomR, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Save \", bg = 'white', command = savetodb).grid(row = 3, column = 0) \n\nbtnHow_to_Exit = Button(bottomLeftBottomR, pady = 8, bd = 2, fg = 'black', font = ('arial', 10, 'bold'), width = 10, text = \"Help \", bg = 'white', command = callback).grid(row = 3, column = 1) \n\n\n#==============================================================================================================\n\n# Variables for car and model combobox\nvar1 = StringVar()\nvar2 = StringVar()\n\n\n# Choose a model\n\ndef combo_models_choose(chosen, value):\n chosen.append(value)\n models_price = []\n if value == \"Mazda_2\":\n model_query_price = cur.execute(\"SELECT price FROM models WHERE name_of_model = 'Mazda_2' \")\n for roww in model_query_price:\n models_price.append(roww)\n #print(*models_price)\n Total.set(*models_price) \n\n elif value == \"Mazda_3\":\n model_query_price = cur.execute(\"SELECT price FROM models WHERE name_of_model = 'Mazda_3' \")\n for roww in model_query_price:\n models_price.append(roww)\n #print(*models_price)\n Total.set(*models_price) \n\n elif value == \"Mazda_6\":\n model_query_price = cur.execute(\"SELECT price FROM models WHERE name_of_model = 'Mazda_6' \")\n for roww in model_query_price:\n models_price.append(roww)\n #print(*models_price)\n Total.set(*models_price) \n\n elif value == \"Mazda_CX_3\":\n model_query_price = cur.execute(\"SELECT price FROM models WHERE name_of_model = 'Mazda_CX_3' \")\n for roww in model_query_price:\n models_price.append(roww)\n #print(*models_price)\n Total.set(*models_price) \n #model_query_price = cur.execute(\" SELECT ( SELECT price from models WHERE name_of_model = 'Mazda_2') + (SELECT price FROM packages WHERE name_of_package = 'Standard' )\")\n\n\n elif value == \"Mazda_CX_5\":\n model_query_price = cur.execute(\"SELECT price FROM models WHERE name_of_model = 'Mazda_CX_5' \")\n for roww in model_query_price:\n models_price.append(roww)\n #print(*models_price)\n Total.set(*models_price)\n\n\n \n\n\n# Combobox for choosing model\nlblChooseaModel = Label(bottomLeftTopR, font = ('arial', 12, 'bold'), text = \"Choose a model\", fg = \"black\", width = 13, bd = 14, anchor = 'w')\nlblChooseaModel.grid(row = 0, column = 0)\ncboChooseaModel = ttk.Combobox(bottomLeftTopR, textvariable = var1, font = ('arial', 20, 'bold'), width = 12)\nmodels_query = cur.execute('SELECT name_of_model FROM models')\ndata_models = ['']\nfor row in models_query:\n data_models.append(row)\n \ncboChooseaModel['value'] = 
data_models\ncboChooseaModel.current(0)\ncboChooseaModel.grid(row = 1, column = 0)\n\ndef delete_model(): # function for deleting selected element\n item = var1.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM models WHERE name_of_model = ?\", (item,))\n conn.commit()\n\n\nbtnModel = Button(bottomLeftTopR, text = \"Remove\", anchor = S, command = delete_model).grid(row = 2, column = 0)\n\nchosen = []\nvar1.trace('w', lambda name, index, mode: combo_models_choose(chosen, var1.get()))\n\n# Choose a packet\n\ndef combo_packet_choose(chosen, value): # operating combobox\n chosen.append(value)\n# print(\"You've chosen \", *chosen)\n # models_price = []\n if value == \"Economy\":\n var01.set(\"Classic\")\n var02.set(\"Economy\")\n var03.set(\"Steel wheels\")\n var04.set(\"Two speakers\")\n var05.set(\"Classic\")\n var06.set(\"Rain Sensors\")\n\n\n elif value == \"Standard\":\n var01.set(\"Classic\")\n var02.set(\"Standard\")\n var03.set(\"Alloy wheels\")\n var04.set(\"Three speakers\")\n var05.set(\"Ecology leather\")\n var06.set(\"Lane Departure\") \n\n elif value == \"Premium\":\n var01.set(\"Premium\")\n var02.set(\"Premium\")\n var03.set(\"Alloy wheels\")\n var04.set(\"Four speakers\")\n var05.set(\"Natural leather\")\n var06.set(\"City support\")\n\n \n\n\nchosen_packet = []\nvar2.trace('w', lambda name, index, mode: combo_packet_choose(chosen_packet, var2.get()))\n\n\n# Label and combobox for packages\nlblChooseaPacket = Label(bottomLeftTopR, font = ('arial', 12, 'bold'), text = \"Choose a packet\", fg = \"black\", width = 13, bd = 14, anchor = 'w')\nlblChooseaPacket.grid(row = 0, column = 1)\ncboChooseaPacket = ttk.Combobox(bottomLeftTopR, textvariable = var2, state = 'readonly', font = ('arial', 20, 'bold'), width = 12)\npackages_query = cur.execute('SELECT name_of_package FROM packages')\ndata_packages = ['']\n\nfor row in packages_query:\n data_packages.append(row)\n \ncboChooseaPacket['value'] = data_packages\ncboChooseaPacket.current(0)\ncboChooseaPacket.grid(row = 1, column = 1)\n\ndef delete_packet(): # function for deleting selected element\n item = var2.get()\n conn = sqlite3.connect('Data_base.db')\n with conn:\n cur = conn.cursor()\n cur.execute(\"DELETE FROM packages WHERE name_of_package = ?\", (item,))\n conn.commit()\n\nbtnPackage = Button(bottomLeftTopR, text = \"Remove\", anchor = S, command = delete_packet).grid(row = 2, column = 1)\n\n\n\n# ************************************** RECEIPT *************************************\n\n# Label for receipt\n\nlblReceipt = Label(bottomRight, font = ('arial', 16, 'bold'), text = \"Receipt\", bd = 2, anchor = 'w')\nlblReceipt.grid(row = 0, column = 0, sticky = W)\ntxtReceipt = Text(bottomRight, font = ('arial', 11, 'bold'), bd = 8, width = 46, height = 26, bg = 'white')\ntxtReceipt.grid(row = 1, column = 0)\n\nroot.mainloop() ","sub_path":"view_controler.py","file_name":"view_controler.py","file_ext":"py","file_size_in_byte":21845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356138447","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.forms.formsets import formset_factory\nfrom django.forms.models import modelformset_factory\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django.shortcuts import render_to_response, get_object_or_404, 
redirect\nfrom django.template import RequestContext\nfrom django.views.generic.edit import DeleteView\nfrom urllib import quote\nimport datetime\n\nfrom schedule.conf.settings import (GET_EVENTS_FUNC, OCCURRENCE_CANCEL_REDIRECT,\n USE_ATTENDEES, USE_MAILCHIMP, MAILCHIMP_KEY, MAILCHIMP_EVENTLIST,\n MAILCHIMP_MARKETINGLIST)\n\nfrom schedule.forms import EventForm, OccurrenceForm, AttendeeForm, ModifyAttendanceForm\nfrom schedule.models import *\nfrom schedule.periods import weekday_names\nfrom schedule.utils import check_event_permissions, coerce_date_dict\n\nfrom django.core.mail import EmailMultiAlternatives, send_mail\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.conf import settings\n\n\ndef _send_email(template, to, subject, context={}):\n\n context = Context(context)\n\n html_tmpl = get_template('%s.html' % template)\n html = html_tmpl.render(context)\n\n txt_tmpl = get_template('%s.txt' % template)\n txt = txt_tmpl.render(context)\n\n msg = EmailMultiAlternatives(\n subject,\n txt,\n settings.DEFAULT_FROM_EMAIL,\n [to] if isinstance(to, basestring) else to,\n )\n msg.attach_alternative(html, \"text/html\")\n msg.send()\n\n\n\ndef calendar(request, calendar_slug, template='schedule/calendar.html', extra_context=None):\n \"\"\"\n This view returns a calendar. This view should be used if you are\n interested in the meta data of a calendar, not if you want to display a\n calendar. It is suggested that you use calendar_by_periods if you would\n like to display a calendar.\n\n Context Variables:\n\n ``calendar``\n The Calendar object designated by the ``calendar_slug``.\n \"\"\"\n extra_context = extra_context or {}\n calendar = get_object_or_404(Calendar, slug=calendar_slug)\n context = {\"calendar\": calendar}\n context.update(extra_context)\n return render_to_response(template, context, context_instance=RequestContext(request))\n\ndef calendar_by_periods(request, calendar_slug=None, periods=None,\n template_name=\"schedule/calendar_by_period.html\", extra_context=None):\n \"\"\"\n This view is for getting a calendar, but also getting periods with that\n calendar. Which periods you get, is designated with the list periods. You\n can designate which date you the periods to be initialized to by passing\n a date in request.GET. See the template tag ``query_string_for_date``\n\n Context Variables\n\n ``date``\n This was the date that was generated from the query string.\n\n ``periods``\n this is a dictionary that returns the periods from the list you passed\n in. If you passed in Month and Day, then your dictionary would look\n like this\n\n {\n 'month': <schedule.periods.Month object>\n 'day': <schedule.periods.Day object>\n }\n\n So in the template to access the Day period in the context you simply\n use ``periods.day``.\n\n ``calendar``\n This is the Calendar that is designated by the ``calendar_slug``.\n\n ``weekday_names``\n This is for convenience. 
It returns the local names of weekedays for\n internationalization.\n\n \"\"\"\n extra_context = extra_context or {}\n\n if calendar_slug:\n calendar = get_object_or_404(Calendar, slug=calendar_slug)\n event_list = GET_EVENTS_FUNC(request, calendar)\n else:\n calendar = None\n event_list = Event.objects.all()\n\n date = coerce_date_dict(request.GET)\n if date:\n try:\n date = datetime.datetime(**date)\n except ValueError:\n raise Http404\n else:\n date = datetime.datetime.now()\n period_objects = dict([(period.__name__.lower(), period(event_list, date)) for period in periods])\n context = {\n 'date': date,\n 'periods': period_objects,\n 'calendar': calendar,\n 'weekday_names': weekday_names,\n 'here':quote(request.get_full_path()),\n }\n context.update(extra_context)\n return render_to_response(template_name, context, context_instance=RequestContext(request),)\n\ndef event(request, event_id, template_name=\"schedule/event.html\", extra_context=None):\n \"\"\"\n This view is for showing an event. It is important to remember that an\n event is not an occurrence. Events define a set of reccurring occurrences.\n If you would like to display an occurrence (a single instance of a\n recurring event) use occurrence.\n\n Context Variables:\n\n event\n This is the event designated by the event_id\n\n back_url\n this is the url that referred to this view.\n \"\"\"\n extra_context = extra_context or {}\n event = get_object_or_404(Event, id=event_id)\n back_url = request.META.get('HTTP_REFERER', None)\n try:\n cal = event.calendar_set.get()\n except:\n cal = None\n context = {\n \"event\": event,\n \"back_url\" : back_url,\n }\n context.update(extra_context)\n return render_to_response(template_name, context, context_instance=RequestContext(request))\n\ndef occurrence(request, event_id,\n template_name=\"schedule/occurrence.html\", *args, **kwargs):\n \"\"\"\n This view is used to display an occurrence.\n\n Context Variables:\n\n ``event``\n the event that produces the occurrence\n\n ``occurrence``\n the occurrence to be displayed\n\n ``back_url``\n the url from which this request was refered\n \"\"\"\n extra_context = kwargs.get('extra_context', None) or {}\n event, occurrence = get_occurrence(event_id, *args, **kwargs)\n back_url = request.META.get('HTTP_REFERER', None)\n context = {\n 'event': event,\n 'occurrence': occurrence,\n 'back_url': back_url,\n }\n context.update(extra_context)\n\n if USE_ATTENDEES:\n form = AttendeeForm(request.POST or None, event=event, occurrence=occurrence, prefix='primary')\n AttendeeFormset = formset_factory(AttendeeForm, extra=3)\n formset = AttendeeFormset(request.POST or None, prefix=\"extra-guests\")\n mailing_list = request.POST.get('mailing_list', False)\n\n attendee_count = occurrence.attendee_set.filter(attending=True, wait_list=False).count()\n waitlist_count = occurrence.attendee_set.filter(attending=True, wait_list=True).count()\n\n #boolean for if the event is full or not.\n full = False\n if event.max_attendees > 0:\n full = (event.max_attendees <= attendee_count)\n\n attendee = None\n\n amount = int((event.rsvpcost or 0) * 100)\n\n if form.is_valid() and formset.is_valid():\n # Save Occurrence, in case it has not been persisted yet.\n occurrence.save()\n\n amount = int((event.rsvpcost or 0) * 100)\n\n for f in formset:\n if f.cleaned_data.get('name'):\n amount += int((event.rsvpcost or 0) * 100)\n\n primary_email = form.cleaned_data.get('email', None) or form.cleaned_data['stripeEmail']\n\n # Save Attendee\n attendee = Attendee(\n 
occurrence=occurrence,\n name=form.cleaned_data['name'],\n email=primary_email,\n phone=form.cleaned_data['phone'],\n )\n\n if full:\n attendee.wait_list = True\n\n # Save, create confirmation code\n attendee.save()\n\n for f in formset:\n # Save Attendee\n if f.cleaned_data.get('name'):\n a = Attendee(\n occurrence=occurrence,\n name=f.cleaned_data['name'],\n email=f.cleaned_data.get('email', None),\n phone=f.cleaned_data.get('phone', None),\n parent=attendee,\n confirmation_code=attendee.confirmation_code,\n )\n\n if full:\n a.wait_list = True\n\n # Save, create confirmation code\n a.save()\n\n if USE_MAILCHIMP:\n import mailchimp\n m = mailchimp.Mailchimp(MAILCHIMP_KEY)\n # Add the attendee to this event\n # Add any sub-attendees\n\n try:\n m.lists.subscribe(MAILCHIMP_EVENTLIST, {\n 'email': primary_email,\n 'merge_vars': {\n 'groupings': [\n {'name': '%s: %s' % (occurrence.event.title, occurrence.id) }\n ]\n }\n }, double_optin=False)\n except mailchimp.ListAlreadySubscribedError:\n pass\n except mailchimp.ValidationError:\n pass\n\n\n if mailing_list:\n try:\n m.lists.subscribe(MAILCHIMP_MARKETINGLIST, {\n 'email': primary_email,\n })\n except mailchimp.ListAlreadySubscribedError:\n pass\n except mailchimp.ValidationError:\n pass\n\n if (attendee.wait_list == False) and amount:\n import stripe\n # Charge Credit Card\n card = stripe.Token.retrieve(form.cleaned_data['stripeToken'])\n\n try:\n charge_obj = stripe.Charge.create(\n amount=amount,\n currency=\"usd\",\n card=card,\n description=\"RSVP %s, confirmation code %s\" % (occurrence.title, attendee.confirmation_code),\n metadata={\n \"id\": attendee.id,\n \"confirmation_code\": attendee.confirmation_code,\n },\n )\n attendee.stripe_transaction = charge_obj.id\n attendee.save()\n except stripe.CardError:\n messages.add_message(request, messages.WARNING,\n \"You've been added to the list, but there was a problem with your card and it was not processed. Please pay the full amount on the day of the event.\")\n attendee.payment_exception = True\n attendee.save()\n\n\n # Do some stuff? Email the individual a receipt? Show them their confirmation code.\n if attendee.wait_list:\n messages.add_message(request, messages.INFO, \"Thanks, you've been added to the wait list\")\n else:\n messages.add_message(request, messages.INFO, \"Your confirmation code is %s. 
More info will be emailed to you at %s\" % (attendee.confirmation_code, attendee.email))\n\n email_context = {\n \"event\": event,\n \"occurrence\": occurrence,\n \"amount\": int(amount / 100),\n \"attendee_count\": attendee_count,\n \"waitlist_count\": waitlist_count,\n \"full\": full,\n \"attendee\": attendee,\n }\n # Email organizers\n _send_email(\n \"schedule/organizers_email\",\n [m[1] for m in settings.MANAGERS],\n \"New Attendee: %s\" % occurrence.title,\n email_context\n )\n\n # Email attendee\n _send_email(\n \"schedule/attendee_email\",\n attendee.email,\n \"Confirmation: %s\" % occurrence.title,\n email_context\n )\n\n\n # Return Redirect\n return redirect(request.get_full_path())\n\n context.update({\n \"form\": form,\n \"formset\": formset,\n \"amount\": amount,\n \"attendee_count\": attendee_count,\n \"waitlist_count\": waitlist_count,\n \"full\": full,\n \"attendee\": attendee,\n \"stripe_public_key\": getattr(settings, 'STRIPE_API_KEY', ''),\n })\n\n\n return render_to_response(template_name, context, context_instance=RequestContext(request))\n\n\n@check_event_permissions\ndef edit_occurrence(request, event_id,\n template_name=\"schedule/edit_occurrence.html\", *args, **kwargs):\n extra_context = kwargs.get('extra_context', None) or {}\n event, occurrence = get_occurrence(event_id, *args, **kwargs)\n next = kwargs.get('next', None)\n form = OccurrenceForm(data=request.POST or None, instance=occurrence)\n if form.is_valid():\n occurrence = form.save(commit=False)\n occurrence.event = event\n occurrence.save()\n next = next or get_next_url(request, occurrence.get_absolute_url())\n return HttpResponseRedirect(next)\n next = next or get_next_url(request, occurrence.get_absolute_url())\n context = {\n 'form': form,\n 'occurrence': occurrence,\n 'next':next,\n }\n context.update(extra_context)\n return render_to_response(template_name, context, context_instance=RequestContext(request))\n\n\n@check_event_permissions\ndef cancel_occurrence(request, event_id,\n template_name='schedule/cancel_occurrence.html', *args, **kwargs):\n \"\"\"\n This view is used to cancel an occurrence. If it is called with a POST it\n will cancel the view. If it is called with a GET it will ask for\n conformation to cancel.\n \"\"\"\n extra_context = kwargs.get('extra_context', None) or {}\n event, occurrence = get_occurrence(event_id, *args, **kwargs)\n next = kwargs.get('next',None) or get_next_url(request, event.get_absolute_url())\n if request.method != \"POST\":\n context = {\n \"occurrence\": occurrence,\n \"next\":next,\n }\n context.update(extra_context)\n return render_to_response(template_name, context, context_instance=RequestContext(request))\n occurrence.cancel()\n return HttpResponseRedirect(next)\n\n\ndef get_occurrence(event_id, occurrence_id=None, year=None, month=None,\n day=None, hour=None, minute=None, second=None):\n \"\"\"\n Because occurrences don't have to be persisted, there must be two ways to\n retrieve them. both need an event, but if its persisted the occurrence can\n be retrieved with an id. If it is not persisted it takes a date to\n retrieve it. 
This function returns an event and occurrence regardless of\n which method is used.\n \"\"\"\n if(occurrence_id):\n occurrence = get_object_or_404(Occurrence, id=occurrence_id)\n event = occurrence.event\n elif(all((year, month, day, hour, minute, second))):\n event = get_object_or_404(Event, id=event_id)\n occurrence = event.get_occurrence(\n datetime.datetime(int(year), int(month), int(day), int(hour),\n int(minute), int(second)))\n if occurrence is None:\n raise Http404\n else:\n raise Http404\n return event, occurrence\n\n\n@check_event_permissions\ndef create_or_edit_event(request, calendar_slug, event_id=None, next=None,\n template_name='schedule/create_event.html', form_class = EventForm, extra_context=None):\n \"\"\"\n This function, if it receives a GET request or if given an invalid form in a\n POST request it will generate the following response\n\n Template:\n schedule/create_event.html\n\n Context Variables:\n\n form:\n an instance of EventForm\n\n calendar:\n a Calendar with id=calendar_id\n\n if this function gets a GET request with ``year``, ``month``, ``day``,\n ``hour``, ``minute``, and ``second`` it will auto fill the form, with\n the date specifed in the GET being the start and 30 minutes from that\n being the end.\n\n If this form receives an event_id it will edit the event with that id, if it\n recieves a calendar_id and it is creating a new event it will add that event\n to the calendar with the id calendar_id\n\n If it is given a valid form in a POST request it will redirect with one of\n three options, in this order\n\n # Try to find a 'next' GET variable\n # If the key word argument redirect is set\n # Lastly redirect to the event detail of the recently create event\n \"\"\"\n extra_context = extra_context or {}\n date = coerce_date_dict(request.GET)\n initial_data = None\n if date:\n try:\n start = datetime.datetime(**date)\n initial_data = {\n \"start\": start,\n \"end\": start + datetime.timedelta(minutes=30)\n }\n except TypeError:\n raise Http404\n except ValueError:\n raise Http404\n\n instance = None\n if event_id is not None:\n instance = get_object_or_404(Event, id=event_id)\n\n calendar = get_object_or_404(Calendar, slug=calendar_slug)\n\n form = form_class(data=request.POST or None, instance=instance,\n hour24=True, initial=initial_data)\n\n if form.is_valid():\n event = form.save(commit=False)\n if instance is None:\n event.creator = request.user\n event.calendar = calendar\n event.save()\n next = next or reverse('event', args=[event.id])\n next = get_next_url(request, next)\n return HttpResponseRedirect(next)\n\n next = get_next_url(request, next)\n context = {\n \"form\": form,\n \"calendar\": calendar,\n \"next\":next\n }\n context.update(extra_context)\n return render_to_response(template_name, context, context_instance=RequestContext(request))\n\n\n@check_event_permissions\ndef delete_event(request, event_id, next=None, login_required=True, extra_context=None):\n \"\"\"\n After the event is deleted there are three options for redirect, tried in\n this order:\n\n # Try to find a 'next' GET variable\n # If the key word argument redirect is set\n # Lastly redirect to the event detail of the recently create event\n \"\"\"\n extra_context = extra_context or {}\n event = get_object_or_404(Event, id=event_id)\n next = next or reverse('day_calendar', args=[event.calendar.slug])\n next = get_next_url(request, next)\n extra_context['next'] = next\n return delete_object(request,\n model = Event,\n object_id = event_id,\n post_delete_redirect = next,\n 
template_name = \"schedule/delete_event.html\",\n extra_context = extra_context,\n login_required = login_required\n )\n\n\ndef lookup_confirmation_code(request):\n try:\n attendee = Attendee.objects.get(parent__isnull=True, confirmation_code__iexact=request.POST.get('confirmation_code', '').strip())\n return redirect(reverse('modify_attendance', args=[attendee.confirmation_code]))\n except Attendee.DoesNotExist:\n messages.add_message(request, messages.WARNING, \"Could not find confirmation code. Please try again.\")\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n\ndef modify_attendance(request, confirmation_code):\n try:\n attendee = Attendee.objects.get(parent__isnull=True, confirmation_code=confirmation_code)\n form = ModifyAttendanceForm(request.POST or None, instance=attendee)\n AttendeeFormset = modelformset_factory(Attendee, form=ModifyAttendanceForm, extra=0)\n formset = AttendeeFormset(request.POST or None, queryset=attendee.attendee_set.all(), prefix=\"extra-guests\")\n\n if form.is_valid() and formset.is_valid():\n attendee = form.save()\n other_attendees = formset.save()\n\n # Email organizers\n _send_email(\n \"schedule/organizers_email_update\",\n [m[1] for m in settings.MANAGERS],\n \"Attendee Update: %s\" % attendee.occurrence.title,\n {\n \"attendee\": attendee,\n \"other_attendees\": other_attendees,\n \"occurrence\": attendee.occurrence,\n \"event\": attendee.occurrence.event,\n }\n )\n\n messages.add_message(request, messages.SUCCESS, \"Thank you.\")\n return redirect(reverse('modify_attendance', args=[attendee.confirmation_code]))\n\n return render_to_response(\n \"schedule/modify_attendance.html\",\n {\n \"attendee\": attendee,\n \"occurrence\": attendee.occurrence,\n \"event\": attendee.occurrence.event,\n \"form\": form,\n \"formset\": formset,\n },\n context_instance=RequestContext(request)\n )\n\n except Attendee.DoesNotExist:\n messages.add_message(request, messages.WARNING, \"Your confirmation code appears to be incorrect, please check it and try again.\")\n return redirect('/')\n\n\ndef check_next_url(next):\n \"\"\"\n Checks to make sure the next url is not redirecting to another page.\n Basically it is a minimal security check.\n \"\"\"\n if not next or '://' in next:\n return None\n return next\n\ndef get_next_url(request, default):\n next = default\n if OCCURRENCE_CANCEL_REDIRECT:\n next = OCCURRENCE_CANCEL_REDIRECT\n if 'next' in request.REQUEST and check_next_url(request.REQUEST['next']) is not None:\n next = request.REQUEST['next']\n return next\n","sub_path":"schedule/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"357053508","text":"class Solution:\n def calculate(self, s: str) -> int:\n def compute(num1, num2, op):\n if op == \"-\":\n return num2 - num1\n if op == \"+\":\n return num1 + num2\n if op == \"*\":\n return num1 * num2\n if op == \"/\":\n return num2 // num1\n\n def precede(op1, op2):\n if op1 in {\"*\", \"/\"} and op2 in {\"+\", \"-\"}:\n return True\n return False\n\n nums, ops = [], []\n i = 0\n while i < len(s):\n if s[i].isdigit():\n num = 0\n while i < len(s) and s[i].isdigit():\n num = num * 10 + int(s[i])\n i += 1\n nums.append(num)\n else:\n if s[i] in {\"+\", \"-\", \"*\", \"/\"}:\n while ops and not precede(s[i], ops[-1]):\n nums.append(compute(nums.pop(), nums.pop(), ops.pop()))\n ops.append(s[i])\n i += 1\n while ops:\n nums.append(compute(nums.pop(), nums.pop(), ops.pop()))\n return 
nums[-1]\n","sub_path":"MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/227.py","file_name":"227.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"34363267","text":"def main():\r\n print('Digite o horario no formato HH:MM')\r\n\r\n inicio = input('Digite o horario de inicio: ')\r\n fim = input('Digite o horario de termino: ')\r\n\r\n hora1 = int(inicio.split(':')[0])\r\n minuto1 = int(inicio.split(':')[1])\r\n hora2 = int(fim.split(':')[0])\r\n minuto2 = int(fim.split(':')[1])\r\n\r\n if hora1 >= 24 or hora2 >= 24 or minuto1 >= 60 or minuto2 >= 60:\r\n print('Horario invalido')\r\n else:\r\n minutos1 = hora1 * 60 + minuto1\r\n minutos2 = hora2 * 60 + minuto2\r\n\r\n total = abs(minutos1 - minutos2)\r\n\r\n thoras = total // 60\r\n tminutos = total % 60\r\n\r\n print('O jogo teve uma duração de: %d horas e %d minutos' % (thoras, tminutos))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Python/Fabio lista 2/Fabio_02_Q22.py","file_name":"Fabio_02_Q22.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209525901","text":"import numpy as np\nimport cv2\nimport math\n\nfor k in range(200):\n print(\"Processing frame \",k)\n img = cv2.imread('thresh/TH_'+str(k)+'.png')\n\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n _,contours, hierarchy = cv2.findContours(imgray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n print(\"no of contours\",len(contours))\n x = []\n y = []\n r = []\n\n for contour in contours:\n # cv2.drawContours(img, contour, -1, (0, 255, 0), 1)\n (X,Y),radius = cv2.minEnclosingCircle(contour)\n center = (int(X),int(Y))\n # condition to not include the particles near plume\n if ((center[0]-430)**2+(center[1]-618)**2)**0.5 > 40 and radius>1:\n x.append(center[0])\n y.append(center[1])\n r.append(int(radius))\n print(len(r))\n\n repeat = []\n # find interior circles\n # https://doubleroot.in/lessons/circle/relative-postion-of-two-circles/\n for i in range(len(r)):\n for j in range(len(r)):\n if i!=j:\n c1c2 = ((x[i]-x[j])**2 + (y[i]-y[j])**2)**0.5\n r1r2 = abs(r[i]-r[j])\n print(k,c1c2,r1r2)\n if c1c2 <= r1r2 :\n if r[i]>r[j]:\n repeat.append(j)\n # else:\n # repeat.append(i)\n\n # remove interior circles\n for ele in sorted(list(set(repeat)), reverse = True):\n del r[ele]\n del x[ele]\n del y[ele]\n\n # delete the largest circle ie. 
plume region\n maxpos = r.index(max(r))\n r.pop(maxpos)\n x.pop(maxpos)\n y.pop(maxpos)\n\n # # remove ones\n # ones = [i for i,val in enumerate(r) if val==1]\n # for ele in sorted(list(set(ones)), reverse = True):\n # del r[ele]\n # del x[ele]\n # del y[ele]\n\n print(\"No of particles\",len(r))\n # draw Circles\n for i in range(len(r)):\n cv2.circle(img,(x[i],y[i]),int(r[i]),(0,255,0),1)\n cv2.circle(img, (430,618), 40, (0, 0, 255), 1)\n print(\"radius\", r)\n print(\"x coordinate\",x)\n print(\"y coordinate\",y)\n cv2.imwrite(\"blob/BB_\"+str(k)+\".png\",img)\n","sub_path":"IR_track/blob.py","file_name":"blob.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"426031987","text":"print('starting script')\nfrom music21 import *\nprint('done import')\n\ndef processScore(s):\n #extract soprano (melody) and alto (harmony) parts\n #recurse method works to recursively search through\n #different streams\n p0 = s.parts[0].recurse().getElementsByClass(note.Note).stream()\n p1 = s.parts[1].recurse().getElementsByClass(note.Note).stream()\n\n #new score from extracted parts\n s2 = stream.Stream()\n s2.insert(p0)\n s2.insert(p1)\n\n #turn into chords\n c = s2.chordify().getElementsByClass(chord.Chord)\n\n #counter\n i = 0\n #input vector\n x = []\n #target vector\n t = []\n f = open('harmonyData.csv', 'a')\n #for each chord\n for ch in c.notes:\n #every 8 notes\n if (i%8 == 0):\n #if have a complete phrase\n if (len(x) == 8):\n #print(','.join(x) + ',' + ','.join(t)+'\\n')\n f.write(','.join(x) + ',' + ','.join(t)+'\\n')\n #clear vectors\n x = []\n t = []\n i = i+1\n #if have a chord (no rest)\n if (len(ch) == 2):\n p = ch.pitches[0]\n p.octave = 4\n x.append(\"{0}\".format(p.midi - 60))\n p = ch.pitches[1]\n p.octave = 4\n t.append(\"{0}\".format(p.midi - 60))\n f.close()\n\ni = 0\nf = open('harmonyData.csv', 'w')\nf.write('x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7\\n')\nf.close()\nfor work in corpus.chorales.Iterator():\n i = i+1\n print('new work {0}'.format(i))\n processScore(work)\n","sub_path":"data/dataProcessing.py","file_name":"dataProcessing.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"586724132","text":"import time\nimport collections\n\n# Python 2+3 basestring\ntry:\n basestring\nexcept NameError:\n basestring = str\n\ndef is_list_of_lists(l):\n for item in l:\n if not isinstance(item, basestring) and isinstance(item, (list, tuple)):\n return True\n if isinstance(item, dict):\n return True\n \n return False\n\ndef flatten(item, dictionary=None, name=None, ignore_blanks=False):\n \n if dictionary is None:\n dictionary = collections.OrderedDict()\n \n if name is None:\n name = \"\"\n \n iterative_name = name\n \n if len(iterative_name) > 0:\n iterative_name = name + \".\"\n \n # Handle dictionaries\n if isinstance(item, dict):\n for key in item:\n flatten(item[key], dictionary, iterative_name + key, ignore_blanks=ignore_blanks)\n \n # Handle blanks\n elif ignore_blanks and item in [None, '']:\n pass # Ignore this one\n \n # Handle arrays where the items in the arrays are individual items like strings and numbers (not more lists)\n elif not isinstance(item, basestring) and isinstance(item, (list, tuple)) and not is_list_of_lists(item):\n if len(item) == 0 and ignore_blanks:\n pass # Ignore empty arrays\n else:\n \n # We need to convert the values to strings because 
Intersplunk.py::outputResults() assumes the values are strings and tries to do string replacements on the values.\n converted_list = []\n \n for i in item:\n converted_list.append(str(i))\n \n # Store the stringifed list\n dictionary[name] = converted_list\n \n elif not isinstance(item, basestring) and isinstance(item, (list, tuple)):\n \n index = 0\n \n for a in item:\n flatten(a, dictionary, iterative_name + str(index), ignore_blanks=ignore_blanks)\n \n index = index + 1\n \n # Handle plain values\n elif item in [True, False, None]: \n dictionary[name] = item\n \n # Handle date\n elif item.__class__.__name__ == \"struct_time\":\n dictionary[name] = time.strftime('%Y-%m-%dT%H:%M:%SZ', item)\n \n # Handle string values\n else:\n dictionary[name] = str(item)\n \n return dictionary\n\ndef dict_to_table(dictionary, attribute_column_name=\"attribute\", value_column_name=\"value\"):\n results_table = []\n \n for k, v in dictionary.items():\n d = collections.OrderedDict()\n \n d[attribute_column_name] = k\n d[value_column_name] = v\n \n results_table.append(d)\n \n return results_table \n\ndef flatten_to_table(item, attribute_column_name=\"attribute\", value_column_name=\"value\", dictionary=None, name=None, ignore_blanks=False):\n \n results = flatten(item, dictionary=dictionary, name=name, ignore_blanks=ignore_blanks)\n return dict_to_table(results, attribute_column_name=attribute_column_name, value_column_name=value_column_name)\n","sub_path":"src/bin/lookup_editor/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"143958076","text":"from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments\r\nfrom torch.utils.data import DataLoader\r\nfrom datasets import load_from_disk\r\nfrom tqdm import tqdm\r\nfrom load_data import *\r\nimport pandas as pd\r\nimport torch\r\nimport pickle as pickle\r\nimport numpy as np\r\nimport argparse\r\n\r\ndef inference(model, tokenized_sent, device):\r\n dataloader = DataLoader(tokenized_sent, batch_size=1000, shuffle=False)\r\n model.eval()\r\n output_pred = []\r\n\r\n for data in tqdm(dataloader):\r\n with torch.no_grad():\r\n outputs = model(\r\n input_ids=data['input_ids'].to(device),\r\n attention_mask=data['attention_mask'].to(device),\r\n # token_type_ids=data['token_type_ids'].to(device)\r\n )\r\n logits = outputs[0]\r\n logits = logits.detach().cpu().numpy()\r\n result = np.argmax(logits, axis=-1)\r\n output_pred += list(result)\r\n \r\n return output_pred\r\n\r\ndef load_test_dataset(dataset_dir, tokenizer):\r\n # test_dataset = load_dataset(dataset_dir)\r\n # test_label = [0] * len(test_dataset)\r\n # print(test_dataset)\r\n\r\n test_dataset = load_from_disk(dataset_dir)['question']\r\n test_label = [0] * len(test_dataset)\r\n \r\n # tokenize dataset\r\n tokenized_test = tokenized_dataset(test_dataset, tokenizer)\r\n return tokenized_test, test_label\r\n\r\ndef main(args):\r\n \"\"\"\r\n 주어진 dataset tsv 파일과 같은 형태일 경우 inference 가능한 코드입니다.\r\n \"\"\"\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n\r\n # load tokenizer\r\n # bert-base-multilingual-cased\r\n # monologg/koelectra-base-v3-discriminator\r\n # xlm-roberta-large\r\n TOK_NAME = args.model_name\r\n tokenizer = AutoTokenizer.from_pretrained(TOK_NAME)\r\n\r\n # load model\r\n MODEL_NAME = args.model_dir\r\n model = AutoModelForSequenceClassification.from_pretrained(args.model_dir)\r\n 
model.to(device)\r\n\r\n # load test datset\r\n # file_name = \"train_korquad1_dummy_sudo_bm25_6or7\"\r\n test_dataset_dir = f\"/opt/ml/code/data/train_dataset/{args.file_name}\"\r\n # test_dataset_dir = f\"/opt/ml/code2/data/sudo/{args.file_name}\"\r\n test_dataset, test_label = load_test_dataset(test_dataset_dir, tokenizer)\r\n test_dataset = RE_Dataset(test_dataset ,test_label)\r\n\r\n # predict answer\r\n pred_answer = inference(model, test_dataset, device)\r\n \r\n # make csv file with predicted answer\r\n label_decoder = {0: '[WHO]', 1: '[WHEN]', 2: '[WHERE]', 3: '[WHAT]', 4: '[HOW]', 5: '[WHY]', 6: '[QUANTITY]', 7: '[CITE]'}\r\n output = pd.DataFrame(pred_answer, columns=['tag'])\r\n output['tag'] = output['tag'].apply(lambda x: label_decoder[x])\r\n output.to_csv(f\"/opt/ml/code2/prediction/{'train67'}_tag.csv\", index=False)\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument('--model_dir', type=str, default=\"./results/checkpoint-600\")\r\n parser.add_argument('--seed', type=int, default=2021, help='seed (default = 2021)')\r\n parser.add_argument('--file_name', type=str, default='train', help='file_name (default = validation)')\r\n parser.add_argument('--model_name', type=str, default='xlm-roberta-large', help='model_name (default = xlm-roberta-large)')\r\n args = parser.parse_args()\r\n print(args)\r\n \r\n main(args)\r\n \r\n","sub_path":"엄희준/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"398220663","text":"#! /usr/bin/env python\n\n# Copyright (C) 2008 Cournapeau David <cournape@gmail.com>\n# Copyright (C) 2009 Nathaniel Smith <njs@pobox.com>\n\ndescr = \"\"\"Sparse matrix tools.\n\nThis is a home for sparse matrix code in Python that plays well with\nscipy.sparse, but that is somehow unsuitable for inclusion in scipy\nproper. Usually this will be because it is released under the GPL.\n\nSo far we have a wrapper for the CHOLMOD library for sparse cholesky\ndecomposition. 
Further contributions are welcome!\n\"\"\"\n\nimport os\nimport sys\n\nDISTNAME = 'scikits.sparse'\nDESCRIPTION = 'Scikits sparse matrix package'\nLONG_DESCRIPTION = descr\nMAINTAINER = 'Nathaniel Smith'\nMAINTAINER_EMAIL = 'njs@pobox.com'\nURL = 'http://code.google.com/p/scikits-sparse/'\nLICENSE = 'GPL'\nDOWNLOAD_URL = \"http://code.google.com/p/scikits-sparse/downloads/list\"\nVERSION = '0.1'\n\nfrom setuptools import setup, find_packages, Extension\nfrom Cython.Distutils import build_ext\n\nif __name__ == \"__main__\":\n setup(install_requires = ['numpy', 'scipy'],\n namespace_packages = ['scikits'],\n packages = find_packages(),\n package_data = {\n \"\": [\"*.mtx.gz\"],\n },\n #test_suite=\"tester\", # for python setup.py test\n zip_safe = True,\n name = DISTNAME,\n version = VERSION,\n maintainer = MAINTAINER,\n maintainer_email = MAINTAINER_EMAIL,\n description = DESCRIPTION,\n license = LICENSE,\n url = URL,\n download_url = DOWNLOAD_URL,\n long_description = LONG_DESCRIPTION,\n classifiers =\n [ 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Topic :: Scientific/Engineering'],\n cmdclass = {\"build_ext\": build_ext},\n ext_modules = [\n Extension(\"scikits.sparse.cholmod\",\n [\"scikits/sparse/cholmod.pyx\"],\n libraries=[\"cholmod\"],\n ),\n ],\n )\n","sub_path":"pypi_install_script/scikits.sparse-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"590718890","text":"import os\nimport requests\n\nfrom maya import cmds\n\nfrom openpype.pipeline import legacy_io, PublishXmlValidationError\nfrom openpype.settings import get_project_settings\n\nimport pyblish.api\n\n\nclass MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin):\n \"\"\"Submit Maya scene to perform a local publish in Deadline.\n\n Publishing in Deadline can be helpful for scenes that publish very slowly.\n This way it can process in the background on another machine without the\n Artist having to wait for the publish to finish on their local machine.\n\n Submission is done through the Deadline Web Service. DL then triggers\n `openpype/scripts/remote_publish.py`.\n\n Each publishable instance creates its own full publish job.\n\n Different from `ProcessSubmittedJobOnFarm`, which creates a publish job\n based on a metadata json containing context and instance data of\n rendered files.\n \"\"\"\n\n label = \"Submit Scene to Deadline\"\n order = pyblish.api.IntegratorOrder\n hosts = [\"maya\"]\n families = [\"publish.farm\"]\n targets = [\"local\"]\n\n def process(self, instance):\n project_name = instance.context.data[\"projectName\"]\n # TODO settings can be received from 'context.data[\"project_settings\"]'\n settings = get_project_settings(project_name)\n # use setting for publish job on farm, no reason to have it separately\n deadline_publish_job_sett = (settings[\"deadline\"]\n [\"publish\"]\n [\"ProcessSubmittedJobOnFarm\"])\n\n # Ensure no errors so far\n if not (all(result[\"success\"]\n for result in instance.context.data[\"results\"])):\n raise PublishXmlValidationError(\"Publish process has errors\")\n\n if not instance.data[\"publish\"]:\n self.log.warning(\"No active instances found. 
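One pitfall worth remembering around module-level metadata constants like `MAINTAINER`: a stray trailing comma silently turns the string into a one-element tuple, which `setup()` then rejects or mangles. A quick illustration:

```python
MAINTAINER = 'Nathaniel Smith'    # str
ACCIDENTAL = 'Nathaniel Smith',   # tuple: ('Nathaniel Smith',)

print(type(MAINTAINER).__name__)  # str
print(type(ACCIDENTAL).__name__)  # tuple
```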
\"\n \"Skipping submission..\")\n return\n\n scene = instance.context.data[\"currentFile\"]\n scenename = os.path.basename(scene)\n\n job_name = \"{scene} [PUBLISH]\".format(scene=scenename)\n batch_name = \"{code} - {scene}\".format(code=project_name,\n scene=scenename)\n\n # Generate the payload for Deadline submission\n payload = {\n \"JobInfo\": {\n \"Plugin\": \"MayaBatch\",\n \"BatchName\": batch_name,\n \"Name\": job_name,\n \"UserName\": instance.context.data[\"user\"],\n \"Comment\": instance.context.data.get(\"comment\", \"\"),\n # \"InitialStatus\": state\n \"Department\": deadline_publish_job_sett[\"deadline_department\"],\n \"ChunkSize\": deadline_publish_job_sett[\"deadline_chunk_size\"],\n \"Priority\": deadline_publish_job_sett[\"deadline_priority\"],\n \"Group\": deadline_publish_job_sett[\"deadline_group\"],\n \"Pool\": deadline_publish_job_sett[\"deadline_pool\"],\n },\n \"PluginInfo\": {\n\n \"Build\": None, # Don't force build\n \"StrictErrorChecking\": True,\n \"ScriptJob\": True,\n\n # Inputs\n \"SceneFile\": scene,\n \"ScriptFilename\": \"{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py\", # noqa\n\n # Mandatory for Deadline\n \"Version\": cmds.about(version=True),\n\n # Resolve relative references\n \"ProjectPath\": cmds.workspace(query=True,\n rootDirectory=True),\n\n },\n\n # Mandatory for Deadline, may be empty\n \"AuxFiles\": []\n }\n\n # Include critical environment variables with submission + api.Session\n keys = [\n \"FTRACK_API_USER\",\n \"FTRACK_API_KEY\",\n \"FTRACK_SERVER\",\n \"OPENPYPE_VERSION\"\n ]\n environment = dict({key: os.environ[key] for key in keys\n if key in os.environ}, **legacy_io.Session)\n\n # TODO replace legacy_io with context.data\n environment[\"AVALON_PROJECT\"] = project_name\n environment[\"AVALON_ASSET\"] = legacy_io.Session[\"AVALON_ASSET\"]\n environment[\"AVALON_TASK\"] = legacy_io.Session[\"AVALON_TASK\"]\n environment[\"AVALON_APP_NAME\"] = os.environ.get(\"AVALON_APP_NAME\")\n environment[\"OPENPYPE_LOG_NO_COLORS\"] = \"1\"\n environment[\"OPENPYPE_REMOTE_JOB\"] = \"1\"\n environment[\"OPENPYPE_USERNAME\"] = instance.context.data[\"user\"]\n environment[\"OPENPYPE_PUBLISH_SUBSET\"] = instance.data[\"subset\"]\n environment[\"OPENPYPE_REMOTE_PUBLISH\"] = \"1\"\n\n payload[\"JobInfo\"].update({\n \"EnvironmentKeyValue%d\" % index: \"{key}={value}\".format(\n key=key,\n value=environment[key]\n ) for index, key in enumerate(environment)\n })\n\n self.log.info(\"Submitting Deadline job ...\")\n deadline_url = instance.context.data[\"defaultDeadline\"]\n # if custom one is set in instance, use that\n if instance.data.get(\"deadlineUrl\"):\n deadline_url = instance.data.get(\"deadlineUrl\")\n assert deadline_url, \"Requires Deadline Webservice URL\"\n url = \"{}/api/jobs\".format(deadline_url)\n response = requests.post(url, json=payload, timeout=10)\n if not response.ok:\n raise Exception(response.text)\n","sub_path":"openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py","file_name":"submit_maya_remote_publish_deadline.py","file_ext":"py","file_size_in_byte":5443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637467183","text":"import numpy as np\nimport pandas as pd\nfrom rich.progress import track\nfrom vedo import shapes\n\ntry:\n from allensdk.api.queries.mouse_connectivity_api import (\n MouseConnectivityApi,\n )\n\n allen_sdk_installed = True\nexcept ModuleNotFoundError:\n allen_sdk_installed = False\n\nimport brainrender\nfrom 
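The `EnvironmentKeyValue%d` expansion used in the payload above can be exercised on its own; a toy environment dict makes the resulting `JobInfo` keys obvious:

```python
environment = {"AVALON_PROJECT": "demo", "OPENPYPE_REMOTE_PUBLISH": "1"}

job_info = {
    "EnvironmentKeyValue%d" % index: "{key}={value}".format(
        key=key, value=environment[key]
    )
    for index, key in enumerate(environment)
}
print(job_info)
# {'EnvironmentKeyValue0': 'AVALON_PROJECT=demo',
#  'EnvironmentKeyValue1': 'OPENPYPE_REMOTE_PUBLISH=1'}
```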
brainrender.ABA.aba_utils import (\n parse_streamline,\n download_streamlines,\n experiments_source_search,\n parse_tractography_colors,\n)\nfrom brainrender.Utils.data_manipulation import is_any_item_in_list\n\n\nclass ABA:\n \"\"\"\n This class augments the functionality of\n BrainGlobeAtlas with methods specific to the Allen\n Mouse Brain atlas and necessary to populate scenes in \n brainrender. These include stuff like fetching streamlines\n and neuronal morphology data. \n \"\"\"\n\n atlas_name = \"ABA\"\n\n excluded_regions = [\"fiber tracts\"]\n\n # Used for streamlines\n base_url = \"https://neuroinformatics.nl/HBP/allen-connectivity-viewer/json/streamlines_NNN.json.gz\"\n\n def __init__(self):\n # mouse connectivity API [used for tractography]\n if allen_sdk_installed:\n self.mca = MouseConnectivityApi()\n else:\n self.mca = None\n\n # ------------------------- Scene population methods ------------------------- #\n\n def get_tractography(\n self,\n tractography,\n color=None,\n color_by=\"manual\",\n others_alpha=1,\n verbose=True,\n VIP_regions=[],\n VIP_color=None,\n others_color=\"white\",\n include_all_inj_regions=False,\n display_injection_volume=True,\n ):\n \"\"\"\n Renders tractography data and adds it to the scene. A subset of tractography data can receive special treatment using the with VIP regions argument:\n if the injection site for the tractography data is in a VIP regions, this is colored differently.\n\n :param tractography: list of dictionaries with tractography data\n :param color: color of rendered tractography data\n\n :param color_by: str, specifies which criteria to use to color the tractography (Default value = \"manual\")\n options:\n - manual, define color of each tract\n - target_region, color by the injected region\n\n :param others_alpha: float (Default value = 1)\n :param verbose: bool (Default value = True)\n :param VIP_regions: list of brain regions with VIP treatement (Default value = [])\n :param VIP_color: str, color to use for VIP data (Default value = None)\n :param others_color: str, color for not VIP data (Default value = \"white\")\n :param include_all_inj_regions: bool (Default value = False)\n :param display_injection_volume: float, if True a spehere is added to display the injection coordinates and volume (Default value = True)\n \"\"\"\n\n # check argument\n if not isinstance(tractography, list):\n if isinstance(tractography, dict):\n tractography = [tractography]\n else:\n raise ValueError(\n \"the 'tractography' variable passed must be a list of dictionaries\"\n )\n else:\n if not isinstance(tractography[0], dict):\n raise ValueError(\n \"the 'tractography' variable passed must be a list of dictionaries\"\n )\n\n if not isinstance(VIP_regions, list):\n raise ValueError(\"VIP_regions should be a list of acronyms\")\n\n COLORS = parse_tractography_colors(\n tractography,\n include_all_inj_regions,\n color=color,\n color_by=color_by,\n VIP_regions=VIP_regions,\n VIP_color=VIP_color,\n others_color=others_color,\n )\n COLORS = [\n c\n if c is not None\n else self._get_from_structure(t[\"structure-abbrev\"], \"rgb_triplet\")\n for c, t in zip(COLORS, tractography)\n ]\n\n # add actors to represent tractography data\n actors, structures_acronyms = [], []\n if brainrender.VERBOSE and verbose:\n print(\"Structures found to be projecting to target: \")\n\n # Loop over injection experiments\n for i, (t, color) in enumerate(zip(tractography, COLORS)):\n # Use allen metadata\n if include_all_inj_regions:\n inj_structures = [\n x[\"abbreviation\"] 
for x in t[\"injection-structures\"]\n ]\n else:\n inj_structures = [\n self.get_structure_ancestors(t[\"structure-abbrev\"])[-1]\n ]\n\n if (\n brainrender.VERBOSE\n and verbose\n and not is_any_item_in_list(\n inj_structures, structures_acronyms\n )\n ):\n print(\" -- ({})\".format(t[\"structure-abbrev\"]))\n structures_acronyms.append(t[\"structure-abbrev\"])\n\n # get tractography points and represent as list\n if color_by == \"target_region\" and not is_any_item_in_list(\n inj_structures, VIP_regions\n ):\n alpha = others_alpha\n else:\n alpha = brainrender.TRACTO_ALPHA\n\n if alpha == 0:\n continue # skip transparent ones\n\n # represent injection site as sphere\n if display_injection_volume:\n actors.append(\n shapes.Sphere(\n pos=t[\"injection-coordinates\"],\n c=color,\n r=brainrender.INJECTION_VOLUME_SIZE\n * t[\"injection-volume\"],\n alpha=brainrender.TRACTO_ALPHA,\n )\n )\n actors[-1].name = (\n str(t[\"injection-coordinates\"]) + \"_injection\"\n )\n\n points = [p[\"coord\"] for p in t[\"path\"]]\n actors.append(\n shapes.Tube(\n points,\n r=brainrender.TRACTO_RADIUS,\n c=color,\n alpha=alpha,\n res=brainrender.TRACTO_RES,\n )\n )\n actors[-1].name = str(t[\"injection-coordinates\"]) + \"_tractography\"\n\n return actors\n\n def get_streamlines(self, sl_file, color=None, *args, **kwargs):\n \"\"\"\n Render streamline data downloaded from https://neuroinformatics.nl/HBP/allen-connectivity-viewer/streamline-downloader.html\n\n :param sl_file: path to JSON file with streamliens data [or list of files]\n :param color: either a single color or a list of colors to color each streamline individually\n :param *args:\n :param **kwargs:\n\n \"\"\"\n if not isinstance(sl_file, (list, tuple)):\n sl_file = [sl_file]\n\n # get a list of colors of length len(sl_file)\n if color is not None:\n if isinstance(color, (list, tuple)):\n if isinstance(color[0], (float, int)): # it's an rgb color\n color = [color for i in sl_file]\n elif len(color) != len(sl_file):\n raise ValueError(\n \"Wrong number of colors, should be one per streamline or 1\"\n )\n else:\n color = [color for i in sl_file]\n else:\n color = [\"salmon\" for i in sl_file]\n\n actors = []\n if isinstance(\n sl_file[0], (str, pd.DataFrame)\n ): # we have a list of files to add\n for slf, col in track(\n zip(sl_file, color),\n total=len(sl_file),\n description=\"parsing streamlines\",\n ):\n if isinstance(slf, str):\n streamlines = parse_streamline(\n color=col, filepath=slf, *args, **kwargs\n )\n else:\n streamlines = parse_streamline(\n color=col, data=slf, *args, **kwargs\n )\n\n actors.extend(streamlines)\n else:\n raise ValueError(\n \"unrecognized argument sl_file: {}\".format(sl_file)\n )\n\n return actors\n\n # ----------------------------------- Utils ---------------------------------- #\n def get_projection_tracts_to_target(self, p0=None, **kwargs):\n \"\"\"\n Gets tractography data for all experiments whose projections reach the brain region or location of iterest.\n \n :param p0: list of 3 floats with AP-DV-ML coordinates of point to be used as seed (Default value = None)\n :param **kwargs: \n \"\"\"\n\n if self.mca is None:\n raise ModuleNotFoundError(\n 'You need allen sdk to use this functino: \"pip install allensdk\"'\n )\n\n # check args\n if p0 is None:\n raise ValueError(\"Please pass coordinates\")\n elif isinstance(p0, np.ndarray):\n p0 = list(p0)\n elif not isinstance(p0, (list, tuple)):\n raise ValueError(\"Invalid argument passed (p0): {}\".format(p0))\n\n p0 = [np.int(p) for p in p0]\n tract = 
self.mca.experiment_spatial_search(seed_point=p0, **kwargs)\n\n if isinstance(tract, str):\n raise ValueError(\n \"Something went wrong with query, query error message:\\n{}\".format(\n tract\n )\n )\n else:\n return tract\n\n def download_streamlines_for_region(self, region, *args, **kwargs):\n \"\"\"\n Using the Allen Mouse Connectivity data and corresponding API, this function finds experiments whose injections\n were targeted to the region of interest and downloads the corresponding streamlines data. By default, experiments\n are selected for only WT mice and only when the region was the primary injection target. Look at \"ABA.experiments_source_search\"\n to see how to change this behaviour.\n\n :param region: str with region to use for research\n :param *args: arguments for ABA.experiments_source_search\n :param **kwargs: arguments for ABA.experiments_source_search\n\n \"\"\"\n if self.mca is None:\n raise ModuleNotFoundError(\n 'You need allen sdk to use this function: \"pip install allensdk\"'\n )\n\n # Get experiments whose injections were targeted to the region\n region_experiments = experiments_source_search(\n self.mca, region, *args, **kwargs\n )\n try:\n return download_streamlines(\n region_experiments.id.values,\n streamlines_folder=self.streamlines_cache,\n )\n except Exception:\n print(f\"Could not download streamlines for region {region}\")\n return [], [] # <- there were no experiments in the target region\n\n def download_streamlines_to_region(\n self, p0, *args, mouse_line=\"wt\", **kwargs\n ):\n \"\"\"\n Using the Allen Mouse Connectivity data and corresponding API, this function finds injection experiments\n which resulted in fluorescence being found in the target point, then downloads the streamlines data.\n\n :param p0: list of floats with AP-DV-ML coordinates\n :param mouse_line: str with name of the mouse line to use (Default value = \"wt\")\n :param *args: \n :param **kwargs: \n\n \"\"\"\n experiments = pd.DataFrame(self.get_projection_tracts_to_target(p0=p0))\n if mouse_line == \"wt\":\n experiments = experiments.loc[experiments[\"transgenic-line\"] == \"\"]\n else:\n if not isinstance(mouse_line, list):\n experiments = experiments.loc[\n experiments[\"transgenic-line\"] == mouse_line\n ]\n else:\n raise NotImplementedError(\n \"oops, you've found a bug! 
For now you can only pass one mouse line at the time, sorry.\"\n )\n return download_streamlines(\n experiments.id.values, streamlines_folder=self.streamlines_cache\n )\n","sub_path":"brainrender/atlases/aba.py","file_name":"aba.py","file_ext":"py","file_size_in_byte":12378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"439266899","text":"\"\"\"\nknn算法进行手写数字分类\n\"\"\"\nimport numpy as np\nfrom os import listdir\nimport operator\n\ndef img2vector(filename):\n returnVect = np.zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32*i+j] = int(lineStr[j])\n return returnVect\n\ndef classify0(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0] # 4\n diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet # 待分类项变为dataset中的维数,并计算差值矩阵\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1) # 4×2的矩阵将列与列相加\n distances = sqDistances ** 0.5 #算得距离 d = ((xA0-xB0) ** 2 + (xA1 - xB1) ** 2) ** 0.5\n sortedDisIndicies = distances.argsort() #获得数值从小到大排列的标签\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDisIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n # sorted(iterable, key=None, reverse=False)\n sortedClassCount = sorted(classCount.items(), # python3中为items,2中为iteritems\n key=operator.itemgetter(1), reverse=True) #设置key按照字典中第二项排列\n return sortedClassCount[0][0]\n\ndef handwritingClassTest():\n hwLabels = []\n # 获取目录下文件\n trainingFileList = listdir('trainingDigits')\n m = len(trainingFileList)\n trainingMat = np.zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0]) #文件名第一个数字为其标签\n hwLabels.append(classNumStr)\n trainingMat[i,:] = img2vector('trainingDigits/{}'\\\n .format(fileNameStr))\n testFileList = listdir('testDigits')\n errCount = 0.0\n mTest = len(testFileList)\n for i in range(mTest):\n fileNameStr = testFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n vectorUnderTest = img2vector('testDigits/{}'\\\n .format(fileNameStr))\n classifierResult = classify0(vectorUnderTest, trainingMat,\n hwLabels, 3)\n print('the classifier came back with: {}, the real answer is: {}'\\\n .format(classifierResult, classNumStr))\n if classifierResult != classNumStr:\n errCount += 1\n print('\\nthe total number of errors is: {}'.format(errCount))\n print('\\nthe total error rate is: {}'.format(errCount/float(mTest)))\n\nhandwritingClassTest()\n","sub_path":"ML/knn-3.py","file_name":"knn-3.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"617796189","text":"# Import needed libraries\nfrom fireTS.models import DirectAutoRegressor\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score, r2_score\n\n# Import dataset\ndata = pd.read_excel('../Data/data.xlsx')\n\n# y = data.loc[:, ['Td','RH'] ]\n# X = data.loc[:, ['HT','AC','To','RD'] ]\n\ny = data.loc[:, ['Td','RH'] ]\nX = data.loc[:, ['HT','AC'] ]\n\n# Split dataset\n# Data test\nX_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.05, shuffle=False, random_state=15)\n# Data train and Data validation\nX_train, X_val, y_train, y_val = 
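`classify0()` from the kNN script above can be sanity-checked with a tiny hand-made dataset before pointing it at the digit images:

```python
import numpy as np

group = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']

# [0.1, 0.1] sits next to the two 'B' points
print(classify0([0.1, 0.1], group, labels, 3))  # 'B'
```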
train_test_split(X_temp, y_temp, test_size=(15/95), shuffle=False, random_state=2019)\n\n# print proportions\nprint()\nprint('Dataset splitting configuration')\nprint('Train: {}% | Validation: {}% | Test {}%'.format(round(len(y_train)/len(y)*100,2),\n round(len(y_val)/len(y)*100,2),\n round(len(y_test)/len(y)*100,2)))\nprint()\n\n# score function\ndef kinerja(y_target, y_pred, method=\"evar\"):\n mask = np.isnan(y_target) | np.isnan(y_pred)\n if method == \"evar\":\n return explained_variance_score(y_target[~mask], y_pred[~mask])\n elif method == \"r2\":\n return r2_score(y_target[~mask], y_pred[~mask])\n elif method == \"mse\":\n return mean_squared_error(y_target[~mask], y_pred[~mask])\n elif method == \"mae\":\n return mean_absolute_error(y_target[~mask], y_pred[~mask])\n\n# Main Program\nask_train = input(\"Train the model? (y/n):\")\nif ask_train == 'y':\n # Model design (MLP-NARX)\n MLP_Td = MLPRegressor(random_state = 1, hidden_layer_sizes=(91), max_iter = 5000)\n MLP_RH = MLPRegressor(random_state = 1, hidden_layer_sizes=(91), max_iter = 5000)\n\n p_value, q_value, d_value = 1, [1]*2, [0]*2\n NARX_Td = DirectAutoRegressor(MLP_Td, auto_order=p_value, exog_order=q_value, exog_delay=d_value, pred_step=1)\n NARX_RH = DirectAutoRegressor(MLP_RH, auto_order=p_value, exog_order=q_value, exog_delay=d_value, pred_step=1)\n\n # Model Training\n print('Model (Td) training...')\n NARX_Td.fit(X_train, y_train.Td)\n print('Model (RH) training...')\n NARX_RH.fit(X_train, y_train.RH)\n\n # Prediction\n Td_pred = NARX_Td.predict(X_val, y_val.Td)\n RH_pred = NARX_RH.predict(X_val, y_val.RH)\n print()\n\n # Td Performance Evaluation\n EVar = round(kinerja(y_val.Td, Td_pred, method='evar')*100, 2)\n R2 = round(kinerja(y_val.Td, Td_pred, method='r2')*100, 2)\n RMSE = round(kinerja(y_val.Td, Td_pred, method='mse')**0.5, 2)\n MAE = round(kinerja(y_val.Td, Td_pred, method='mae'), 2)\n print(\"Td Performance evaluation based on Validation Data\")\n print(\"EVar = {}% | R2 = {}% | RMSE = {} | MAE = {}\".format(EVar, R2, RMSE, MAE))\n print()\n\n # RH Performance Evaluation\n EVar = round(kinerja(y_val.RH, RH_pred, method='evar')*100, 2)\n R2 = round(kinerja(y_val.RH, RH_pred, method='r2')*100, 2)\n RMSE = round(kinerja(y_val.RH, RH_pred, method='mse')**0.5, 2)\n MAE = round(kinerja(y_val.RH, RH_pred, method='mae'), 2)\n print(\"RH Performance evaluation based on Validation Data\")\n print(\"EVar = {}% | R2 = {}% | RMSE = {} | MAE = {}\".format(EVar, R2, RMSE, MAE))\n print()\n\n ask_simpan = input(\"Save the model? 
(y/n):\")\n if ask_simpan == 'y':\n print('Saving NARX Model...')\n # Menyimpan model yang sudah dibuat\n import pickle\n\n with open(\"model_Td.pkl\", 'wb') as file:\n pickle.dump(NARX_Td, file)\n print('NARX model (Td) saved!')\n\n with open(\"model_RH.pkl\", 'wb') as file:\n pickle.dump(NARX_RH, file)\n print('NARX model (RH) saved!')\n else:\n print('Model not saved.')\nelse:\n print('Model not trained.')\n","sub_path":"Source/model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"214873981","text":"\"\"\"\nprocess.py\n\n\"\"\"\nfrom tinman import application\nimport clihelper\nimport copy\nfrom tornado import httpserver\nfrom tornado import ioloop\nimport logging\nimport multiprocessing\nimport signal\nimport socket\nimport ssl\nfrom tornado import version as tornado_version\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Process(multiprocessing.Process):\n \"\"\"The process holding the HTTPServer and Application\"\"\"\n\n def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):\n \"\"\"Create a new instance of Process\n\n \"\"\"\n super(Process, self).__init__(group, target, name, args, kwargs)\n\n # Passed in values\n self.manager = kwargs['manager']\n self.port = kwargs['port']\n\n # Internal attributes holding instance information\n self.app = None\n self.http_server = None\n self.request_counters = dict()\n\n # If newrelic is passed, use it\n if self.manager.options.newrelic:\n import newrelic.agent\n newrelic.agent.initialize(self.manager.options.newrelic)\n\n # Fixup the configuration parameters\n self.config = self.fixup_configuration(self.manager.config)\n\n def create_application(self):\n \"\"\"Create and return a new instance of tornado.web.Application\n\n \"\"\"\n return application.Application(self.routes, self.port, **self.settings)\n\n def create_http_server(self):\n \"\"\"Setup the HTTPServer\n\n :rtype: tornado.httpserver.HTTPServer\n\n \"\"\"\n return self.start_httpserver(self.port, self.http_config)\n\n def fixup_configuration(self, config):\n \"\"\"Rewrite the SSL certreqs option if it exists, do this once instead\n # of in each process like we do for imports and other things\n\n :param dict config: the configuration dictionary\n\n \"\"\"\n new_config = copy.deepcopy(config)\n if 'ssl_options' in new_config['HTTPServer']:\n self.fixup_ssl_config(new_config['HTTPServer']['ssl_options'])\n\n # Set the debug to True if running in the foreground\n if self.manager.debug and not new_config['Application'].get('debug'):\n new_config['Application']['debug'] = True\n\n # Append the HTTP server ports for cross-process functionality\n new_config['Application']['server_ports'] = \\\n new_config['HTTPServer']['ports']\n\n return new_config\n\n def fixup_ssl_config(self, config):\n \"\"\"Check the config to see if SSL configuration options have been passed\n and replace none, option, and required with the correct values in\n the certreqs attribute if it is specified.\n\n :param dict config: the HTTPServer > ssl_options configuration dict\n\n \"\"\"\n if 'cert_reqs' in config:\n requirements = {'none': ssl.CERT_NONE,\n 'optional': ssl.CERT_OPTIONAL,\n 'required': ssl.CERT_REQUIRED}\n config['cert_reqs'] = requirements[config['cert_reqs']]\n\n @property\n def http_config(self):\n \"\"\"Return a dictionary of HTTPServer arguments using the default values\n as specified in the HTTPServer class docstrings if no values 
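The `kinerja()` scorer above masks NaNs in either array before delegating to scikit-learn; a quick self-contained check of that masking:

```python
import numpy as np

y_true = np.array([1.0, 2.0, np.nan, 4.0])
y_hat = np.array([1.1, 1.9, 3.0, np.nan])

# Only the two fully observed pairs are scored
print(kinerja(y_true, y_hat, method="mae"))  # ~0.1
print(kinerja(y_true, y_hat, method="mse"))  # ~0.01
```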
are\n specified.\n\n :param dict config: The HTTPServer specific section of the config\n :rtype: dict\n\n \"\"\"\n config = self.config['HTTPServer']\n return {'no_keep_alive': config.get('no_keep_alive', False),\n 'ssl_options': config.get('ssl_options'),\n 'xheaders': config.get('xheaders', False)}\n\n def on_sigterm(self, signal_unused, frame_unused):\n \"\"\"Stop the HTTP Server and IO Loop, shutting down the process\n\n :param int signal_unused: Unused signal number\n :param frame frame_unused: Unused frame the signal was caught in\n\n \"\"\"\n LOGGER.info('Stopping HTTP Server and IOLoop')\n self.http_server.stop()\n self.ioloop.stop()\n\n def on_sighup(self, signal_unused, frame_unused):\n \"\"\"Reload the configuration\n\n :param int signal_unused: Unused signal number\n :param frame frame_unused: Unused frame the signal was caught in\n\n \"\"\"\n self.config = self.fixup_configuration(self.manager.config)\n clihelper.setup_logging(self.manager.debug)\n\n # Update HTTP configuration\n for setting in self.http_config:\n if getattr(self.http_server, setting) != self.http_config[setting]:\n LOGGER.debug('Changing HTTPServer %s setting', setting)\n setattr(self.http_server, setting, self.http_config[setting])\n\n # Update Application Settings\n for setting in self.settings:\n if self.app.settings[setting] != self.settings[setting]:\n LOGGER.debug('Changing Application %s setting', setting)\n self.app.settings[setting] = self.settings[setting]\n\n # Update the routes\n routes = self.app.prepare_routes(self.routes)\n self.app.handlers = []\n self.app.named_handlers = {}\n self.app.add_handlers(\".*$\", routes)\n\n LOGGER.info('Configuration reloaded')\n\n def run(self):\n \"\"\"Called when the process has started\n\n :param int port: The HTTP Server port\n\n \"\"\"\n LOGGER.debug('Initializing process')\n\n # Now in a child process so setup logging for this process\n clihelper.setup_logging(self.manager.debug)\n\n # Register the signal handlers\n self.setup_signal_handlers()\n\n # Create the application instance\n self.app = self.create_application()\n\n # Create the HTTPServer\n self.http_server = self.create_http_server()\n\n # Hold on to the IOLoop in case it's needed for responding to signals\n self.ioloop = ioloop.IOLoop.instance()\n\n # Start the IOLoop, blocking until it is stopped\n try:\n self.ioloop.start()\n except KeyboardInterrupt:\n pass\n\n @property\n def routes(self):\n \"\"\"Return the route list from the configuration.\n\n :rtype: list\n\n \"\"\"\n return self.config['Routes']\n\n @property\n def settings(self):\n \"\"\"Return the Application configuration\n\n :rtype: dict\n\n \"\"\"\n return self.config['Application']\n\n def setup_signal_handlers(self):\n \"\"\"Called when a child process is spawned to register the signal\n handlers\n\n \"\"\"\n LOGGER.debug('Registering signal handlers')\n signal.signal(signal.SIGTERM, self.on_sigterm)\n signal.signal(signal.SIGHUP, self.on_sighup)\n\n def start_httpserver(self, port, args):\n \"\"\"Start the HTTPServer\n\n :param int port: The port to run the HTTPServer on\n :param dict args: Dictionary of arguments for HTTPServer\n :rtype: tornado.httpserver.HTTPServer\n\n \"\"\"\n # Start the HTTP Server\n LOGGER.info(\"Starting Tornado v%s HTTPServer on port %i Args: %r\",\n tornado_version, port, args)\n http_server = httpserver.HTTPServer(self.app, **args)\n http_server.bind(port, family=socket.AF_INET)\n http_server.start(1)\n return 
http_server\n","sub_path":"tinman/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"229191393","text":"import itertools\nimport codecs\nimport uncertainties\nimport numpy as np\nimport uncertainties.unumpy as unp\nfrom uncertainties.unumpy import (\n nominal_values as noms,\n std_devs as stds,\n)\nfrom uncertainties import ufloat\n\ndef make_table(columns, figures=None):\n assert hasattr(columns[0],'__iter__'), \"Wenn nur eine Zeile von Daten vorliegt, funktioniert zip nicht mehr; die Elemente von columns müssen Listen sein, auch wenn sie ihrerseits nur ein Element enthalten.\"\n\n if figures is None:\n figures = [None] * len(columns)\n\n cols = []\n for column, figure in zip(columns, figures):\n if np.any(stds(column)):\n if figure is None:\n figure = ''\n col = list(zip(*['{0:.{1:}uf}'.format(x, figure).split('+/-') for x in column]))\n else:\n col = list(zip(*[['{0:.{1:}f}'.format(x, figure)] for x in noms(column)]))\n cols.extend(col)\n\n max_lens = [max(len(s) for s in col) for col in cols]\n cols = [['{0:<{1:}}'.format(s, ml) for s in col] for col, ml in zip(cols, max_lens)]\n\n rows = list(itertools.zip_longest(*cols))\n\n return (r' \\\\' + '\\n').join([' & '.join(s for s in row if s is not None) for row in rows]) + r' \\\\'\n\ndef make_composed_table(tables):\n assert isinstance(tables, list), \"You need to give a list of filenames to make_composed_table!\"\n Output = ''\n for filename in tables:\n with open(filename, 'r') as f:\n Output += f.read()\n return Output\n\ndef make_SI(num, unit, exp='', figures=None):\n y = ufloat(0.0, 0) #siunitx mag kein 0 +- 0, deshalb hier der workaround\n if num == y:\n return \"(0 \\pm 0) ~ \\si{\" + unit + \"}\"\n if np.any(stds([num])):\n if figures is None:\n figures = ''\n x = '{0:.{1:}uf}'.format(num, figures).replace('/', '')\n else:\n x = '{0:.{1:}f}'.format(num, figures)\n\n return r'\\SI{{{}{}}}{{{}}}'.format(x, exp, unit)\n\ndef write(filename, content):\n f = codecs.open(filename, \"w\", \"utf-8\")\n if type(content) == uncertainties.core.Variable:\n content = \"\\num{\" + str(x.n) + \" +- \" + str(x.s) + \"}\"\n f.write(content)\n if not content.endswith('\\n'):\n f.write('\\n')\n f.close()\n else:\n f.write(content)\n if not content.endswith('\\n'):\n f.write('\\n')\n f.close()\n\n #\n # with open(filename, 'w') as f:\n # f.write(content)\n # if not content.endswith('\\n'):\n # f.write('\\n')\n\n\ndef make_full_table(caption,label,source_table, stacking=np.array([]), units=None):\n # Vorgeplänkel\n Output = \"\"\"\\\\begin{table}\n \\\\centering\n \\\\caption{\"\"\" + caption + \"\"\"}\n \\\\label{\"\"\" + label + \"\"\"}\n \\\\sisetup{parse-numbers=false}\n \\\\begin{tabular}{\\n\"\"\"\n\n # Kerngeschäft : source_table einlesen und verarbeiten, dh. 
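`fixup_ssl_config()` above boils down to a string-to-constant lookup; the same mapping, run standalone:

```python
import ssl

config = {"cert_reqs": "optional"}
requirements = {"none": ssl.CERT_NONE,
                "optional": ssl.CERT_OPTIONAL,
                "required": ssl.CERT_REQUIRED}
config["cert_reqs"] = requirements[config["cert_reqs"]]
print(config["cert_reqs"])  # ssl.CERT_OPTIONAL
```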
Vor und Nachkommastellen rausfinden\n counter_columns = 0\n counter_lines = 0\n with open(source_table, 'r') as f:\n Text = f.read()\n for buchstabe in Text:\n if (buchstabe == '&'):\n counter_columns += 1\n elif (buchstabe == '\\\\'):\n counter_lines += 1\n\n NumberOfLines = counter_lines/2\n NumberOfColumns = counter_columns/counter_lines*2+1\n counter_digits_preDot = np.zeros((int(NumberOfLines), int(NumberOfColumns)), dtype=np.int)\n counter_digits_postDot = np.zeros((int(NumberOfLines), int(NumberOfColumns)), dtype=np.int)\n dot_reached = False\n counter_columns = 0\n counter_lines = 0\n with open(source_table, 'r') as f:\n Text = f.read()\n # 'Vor und Nachkommastellen rausfinden' beginnt hier\n for buchstabe in Text:\n if (buchstabe == '&'):\n counter_columns += 1\n dot_reached = False\n elif (buchstabe == '.'):\n dot_reached = True\n elif (buchstabe == '\\\\'):\n counter_lines += 1\n counter_columns = counter_columns % (NumberOfColumns-1)\n dot_reached = False\n elif (buchstabe != ' ') & (buchstabe != '\\n'):\n if (counter_lines/2 <= (NumberOfLines-1)):\n if dot_reached == False:\n counter_digits_preDot[int(counter_lines/2)][int(counter_columns)] += 1\n else:\n counter_digits_postDot[int(counter_lines/2)][int(counter_columns)] += 1\n # jetzt ermittle maximale Anzahl an Stellen und speichere sie in MaxDigitsPreDot und MaxDigitsPostDot\n MaxDigitsPreDot = []\n counter_digits_preDot_np = np.array(counter_digits_preDot)\n for x in counter_digits_preDot_np.T:\n MaxDigitsPreDot.append(max(x))\n MaxDigitsPostDot = []\n counter_digits_postDot_np = np.array(counter_digits_postDot)\n for x in counter_digits_postDot_np.T:\n MaxDigitsPostDot.append(max(x))\n # --------------------Ende der Stellensuche\n\n # Die Liste stacking in ein angepasstes Array umwandeln mit den tatsächlich betroffenen Spalten\n stacking_list = np.array(stacking)\n i = 0\n for x in stacking_list:\n stacking_list[i] += i\n i += 1\n\n # Schreiben der Tabellenformatierung\n if np.size(stacking) == 0:\n for digits_preDot, digits_postDot in zip(MaxDigitsPreDot, MaxDigitsPostDot):\n Output += '\\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) +']\\n'\n else: # es wurden fehlerbehaftete Werte übergeben, daher muss +- zwischen die entsprechenden Spalten\n i = 0.0\n for digits_preDot, digits_postDot in zip(MaxDigitsPreDot, MaxDigitsPostDot):\n if i in stacking_list:\n Output += '\\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) +']\\n'\n Output += '\\t@{${}\\\\pm{}$}\\n'\n elif i-1 in stacking_list:\n Output += '\\tS[table-format=' + str(digits_preDot) + '.' + str(digits_postDot) +', table-number-alignment = left]\\n' # wir wollen hier linksbündige Zahlen\n else:\n Output += '\\tS[table-format=' + str(digits_preDot) + '.' 
+ str(digits_postDot) +']\\n'\n i += 1\n\n # Zwischengeplänkel\n Output += '\\t}\\n\\t\\\\toprule\\n\\t'\n\n # Einheitenzeile\n i=0\n stacking_list = np.array(stacking)\n for Spaltenkopf in units:\n if i in stacking_list:\n Output += '\\\\multicolumn{2}{c}'\n Output += '{' + str(Spaltenkopf) + '}\\t\\t'\n i += 1\n if i == np.size(units):\n Output += '\\\\\\\\ \\n\\t'\n elif i % 2 == 0:\n Output += '& \\n\\t'\n else:\n Output += '& '\n\n # Schlussgeplänkel\n Output += \"\"\"\\\\midrule\n \\\\input{\"\"\" + source_table + \"\"\"}\n \\\\bottomrule\n \\\\end{tabular}\n \\\\end{table}\"\"\"\n return Output\n","sub_path":"tex-table-generator/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69890615","text":"# Наименьший элемент\n# Есть список чисел:\n\n# lis = [1, 34, 8, 0, -5, 7, 32, 74, 59, 92, 41, 10, -2]\n# Найдите самый маленький элемент списка и переместите его в зависимости от условия: \n\n# если найденный элемент меньше за ноль, то поместите его в конец списка\n# если найденный элемент больше или равен нулю, то поместите его в начало списка.\n# Solution:\n\nlis = [1, 34, 8, 0, -5, 7, 32, 74, 59, 92, 41, 10, -2]\n\ni = 0\nwhile (i < len(lis)):\n if (lis[i] < 0):\n lis.append(lis[i])\n lis.remove(lis[i])\n elif (lis[i] >= 0):\n lis.insert(0, lis[i])\n lis.remove(lis[i + 1])\n i += 1\n\n# print (lis)\n\n# Solution 2:\n\n# lis = [1, 34, 8, 0, -5, 7, 32, 74, 59, 92, 41, 10, -2]\n# # в min присваиваем первый элемент списка\n# min = lis[0] \n# for i in lis: # проходим по всем элементам\n# \t# если находим элемент меньший нежели тот, что находиться в переменной, то присваиваем новое значение\n# \tif i < min:\n# \t\tmin = i\n \n# print (\"Минимальное число: \", min)\n# print (\"Список до удаления: \", lis)\n# lis.remove (min)\n# print (\"Список после удаления: \", lis)\n# if min < 0:\n# \tlis.append (min)\n# else:\n# \tlis.insert (0, min)\n# print (\"Список с добавленным элементом: \", lis)","sub_path":"lesson07/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"610382438","text":"s = input()\n\n# string import method\nimport string\n\n\ndef count_words_string(word):\n count = 1\n for letter in word:\n if letter in string.ascii_uppercase:\n count += 1\n return count\n\n\n# ord method\ndef count_words_ord(word):\n count = 1\n for letter in word:\n if ord(letter) >= 65 and ord(letter) <= 90:\n count += 1\n return count\n\n\nprint(count_words_ord(s))\n# print(count_words_string(s))","sub_path":"Algorithms/Strings/CamelCase.py","file_name":"CamelCase.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"374771283","text":"# 1647. 
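Both CamelCase counters above implement the same rule (number of uppercase letters plus one); a one-line check:

```python
print(count_words_ord("saveChangesInTheEditor"))     # 5
print(count_words_string("saveChangesInTheEditor"))  # 5
```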
도시 분할 계획 (골드4)\n\nimport sys\ninput = sys.stdin.readline\n\ndef find_set(x):\n if x != parent[x]:\n parent[x] = find_set(parent[x])\n return parent[x]\n\nn, m = map(int, input().split())\nedges = []\nfor _ in range(m):\n a, b, c = map(int, input().split())\n edges.append((c, a, b))\nedges.sort()\n\nparent = list(range(n+1))\ntotal, count = [], 0\nfor c, a, b in edges:\n a_root, b_root = find_set(a), find_set(b)\n if a_root != b_root:\n parent[b_root] = a_root\n total.append(c)\n count += 1\n if count >= n-1:\n break\n \nprint(sum(total)-max(total))\n","sub_path":"week10_6_1/BOJ_1647_영주.py","file_name":"BOJ_1647_영주.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"496004483","text":"bl_info = {\r\n \"name\": \"Easy Model Compositing\",\r\n \"author\": \"Jeffrey Hepburn - John Roper\",\r\n \"version\": (1, 2,2),\r\n \"blender\": (2, 77, 0),\r\n \"location\": \"Render Panel (Properties editor) > Easy Model Compositing\",\r\n \"description\": \"Generate an easy model composite\",\r\n \"warning\": \"\",\r\n \"wiki_url\": \"\",\r\n \"tracker_url\": \"\",\r\n \"category\": \"Render\"\r\n}\r\n\r\nimport bpy\r\nfrom os.path import basename\r\nfrom bpy.props import *\r\nfrom bpy_extras.io_utils import ImportHelper, ExportHelper\r\n\r\n####################\r\n# Setup Subject(s) #\r\n####################\r\nclass ECS_Setup_Subjects(bpy.types.Operator):\r\n \"\"\"Setup the subject(s)\"\"\"\r\n bl_idname = \"render.ecs_setup_subjects\"\r\n bl_label = \"Setup Subject(s)\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n\r\n def execute(self, context):\r\n for obj in bpy.context.selected_objects:\r\n obj.pass_index = 100\r\n\r\n return {'FINISHED'}\r\n\r\n###########################\r\n# Setup Shadow Catcher(s) #\r\n###########################\r\nclass ECS_Setup_Shadow_Catchers(bpy.types.Operator):\r\n \"\"\"Setup the shadow catcher(s)\"\"\"\r\n bl_idname = \"render.ecs_setup_shadow_catchers\"\r\n bl_label = \"Setup Shadow Catcher(s)\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n\r\n def execute(self, context):\r\n for obj in bpy.context.selected_objects:\r\n obj.pass_index = 101\r\n\r\n return {'FINISHED'}\r\n\r\n#######################\r\n# Set Render Settings #\r\n#######################\r\nclass ECS_Set_Render_Settings(bpy.types.Operator):\r\n \"\"\"Set the render settings\"\"\"\r\n bl_idname = \"render.ecs_set_render_settings\"\r\n bl_label = \"Setup Render\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n\r\n def execute(self, context):\r\n pimage_op = context.scene.use_pimage\r\n render_layers = bpy.context.scene.render.layers\r\n\r\n for layer in render_layers:\r\n layer.use_pass_object_index = True\r\n layer.use_pass_ambient_occlusion = True\r\n layer.use_pass_diffuse_direct = True\r\n layer.use_pass_glossy_indirect = True\r\n\r\n bpy.context.scene.render.resolution_percentage = 100\r\n bpy.context.scene.cycles.film_transparent = True\r\n \r\n addon_dir = bpy.utils.user_resource('SCRIPTS', \"addons\")\r\n blendfile = addon_dir + \"/easy_model_compositing/node.blend\"\r\n selection = \"\\\\NodeTree\\\\\"\r\n ngroup = \"EasyModelCompositor\"\r\n\r\n filepath = blendfile + selection + ngroup\r\n directory = blendfile + selection\r\n filename = ngroup\r\n\r\n newNgroup = bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)\r\n \r\n if pimage_op == True:\r\n previewImagePath = bpy.path.abspath(context.scene.backgroundFilePath)\r\n imageName = basename(previewImagePath)\r\n 
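The path-compressing `find_set()` that drives the Kruskal solution above is worth a standalone check:

```python
parent = list(range(6))

def find_set(x):
    if x != parent[x]:
        parent[x] = find_set(parent[x])
    return parent[x]

parent[2] = 1  # union 2 -> 1
parent[3] = 2  # union 3 -> 2
print(find_set(3))  # 1
print(parent)       # [0, 1, 1, 1, 4, 5] -- chain flattened onto the root
```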
bpy.data.images.load(previewImagePath)\r\n pimageWidth = bpy.data.images[imageName].size[0]\r\n pimageHeight = bpy.data.images[imageName].size[1]\r\n \r\n if pimage_op == True and context.scene.pimage_img_dim == True:\r\n bpy.context.scene.render.resolution_x = pimageWidth\r\n bpy.context.scene.render.resolution_y = pimageHeight\r\n \r\n bpy.context.scene.use_nodes = True\r\n tree = bpy.context.scene.node_tree\r\n nodes=bpy.context.scene.node_tree.nodes\r\n\r\n for node in tree.nodes:\r\n tree.nodes.remove(node)\t\r\n \r\n rlayers_node = tree.nodes.new(type='CompositorNodeRLayers')\r\n rlayers_node.location = -250,0\r\n \r\n group = tree.nodes.new(\"CompositorNodeGroup\")\r\n group.node_tree = bpy.data.node_groups['EasyModelCompositor']\r\n group.location = 0,0\r\n \r\n if pimage_op == True:\r\n pimage = tree.nodes.new(\"CompositorNodeImage\")\r\n pimage.location = -250,-350\r\n pimage.image = bpy.data.images[imageName]\r\n \r\n output_node = tree.nodes.new(type='CompositorNodeComposite')\r\n output_node.location = 250,0\r\n \r\n links = tree.links\r\n link = links.new(rlayers_node.outputs[0], group.inputs[0])\r\n link = links.new(rlayers_node.outputs[1], group.inputs[1])\r\n link = links.new(rlayers_node.outputs[\"AO\"], group.inputs[4])\r\n link = links.new(rlayers_node.outputs[\"IndexOB\"], group.inputs[2])\r\n link = links.new(rlayers_node.outputs[\"Diffuse Direct\"], group.inputs[3])\r\n link = links.new(rlayers_node.outputs[\"Glossy Indirect\"], group.inputs[5])\r\n if pimage_op == True:\r\n link = links.new(pimage.outputs[0], group.inputs[7])\r\n link = links.new(group.outputs[0], output_node.inputs[0])\r\n\r\n return {'FINISHED'}\r\n\r\n################\r\n# Import Titan #\r\n################\r\nclass ImportTitan(bpy.types.Operator):\r\n \"\"\"Import the Titan model\"\"\"\r\n bl_idname = \"scene.add_titan_model\"\r\n bl_label = \"Import Titan Model\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n\r\n def execute(self, context):\r\n addon_dir = bpy.utils.user_resource('SCRIPTS', \"addons\")\r\n blendfile = addon_dir + \"/easy_model_compositing/node.blend\"\r\n selection = \"\\\\Object\\\\\"\r\n objectName = \"Titan\"\r\n\r\n filepath = blendfile + selection + objectName\r\n directory = blendfile + selection\r\n filename = objectName\r\n\r\n titanModel = bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)\r\n\r\n return {'FINISHED'}\r\n\r\n###################\r\n# Import PBR Node #\r\n###################\r\nclass ImportPBR(bpy.types.Operator):\r\n \"\"\"Import the pbr node\"\"\"\r\n bl_idname = \"scene.add_pbr_node\"\r\n bl_label = \"Import PBR Node\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n\r\n def execute(self, context):\r\n addon_dir = bpy.utils.user_resource('SCRIPTS', \"addons\")\r\n blendfile = addon_dir + \"/easy_model_compositing/node.blend\"\r\n selection = \"\\\\NodeTree\\\\\"\r\n ngroup = \"SubstancePBR\"\r\n \r\n filepath = blendfile + selection + ngroup\r\n directory = blendfile + selection\r\n filename = ngroup\r\n\r\n pbrNode = bpy.ops.wm.append(filepath=filepath, filename=filename, directory=directory)\r\n \r\n materials = bpy.context.active_object.data.materials\r\n nMat = len(materials)\r\n isEmpty = not materials\r\n if isEmpty:\r\n bpy.data.materials.new(\"Substance PBR Material\")\r\n mat = bpy.data.materials.get(\"Substance PBR Material\")\r\n bpy.context.active_object.data.materials.append(mat)\r\n \r\n activeMat = bpy.context.active_object.active_material\r\n\r\n activeMat.use_nodes = True\r\n nodes = activeMat.node_tree.nodes\r\n\r\n 
group = nodes.new(\"ShaderNodeGroup\")\r\n group.node_tree = bpy.data.node_groups['SubstancePBR']\r\n group.location = 0,0\r\n\r\n return {'FINISHED'}\r\n\r\nclass ECSRenderPanel(bpy.types.Panel):\r\n \"\"\"Creates a Panel in the render tab of the properties editor\"\"\"\r\n bl_label = \"Easy Model Compositing\"\r\n bl_idname = \"RENDER_PT_ecs\"\r\n bl_space_type = \"PROPERTIES\"\r\n bl_region_type = \"WINDOW\"\r\n bl_context = \"render\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n \r\n if bpy.context.scene.render.engine == 'BLENDER_RENDER':\r\n row = layout.row()\r\n row.label(\"Please Enable Cycles To Use This Addon\", icon=\"ERROR\")\r\n else:\r\n row = layout.row()\r\n row.operator(\"render.ecs_setup_subjects\", icon='ZOOMIN')\r\n\r\n row = layout.row()\r\n row.operator(\"render.ecs_setup_shadow_catchers\", icon='ZOOMIN')\r\n \r\n row = layout.row()\r\n row.separator()\r\n \r\n row = layout.row()\r\n row.prop(context.scene, 'use_pimage')\r\n \r\n if context.scene.use_pimage == True:\r\n row.prop(context.scene, 'pimage_img_dim')\r\n row = layout.row()\r\n row.prop(context.scene, 'backgroundFilePath')\r\n\r\n row = layout.row()\r\n row.scale_y = 1.2\r\n row.operator(\"render.ecs_set_render_settings\", icon='RENDER_STILL')\r\n \r\n row = layout.row()\r\n row.separator()\r\n\r\n row = layout.row()\r\n row.label(\"Extras:\", icon='FILE_TICK')\r\n \r\n row = layout.row()\r\n row.operator(\"scene.add_titan_model\", icon='APPEND_BLEND')\r\n\r\n row = layout.row()\r\n row.operator(\"scene.add_pbr_node\", icon='APPEND_BLEND')\r\n\r\n################\r\n# Registration #\r\n################\r\ndef register():\r\n bpy.types.Scene.use_pimage = bpy.props.BoolProperty(\r\n name=\"Use Preview Background\",\r\n description=\"Use the preview image option\",\r\n default=False,\r\n )\r\n \r\n bpy.types.Scene.pimage_img_dim = bpy.props.BoolProperty(\r\n name=\"Use Image Dimensions\",\r\n description=\"Use the preview image dimensions as the render dimensions\",\r\n default=False,\r\n )\r\n \r\n bpy.types.Scene.backgroundFilePath = bpy.props.StringProperty \\\r\n (\r\n name = \"Select The Background Preview Image\",\r\n default = \"\",\r\n description = \"Define the image file\",\r\n subtype = 'FILE_PATH'\r\n )\r\n\r\n bpy.utils.register_class(ECS_Setup_Subjects)\r\n bpy.utils.register_class(ECS_Setup_Shadow_Catchers)\r\n bpy.utils.register_class(ECS_Set_Render_Settings)\r\n bpy.utils.register_class(ImportTitan)\r\n bpy.utils.register_class(ImportPBR)\r\n bpy.utils.register_class(ECSRenderPanel)\r\n\r\ndef unregister():\r\n bpy.utils.unregister_class(ECS_Setup_Subjects)\r\n bpy.utils.unregister_class(ECS_Setup_Shadow_Catchers)\r\n bpy.utils.unregister_class(ECS_Set_Render_Settings)\r\n bpy.utils.unregister_class(ImportTitan)\r\n bpy.utils.unregister_class(ImportPBR)\r\n bpy.utils.unregister_class(ECSRenderPanel)\r\n\r\nif __name__ == \"__main__\":\r\n register()\r\n","sub_path":"All_In_One/addons/easy_model_compositing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"541661890","text":"#!/usr/bin/python3\n\n# Import modules\nimport sys\nimport subprocess\nimport ipaddress\nimport argparse\nimport logging as log\n\n# We need to declare these elsewhere\n# Declare\ninfile=\"\"\noutfile=\"outfile.txt\"\n\n# Include\n\n# Definitions\n# subroutine to scan a range of addresses\n# This expects a CIDR block as an input\ndef scanRange(net_addr):\n print(\"net_addr 
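Every importer in the add-on above reuses one append recipe; a minimal sketch of it (this only runs inside Blender's bundled Python, and the .blend path here is hypothetical):

```python
import bpy

blendfile = "/path/to/node.blend"   # hypothetical library file
directory = blendfile + "\\NodeTree\\"
filename = "EasyModelCompositor"

# Same call pattern as ECS_Set_Render_Settings / ImportTitan / ImportPBR
bpy.ops.wm.append(filepath=directory + filename,
                  filename=filename,
                  directory=directory)
```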
\", net_addr)\n # Create the network\n ip_net = ipaddress.ip_network(net_addr, strict=False)\n print(\"ip_net \", ip_net)\n for host in ip_net.hosts():\n print (host)\n \n# if args.verbose:\n# log.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.DEBUG)\n# log.info(\"net_addr: \", net_addr)\n# log.info(\"ip_net : \", ip_net)\n# else:\n# log.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n # Get all hosts on that network\n all_hosts = list(ip_net.hosts())\n \n # For each IP address in the subnet, \n # run the ping command with subprocess.popen interface\n for i in range(len(all_hosts)):\n output = subprocess.Popen(['ping', '-n', '1', '-w', '500', str(all_hosts[i])], stdout=subprocess.PIPE).communicate()[0]\n if \"Destination host unreachable\" in output.decode('utf-8'):\n print(str(all_hosts[i]), \"is Offline\")\n elif \"Request timed out\" in output.decode('utf-8'):\n print(str(all_hosts[i]), \"is Offline\")\n else:\n print(str(all_hosts[i]), \"is Online\")\n\n# Read file containing subnets\n# Do we use CIDR format?\n# Do we want unique IP addresses?\n\n# Create a parser for our arguments\ndef _get_argparser():\n parser = argparse.ArgumentParser(description='Scan a network based on user input. This can be used to scan a single address or a range if addresses, provided in CIDR notation.')\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n parser.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n parser.add_argument(\"-i\", \"--infile\",\n help=\"input file. This expects subnets in CIDR notation.\")\n parser.add_argument(\"-o\", \"--outfile\",\n help=\"output file. This will out put the result of a scan. default: outfile.txt\", default=\"outfile.txt\")\n parser.add_argument(\"-S\", \"--singleton\",\n help=\"Scan a single IP address from the command line.\")\n parser.add_argument(\"-c\", \"--cidr\",\n help=\"Scan a CIDR range ie 10.0.0.0/24 which would scan address range 10.0.0.0 - 10.0.0.255\")\n\n # rename parse_args to something more managable\n args = parser.parse_args()\n\n # Sanity tests\n if args.infile is None and args.cidr is None and args.singleton is None:\n print (\"Some argument expected.\")\n sys.exit(1)\n elif args.cidr:\n scanRange(args.cidr)\n elif args.singleton:\n net_addr = args.singleton\n elif args.infile:\n print(args.infile, \"Function undefined\")\n else:\n parser.print_help()\n sys.exit()\n return args\n\n# Some sanity tests\n# If no arguments are provided, help and exit\n#if args.cidr:\n# #net_addr = args.cidr\n# #print(\"net_addr \", net_addr)\n# print(\"args.cidr \", args.cidr)\n# scanRange(args.cidr)\n#elif args.singleton:\n# net_addr = args.singleton\n#elif args.infile:\n# print(args.infile)\n#else:\n# parser.print_help() \n# sys.exit()\n\n if args.verbose:\n log.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.DEBUG)\n log.info(\"CIDR: \", args.cidr)\n else:\n log.basicConfig(format=\"%(levelname)s: %(message)s\")\n\nif __name__ == \"__main__\":\n main()\n\n# TODO\n# All a comma-seperated range as an input\n# Allow the --cidr option to define the contents of infile\n# Otherwise allow a comma-separated range fro infile\n","sub_path":"python/network/scan_network.py","file_name":"scan_network.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134311665","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nimport proto # type: ignore\n\nfrom google.cloud.monitoring_dashboard_v1.types import metrics\nfrom google.protobuf import duration_pb2 # type: ignore\n\n\n__protobuf__ = proto.module(\n package=\"google.monitoring.dashboard.v1\",\n manifest={\n \"XyChart\",\n \"ChartOptions\",\n },\n)\n\n\nclass XyChart(proto.Message):\n r\"\"\"A chart that displays data on a 2D (X and Y axes) plane.\n\n Attributes:\n data_sets (MutableSequence[google.cloud.monitoring_dashboard_v1.types.XyChart.DataSet]):\n Required. The data displayed in this chart.\n timeshift_duration (google.protobuf.duration_pb2.Duration):\n The duration used to display a comparison\n chart. A comparison chart simultaneously shows\n values from two similar-length time periods\n (e.g., week-over-week metrics).\n The duration must be positive, and it can only\n be applied to charts with data sets of LINE plot\n type.\n thresholds (MutableSequence[google.cloud.monitoring_dashboard_v1.types.Threshold]):\n Threshold lines drawn horizontally across the\n chart.\n x_axis (google.cloud.monitoring_dashboard_v1.types.XyChart.Axis):\n The properties applied to the X axis.\n y_axis (google.cloud.monitoring_dashboard_v1.types.XyChart.Axis):\n The properties applied to the Y axis.\n y2_axis (google.cloud.monitoring_dashboard_v1.types.XyChart.Axis):\n The properties applied to the Y2 axis.\n chart_options (google.cloud.monitoring_dashboard_v1.types.ChartOptions):\n Display options for the chart.\n \"\"\"\n\n class DataSet(proto.Message):\n r\"\"\"Groups a time series query definition with charting options.\n\n Attributes:\n time_series_query (google.cloud.monitoring_dashboard_v1.types.TimeSeriesQuery):\n Required. Fields for querying time series\n data from the Stackdriver metrics API.\n plot_type (google.cloud.monitoring_dashboard_v1.types.XyChart.DataSet.PlotType):\n How this data should be plotted on the chart.\n legend_template (str):\n A template string for naming ``TimeSeries`` in the resulting\n data set. This should be a string with interpolations of the\n form ``${label_name}``, which will resolve to the label's\n value.\n min_alignment_period (google.protobuf.duration_pb2.Duration):\n Optional. The lower bound on data point frequency for this\n data set, implemented by specifying the minimum alignment\n period to use in a time series query For example, if the\n data is published once every 10 minutes, the\n ``min_alignment_period`` should be at least 10 minutes. It\n would not make sense to fetch and align data at one minute\n intervals.\n target_axis (google.cloud.monitoring_dashboard_v1.types.XyChart.DataSet.TargetAxis):\n Optional. The target axis to use for plotting\n the metric.\n \"\"\"\n\n class PlotType(proto.Enum):\n r\"\"\"The types of plotting strategies for data sets.\n\n Values:\n PLOT_TYPE_UNSPECIFIED (0):\n Plot type is unspecified. 
The view will default to ``LINE``.\n LINE (1):\n The data is plotted as a set of lines (one\n line per series).\n STACKED_AREA (2):\n The data is plotted as a set of filled areas\n (one area per series), with the areas stacked\n vertically (the base of each area is the top of\n its predecessor, and the base of the first area\n is the X axis). Since the areas do not overlap,\n each is filled with a different opaque color.\n STACKED_BAR (3):\n The data is plotted as a set of rectangular\n boxes (one box per series), with the boxes\n stacked vertically (the base of each box is the\n top of its predecessor, and the base of the\n first box is the X axis). Since the boxes do not\n overlap, each is filled with a different opaque\n color.\n HEATMAP (4):\n The data is plotted as a heatmap. The series being plotted\n must have a ``DISTRIBUTION`` value type. The value of each\n bucket in the distribution is displayed as a color. This\n type is not currently available in the Stackdriver\n Monitoring application.\n \"\"\"\n PLOT_TYPE_UNSPECIFIED = 0\n LINE = 1\n STACKED_AREA = 2\n STACKED_BAR = 3\n HEATMAP = 4\n\n class TargetAxis(proto.Enum):\n r\"\"\"An axis identifier.\n\n Values:\n TARGET_AXIS_UNSPECIFIED (0):\n The target axis was not specified. Defaults\n to Y1.\n Y1 (1):\n The y_axis (the right axis of chart).\n Y2 (2):\n The y2_axis (the left axis of chart).\n \"\"\"\n TARGET_AXIS_UNSPECIFIED = 0\n Y1 = 1\n Y2 = 2\n\n time_series_query: metrics.TimeSeriesQuery = proto.Field(\n proto.MESSAGE,\n number=1,\n message=metrics.TimeSeriesQuery,\n )\n plot_type: \"XyChart.DataSet.PlotType\" = proto.Field(\n proto.ENUM,\n number=2,\n enum=\"XyChart.DataSet.PlotType\",\n )\n legend_template: str = proto.Field(\n proto.STRING,\n number=3,\n )\n min_alignment_period: duration_pb2.Duration = proto.Field(\n proto.MESSAGE,\n number=4,\n message=duration_pb2.Duration,\n )\n target_axis: \"XyChart.DataSet.TargetAxis\" = proto.Field(\n proto.ENUM,\n number=5,\n enum=\"XyChart.DataSet.TargetAxis\",\n )\n\n class Axis(proto.Message):\n r\"\"\"A chart axis.\n\n Attributes:\n label (str):\n The label of the axis.\n scale (google.cloud.monitoring_dashboard_v1.types.XyChart.Axis.Scale):\n The axis scale. By default, a linear scale is\n used.\n \"\"\"\n\n class Scale(proto.Enum):\n r\"\"\"Types of scales used in axes.\n\n Values:\n SCALE_UNSPECIFIED (0):\n Scale is unspecified. 
The view will default to ``LINEAR``.\n LINEAR (1):\n Linear scale.\n LOG10 (2):\n Logarithmic scale (base 10).\n \"\"\"\n SCALE_UNSPECIFIED = 0\n LINEAR = 1\n LOG10 = 2\n\n label: str = proto.Field(\n proto.STRING,\n number=1,\n )\n scale: \"XyChart.Axis.Scale\" = proto.Field(\n proto.ENUM,\n number=2,\n enum=\"XyChart.Axis.Scale\",\n )\n\n data_sets: MutableSequence[DataSet] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=DataSet,\n )\n timeshift_duration: duration_pb2.Duration = proto.Field(\n proto.MESSAGE,\n number=4,\n message=duration_pb2.Duration,\n )\n thresholds: MutableSequence[metrics.Threshold] = proto.RepeatedField(\n proto.MESSAGE,\n number=5,\n message=metrics.Threshold,\n )\n x_axis: Axis = proto.Field(\n proto.MESSAGE,\n number=6,\n message=Axis,\n )\n y_axis: Axis = proto.Field(\n proto.MESSAGE,\n number=7,\n message=Axis,\n )\n y2_axis: Axis = proto.Field(\n proto.MESSAGE,\n number=9,\n message=Axis,\n )\n chart_options: \"ChartOptions\" = proto.Field(\n proto.MESSAGE,\n number=8,\n message=\"ChartOptions\",\n )\n\n\nclass ChartOptions(proto.Message):\n r\"\"\"Options to control visual rendering of a chart.\n\n Attributes:\n mode (google.cloud.monitoring_dashboard_v1.types.ChartOptions.Mode):\n The chart mode.\n \"\"\"\n\n class Mode(proto.Enum):\n r\"\"\"Chart mode options.\n\n Values:\n MODE_UNSPECIFIED (0):\n Mode is unspecified. The view will default to ``COLOR``.\n COLOR (1):\n The chart distinguishes data series using\n different color. Line colors may get reused when\n there are many lines in the chart.\n X_RAY (2):\n The chart uses the Stackdriver x-ray mode, in\n which each data set is plotted using the same\n semi-transparent color.\n STATS (3):\n The chart displays statistics such as\n average, median, 95th percentile, and more.\n \"\"\"\n MODE_UNSPECIFIED = 0\n COLOR = 1\n X_RAY = 2\n STATS = 3\n\n mode: Mode = proto.Field(\n proto.ENUM,\n number=1,\n enum=Mode,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/cloud/monitoring_dashboard_v1/types/xychart.py","file_name":"xychart.py","file_ext":"py","file_size_in_byte":9963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"485080380","text":"# -*- coding: utf-8 -*-\nimport sys\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\nfrom mymyo import MyMyo\nfrom PyQt4.Qt import QMutex\n\n\nplotters = []\napp = QtGui.QApplication([])\ntimer = myotimer = win = myo = None\n\n\nclass PlotImage:\n XAXISLENGTH = 2000\n XAXISDATA = [x for x in range(XAXISLENGTH)]\n\n def __init__(self, title):\n self._data = [0 for x in range(self.XAXISLENGTH)]\n self._title = title\n self._curve = None\n self._pen = pg.mkPen(color=\"#3366FF\")\n self._plotstart = 0\n self._plot = None\n self.mutex = QMutex()\n\n def add_data(self, datapoint):\n self.mutex.lock()\n self._data.append(datapoint)\n if self._plotstart < self.XAXISLENGTH:\n self._data.pop(0)\n else:\n self._plotstart += 1\n self.mutex.unlock()\n\n def draw(self):\n self.mutex.lock()\n if not self._curve:\n self._curve = self._plot.plot(pen=self._pen)\n self._curve.setData(x=self.XAXISDATA, y=self._data[self._plotstart:])\n self.mutex.unlock()\n\n def addplot(self, win):\n self._plot = win.addPlot(title=self._title)\n self._plot.setRange(xRange=(0, self.XAXISLENGTH))\n\n\ndef update_plot_data(emgdata):\n for p, e in zip(emgdata, plotters):\n p.add_data(e)\n\n\ndef update_draw():\n for p in plotters:\n p.draw()\n\n\ndef startmyo():\n global myo\n myo = 
MyMyo(update_plot_data)\n\n\ndef main():\n global win, timer, myotimer\n win = pg.GraphicsWindow(title=\"Myo Sensor Data\")\n win.resize(1000, 600)\n win.setBackground(None)\n pg.setConfigOptions(antialias=True)\n for i in range(8):\n if i % 2 == 0:\n win.nextRow()\n plotters.append(PlotImage(str('Sensor %s' % str(int(i) + 1))))\n plotters[i].addplot(win)\n\n myotimer = QtCore.QTimer()\n myotimer.timeout.connect(startmyo)\n myotimer.setSingleShot(True)\n myotimer.start(1)\n\n timer = QtCore.QTimer()\n timer.timeout.connect(update_draw)\n timer.start(100)\n\n\nif __name__ == '__main__':\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n main()\n QtGui.QApplication.instance().exec_()\n","sub_path":"drawlivedata.py","file_name":"drawlivedata.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"416649423","text":"\n\n# class header\nclass _INFINITY():\n\tdef __init__(self,): \n\t\tself.name = \"INFINITY\"\n\t\tself.definitions = [u'time or space that has no end: ', u'a place that is so far away that it cannot be reached: ', u'a number that is larger than all other numbers', u'an extremely large number of something: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_infinity.py","file_name":"_infinity.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"208983139","text":"#!/usr/bin/env python\n\nimport sys\nfrom covid_io import read_argv\nfrom utils import dataframe_output, datetime_isoformat\n\n\n# Retrieve the CSV files from https://covid19.isciii.es\ndf = read_argv(encoding='ISO-8859-1').rename(columns={\n 'FECHA': 'Date',\n 'CCAA': 'RegionCode',\n 'Fallecidos': 'Deaths'\n}).dropna(subset=['Date'])\n\n# Add the country code to all records\ndf['CountryCode'] = 'ES'\n\n# Confirmed cases are split across 3 columns\nconfirmed_columns = ['CASOS', 'PCR+', 'TestAc+']\nfor col in confirmed_columns:\n df[col] = df[col].fillna(0)\ndf['Confirmed'] = df.apply(lambda x: sum([x[col] for col in confirmed_columns]), axis=1)\n\n# Convert dates to ISO format\ndf['Date'] = df['Date'].apply(lambda date: datetime_isoformat(date, '%d/%m/%Y'))\n\n# Country-wide is the sum of all regions\nregion_level = df\ncountry_level = df.groupby(['Date', 'CountryCode']).sum().reset_index()\n\n# Output the results\ndataframe_output(country_level)\ndataframe_output(region_level, 'ES')\n","sub_path":"input/parse_es_iscii.py","file_name":"parse_es_iscii.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"164714673","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\ndef ELUCons(elu, nchan):\r\n if elu:\r\n return nn.ELU(inplace=True)\r\n else:\r\n return nn.PReLU(nchan)\r\n\r\nclass ContBatchNorm3d(nn.modules.batchnorm._BatchNorm): #input size = output size\r\n def _check_input_dim(self, input):\r\n if input.dim() != 5:\r\n raise ValueError('expected 5D input (got {}D input)'\r\n .format(input.dim()))\r\n super(ContBatchNorm3d, self)._check_input_dim(input)\r\n\r\n def forward(self, input):\r\n self._check_input_dim(input)\r\n return F.batch_norm(\r\n input, self.running_mean, self.running_var, self.weight, self.bias,\r\n True, self.momentum, self.eps)\r\n\r\nclass step1(nn.Module):\r\n def __init__(self,elu):\r\n super(step1,self).__init__()\r\n self.conv1 = nn.Conv3d(\r\n in_channels=1,\r\n out_channels=16,\r\n kernel_size=5,\r\n padding=2\r\n )\r\n self.bn1 = ContBatchNorm3d(16)\r\n self.relu1 = ELUCons(elu,16)\r\n def forward(self, x):\r\n # do we want a PRELU here as well?\r\n out = self.bn1(self.conv1(x))\r\n # replicate the single input channel 16 times along the channel dim\r\n # (dim=1 for NCDHW input; cat along dim 0 would stack batches instead)\r\n x16 = torch.cat((x, x, x, x, x, x, x, x,\r\n x, x, x, x, x, x, x, x), 1)\r\n out = self.relu1(torch.add(out, x16))\r\n return out\r\n\r\nclass step2(nn.Module):\r\n pass","sub_path":"V_Net.py","file_name":"V_Net.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"346592112","text":"# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-\n# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4\n#\n# MDAnalysis --- https://www.mdanalysis.org\n# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors\n# (see the file AUTHORS for the full list of names)\n#\n# Released under the GNU Public Licence, v2 or any higher version\n#\n# Please cite your use of MDAnalysis in published work:\n#\n# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,\n# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.\n# MDAnalysis: A Python package for the rapid analysis of molecular dynamics\n# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th\n# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.\n# doi: 10.25080/majora-629e541a-00e\n#\n# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.\n# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.\n# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787\n#\n\n\n\"\"\"LAMMPS DCD trajectory and DATA I/O --- :mod:`MDAnalysis.coordinates.LAMMPS`\n===============================================================================\n\nClasses to read and write LAMMPS_ DCD binary trajectories, LAMMPS DATA files\nand LAMMPS dump files. Trajectories can be read regardless of system-endianness\nas this is auto-detected.\n\nLAMMPS can `write DCD`_ trajectories but unlike a `CHARMM trajectory`_\n(which is often called a DCD even though CHARMM itself calls them\n\"trj\") the time unit is not fixed to be the AKMA_ time unit (20 AKMA\nis 0.978 picoseconds or 1 AKMA = 4.888821e-14 s) but can depend on\nsettings in LAMMPS. The most common case for biomolecular simulations\nappears to be that the time step is recorded in femtoseconds (command\n`units real`_ in the input file) and lengths in ångströms. Other cases\nare unit-less Lennard-Jones time units.\n\nThis presents a problem for MDAnalysis because it cannot autodetect\nthe unit from the file. By default we are assuming that the unit for\nlength is the ångström and for the time is the femtosecond. 
If this is\nnot true then the user *should supply the appropriate units* in the\nkeywords *timeunit* and/or *lengthunit* to :class:`DCDWriter` and\n:class:`~MDAnalysis.core.universe.Universe` (which then calls\n:class:`DCDReader`).\n\nData file formats\n-----------------\n\nBy default either the `atomic` or `full` atom styles are expected,\nhowever this can be customised, see :ref:`atom_style_kwarg`.\n\nDump files\n----------\n\nThe DumpReader expects ascii dump files written with the default\n`LAMMPS dump format`_ of 'atom'\n\n\nExample: Loading a LAMMPS simulation\n------------------------------------\n\nTo load a LAMMPS simulation from a LAMMPS data file (using the\n:class:`~MDAnalysis.topology.LAMMPSParser.DATAParser`) together with a\nLAMMPS DCD with \"*real*\" provide the keyword *format=\"LAMMPS*\"::\n\n >>> u = MDAnalysis.Universe(\"lammps.data\", \"lammps_real.dcd\", format=\"LAMMPS\")\n\nIf the trajectory uses *units nano* then use ::\n\n >>> u = MDAnalysis.Universe(\"lammps.data\", \"lammps_nano.dcd\", format=\"LAMMPS\",\n ... lengthunit=\"nm\", timeunit=\"ns\")\n\nTo scan through a trajectory to find a desirable frame and write to a LAMMPS\ndata file,\n\n>>> for ts in u.trajectory:\n... # analyze frame\n... if take_this_frame == True:\n... with mda.Writer('frame.data') as W:\n... W.write(u.atoms)\n... break\n\nNote\n----\nLennard-Jones units are not implemented. See :mod:`MDAnalysis.units`\nfor other recognized values and the documentation for the LAMMPS\n`units command`_.\n\nSee Also\n--------\n\n For further discussion follow the reports for `Issue 84`_ and `Issue 64`_.\n\n.. _LAMMPS: http://lammps.sandia.gov/\n.. _write DCD: http://lammps.sandia.gov/doc/dump.html\n.. _CHARMM trajectory: http://www.charmm.org/documentation/c36b1/dynamc.html#%20Trajectory\n.. _AKMA: http://www.charmm.org/documentation/c36b1/usage.html#%20AKMA\n.. _units real: http://lammps.sandia.gov/doc/units.html\n.. _units command: http://lammps.sandia.gov/doc/units.html\n.. _`Issue 64`: https://github.com/MDAnalysis/mdanalysis/issues/64\n.. _`Issue 84`: https://github.com/MDAnalysis/mdanalysis/issues/84\n.. _`LAMMPS dump format`: http://lammps.sandia.gov/doc/dump.html\n\nClasses\n-------\n\n.. autoclass:: DCDReader\n :members:\n :inherited-members:\n.. autoclass:: DCDWriter\n :members:\n :inherited-members:\n.. autoclass:: DATAReader\n :members:\n :inherited-members:\n.. autoclass:: DATAWriter\n :members:\n :inherited-members:\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six.moves import zip, range, map\nfrom six import raise_from\nimport os\nimport numpy as np\n\nfrom MDAnalysis.core.groups import requires\nfrom MDAnalysis.lib import util, mdamath, distances\nfrom MDAnalysis.lib.util import cached\nfrom MDAnalysis.coordinates import DCD\nfrom MDAnalysis import units\nfrom MDAnalysis.topology.LAMMPSParser import DATAParser\nfrom MDAnalysis.exceptions import NoDataError\nfrom MDAnalysis.coordinates import base\n\nbtype_sections = {'bond':'Bonds', 'angle':'Angles',\n 'dihedral':'Dihedrals', 'improper':'Impropers'}\n\n\nclass DATAWriter(base.WriterBase):\n \"\"\"Write out the current time step as a LAMMPS DATA file.\n\n This writer supports the sections Atoms, Masses, Velocities, Bonds,\n Angles, Dihedrals, and Impropers. This writer will write the header\n and these sections (if applicable). Atoms section is written in the\n \"full\" sub-style if charges are available or \"molecular\" sub-style\n if they are not. 
Molecule id is set to 0 for all atoms.\n\n Note\n ----\n This writer assumes \"conventional\" or \"real\" LAMMPS units where length\n is measured in Angstroms and velocity is measured in Angstroms per\n femtosecond. To write in different units, specify `lengthunit`\n\n If atom types are not already positive integers, the user must set them\n to be positive integers, because the writer will not automatically\n assign new types.\n\n To preserve numerical atom types when writing a selection, the Masses\n section will have entries for each atom type up to the maximum atom type.\n If the universe does not contain atoms of some type in\n {1, ... max(atom_types)}, then the mass for that type will be set to 1.\n\n In order to write bonds, each selected bond type must be explicitly set to\n an integer >= 1.\n\n \"\"\"\n format = 'DATA'\n\n def __init__(self, filename, convert_units=True, **kwargs):\n \"\"\"Set up a DATAWriter\n\n Parameters\n ----------\n filename : str\n output filename\n convert_units : bool, optional\n units are converted to the MDAnalysis base format; [``True``]\n \"\"\"\n self.filename = util.filename(filename, ext='data')\n\n self.convert_units = convert_units\n\n self.units = {'time': 'fs', 'length': 'Angstrom'}\n self.units['length'] = kwargs.pop('lengthunit', self.units['length'])\n self.units['time'] = kwargs.pop('timeunit', self.units['time'])\n self.units['velocity'] = kwargs.pop('velocityunit',\n self.units['length']+'/'+self.units['time'])\n\n def _write_atoms(self, atoms):\n self.f.write('\\n')\n self.f.write('Atoms\\n')\n self.f.write('\\n')\n\n try:\n charges = atoms.charges\n except (NoDataError, AttributeError):\n has_charges = False\n else:\n has_charges = True\n\n indices = atoms.indices + 1\n types = atoms.types.astype(np.int32)\n resids = atoms.resids\n\n if self.convert_units:\n coordinates = self.convert_pos_to_native(atoms.positions, inplace=False)\n\n if has_charges:\n for index, resid, atype, charge, coords in zip(indices, resids, types, charges,\n coordinates):\n self.f.write('{i:d} {r:d} {t:d} {c:f} {x:f} {y:f} {z:f}\\n'.format(\n i=index, r=resid, t=atype, c=charge, x=coords[0],\n y=coords[1], z=coords[2]))\n else:\n for index, resid, atype, coords in zip(indices, resids, types, coordinates):\n self.f.write('{i:d} {r:d} {t:d} {x:f} {y:f} {z:f}\\n'.format(\n i=index, r=resid, t=atype, x=coords[0], y=coords[1],\n z=coords[2]))\n\n def _write_velocities(self, atoms):\n self.f.write('\\n')\n self.f.write('Velocities\\n')\n self.f.write('\\n')\n indices = atoms.indices + 1\n velocities = self.convert_velocities_to_native(atoms.velocities,\n inplace=False)\n for index, vel in zip(indices, velocities):\n self.f.write('{i:d} {x:f} {y:f} {z:f}\\n'.format(i=index, x=vel[0],\n y=vel[1], z=vel[2]))\n\n def _write_masses(self, atoms):\n self.f.write('\\n')\n self.f.write('Masses\\n')\n self.f.write('\\n')\n mass_dict = {}\n max_type = max(atoms.types.astype(np.int32))\n for atype in range(1, max_type+1):\n # search entire universe for mass info, not just writing selection\n masses = set(atoms.universe.atoms.select_atoms(\n 'type {:d}'.format(atype)).masses)\n if len(masses) == 0:\n mass_dict[atype] = 1.0\n else:\n mass_dict[atype] = masses.pop()\n if masses:\n raise ValueError('LAMMPS DATAWriter: to write data file, '+\n 'atoms with same type must have same mass')\n for atype, mass in mass_dict.items():\n self.f.write('{:d} {:f}\\n'.format(atype, mass))\n\n def _write_bonds(self, bonds):\n self.f.write('\\n')\n 
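# Section header for this connectivity type ('Bonds', 'Angles', ...), looked up in btype_sections.\n 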
self.f.write('{}\\n'.format(btype_sections[bonds.btype]))\n self.f.write('\\n')\n\n # LAMMPS requires integer type ids >= 1, so map each distinct\n # bond type to a sequential integer before writing.\n bt = {}\n t = 0\n for bond, i in zip(bonds, range(1, len(bonds)+1)):\n if bond.type not in bt:\n t += 1\n bt[bond.type] = t\n self.f.write('{:d} {:d} '.format(i, bt[bond.type])+\\\n ' '.join((bond.atoms.indices + 1).astype(str))+'\\n')\n\n def _write_dimensions(self, dimensions):\n \"\"\"Convert dimensions to triclinic vectors, convert lengths to native\n units and then write the dimensions section\n \"\"\"\n if self.convert_units:\n triv = self.convert_pos_to_native(mdamath.triclinic_vectors(\n dimensions),inplace=False)\n self.f.write('\\n')\n self.f.write('{:f} {:f} xlo xhi\\n'.format(0., triv[0][0]))\n self.f.write('{:f} {:f} ylo yhi\\n'.format(0., triv[1][1]))\n self.f.write('{:f} {:f} zlo zhi\\n'.format(0., triv[2][2]))\n if any([triv[1][0], triv[2][0], triv[2][1]]):\n self.f.write('{xy:f} {xz:f} {yz:f} xy xz yz\\n'.format(\n xy=triv[1][0], xz=triv[2][0], yz=triv[2][1]))\n self.f.write('\\n')\n\n @requires('types', 'masses')\n def write(self, selection, frame=None):\n \"\"\"Write selection at current trajectory frame to file.\n\n The sections for Atoms, Masses, Velocities, Bonds, Angles,\n Dihedrals, and Impropers (if these are defined) are\n written. The Atoms section is written in the \"full\" sub-style\n if charges are available or \"molecular\" sub-style if they are\n not. Molecule id in atoms section is set to 0.\n\n No other sections are written to the DATA file.\n As of this writing, other sections are not parsed into the topology\n by the :class:`DATAReader`.\n\n Note\n ----\n If the selection includes a partial fragment, then only the bonds,\n angles, etc. whose atoms are contained within the selection will be\n included.\n\n Parameters\n ----------\n selection : AtomGroup or Universe\n MDAnalysis AtomGroup (selection or Universe.atoms) or also Universe\n frame : int (optional)\n optionally move to frame number `frame`\n\n \"\"\"\n u = selection.universe\n if frame is not None:\n u.trajectory[frame]\n else:\n frame = u.trajectory.ts.frame\n\n # make sure to use atoms (Issue 46)\n atoms = selection.atoms\n\n # check that types can be converted to ints if they aren't ints already\n try:\n atoms.types.astype(np.int32)\n except ValueError:\n # non-numeric types: remap them to sequential integer ids\n t = 0\n t_dict = {}\n for atom in atoms:\n if atom.type not in t_dict:\n t += 1\n t_dict[atom.type] = str(t)\n atom.type = t_dict[atom.type]\n\n try:\n velocities = atoms.velocities\n except (NoDataError, AttributeError):\n has_velocities = False\n else:\n has_velocities = True\n\n features = {}\n with util.openany(self.filename, 'w') as self.f:\n self.f.write('LAMMPS data file via MDAnalysis\\n')\n self.f.write('\\n')\n self.f.write('{:>12d} atoms\\n'.format(len(atoms)))\n\n attrs = [('bond', 'bonds'), ('angle', 'angles'),\n ('dihedral', 'dihedrals'), ('improper', 'impropers')]\n\n for btype, attr_name in attrs:\n try:\n features[btype] = atoms.__getattribute__(attr_name)\n self.f.write('{:>12d} {}\\n'.format(len(features[btype]),\n attr_name))\n features[btype] = features[btype].atomgroup_intersection(\n atoms, strict=True)\n except AttributeError:\n # the topology defines no such connectivity section\n pass\n\n self.f.write('\\n')\n self.f.write('{:>12d} atom types\\n'.format(max(atoms.types.astype(np.int32))))\n\n for btype, attr in features.items():\n self.f.write('{:>12d} {} types\\n'.format(len(attr.types()),\n btype))\n\n self._write_dimensions(atoms.dimensions)\n\n self._write_masses(atoms)\n self._write_atoms(atoms)\n for attr in features.values():\n if attr is None or len(attr) == 0:\n continue\n self._write_bonds(attr)\n\n if has_velocities:\n self._write_velocities(atoms)\n\n\n\n","sub_path":"scripts/LAMMPS.py","file_name":"LAMMPS.py","file_ext":"py","file_size_in_byte":15475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"460513656","text":"import sys\nsys.setrecursionlimit(10 ** 6)\n\n\nclass Graph:\n def __init__(self, nodes_count):\n self.nodes = []\n self.nodes_count = nodes_count\n for i in range(nodes_count):\n self.nodes.append([])\n\n def new_edge(self, first_node, second_node):\n self.nodes[first_node].append(second_node)\n self.nodes[second_node].append(first_node)\n\n\ndef create_graph():\n nodes_count = int(input())\n graph = Graph(nodes_count)\n for i in range(nodes_count - 1):\n first_node, second_node = map(int, input().split())\n graph.new_edge(first_node - 1, second_node - 1)\n return graph\n\n\ndef DFS(graph, node, previous, distance):\n max_distance = distance\n max_node = node\n for neighbour in graph.nodes[node]:\n if neighbour != previous:\n current_distance, current_max_node = \\\n DFS(graph, neighbour, node, distance + 1)\n if max_distance < current_distance:\n max_distance = current_distance\n max_node = current_max_node\n return max_distance, max_node\n\n\ndef get_max_distance(graph):\n distance, node = 
DFS(graph, 0, -1, 0)\n distance, node = DFS(graph, node, -1, 0)\n return distance\n\n\ndef main():\n graph = create_graph()\n print(get_max_distance(graph))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Homework 6/Task_E.py","file_name":"Task_E.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"84026330","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nn = 10\n\ndef u(n,x):\n x1 = np.zeros(n-x).tolist()\n x2 = np.ones(2*n - (n-x)).tolist()\n return np.array(x1 + x2) \n\ns = np.arange(-n, n)\nplt.plot(s,u(n,-3),\"o-\")\nplt.plot(s,u(n,2),\"r--\")\nplt.show()","sub_path":"numpy and scipy/baitap/b5.py","file_name":"b5.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"306772643","text":"from decimal import Decimal\n\nfrom config import settings\nfrom shop.models import Produk\n\n\nclass Keranjang(object):\n def __init__(self, req):\n self.session = req.session\n cart = self.session.get(settings.CART_SESSION_ID)\n if not cart:\n cart = self.session[settings.CART_SESSION_ID] = {}\n\n self.keranjang = cart\n\n def __iter__(self):\n all_produk_id = self.keranjang.keys()\n all_produk = Produk.objects.filter(id__in=all_produk_id)\n\n krj = self.keranjang.copy()\n for produk in all_produk:\n krj[str(produk.id)]['produk'] = produk\n\n for item in krj.values():\n item['harga'] = Decimal(item['harga'])\n item['harga_total'] = item['harga'] * item['jml']\n yield item # like return, except yield makes this method return an iterable object\n\n def __len__(self):\n return sum(item['jml'] for item in self.keranjang.values())\n\n def tambah(self, produk, jml=1, override_jml=False):\n produk_id = str(produk.id)\n if produk_id not in self.keranjang:\n self.keranjang[produk_id] = {\n 'jml': 0,\n 'harga': str(produk.harga)\n }\n\n if override_jml:\n self.keranjang[produk_id]['jml'] = jml\n else:\n self.keranjang[produk_id]['jml'] += jml\n\n self.simpan()\n\n def hapus(self, produk):\n produk_id = str(produk.id)\n if produk_id in self.keranjang:\n del self.keranjang[produk_id]\n self.simpan()\n\n def simpan(self):\n self.session.modified = True\n\n def clear(self):\n del self.session[settings.CART_SESSION_ID]\n self.simpan()\n\n def get_harga_total(self):\n return sum(Decimal(item['harga']) * item['jml'] for item in self.keranjang.values())\n","sub_path":"antoniomele_olshop/cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"8297632","text":"#coding=utf-8\n\nfrom api.document.doc_tools import *\nfrom api.view.base import BaseHandler\nfrom app.customer.models.startup_image import StartupImage\nfrom app.customer.models.switch import Switcher\nfrom app.customer.models.version_info import VersionInfo\nimport time\nfrom app.customer.models.user import UserHeartBeat\nfrom app.channel.models.audit_info import *\nfrom django.conf import settings\nfrom app.audio.models.roomrecord import RoomRecord\n\n\n@handler_define\nclass Initial(BaseHandler):\n @api_define(\"Initial\", r'/app/inital', [\n Param('platform', True, str, \"\", \"\", u'IOS,ANDROID,H5,WEB'),\n ], description=\"Initialization endpoint\")\n def get(self):\n obj = StartupImage.objects.filter(status=StartupImage.STATUS_VALID).first()\n if obj:\n data = {'id': obj.key, 'image': obj.image, 'url': obj.url}\n else:\n data = {}\n switches = 
{}\n platform = self.arg(\"platform\")\n ua = self.request.headers.get('User-Agent')\n\n # Switch rules: 0 = leave unchanged, 1 = enabled\n switch_on = Switcher.get_on_switches(platform)\n for switch in switch_on:\n status = switch.status\n if status.isdigit():\n status = int(status)\n switches[switch.name] = status\n \n # upgrade_type: 0 = no action, 1 = forced upgrade, 2 = optional upgrade\n version_info = {\n 'version': '',\n 'upgrade_type': '',\n 'download_url': '',\n 'desc': '',\n }\n app_name = ua.split(\";\")[0]\n uas = ua.split(\";\")\n if app_name == \"liaoai_teyue\" or app_name == \"liaoai_lizhen\":\n channel = None\n else:\n channel = uas[5]\n\n version = VersionInfo.get_version_info(platform, app_name, channel)\n ua_version = ua.split(\";\")[1]\n\n if version:\n version_info = version.format_version_info()\n # force the upgrade below the minimum supported version\n if ua_version < version.min_version:\n version_info[\"upgrade_type\"] = 1\n elif version.min_version <= ua_version < version.version:\n version_info[\"upgrade_type\"] = version.upgrade_type\n else:\n version_info[\"upgrade_type\"] = 0\n\n downloads_url = version.download_url\n version_info[\"download_url\"] = downloads_url\n version_info[\"desc\"] = version.upgrade_info\n else:\n version_info[\"upgrade_type\"] = 0\n version_info[\"download_url\"] = \"\"\n version_info[\"desc\"] = \"\"\n\n version_info[\"version_code\"] = 1000000\n\n audit_info = ChannelAuditInfo.get_audit_info(channel)\n\n if audit_info and ua_version >= audit_info.version:\n switches[\"review\"] = 0\n else:\n switches[\"review\"] = 1\n\n\n share_url = settings.SHARE_URL\n invite_url = settings.INVITE_URL\n ins_img_url = settings.INS_IMAGE_URL\n\n self.write({\n 'status': \"success\",\n 'start_image': data,\n \"system_timestamp\": int(time.time()),\n \"switch\": switches,\n \"version_info\": version_info,\n \"report_interval\": UserHeartBeat.REPORT_INTERVAL,\n \"share_url\": share_url,\n \"invite_url\": invite_url,\n \"ins_img_url\": ins_img_url,\n \"pollingtime\":60\n })\n","sub_path":"api/handler/start_init.py","file_name":"start_init.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"88271578","text":"\noperacion = input(\"¿Que operacion quieres realizar? 
(Dividir / Multiplicar / Sumar / Restar) \")\n\nprimer_numero = int(input(\"Inserta el primero numero\"))\nsegundo_numero = int(input(\"Inserta el segundo numero\"))\n\nsigno_dividir = \"/\"\nsigno_multiplicar = \"*\"\nsigno_sumar = \"+\"\nsigno_restar = \"-\"\n\nif operacion == \"Dividir\":\n resultado = primer_numero/segundo_numero\n print(\"Operacion definida {}{}{}\".format(primer_numero, signo_dividir, segundo_numero))\n print(\"Resultado = {}\".format(resultado))\n\nelif operacion == \"Multiplicar\":\n resultado = primer_numero*segundo_numero\n print(\"Operacion definida {}{}{}\".format(primer_numero, signo_multiplicar, segundo_numero))\n print(\"Resultado = {}\".format(resultado))\n\nelif operacion == \"Sumar\":\n resultado = int(primer_numero+segundo_numero)\n print(\"Operacion definida {}{}{}\".format(primer_numero, signo_sumar, segundo_numero))\n print(\"Resultado = {}\".format(resultado))\n\nelif operacion == \"Restar\":\n resultado = primer_numero-segundo_numero\n print(\"Operacion definida {}{}{}\".format(primer_numero, signo_restar, segundo_numero))\n print(\"Resultado = {}\".format(resultado))\n\n","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"81185510","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/core/tango/tangodevice.py\n# Compiled at: 2019-08-19 15:09:29\n\"\"\"This module defines the TangoDevice object\"\"\"\nfrom builtins import object\nimport time\nfrom PyTango import DeviceProxy, DevFailed, LockerInfo, DevState\nfrom taurus.core.taurusdevice import TaurusDevice\nfrom taurus.core.taurusbasetypes import TaurusDevState, TaurusLockInfo, LockStatus, TaurusEventType\nfrom taurus.core.util.log import taurus4_deprecation\n__all__ = [\n 'TangoDevice']\n__docformat__ = 'restructuredtext'\n\nclass _TangoInfo(object):\n\n def __init__(self):\n self.dev_class = self.dev_type = 'TangoDevice'\n self.doc_url = 'http://www.esrf.fr/computing/cs/tango/tango_doc/ds_doc/'\n self.server_host = 'Unknown'\n self.server_id = 'Unknown'\n self.server_version = 1\n\n\nclass TangoDevice(TaurusDevice):\n \"\"\"A Device object representing an abstraction of the PyTango.DeviceProxy\n object in the taurus.core.tango scheme\"\"\"\n _factory = None\n _scheme = 'tango'\n _description = 'A Tango Device'\n\n def __init__(self, name='', **kw):\n \"\"\"Object initialization.\"\"\"\n self.call__init__(TaurusDevice, name, **kw)\n self._deviceObj = self._createHWObject()\n self._lock_info = TaurusLockInfo()\n self._deviceStateObj = None\n self._deviceState = TaurusDevState.Undefined\n\n def __getattr__(self, name):\n if name != '_deviceObj' and self._deviceObj is not None:\n return getattr(self._deviceObj, name)\n else:\n cls_name = self.__class__.__name__\n raise AttributeError(\"'%s' has no attribute '%s'\" % (cls_name, name))\n\n def __contains__(self, key):\n \"\"\"delegate the contains interface to the device proxy\"\"\"\n hw = self.getDeviceProxy()\n if hw is None:\n return False\n else:\n return hw.__contains__(key)\n\n def __getitem__(self, key):\n \"\"\"read attribute value using key-indexing syntax (e.g. 
as in a dict)\n on the device\"\"\"\n attr = self.getAttribute(key)\n return attr.read()\n\n def __setitem__(self, key, value):\n \"\"\"set attribute value using key-indexing syntax (e.g. as in a dict)\n on the device\"\"\"\n attr = self.getAttribute(key)\n return attr.write(value)\n\n def getAttribute(self, attrname):\n \"\"\"Returns the attribute object given its name\"\"\"\n slashnb = attrname.count('/')\n if slashnb == 0:\n attrname = '%s/%s' % (self.getFullName(), attrname)\n elif attrname[0] == '/':\n attrname = '%s%s' % (self.getFullName(), attrname)\n return self.factory().getAttribute(attrname)\n\n @taurus4_deprecation(alt='.stateObj.read().rvalue [Tango] or ' + '.state [agnostic]')\n def getState(self, cache=True):\n stateAttrValue = self.stateObj.read(cache=cache)\n if stateAttrValue is not None:\n state_rvalue = stateAttrValue.rvalue\n return DevState.values[state_rvalue.value]\n else:\n return\n\n @taurus4_deprecation(alt='.stateObj [Tango] or ' + '.factory.getAttribute(state_full_name) [agnostic]')\n def getStateObj(self):\n return self.stateObj\n\n @taurus4_deprecation(alt='state')\n def getSWState(self, cache=True):\n raise Exception('getSWState has been removed. Use state instead')\n\n @property\n def state(self, cache=True):\n \"\"\"Reimplemented from :class:`TaurusDevice` to use Tango's state\n attribute for diagnosis of the current state. It supports a \"cache\"\n kwarg\n\n :param cache: (bool) If True (default), cache will be used when reading\n the state attribute of this device\n\n :return: (TaurusDevState)\n \"\"\"\n self._deviceState = TaurusDevState.NotReady\n try:\n taurus_tango_state = self.stateObj.read(cache).rvalue\n except:\n try:\n if self.getDeviceProxy().import_info().exported:\n self._deviceState = TaurusDevState.Undefined\n return self._deviceState\n else:\n return self._deviceState\n\n except:\n return self._deviceState\n\n from taurus.core.tango.enums import DevState as TaurusTangoDevState\n if taurus_tango_state == TaurusTangoDevState.UNKNOWN:\n self._deviceState = TaurusDevState.Undefined\n elif taurus_tango_state not in (TaurusTangoDevState.FAULT,\n TaurusTangoDevState.DISABLE,\n TaurusTangoDevState.INIT):\n self._deviceState = TaurusDevState.Ready\n return self._deviceState\n\n @taurus4_deprecation(alt='state [agnostic] or stateObj.read [Tango]')\n def getValueObj(self, cache=True):\n \"\"\" Deprecated by TEP14.\n ..warning::\n this bck-compat implementation is not perfect because the\n rvalue of the returned TangoAttributeValue is now a member of\n TaurusDevState instead of TaurusSWDevState\n \"\"\"\n if not cache:\n self.warning('Ignoring argument `cache=False`to getValueObj()')\n from taurus.core.tango.tangoattribute import TangoAttrValue\n ret = TangoAttrValue()\n ret.rvalue = self.state\n return ret\n\n def getDisplayDescrObj(self, cache=True):\n desc_obj = super(TangoDevice, self).getDisplayDescrObj(cache)\n ret = []\n for name, value in desc_obj:\n if name.lower() == 'device state' and self.stateObj is not None:\n tg_state = self.stateObj.read(cache).rvalue.name\n value = '%s (%s)' % (value, tg_state)\n ret.append((name, value))\n\n return ret\n\n def cleanUp(self):\n self.trace('[TangoDevice] cleanUp')\n self._descr = None\n if self._deviceStateObj is not None:\n self._deviceStateObj.removeListener(self)\n self._deviceStateObj = None\n self._deviceObj = None\n TaurusDevice.cleanUp(self)\n return\n\n @taurus4_deprecation(alt='.state().name')\n def getDisplayValue(self, cache=True):\n return self.state(cache).name\n\n def 
_createHWObject(self):\n try:\n return DeviceProxy(self.getFullName())\n except DevFailed as e:\n self.warning('Could not create HW object: %s' % e.args[0].desc)\n self.traceback()\n\n @taurus4_deprecation(alt='getDeviceProxy()')\n def getHWObj(self):\n return self.getDeviceProxy()\n\n def getDeviceProxy(self):\n if self._deviceObj is None:\n self._deviceObj = self._createHWObject()\n return self._deviceObj\n\n @taurus4_deprecation(alt='getDeviceProxy() is not None')\n def isValidDev(self):\n \"\"\"see: :meth:`TaurusDevice.isValid`\"\"\"\n return self._deviceObj is not None\n\n def lock(self, force=False):\n li = self.getLockInfo()\n if force:\n if self.getLockInfo().status == TaurusLockInfo.Locked:\n self.unlock(force=True)\n return self.getDeviceProxy().lock()\n\n def unlock(self, force=False):\n return self.getDeviceProxy().unlock(force)\n\n def getLockInfo(self, cache=False):\n lock_info = self._lock_info\n if cache and lock_info.status != LockStatus.Unknown:\n return lock_info\n else:\n try:\n dev = self.getDeviceProxy()\n li = LockerInfo()\n locked = dev.get_locker(li)\n msg = '%s ' % self.getSimpleName()\n if locked:\n lock_info.id = pid = li.li\n lock_info.language = li.ll\n lock_info.host = host = li.locker_host\n lock_info.klass = li.locker_class\n if dev.is_locked_by_me():\n status = LockStatus.LockedMaster\n msg += 'is locked by you!'\n else:\n status = LockStatus.Locked\n msg += 'is locked by PID %s on %s' % (pid, host)\n else:\n lock_info.id = None\n lock_info.language = None\n lock_info.host = host = None\n lock_info.klass = None\n status = LockStatus.Unlocked\n msg += 'is not locked'\n lock_info.status = status\n lock_info.status_msg = msg\n except:\n self._lock_info = lock_info = TaurusLockInfo()\n\n return lock_info\n\n def removeListener(self, listener):\n ret = TaurusDevice.removeListener(self, listener)\n if not ret or self.hasListeners():\n return ret\n return self.stateObj.removeListener(self)\n\n def addListener(self, listener):\n weWereListening = self.hasListeners()\n ret = TaurusDevice.addListener(self, listener)\n if not ret:\n return ret\n if weWereListening:\n try:\n evt_value = self.__decode(self.stateObj.read())\n except:\n self.debug('Cannot read state')\n return ret\n\n listeners = hasattr(listener, '__iter__') and listener or [\n listener]\n self.fireEvent(TaurusEventType.Change, evt_value, listeners)\n else:\n self.stateObj.addListener(self)\n return ret\n\n def eventReceived(self, event_src, event_type, event_value):\n if event_type == TaurusEventType.Config:\n return\n value = self.__decode(event_value)\n new_state = value.rvalue\n if new_state != self._deviceState:\n msg = 'Device State changed %s -> %s' % (self._deviceState.name,\n new_state.name)\n self.debug(msg)\n self._deviceState = new_state\n self.fireEvent(TaurusEventType.Change, value)\n\n def __decode(self, event_value):\n \"\"\"Decode events from the state attribute into TangoAttrValues whose\n rvalue is the Device state\"\"\"\n from taurus.core.tango.tangoattribute import TangoAttrValue\n if isinstance(event_value, TangoAttrValue):\n new_state = TaurusDevState.Ready\n elif isinstance(event_value, DevFailed):\n new_state = TaurusDevState.NotReady\n else:\n self.info('Unexpected event value: %r', event_value)\n new_state = TaurusDevState.Undefined\n from taurus.core.taurusbasetypes import TaurusModelValue\n value = TaurusModelValue()\n value.rvalue = new_state\n return value\n\n def __pollResult(self, attrs, ts, result, error=False):\n if error:\n for attr in attrs.values():\n 
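# A failed bulk read propagates the same error object to every polled attribute.\n 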
attr.poll(single=False, value=None, error=result, time=ts)\n\n return\n for da in result:\n if da.has_failed:\n v, err = None, DevFailed(*da.get_err_stack())\n else:\n v, err = da, None\n attr = attrs[da.name]\n attr.poll(single=False, value=v, error=err, time=ts)\n\n return\n\n def __pollAsynch(self, attrs):\n ts = time.time()\n try:\n req_id = self.read_attributes_asynch(list(attrs.keys()))\n except DevFailed as e:\n return (\n False, e, ts)\n\n return (\n True, req_id, ts)\n\n def __pollReply(self, attrs, req_id, timeout=None):\n ok, req_id, ts = req_id\n if not ok:\n self.__pollResult(attrs, ts, req_id, error=True)\n return\n else:\n if timeout is None:\n timeout = 0\n timeout = int(timeout * 1000)\n result = self.read_attributes_reply(req_id, timeout)\n self.__pollResult(attrs, ts, result)\n return\n\n def poll(self, attrs, asynch=False, req_id=None):\n \"\"\"optimized by reading of multiple attributes in one go\"\"\"\n if req_id is not None:\n return self.__pollReply(attrs, req_id)\n else:\n if asynch:\n return self.__pollAsynch(attrs)\n error = False\n ts = time.time()\n try:\n result = self.read_attributes(list(attrs.keys()))\n except DevFailed as e:\n error = True\n result = e\n\n self.__pollResult(attrs, ts, result, error=error)\n return\n\n def _repr_html_(self):\n try:\n info = self.getDeviceProxy().info()\n except:\n info = _TangoInfo()\n\n txt = ('<table>\\n <tr><td>Short name</td><td>{simple_name}</td></tr>\\n <tr><td>Standard name</td><td>{normal_name}</td></tr>\\n <tr><td>Full name</td><td>{full_name}</td></tr>\\n <tr><td>Device class</td><td>{dev_class}</td></tr>\\n <tr><td>Server</td><td>{server_id}</td></tr>\\n <tr><td>Documentation</td><td><a target=\"_blank\" href=\"{doc_url}\">{doc_url}</a></td></tr>\\n</table>\\n').format(simple_name=self.getSimpleName(), normal_name=self.getNormalName(), full_name=self.getFullName(), dev_class=info.dev_class, server_id=info.server_id, doc_url=info.doc_url)\n return txt\n\n @taurus4_deprecation(alt='.description')\n def getDescription(self, cache=True):\n return self.description\n\n @property\n def description(self):\n try:\n self._description = self.getDeviceProxy().description()\n except:\n pass\n\n return self._description\n\n @property\n def stateObj(self):\n if self._deviceStateObj is None:\n self._deviceStateObj = self.getAttribute('state')\n return self._deviceStateObj","sub_path":"pycfiles/taurus-4.6.1-py2.7/tangodevice.py","file_name":"tangodevice.py","file_ext":"py","file_size_in_byte":13766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"312006296","text":"import webapp2\nimport uuid\nimport urllib\nimport constants\nimport urllib2\nimport json\nimport math\n\nclass IDTYPE():\n USER = \"user\"\n IMAGE = \"image\"\n SUBS = \"subs\"\n INVITE = \"inv\"\n STREAM = \"stream\"\n TREND_COUNTER = \"tcount\"\n COUNTER = \"count\"\n\n\ndef getUUID(type):\n return type + str(uuid.uuid4())\n #return base64.urlsafe_b64decode(uuid.uuid4().bytes)[:-2]\n\ndef distance_on_unit_sphere(lat1, long1, lat2, long2):\n\n # Convert latitude and longitude to\n # spherical coordinates in radians.\n degrees_to_radians = math.pi/180.0\n\n # phi = 90 - latitude\n phi1 = (90.0 - lat1)*degrees_to_radians\n phi2 = (90.0 - lat2)*degrees_to_radians\n\n # theta = longitude\n theta1 = long1*degrees_to_radians\n theta2 = long2*degrees_to_radians\n\n # Compute spherical distance from spherical coordinates.\n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta, phi)\n # cosine( 
arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n\n cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +\n math.cos(phi1)*math.cos(phi2))\n arc = math.acos( cos )\n\n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n #returns distance in meters\n return arc * 6378100\n\nclass InternalAPIRequest(object):\n def __init__(self, method='GET', endpoint_name='', base_uri='', uri_args={}):\n if endpoint_name == '':\n self.endpoint = \"/\"\n else:\n self.endpoint = webapp2.uri_for(endpoint_name, **uri_args)\n self.method = method\n self.base_uri = base_uri\n self.response = ''\n self.params = {}\n self.response_data = ''\n\n def send(self):\n if self.method == 'POST':\n data = urllib.urlencode(self.params)\n req = urllib2.Request(self.base_uri + self.endpoint, data)\n else:\n endpoint_with_params = self.base_uri + self.endpoint + \"?\"\n prefix = \"\"\n for key,value in self.params.items():\n endpoint_with_params += prefix + key + \"=\" + value\n prefix = \"&\"\n req = endpoint_with_params\n\n try:\n resp = urllib2.urlopen(req)\n return json.loads(resp.read())\n except urllib2.HTTPError as e:\n self.response_code = e.code\n self.response_data = e.read()\n return None\n\n\nclass WebException(Exception):\n def __init__(self, redirect_uri, data):\n self.data = data\n self.redirect_uri = redirect_uri","sub_path":"handler_utils.py","file_name":"handler_utils.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607797555","text":"'''\nProvides the Markov Chain implementation.\n'''\nimport random\n\n\nclass MarkovChain(object):\n '''\n A Markov chain.\n '''\n def __init__(self):\n '''\n Construct the Markov chain.\n '''\n self.memory = {}\n\n def train(self, samples):\n '''\n Train the markov chain with a list of samples.\n Each sample itself is a list of tokens that the chain\n will look at.\n '''\n for sample in samples:\n prev = '' # The start\n for token in sample:\n prev_mem = self.memory.setdefault(prev.lower(), [])\n prev_mem.append(token)\n prev = token\n\n def sample(self, length, start_token=''):\n '''\n Sample the chain, returning a list of tokens of the length given.\n Optionally, force it to start with the token given.\n '''\n if not length:\n return [start_token]\n token_mem = self.memory.get(start_token.lower())\n if token_mem is None or len(token_mem) == 0:\n return [start_token] # Chain's over folks\n next_token = random.choice(token_mem)\n return [start_token] + self.sample(length - 1, next_token)\n\n def get_possible_starts(self):\n '''\n Return the keys of the memory, thus the possible starting\n tokens.\n Note that these are always in lowercase because of implementation\n details.\n '''\n return self.memory.keys()\n","sub_path":"hashkov/chain.py","file_name":"chain.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"148367334","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom accounts import views\nfrom accounts.views import (\n ProfileCreate,\n HomePageView,\n ProfileDetail,\n ProfileUpdate,\n MatchesList,\n SignUpView,\n # ActivateAccount,\n # JobUpdateView,\n # CollectionCreate,\n )\n\n\napp_name = 'accounts'\nurlpatterns = [\n\n path('home/', HomePageView.as_view(), name='home'),\n # path('signup/', SignUpView.as_view(), name='signup'),\n\n 
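# Profile CRUD routes handled by class-based views:\n 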
path('profile-create/', ProfileCreate.as_view(), name='profile-create'),\n path('profile-detail/<int:pk>/', ProfileDetail.as_view(), name='profile-detail'),\n path('profile-update/<int:pk>/', ProfileUpdate.as_view(), name='profile-update'),\n\n # path('profile/', NewCreateView.as_view(), name='profile'),\n # path('job-create/', JobCreateView.as_view(), name='job-create'),\n #path('job-update/<int:pk>/', JobUpdateView.as_view(), name='job-update'),\n\n path('matches-list/', MatchesList.as_view(), name='matches-list'),\n]\n\n\n\n\n\n\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"33390278","text":"import cv2\nimport numpy as np\n'''\n Takes the paths of two images and returns the matches found under the given SIFT ratio threshold.\n'''\n\n\ndef get_matches(img1_path, img2_path, sift_threshold):\n img1 = cv2.imread(img1_path)\n img2 = cv2.imread(img2_path)\n _, kp1, des1 = sift_kp(img1)\n _, kp2, des2 = sift_kp(img2)\n print(len(kp1))\n index1 = repeat_removal(kp1)\n index2 = repeat_removal(kp2)\n kp1 = np.array(kp1)[index1]\n kp2 = np.array(kp2)[index2]\n des1 = np.array(des1)[index1]\n des2 = np.array(des2)[index2]\n\n good_match = get_good_match(des1, des2, sift_threshold)\n matching_points_1, matching_points_2 = get_matching_points(kp1, kp2, good_match)\n return matching_points_1, matching_points_2, kp1, kp2, good_match\n\n\n# De-duplication: returns the indices of the unique keypoint coordinates\ndef repeat_removal(kp):\n temp = np.zeros([len(kp), 2])\n for i in range(len(kp)):\n temp[i] = kp[i].pt\n _, index = np.unique(temp, return_index=True, axis=0)\n return index\n\n# Collect the points kept after pre-matching; matching_points is a 2 x n matrix,\n# row 0 holds the x coordinates and row 1 holds the y coordinates\ndef get_matching_points(kp1, kp2, good_match):\n matching_points_1 = np.zeros((2, len(good_match)))\n matching_points_2 = np.zeros((2, len(good_match)))\n for i in range(len(good_match)):\n index1 = good_match[i].queryIdx\n index2 = good_match[i].trainIdx\n matching_points_1[0][i] = kp1[index1].pt[0]\n matching_points_1[1][i] = kp1[index1].pt[1]\n matching_points_2[0][i] = kp2[index2].pt[0]\n matching_points_2[1][i] = kp2[index2].pt[1]\n return matching_points_1, matching_points_2\n\n\n# Detect keypoints and compute descriptors\ndef sift_kp(image):\n gray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d_SIFT.create()\n kp, des = sift.detectAndCompute(gray_image, None)\n kp_image = cv2.drawKeypoints(gray_image,kp,None)\n return kp_image, kp, des\n\n\n# Match descriptors with Lowe's ratio test\ndef get_good_match(des1, des2, sift_threshold):\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(des1, des2, k=2)\n good = []\n for m, n in matches:\n if m.distance < sift_threshold * n.distance:\n good.append(m)\n return good\n\n\n","sub_path":"feature_matching/sift_matching_backup.py","file_name":"sift_matching_backup.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"262260156","text":"from flask import Flask, request, render_template\nimport requests\nfrom bs4 import BeautifulSoup\nimport datetime, json\nimport traceback\n\napp = Flask(__name__, template_folder=\"template\")\n\nlastTime = {} #rise, month, week, day\nlastChart = {} #rise, month, week, day\n\nsampleTypes = ['realtime', 'rise', 'month', 'week', 'day']\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/getMelonChart/<types>')\ndef getMelonChart(types=None):\n # types = {realtime, rise, month, week, day}\n\n if types in sampleTypes:\n if types not in lastChart or datetime.datetime.now() >= 
lastTime[types] + datetime.timedelta(hours=1) : # refresh if an hour has passed or this is the first request\n print(\"refresh\")\n lastTime[types] = datetime.datetime.now().replace(minute=0, second=0, microsecond=0)\n\n if types == 'realtime':\n url = 'https://www.melon.com/chart/index.htm'\n else:\n url = \"https://www.melon.com/chart/\"+types+\"/index.htm\"\n\n header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko'}\n html = requests.get(url, headers=header).text\n soup = BeautifulSoup(html, 'html.parser')\n tag_list = []\n for tr_tag in soup.find(id='tb_list').find_all('tr'): \n # only append well-formed chart rows\n if tr_tag.find(class_=\"ellipsis rank01\") is not None:\n add = {\n \"rank\" : tr_tag.find(class_='rank').text,\n \"artist\" : tr_tag.find(class_='ellipsis rank02').find('span').text,\n \"album\" : tr_tag.find(class_='ellipsis rank03').find('a').text,\n \"img\" : tr_tag.find(class_='image_typeAll').find('img')['src'].split('jpg')[0]+'jpg'\n }\n\n if tr_tag.find(class_='ellipsis rank01').find('.disabled') is not None:\n add[\"title\"] = tr_tag.find(class_='ellipsis rank01').find('a').text\n else:\n add[\"title\"] = tr_tag.find(class_='ellipsis rank01').find('span').text\n\n tag_list.append(add)\n\n lastChart[types] = tag_list\n return json.dumps(tag_list)\n else: # cached data is still fresh\n return json.dumps(lastChart[types])\n else:\n return 'invalid request'\n \n@app.route('/getMelonChartView/<types>')\ndef getMelonChartView(types=None):\n if types in sampleTypes:\n return render_template('chart.html', chart=json.loads(getMelonChart(types)))\n else:\n return 'invalid request'\n\n\n@app.route('/getSearchResult')\ndef getSearchResult(param_q=None, param_page=None):\n keyword = request.args.get('q')\n page = request.args.get('page')\n\n if param_q is not None:\n keyword = param_q\n if param_page is not None:\n page = param_page\n\n if keyword is not None:\n url = \"https://www.melon.com/search/song/index.htm?q=\" + keyword\n\n if page is not None and int(page) > 1:\n url +=\"&pageSize=50&sort=weight&section=all&sectionId=&genreDir=&startIndex=\"+str((int(page)-1)*50+1)\n\n header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko'}\n html = requests.get(url, headers=header).text\n soup = BeautifulSoup(html, 'html.parser')\n song_list = []\n \n # only parse when search results exist\n if soup.find(class_='section_no_data line') is None:\n for tr_tag in soup.find(id='frm_defaultList').find(\"tbody\").find_all('tr'): \n try:\n add = {\n \"no\" : tr_tag.find(class_='no').find(class_='wrap').text,\n \"title\" : tr_tag.find_all('td')[2].find(class_='fc_gray').text,\n \"album_no\" : tr_tag.find_all('td')[2].find(class_='fc_gray')['href'].replace(\"'\", \"\").replace(');', '').split(',')[-1],\n \"album\" : tr_tag.find_all('td')[4].find('a').text,\n \"is_title\" : 'false',\n \"is_hot\" : 'false'\n }\n\n artist = tr_tag.find(id='artistName').text\n add['artist'] = artist[:int(len(artist)/2)]\n\n if tr_tag.find(class_ = \"title\"):\n add['is_title'] = 'true'\n\n if tr_tag.find(class_ = \"hot\"):\n add['is_hot'] = 'true'\n\n song_list.append(add)\n except Exception:\n traceback.print_exc()\n\n return json.dumps(song_list)\n return 'invalid'\n\n@app.route('/getSearchResultView')\ndef getSearchResultView():\n keyword = request.args.get('q')\n page = request.args.get('page')\n\n if keyword is not None:\n return render_template('search.html', search=json.loads(getSearchResult(keyword, page)))\n else:\n return 'invalid request'\n\n@app.route('/getAlbumThumb/<albumId>')\ndef getAlbumThumb(albumId=None):\n if albumId is not None:\n url = 
'https://www.melon.com/delivery/streamingInfo.json?contsId='+albumId+'&contsType=SONG&bitrate=320&pocId=WP10&stRight=N'\n print(url)\n\n header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko'}\n html = requests.get(url, headers=header).text\n soup = BeautifulSoup(html, 'html.parser')\n content = json.loads(str(soup))\n \n return 'https://cdnimg.melon.co.kr/'+content['streamingInfo']['imgPath']\n else:\n return 'invalid request'\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"176143951","text":"'''testAutoEditor.py'''\n\n\"\"\"\nTest auto-editor and make sure everything is working.\n\"\"\"\n\n# Internal Libraries\nimport os\nimport sys\nimport shutil\nimport platform\nimport subprocess\n\n# Included Libraries\nfrom usefulFunctions import Log, FFprobe, sep\n\ndef getRunner():\n if(platform.system() == 'Windows'):\n return ['py', 'auto_editor/__main__.py']\n return ['python3', 'auto_editor/__main__.py']\n\n\ndef pipeToConsole(myCommands: list):\n print(myCommands)\n process = subprocess.Popen(myCommands, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n return process.returncode, stdout.decode(), stderr.decode()\n\n\ndef runTest(cmd):\n pretty_cmd = ' '.join(cmd)\n print(f'Running test: {pretty_cmd}')\n\n add_no_open = '.' in cmd[0]\n cmd = getRunner() + cmd\n if(add_no_open):\n cmd += ['--no_open']\n\n returncode, stdout, stderr = pipeToConsole(cmd)\n if(returncode > 0):\n print('Test Failed.\\n')\n print(stdout)\n print(stderr)\n sys.exit(1)\n else:\n print('Test Succeeded.\\n')\n\n\ndef checkForError(cmd, match=None):\n pretty_cmd = ' '.join(cmd)\n print(f'Running Error Test: {pretty_cmd}')\n\n returncode, stdout, stderr = pipeToConsole(getRunner() + cmd)\n if(returncode > 0):\n if('Error!' in stderr):\n if(match is not None):\n if(match in stderr):\n print('Found match. 
Test Succeeded.')\n else:\n print(f'Test Failed.\\nCould\\'t find \"{match}\"')\n sys.exit(1)\n else:\n print('Test Succeeded.')\n else:\n print('Test Failed.\\n')\n print(f'Program crashed.\\n{stdout}\\n{stderr}')\n sys.exit(1)\n else:\n print('Test Failed.\\n')\n print('Program responsed with a code 0, but should have failed.')\n sys.exit(1)\n\n\ndef cleanup(the_dir):\n for item in os.listdir(the_dir):\n item = f'{the_dir}{sep()}{item}'\n if('_ALTERED' in item or item.endswith('.xml') or item.endswith('.json')\n or item.endswith('.fcpxml')):\n os.remove(item)\n if(item.endswith('_tracks')):\n shutil.rmtree(item)\n\n\ndirPath = os.path.dirname(os.path.realpath(__file__))\nffprobe = FFprobe(dirPath, True, False, Log())\n\ndef fullInspect(fileName, *args):\n for item in args:\n func = item[0]\n expectedOutput = item[1]\n\n if(func(fileName) != expectedOutput):\n\n # Cheating on fps to allow 30 to equal 29.99944409236961\n if(isinstance(expectedOutput, float)):\n from math import ceil\n if(ceil(func(fileName) * 100) == expectedOutput * 100):\n continue\n\n print('Inspection Failed.')\n print(f'Expected Value: {expectedOutput} {type(expectedOutput)}')\n print(f'Actual Value: {func(fileName)} {type(func(fileName))}')\n sys.exit(1)\n print('Inspection Passed.')\n\ndef testAutoEditor():\n # Test Help Command\n runTest(['--help'])\n runTest(['-h'])\n runTest(['--frame_margin', '--help'])\n runTest(['--frame_margin', '-h'])\n runTest(['exportMediaOps', '--help'])\n runTest(['exportMediaOps', '-h'])\n runTest(['progressOps', '-h'])\n\n # Test the Help Command on itself.\n runTest(['--help', '--help'])\n runTest(['-h', '--help'])\n runTest(['--help', '-h'])\n runTest(['-h', '--help'])\n\n # Test version info\n runTest(['--version'])\n runTest(['-v'])\n runTest(['-V'])\n\n # Test debug info\n runTest(['--debug'])\n # --verbose by itself is UB.\n\n if(ffprobe.getFrameRate('example.mp4') != 30.0):\n print(f'getFrameRate did not equal 30.0')\n sys.exit(1)\n\n # Test info subcommand.\n runTest(['info', 'example.mp4'])\n runTest(['info', 'resources/man_on_green_screen.mp4'])\n runTest(['info', 'resources/multi-track.mov'])\n runTest(['info', 'resources/newCommentary.mp3'])\n runTest(['info', 'resources/test.mkv'])\n\n # Test example video.\n runTest(['example.mp4'])\n\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '1280x720'],\n [ffprobe.getVideoCodec, 'mpeg4'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n runTest(['example.mp4', 'exportMediaOps', '-vcodec', 'copy', '--show_ffmpeg_debug'])\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '1280x720'],\n [ffprobe.getVideoCodec, 'h264'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n runTest(['example.mp4', '--video_codec', 'h264', '--render', 'opencv'])\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '1280x720'],\n [ffprobe.getVideoCodec, 'h264'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n runTest(['example.mp4', '-m', '3'])\n runTest(['example.mp4', '-m', '0.3sec'])\n\n # Test rejecting files with no extension.\n shutil.copy('example.mp4', 'example')\n checkForError(['example', '--no_open'], 'must have extension.')\n os.remove('example')\n\n # Test ProgressOps\n runTest(['example.mp4', 'progressOps', '--machine_readable_progress'])\n runTest(['example.mp4', 'progressOps', '--no_progress'])\n\n # Test mp4 to mkv\n runTest(['example.mp4', '-o', 'example.mkv'])\n os.remove('example.mkv')\n\n # Test mkv to mp4\n 
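# (remuxing between containers exercises the same export pipeline in reverse)\n 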
runTest(['resources/test.mkv', '-o', 'test.mp4'])\n os.remove('test.mp4')\n\n # Test Audio File Input and Exporting\n runTest(['resources/newCommentary.mp3', '--silent_threshold', '0.1'])\n\n # Test Cut by All Tracks\n runTest(['resources/multi-track.mov', '--cut_by_all_tracks'])\n\n runTest(['resources/multi-track.mov', '--keep_tracks_seperate'])\n\n runTest(['example.mp4', '--cut_by_this_audio', 'resources/newCommentary.mp3'])\n\n runTest(['example.mp4', '--export_as_json'])\n runTest(['example.json'])\n\n runTest(['example.mp4', '-s', '2', '-mcut', '10'])\n runTest(['example.mp4', '-v', '2', '-mclip', '4'])\n runTest(['example.mp4', '--sounded_speed', '0.5'])\n runTest(['example.mp4', '--silent_speed', '0.5'])\n\n runTest(['example.mp4', '--scale', '1.5', '--render', 'av'])\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '1920x1080'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n runTest(['example.mp4', '--scale', '0.2', '--render', 'av'])\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '256x144'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n runTest(['example.mp4', '--scale', '1.5', '--render', 'opencv'])\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '1920x1080'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n runTest(['example.mp4', '--scale', '0.2', '--render', 'opencv'])\n fullInspect(\n 'example_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '256x144'],\n [ffprobe.getSampleRate, '48000'],\n )\n\n checkForError(['example.mp4', '--zoom', '0,60,1.5', '--render', 'av'])\n checkForError(['example.mp4', '--zoom', '0'])\n checkForError(['example.mp4', '--zoom', '0,60'])\n\n checkForError(['example.mp4', '--rectangle', '0,60,0,10,10,20', '--render', 'av'])\n checkForError(['example.mp4', '--rectangle', '0,60'])\n\n checkForError(['example.mp4', '--background', '000'])\n\n runTest(['create', 'test', '--width', '640', '--height', '360', '-o', 'testsrc.mp4'])\n fullInspect(\n 'testsrc.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '640x360'],\n )\n\n runTest(['testsrc.mp4', '--mark_as_loud', 'start,end', '--zoom', '10,60,2'])\n\n runTest(['example.mp4', '--mark_as_loud', 'start,end', '--rectangle',\n 'audio>0.05,audio<0.05,20,50,50,100', 'audio>0.1,audio<0.1,120,50,150,100'])\n\n runTest(['testsrc.mp4', '--mark_as_loud', 'start,end', '--zoom',\n 'start,end,1,0.5,centerX,centerY,linear', '--scale', '0.5'])\n fullInspect(\n 'testsrc_ALTERED.mp4',\n [ffprobe.getFrameRate, 30.0],\n [ffprobe.getResolution, '320x180'],\n )\n\n runTest(['testsrc.mp4', '--mark_as_loud', 'start,end', '--rectangle',\n '0,30,0,200,100,300,#43FA56,10'])\n\n os.remove('testsrc_ALTERED.mp4')\n os.remove('testsrc.mp4')\n\n cleanup(os.getcwd())\n cleanup('resources')\n\n for item in os.listdir('resources'):\n\n if('man_on_green_screen' in item or item.startswith('.') or item.endswith('.txt')):\n continue\n\n item = f'resources/{item}'\n runTest([item])\n runTest([item, '-exp'])\n runTest([item, '-exr'])\n runTest([item, '-exf'])\n runTest([item, '--export_as_clip_sequence'])\n runTest([item, '--preview'])\n\n runTest(['example.mp4', 'exportMediaOps', '-vcodec', 'h264', '--preset', 'faster'])\n runTest(['example.mp4', 'exportMediaOps', '--audio_codec', 'ac3'])\n runTest(['resources/newCommentary.mp3', 'exportMediaOps', '-acodec', 'pcm_s16le'])\n\n runTest(['example.mp4', '--mark_as_silent', '0,171', '-o', 'hmm.mp4'])\n 
runTest(['example.mp4', 'hmm.mp4', '--combine_files', '--debug'])\n\n os.remove('hmm.mp4')\n\n runTest(['resources/man_on_green_screen.mp4', '--edit_based_on', 'motion', '--debug',\n '--frame_margin', '0', '-mcut', '0', '-mclip', '0'])\n\n cleanup('resources')\n cleanup(os.getcwd())\n\nif(__name__ == '__main__'):\n testAutoEditor()\n","sub_path":"auto_editor/testAutoEditor.py","file_name":"testAutoEditor.py","file_ext":"py","file_size_in_byte":9629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"266286061","text":"import torch\nfrom torch.distributions import MultivariateNormal\nfrom ..utils import unflattify\n\n\nclass StackedObject(object):\n def __init__(self, concated, mask, prev_shape):\n \"\"\"\n Helper object\n \"\"\"\n\n self.concated = concated\n self.mask = mask\n self.prev_shape = prev_shape\n\n\ndef stacker(parameters, selector=lambda u: u.values, dim=1):\n \"\"\"\n Stacks the parameters and returns a n-tuple containing the mask for each parameter.\n :param parameters: The parameters\n :type parameters: tuple[Parameter]|list[Parameter]\n :param selector: The selector\n :param dim: The dimension to start flattening from\n :type dim: int\n :rtype: StackedObject\n \"\"\"\n\n to_conc = tuple()\n mask = tuple()\n prev_shape = tuple()\n\n i = 0\n # TODO: Currently only supports one sampling dimension...\n for p in parameters:\n s = selector(p)\n flat = s if s.dim() <= dim else s.flatten(dim)\n\n if flat.dim() == dim:\n to_conc += (flat.unsqueeze(-1),)\n slc = i\n else:\n to_conc += (flat,)\n slc = slice(i, i + flat.shape[-1])\n\n mask += (slc,)\n i += to_conc[-1].shape[-1]\n prev_shape += (s.shape[dim:],)\n\n return StackedObject(torch.cat(to_conc, dim=-1), mask, prev_shape)\n\n\ndef _construct_mvn(x, w):\n \"\"\"\n Constructs a multivariate normal distribution of weighted samples.\n :param x: The samples\n :type x: torch.Tensor\n :param w: The weights\n :type w: torch.Tensor\n :rtype: MultivariateNormal\n \"\"\"\n\n mean = (x * w.unsqueeze(-1)).sum(0)\n centralized = x - mean\n cov = torch.matmul(w * centralized.t(), centralized)\n\n return MultivariateNormal(mean, scale_tril=torch.cholesky(cov))\n\n\ndef _mcmc_move(params, dist, stacked, shape):\n \"\"\"\n Performs an MCMC move to rejuvenate parameters.\n :param params: The parameters to use for defining the distribution\n :type params: tuple[Parameter]\n :param dist: The distribution to use for sampling\n :type dist: MultivariateNormal\n :param stacked: The mask to apply for parameters\n :type stacked: StackedObject\n :param shape: The shape to sample\n :type shape: int\n :return: Samples from a multivariate normal distribution\n :rtype: torch.Tensor\n \"\"\"\n\n rvs = dist.sample((shape,))\n\n for p, msk, ps in zip(params, stacked.mask, stacked.prev_shape):\n p.t_values = unflattify(rvs[:, msk], ps)\n\n return True\n\n\ndef _eval_kernel(params, dist, n_params):\n \"\"\"\n Evaluates the kernel used for performing the MCMC move.\n :param params: The current parameters\n :type params: tuple[Distribution]\n :param dist: The distribution to use for evaluating the prior\n :type dist: MultivariateNormal\n :param n_params: The new parameters to evaluate against\n :type n_params: tuple of Distribution\n :return: The log difference in priors\n :rtype: torch.Tensor\n \"\"\"\n\n p_vals = stacker(params, lambda u: u.t_values)\n n_p_vals = stacker(n_params, lambda u: u.t_values)\n\n return dist.log_prob(p_vals.concated) - 
dist.log_prob(n_p_vals.concated)","sub_path":"pyfilter/inference/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"353757092","text":"#####################################################\n# Binary search tree with example data set\n##################################################### \n \nimport random\n\ndef losowa(N):\n return [ random.randint(0,100000000) for i in range(N)]\n \ndef in_tree(e, T):\n if T == None:\n return False\n n,left,right = T\n if e == n:\n return True\n if e < n:\n return in_tree(e, left)\n else:\n return in_tree(e, right)\n \ndef add2tree(e,T):\n if T == None:\n return (e, None, None)\n n,left,right = T\n \n if e == n:\n return T\n \n if e < n:\n new_left = add2tree(e, left)\n return (n, new_left, right)\n else:\n return (n, left, add2tree(e, right))\n\nL = losowa(100000)\n\n \nT = None \nfor e in L:\n T = add2tree(e, T)\n\n# check that every inserted value can be found in the tree\nprint (all(in_tree(e, T) for e in L)) \n","sub_path":"BinarySearchTree.py","file_name":"BinarySearchTree.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"96050382","text":"import time as t\nimport numpy as np\nimport pandas as pd\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'nyc': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters(city, month, day):\n \"\"\"\n Asks users to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print ('Hello! Let\\'s explore major US bikeshare data!')\n print ('')\n #Get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n t.sleep(1)\n while True:\n print (\"Which city bikeshare data would you like to explore?\\n\")\n city = input(\"Chicago, NYC or Washington?\\n\").lower()\n if city not in (\"chicago\", \"nyc\", \"washington\"):\n print(\"\\nInvalid answer\\n\")\n continue\n else:\n break\n\n print(\"\\nNow how do you want to filter your data?\\n\")\n\n #Get user input for month (all, january, february, ... 
, june)\n data_filter = input(\"Month, day, both, or none?\\n\").lower()\n\n while True:\n if data_filter not in (\"month\", \"day\", \"both\", \"none\"):\n print(\"\\nInvalid answer\\n\")\n data_filter = input(\"Month, day, both, or none?\\n\").lower()\n elif data_filter == \"month\":\n print(\"Which month do you want to explore?\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n day = 'all'\n while True:\n if month not in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\n print(\"\\nInvalid answer\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n else:\n break\n break\n elif data_filter == \"day\":\n print(\"Which day do you want to explore?\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n month = 'all'\n while True:\n if day not in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','all']:\n print(\"\\nInvalid answer\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n else:\n break\n break\n elif data_filter == \"both\":\n print(\"Which month do you want to explore?\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n while True:\n if month not in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\n print(\"\\nInvalid answer\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n else:\n break\n\n print(\"Now which day do you want to explore?\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n while True:\n if day not in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','all']:\n print(\"\\nInvalid answer\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n else:\n break\n break\n elif data_filter == \"none\":\n month = 'all'\n day = 'all'\n break\n\n print(\"---> \", city)\n print(\"---> \", month)\n print(\"---> \", day)\n return city, month, day\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n #filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n start_time = t.time()\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n print('')\n\n #display the most common month\n df['month'] = df['Start Time'].dt.month\n common_month = df['month'].mode()[0]\n\n 
print('Most Common Month:', common_month)\n print('')\n\n #display the most common day of week (uses the day_of_week column created in load_data)\n common_day = df['day_of_week'].mode()[0]\n\n print('Most Common day of week:', common_day)\n print('')\n\n #display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n\n print('Most Common Start Hour:', common_hour)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n print('')\n start_time = t.time()\n\n #display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n\n print('Most Common Start Station:', common_start_station)\n print('')\n\n #display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n\n print('Most Common End Station:', common_end_station)\n print('')\n\n #display most frequent combination of start station and end station trip\n df['combo'] = df['Start Station'] + ' to ' + df['End Station']\n common_station_combo = df['combo'].mode()[0]\n\n print('Most common Combination:', common_station_combo)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = t.time()\n\n #display total travel time\n total_travel_time = df['Trip Duration'].sum()\n\n print('Total Travel Time:', total_travel_time)\n print('')\n\n #display mean travel time\n average = df['Trip Duration'].mean()\n\n print('Mean/Average Travel Time:', average)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = t.time()\n\n #Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Counts of user types:', user_types)\n print('')\n\n #Display counts of gender\n if 'Gender' in df:\n gender = df['Gender'].value_counts()\n print('Counts of gender:', gender)\n print('')\n else:\n print(\"Gender information is not available for this city!\")\n\n #Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n earliest_birth_year = df['Birth Year'].min()\n print('Earliest Birth Year:', earliest_birth_year)\n print('')\n recent_birth_year = df['Birth Year'].max()\n\n print('Recent Birth Year:', recent_birth_year)\n print('')\n\n common_birth_year = df['Birth Year'].mode()[0]\n print('Most Popular Birth Year:', common_birth_year)\n print('')\n else:\n print(\"Birth year information is not available for this city!\")\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)\n\ndef data(df):\n \"\"\" Displays 5 rows of raw data at a time \"\"\"\n line_number = 0\n print(\"\\nDo you want to see raw data?\\n\")\n answer = input(\"Yes or no?\\n\").lower()\n if answer not in ['yes', 'no']:\n print(\"\\nInvalid answer\\n\")\n answer = input(\"Yes or no?\\n\").lower()\n elif answer == 'yes':\n while True:\n print(df.iloc[line_number : line_number + 5])\n line_number += 5\n print(\"\\nDo you want to see more raw data?\\n\")\n continues = input(\"Yes or no?\\n\").strip().lower()\n if continues == 'no':\n break\n elif answer == 
'no':\n return\n\ndef main():\n city = \"\"\n month = 0\n day = 0\n\n while True:\n city, month, day = get_filters(city, month, day)\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n data(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":9477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"527504930","text":"from tkinter import *\nimport pandas as pd \nimport backendcopy\n\n\n\n\nselected_tuple = None\ndef get_selected_row(event):\n try:\n global selected_tuple\n index=listbox.curselection()[0]\n selected_tuple=listbox.get(index)\n e1.delete(0,END)\n e1.insert(END,selected_tuple[0])\n e2.delete(0,END)\n e2.insert(END,selected_tuple[1])\n e3.delete(0,END)\n e3.insert(END,selected_tuple[2])\n e4.delete(0,END)\n e4.insert(END,selected_tuple[3])\n e5.delete(0,END)\n e5.insert(END,selected_tuple[4])\n except IndexError:\n pass \n\ndef view_command():\n \n listbox.delete(0,END)\n \n \n #listbox.insert(END,\"\\n\")\n for items in backendcopy.view():\n \n listbox.insert(END,' ',items)\n\ndef insert():\n if(product_id.get()==\"\" or date_from.get()==\"\" or date_to.get()==\"\" or amount.get()==\"\" or price_info.get()==\"\"):\n pass\n else:\n \n backendcopy.insert(product_id.get(),date_from.get(),date_to.get(),amount.get(),price_info.get())\n listbox.delete(0,END)\n listbox.insert(END,(product_id.get(),date_from.get(),date_to.get(),amount.get(),price_info.get()))\n\n\ndef update():\n backendcopy.update(product_id.get(),date_from.get(),date_to.get(),amount.get(),price_info.get())\ndef delete():\n backendcopy.delete(selected_tuple[0])\n\ndef search_command():\n listbox.delete(0,END)\n for row in backendcopy.search(product_id.get(),date_from.get(),date_to.get(),amount.get(),price_info.get()):\n listbox.insert(END,' ',row)\n\n \nwindow=Tk()\nframe1=Frame(window)\nframe1.pack()\n \nl1=Label(frame1,text=\"enter productID\",font=\"Times 12 bold\",width=12)\nl1.grid(row=0,column=0)\nl2=Label(frame1,text=\"enter date_from\",font=\"Times 12 bold\",width=12)\nl2.grid(row=0,column=2)\nl3=Label(frame1,text=\"enter date_to\",font=\"Times 12 bold\",width=12)\nl3.grid(row=1,column=0)\nl4=Label(frame1,text=\"amount\",font=\"Times 12 bold\",width=12)\nl4.grid(row=1,column=2)\nl5=Label(frame1,text=\"price info\",font=\"Times 12 bold\",width=12)\nl5.grid(row=2,column=0)\n\nproduct_id=StringVar()\ne1=Entry(frame1,width=30,textvariable=product_id)\ne1.grid(row=0,column=1)\ndate_from=StringVar()\ne2=Entry(frame1,width=30,textvariable=date_from)\ne2.grid(row=0,column=3)\ndate_to=StringVar()\ne3=Entry(frame1,width=30,textvariable=date_to)\ne3.grid(row=1,column=1)\namount=StringVar()\ne4=Entry(frame1,width=30,textvariable=amount)\ne4.grid(row=1,column=3)\nprice_info=StringVar()\ne5=Entry(frame1,width=30,textvariable=price_info)\ne5.grid(row=2,column=1)\n\nframe2 = Frame(window) \nframe2.pack()\n\nb1=Button(frame2,text=\"Viewall\",font=\"Times 12\",width=9,command=view_command)\nb1.grid(row=4,column=0)\nb2=Button(frame2,text=\"Insert\",font=\"Times 12\",width=9,command=insert)\nb2.grid(row=4,column=1)\nb3=Button(frame2,text=\"Update\",font=\"Times 12\",width=9,command=update)\nb3.grid(row=4,column=2)\nb3=Button(frame2,text=\"Search\",font=\"Times 
12\",width=9,command=search_command)\nb3.grid(row=4,column=3)\nb4=Button(frame2,text=\"Delete\",font=\"Times 12\",width=9,command=delete)\nb4.grid(row=4,column=4)\nb5=Button(frame2,text=\"Close\",font=\"Times 12\",width=9,command=window.destroy)\nb5.grid(row=4,column=5)\n\nframe3 = Frame(window) \nframe3.pack()\nscroll = Scrollbar(frame3, orient=VERTICAL)\nlistbox= Listbox(frame3, yscrollcommand=scroll.set,width=80,height=16)\nscroll.config(command=listbox.yview)\nscroll.pack(side=RIGHT, fill=Y)\nlistbox.pack(side=LEFT, fill=BOTH, expand=1)\nlistbox.bind('<<ListboxSelect>>',get_selected_row)\n \nwindow.mainloop()","sub_path":"datakintercopy.py","file_name":"datakintercopy.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"477490970","text":"from sklearn import svm\nimport cv2 as cv\nimport numpy as np\n\n# X = np.array([[0,0], [1,1],[1,0]])\n# y = np.array([[0],[1],[1]])\n#\n# clf = svm.SVC() # class\n# clf.fit(X, y) # training the svc model\n# c = np.array([2,2])\n# xx=X.shape[1]\n# print(xx)\n# #\n# result = clf.predict(c.reshape(-1,1)) # predict the target of testing samples\n# print(result)\n#\n# # print(clf.support_vectors_)\nimg=cv.imread('/Users/lizhengyang/PycharmProjects/ElecMeterRec/resource/img/NewName/47.jpg',0)\nimg1=cv.imread('/Users/lizhengyang/PycharmProjects/ElecMeterRec/resource/img/NewName/43.jpg',0)\nimg2=cv.imread('/Users/lizhengyang/PycharmProjects/ElecMeterRec/resource/img/NewName/40.jpg',0)\n\n\nret, thresh = cv.threshold(img, 50, 255, cv.THRESH_BINARY)\nret, thresh1 = cv.threshold(img1, 50, 255, cv.THRESH_BINARY)\nret, thresh2 = cv.threshold(img2, 50, 255, cv.THRESH_BINARY)\n\nimg_train1 = np.array(thresh)#这个是关键,转化为数组\nimg_train2 = np.array(thresh1)#这个是关键,转化为数组\nimg_train3 = np.array(thresh2)#这个是关键,转化为数组\nimg1=[]\nimg2=[]\nimg3=[]\nfor i in range(0,480):\n img1.extend(img_train1[i])\n img2.extend(img_train2[i])\n img3.extend(img_train3[i])\n\n\n\nimg1=np.array(img1)\nimg2=np.array(img2)\nimg3=np.array(img3)\n# X1 = [[1,2,3],[2,3,5],[3,4,5]]\nXX = [img1,img2,img3]\ny = [0,1,0]\nclf = svm.SVC()\n# print(clf)\nclf.fit(XX, y)\nresult1=clf.predict([img1])\nprint(result1)\n#\n","sub_path":"imgpro/SvmTest.py","file_name":"SvmTest.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"593441074","text":"from CRABClient.UserUtilities import config, getUsernameFromSiteDB\nconfig = config()\n\nconfig.General.requestName = 'hcaltuple_run318983_wpub'\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = True\n\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'pfg_new2_Global_RAW_cfg.py'\nconfig.JobType.pyCfgParams = ['outputFile=hcalTupleTree.root']\n\nconfig.Data.inputDataset = '/ZeroBias/Run2018B-v1/RAW'\nconfig.Data.inputDBS = 'global'\nconfig.Data.splitting = 'LumiBased'\nconfig.Data.unitsPerJob = 5\nconfig.Data.lumiMask = ''\nconfig.Data.runRange = '318983'\nconfig.Data.outLFNDirBase = '/store/user/%s/cms_area/hcal/task9/run318983_wpub/' % (getUsernameFromSiteDB())\nconfig.Data.publication = True\nconfig.Data.outputDatasetTag = \"hcaltuple_run318983_wpub\"\n\nconfig.Site.storageSite = \"T3_US_Rutgers\"\n","sub_path":"ntuplizer/crab_hcaltuple_run318983_cfg.py","file_name":"crab_hcaltuple_run318983_cfg.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"122476021","text":"import pymysql\nimport json\n\ndef exists_arg(key,dict):\n if (key in dict) and dict[key]:\n return True\n return False\n\ndef out_error(self,error,arg):\n self.err_str=error\n if 'error' in arg:\n if(type(arg['error']) == type([])): # type is list\n arg['error'].append(error)\n else: # type is str\n arg['error']=error\n else:\n print(error)\n print(\"\\nQUERY:\\n\"+arg['query'])\n if ('values' in arg) and len(arg['values']):\n print(arg['values'])\n #quit()\n\n\ndef get_query(self,arg): # для get и getrow\n self.error_str=''\n sf=''\n if not exists_arg('select_fields',arg):\n sf='*'\n else:\n sf=arg['select_fields']\n\n if not exists_arg('table',arg):\n out_error(self,\"FreshDB::\"+arg['method']+\" not set attr table\",arg)\n return ''\n \n \n query='select '+sf+' FROM '+arg['table']+' wt'\n \n # join-ы\n if 'tables' in arg:\n for table in arg['tables']:\n if ('lj' in table ) and (table['lj']) :\n query += ' LEFT '\n\n query += ' JOIN '\n query += table['t']\n\n if ('a' in table) and (table['a']):\n query += ' '+ table['a']\n\n if ('l' in table) and (table['l']):\n query += ' ON '+ table['l']\n\n\n if exists_arg('where',arg):\n query += ' WHERE '+arg['where']\n if exists_arg('order',arg):\n query += ' ORDER BY '+arg['order']\n \n if arg['method'] == 'getrow':\n arg['limit'] = 1\n \n if exists_arg('perpage',arg) and exists_arg('table',arg):\n arg['perpage']=int(arg['perpage'])\n if not(exists_arg('page',arg)):\n arg['page']=1\n\n query_count='SELECT CEILING(count(*) / ' + str(arg['perpage'])+') FROM '+arg['table'];\n\n if exists_arg('where',arg): query_count +=' WHERE '+arg['where']\n \n if exists_arg('group',arg): query_count +=' GROUP BY ' + arg['group']\n \n if not exists_arg('values',arg): arg['values']=[]\n\n arg['maxpage']=self.query(query=query_count,onevalue=1,values=arg['values'])\n\n limit1=(arg['page']-1) * arg['perpage'];\n arg['limit']=str(limit1)+','+str(arg['perpage']);\n\n\n if exists_arg('limit',arg):\n query += ' LIMIT ' + str(arg['limit'])\n return query\n\ndef to_json(data):\n return json.dumps(data, ensure_ascii=False) # ,separators=(',', ': ') sort_keys=False,indent=0,\n\n\nclass FreshDB():\n def go_connect(self,arg):\n self.connect = pymysql.connect(host=arg['host'], user=arg['user'], password=arg['password'], database=arg['dbname'])\n self.connect.ping(reconnect=True)\n\n\n def __init__(self, **arg):\n self.tmpl_saver = None;\n self.error_str=''\n if not exists_arg('host',arg):\n arg['host']='localhost'\n\n if not exists_arg('password',arg):\n arg['password']=''\n\n if exists_arg('tmpl_saver',arg):\n self.tmpl_saver=arg['tmpl_saver']\n \n self.go_connect(arg)\n # , cursorclass=pymysql.cursors.DictCursor\n def set_tmpl_saver(self,tmpl_saver):\n self.tmpl_saver=tmpl_saver\n\n def execute(self,cur,arg):\n if ('debug' in arg) and (arg['debug']):\n print(arg['query'])\n if exists_arg('values',arg): print(arg['values'])\n \n if not exists_arg('values',arg):\n arg['values']=[]\n\n try:\n cur.execute(arg['query'],arg['values'])\n self.connect.commit()\n except pymysql.err.ProgrammingError as err:\n out_error(self,str(err),arg)\n self.error_str = str(err)\n\n # except pymysql.err.IntegrityError as e2:\n # out_error(self,str(e2),arg)\n # self.error_str=e2\n\n except pymysql.err.InternalError as err:\n out_error(self,str(err),arg)\n self.error_str = str(err)\n \n\n\n def desc(self, **arg):\n cur = pymysql.cursors.DictCursor(self.connect)\n \n if not('method' in arg) :\n self.error_str=''\n arg['method']='desc'\n \n if not 
exists_arg('table',arg):\n out_error(self,\"FreshDB::\"+arg['method']+\" not set attr table\",arg)\n return \n\n arg['query']='desc '+arg['table']\n\n\n self.execute(cur, arg)\n if self.error_str: return {}\n \n fields=cur.fetchall()\n result={}\n for f in fields:\n result[ f['Field'] ]=f\n\n return result\n\n def getvalue(self, **arg):\n cur = self.connect.cursor()\n #print('arg:',arg)\n arg['method']='getvalue'\n arg['query']=get_query(self,arg)\n #print(arg)\n \n if self.error_str:\n return None\n self.execute(cur,arg)\n \n rez=cur.fetchone()\n print('rez',rez)\n if not rez: rez=''\n else: rez=rez[0]\n \n return self.prepare_result(rez,arg)\n\n def getrow(self, **arg):\n self.error_str=''\n cur = pymysql.cursors.DictCursor(self.connect)\n arg['method']='getrow'\n arg['query']=get_query(self,arg)\n\n if self.error_str:\n return {}\n\n self.execute(cur,arg)\n if self.error_str: return {}\n\n rez=cur.fetchone()\n \n if exists_arg('to_json',arg):\n return to_json(rez)\n #if not(rez):\n # rez=False\n\n return self.prepare_result(rez,arg) \n def prepare_result(self,rez,arg): # prepares and returns the result\n \n if exists_arg('to_json',arg):\n rez=to_json(rez)\n\n if exists_arg('to_tmpl',arg):\n if self.tmpl_saver :\n self.tmpl_saver(name=arg['to_tmpl'],value=rez)\n if exists_arg('perpage',arg) and exists_arg('maxpage',arg):\n return int(arg['maxpage'])\n else:\n return True\n\n if exists_arg('perpage',arg) and exists_arg('maxpage',arg):\n\n return int(arg['maxpage']),rez\n\n return rez\n def get(self, **arg):\n self.error_str=''\n if exists_arg('onerow',arg):\n return self.getrow(**arg)\n\n cur = pymysql.cursors.DictCursor(self.connect)\n arg['method']='get'\n arg['query']=get_query(self,arg)\n\n if self.error_str:\n return []\n\n self.execute(cur,arg)\n\n if self.error_str:\n rez=[]\n else:\n rez=cur.fetchall()\n return self.prepare_result(rez,arg)\n\n \n\n def query(self, **arg):\n self.error_str=''\n if exists_arg('onevalue',arg):\n cur = self.connect.cursor()\n else:\n cur = pymysql.cursors.DictCursor(self.connect)\n \n arg['method']='query'\n if not exists_arg('query',arg):\n out_error(self,'FreshDB::query: not exists attribute query',arg)\n\n if self.error_str : return []\n \n self.execute(cur,arg)\n\n if exists_arg('onevalue',arg):\n rez = cur.fetchone()\n if rez: rez=rez[0]\n else: rez=cur.fetchall()\n \n if exists_arg('to_json',arg): rez=to_json(rez)\n\n #if exists_arg('to_tmpl',arg):\n # self.tmpl_vars[arg['to_tmpl']]=rez\n\n #if exists_arg('perpage',arg) & exists_arg('maxpage',arg):return arg['maxpage']\n return rez\n\n #print(\"query:\")\n #print({'arg':arg})\n\n def save(self, **arg):\n self.error_str=''\n arg['method']='save'\n if not exists_arg('table',arg):\n out_error(self,\"FreshDB::\"+arg['method']+\" not set attr table\",arg)\n return \n \n if not exists_arg('data',arg):\n out_error(self,\"FreshDB::\"+arg['method']+\" not set attr data\",arg)\n return \n\n \n exists_fields=self.desc(table=arg['table'])\n data=arg['data']\n \n\n insert_fields=[]\n insert_vopr=[]\n insert_values=[]\n\n update_names=[]\n for name in data.keys():\n if name in exists_fields:\n insert_fields.append(name)\n insert_vopr.append('%s')\n insert_values.append(data[name])\n update_names.append(name+'=%s')\n\n \n if exists_arg('update',arg):\n if not exists_arg('where',arg):\n out_error(self,\"FreshDB::\"+arg['method']+\" not set attr where\",arg)\n return False\n \n arg['query']='UPDATE '+arg['table']+' SET '+','.join(update_names) + ' WHERE '+arg['where']\n else:\n q=''\n if 
exists_arg('replace',arg):\n q='REPLACE'\n else:\n q='INSERT'\n q +=' INTO '+arg['table'] +'(' + ','.join(insert_fields)+') VALUES (' + ','.join(insert_vopr) + ')'\n arg['query']=q\n\n arg['values']=insert_values\n\n cur = pymysql.cursors.DictCursor(self.connect)\n self.execute(cur,arg)\n return cur.lastrowid","sub_path":"lib/dbl/freshdb.py","file_name":"freshdb.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"128493115","text":"# import keras modules\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.optimizers import SGD\n\nimport numpy as np\n\n\ndef get_data(n):\n X = np.random.uniform(-1, 1, (n, 2)) # sample both signs so the labels are not all True\n Y = np.sign(X[:, 0]) == np.sign(X[:, 1])\n return X, Y\n\n\n# create the network\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=3))\nmodel.add(Activation('tanh'))\nmodel.add(Dense(1))\nmodel.add(Activation('tanh'))\n\n# add optimizer, compile and train\nsgd = SGD(lr=0.005)\nmodel.compile(loss='mse', optimizer=sgd)\n\n# load and split dataset\nX, y = get_data(100000)\n#X, y = dataset[:,:2], dataset[:,2]\nbias = np.ones((X.shape[0], 1))\nX = np.concatenate((bias, X), axis=1)\nX_train, X_test, y_train, y_test = X[:8000], X[8000:], y[:8000], y[8000:]\n\n# train the network\nmodel.fit(X_train, y_train, batch_size=None, epochs=100)\n\n# evaluate loss on test set\nloss = model.test_on_batch(X_test, y_test)\nprint(\"Loss on the test set :\", loss)\n","sub_path":"9/prob_keras.py","file_name":"prob_keras.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"207380218","text":"import argparse\nimport sys\nfrom typing import Union, List\n\nimport cv2\nimport h5py\nimport keras\nimport numpy as np\n\n\ndef load_image(image: Union[np.ndarray, str]):\n if isinstance(image, str):\n image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n return image\n\n\nclass NeuralNetwork:\n\n def __init__(self, model_path, output_mapping=lambda x: x):\n self.output_mapping = output_mapping\n h5file = h5py.File(model_path, mode='r')\n model_version = h5file.attrs.get('keras_version')\n keras_version = str(keras.__version__).encode('utf8')\n\n if model_version != keras_version:\n print('Keras version(%s) != model version(%s)' % (keras_version, model_version), file=sys.stderr)\n\n self.model = keras.models.load_model(model_path)\n print(self.model.summary())\n\n def predict_single(self, image: Union[str, np.ndarray]):\n return self.predict([load_image(image)])[0]\n\n def predict(self, images: Union[List[np.ndarray], np.ndarray]):\n if isinstance(images, list):\n images = np.array(images)\n return list(map(\n self.output_mapping,\n self.model.predict(\n images.reshape(\n images.shape[0], *(self.model.layers[0].input_shape[1:])\n )\n )\n ))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model',\n type=str,\n help='Your model.h5 file'\n )\n parser.add_argument(\n 'images',\n nargs='+',\n type=str,\n help='images to find predictions for'\n )\n ns = parser.parse_args()\n model = NeuralNetwork(ns.model)\n for image in ns.images:\n print(image, model.predict_single(image))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"keras_remake/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"403783867","text":"# 268. 
Missing Number\n# https://leetcode.com/problems/missing-number/\n# \n\nclass Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res=0\n i=0\n for val in nums:\n i=i+1\n res=res+i-val\n return res\n\n'''\nGiven an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array. For example, Given nums = [0, 1, 3] return 2. Note: Your algorithm should run in linear runtime complexity. Could you implement it using only constant extra space complexity?\n'''\n","sub_path":"268. Missing Number/Accepted-60ms-64473666.py","file_name":"Accepted-60ms-64473666.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"428704324","text":"import numpy as np\n\n\nclass SpriteIdentifier:\n def __init__ (self, pixel_quantity=10, iterations=1, match_threshold=0.8):\n self.pixel_quantity = pixel_quantity\n self.iterations = iterations\n\n self.minimum_matches = int(match_threshold * pixel_quantity)\n\n self.sprites = dict()\n self.sprite_shapes = dict()\n\n def identify(self, sprite):\n results = dict()\n\n for sprite_name, sprite_data in self.sprites.get(sprite.image_shape, dict()).items():\n results[sprite_name] = 0\n\n for i in range(0, sprite_data[1].shape[2]):\n for ii in range(0, sprite_data[1].shape[0]):\n pixel = sprite_data[1][ii, 0, i, :3]\n coordinates = sprite_data[1][ii, 0, i, 3:]\n\n for iii in range(0, sprite.image_data.shape[3]):\n sprite_pixel = sprite.image_data[coordinates[0], coordinates[1], :, iii]\n\n if tuple(sprite_pixel) == tuple(pixel):\n results[sprite_name] += 1\n\n max_result = max(results.items(), key=lambda r: r[1])\n\n return max_result[0] if max_result[1] >= self.minimum_matches else \"UNKNOWN\"\n\n def register(self, sprite):\n if sprite.image_shape not in self.sprites:\n self.sprites[sprite.image_shape] = dict()\n\n sprite_sample_pixels_array = np.array(\n sprite.sample_pixels(quantity=self.pixel_quantity, iterations=self.iterations))\n\n self.sprites[sprite.image_shape][sprite.name] = (\n sprite,\n sprite_sample_pixels_array\n )\n\n self.sprite_shapes[sprite.name] = sprite.image_shape\n","sub_path":"lib/sprite_identifier.py","file_name":"sprite_identifier.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"582437191","text":"from flask import Flask, request, jsonify\nimport os\nimport time\nfrom flask_cors import CORS\n\napp = Flask(__name__)\ncors = CORS(app)\n\ndef dbCall():\n time.sleep(3)\n productsdb = [\n { 'id': 0, 'title': 'Apples', 'price': 1.20 },\n { 'id': 1, 'title': 'Bananas', 'price': 1.45 },\n { 'id': 2, 'title': 'Grapes', 'price': 5.12 },\n { 'id': 3, 'title': 'Blackberries', 'price': 2.52 }\n ]\n return productsdb\n\n@app.route('/', methods=['GET'])\ndef home():\n products = dbCall()\n return jsonify(products)\n\nif __name__ == '__main__':\n port= os.environ.get('PORT')\n app.run(host='0.0.0.0', debug=True, 
port=port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"140675479","text":"from PIL import Image\n\n# Fonction pour le produit matriciel de deux matrices\ndef produitMatriciel(A,B):\n\tresult = [[0 for x in range(len(A))] for y in range(len(A))]\n\tfor i in range(len(A)):\n\t\tfor j in range(len(A)):\n\t\t\tresult[i][j] = A[i][j] * B[i][j]\n\treturn result\n\n# Fonction pour la somme matricielle de deux matrices\ndef sommeMatricielle(A,B):\n\tresult = [[0 for x in range(len(A))] for y in range(len(A))]\n\tfor i in range(len(A)):\n\t\tfor j in range(len(B)):\n\t\t\t\tresult[i][j] = A[i][j] + B[i][j]\n\treturn result\n\n# Renvoie une valeur égale à 0 \ndef reLU_function(pixel):\n\tpixel = 0\n\treturn pixel\n\n# Renvoie la valeur max de la matrice\ndef max_nbr_matrice(mat, nrb_pool):\n\tmax_value = mat[0][0]\n\tfor i in range(nrb_pool):\n\t\tfor j in range(nrb_pool):\n\t\t\tif mat[i][j] > max_value:\n\t\t\t\tmax_value = mat[i][j]\n\treturn max_value\n\n# Renvoie une liste double\ndef define_list(h, l):\n\tnew_list = [[0 for x in range(h)] for y in range(l)]\n\treturn new_list\n\n# ConvLayer en fonction de la matrice de filtre\ndef conv_layer(h, l, filter_size, matrice, basic_image, new_image, stride):\n\tmat = define_list(filter_size, filter_size)\n\tfor i in range(h-filter_size + 1):\n\t\tfor j in range(l-filter_size + 1):\n\t\t\tfor a in range(filter_size):\n\t\t\t\tfor b in range(filter_size):\n\t\t\t\t\t# On créer une matrice de transition\n\t\t\t\t\tmat[a][b] = basic_image[i+a][j+b]\n\t\t\t# Le produit\n\t\t\tpixel_mat = produitMatriciel(mat, matrice)\n\t\t\t# La somme\n\t\t\tfor c in range(filter_size):\n\t\t\t\tfor d in range(filter_size):\n\t\t\t\t\tnew_image[i][j] = new_image[i][j] + pixel_mat[c][d]\n\t\t\t# Une Moyenne\n\t\t\tnew_image[i][j] = new_image[i][j] / 9\n\t\t\t# Passe les valeurs négative en 0, reLU fonction\n\t\t\tif new_image[i][j] < 0:\n\t\t\t\tnew_image[i][j] = reLU_function(new_image[i][j])\n\t\t\t\t# On applique le stride\n\t\t\tj = j + stride - 1\n\t\ti = i + stride - 1\n\treturn new_image\n\n# Pooling de type max, garde la plus grande valeur parmi toutes celles de la matrice de taille pool_nbr\ndef max_pooling(h, l, pool_nbr, new_image):\n\tmat = define_list(pool_nbr, pool_nbr)\n\tfor i in range(h-pool_nbr):\n\t\tfor j in range(l-pool_nbr):\n\t\t\tfor a in range(pool_nbr):\n\t\t\t\tfor b in range(pool_nbr):\n\t\t\t\t\t# On créer une matrice de transition\n\t\t\t\t\tmat[a][b] = new_image[i+a][j+b]\n\t\t\tnew_image[i][j] = max_nbr_matrice(mat, pool_nbr)\n\t\t\t# Passe les valeurs négative en 0, reLU fonction\n\t\t\tif new_image[i][j] < 0:\n\t\t\t\tnew_image[i][j] = reLU_function(new_image[i][j])\n\treturn new_image\n\n# La matrice\nmatrice = []\n\n# Matrice 3*3 pour ressortir les contours\nmatrice_all_edge = [[-1, -1, -1], \n\t\t \t\t\t[-1, 8, -1],\n\t\t \t\t\t[-1, -1, -1]]\n\n# Matrice 3*3 pour ressortir certain contours\nmatrice_edge_1 = [[0, 1, 0], \n\t\t \t\t[1, -4, 1], \n\t\t \t\t[0, 1, 0]]\n\n\n\n# Ouverture et récupération des dimensions de l'image\nfiles = Image.open(\"../NN/trainingSample/0/0.jpg\")\n(l, h) = files.size\n\n# Définit la taille de la liste en fonction des dimensions de l'image \nbasic_image = define_list(h, l)\n\n# Définit la nouvelle image sur la même taille que la basique pour être sûr (pas besoin normalement d'aussi grand)\nnew_image = define_list(h, l)\n\n# Remplit la liste avec tous les pixels de 
l'image\nfor a in range(h):\n\tfor b in range(l):\n\t\td = Image.Image.getpixel(files, (a, b))\n\t\tbasic_image[a][b] = d\n\n# On choisit la taille du filtre et définit son carré\nfilter_size = 3\nfilter_size_pow = filter_size * filter_size\n\n# On attribut la matrice nécéssaire \nif filter_size == 3:\n\tmatrice = matrice_all_edge\n\n#if filter_size == 4:\n\t# A définir\n\n# On définit le stride (le décalage entre chaque calcul de matrice * filtre)\nstride = 1\n# Nouvelle image après passage dans un ConvLayer\nnew_image = conv_layer(h, l, filter_size, matrice, basic_image, new_image, stride)\n\n# La taille de la petite matrice pour choisir la plus grande valeur, ici choix parmi 2*2\npool_nbr = 2\n# Nouvelle image après passage dans un Pooling max\nnew_image = max_pooling(h, l, pool_nbr, new_image)\n\n# On met tout sous une seule liste\nnew_image_list = []\nx = 0\ny = 0\nfor x in new_image:\n\tfor y in x:\n\t\tnew_image_list.append(y) \n\n# Définit la nouvelle image sur la même taille que la basique, avec une couleur blanche et en mode L (mode 8 bytes, nb)\nnew_files = Image.new(\"L\", files.size, \"white\")\n\n# Place la liste avec tous les pixels dans la nouvelle image\nnew_files.putdata(new_image_list)\n\n# Sauvegarde l'image\nnew_files.save(\"new_image1.png\")\n\n# Fermeture du fichier\nfiles.close()\nnew_files.close()\n","sub_path":"Python/MachineLearning_DeepLearning/Numbers/CNN/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6252104","text":"#! /usr/bin/env python\n\nfrom __future__ import print_function, division, absolute_import\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport sys\n\nplt.rcParams['mathtext.fontset'] = 'stix'\nplt.rcParams['font.family'] = 'STIXGeneral'\n\nf111 = np.genfromtxt('Gtau_111.dat', comments='#', usecols=(6,))\nf121 = np.genfromtxt('Gtau_121.dat', comments='#', usecols=(6,))\nf211 = np.genfromtxt('Gtau_211.dat', comments='#', usecols=(6,))\nf221 = np.genfromtxt('Gtau_221.dat', comments='#', usecols=(6,))\n\nf = plt.figure(figsize=(12,9))\n\nax1 = f.add_subplot(321)\nax1.set_title(r'$\\mathrm{top\\;layer}$', fontsize=16)\nax1.plot(np.linspace(2.8,3.8,11), f111[:11], 'r', lw='2', marker='^', ms='7.0', label=r'$d_{xy}$')\nax1.plot(np.linspace(3.8,2.8,11), f111[11:], 'r', lw='2', marker='v', ms='7.0')\nax1.plot(np.linspace(2.8,3.8,11), f121[:11], 'b', lw='2', marker='^', ms='7.0', label=r'$d_{xz}, d_{yz}$')\nax1.plot(np.linspace(3.8,2.8,11), f121[11:], 'b', lw='2', marker='v', ms='7.0')\nax1.set_xlim(2.8,3.8)\nax1.set_ylim(0,0.05)\nax1.set_ylabel(r'$G(\\tau=\\beta / 2)$', fontsize=14, labelpad=12)\nplt.axvspan(3.0, 3.3, facecolor='black', alpha=0.2)\nplt.legend(loc='upper right', frameon=False, fontsize=14)\n\nax1 = f.add_subplot(322)\nax1.set_title(r'$\\mathrm{bottom\\;layer}$', fontsize=16)\nax1.plot(np.linspace(2.8,3.8,11), f211[:11], 'r', lw='2', marker='^', ms='7.0', label=r'$d_{xy}$')\nax1.plot(np.linspace(3.8,2.8,11), f211[11:], 'r', lw='2', marker='v', ms='7.0')\nax1.plot(np.linspace(2.8,3.8,11), f221[:11], 'b', lw='2', marker='^', ms='7.0', label=r'$d_{xz}, d_{yz}$')\nax1.plot(np.linspace(3.8,2.8,11), f221[11:], 'b', lw='2', marker='v', ms='7.0')\nax1.set_xlim(2.8,3.8)\nax1.set_ylim(0,0.05)\nplt.axvspan(3.0, 3.3, facecolor='black', alpha=0.2)\n# ax1.set_ylabel(r'$G(\\tau=\\beta / 2)$')\nplt.legend(loc='upper right', frameon=False)\n\n\no111 = np.genfromtxt('occ_111.dat', comments='#', 
usecols=(6,))\no121 = np.genfromtxt('occ_121.dat', comments='#', usecols=(6,))\no211 = np.genfromtxt('occ_211.dat', comments='#', usecols=(6,))\no221 = np.genfromtxt('occ_221.dat', comments='#', usecols=(6,))\n\nax1 = f.add_subplot(323)\nax1.plot(np.linspace(2.8,3.8,11), o111[:11], 'r', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), o111[11:], 'r', lw='2', marker='v', ms='7.0')\nax1.plot(np.linspace(2.8,3.8,11), o121[:11], 'b', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), o121[11:], 'b', lw='2', marker='v', ms='7.0')\nax1.set_xlim(2.8,3.8)\nax1.set_ylim(0,0.5)\nax1.set_ylabel(r'$\\left\\langle \\hat{c}_{i,\\uparrow}^{\\dagger}\\hat{c}_{i,\\uparrow} \\right\\rangle$', fontsize=14, labelpad=12)\nplt.axvspan(3.0, 3.3, facecolor='black', alpha=0.2)\n\nax1 = f.add_subplot(324)\nax1.plot(np.linspace(2.8,3.8,11), o211[:11], 'r', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), o211[11:], 'r', lw='2', marker='v', ms='7.0')\nax1.plot(np.linspace(2.8,3.8,11), o221[:11], 'b', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), o221[11:], 'b', lw='2', marker='v', ms='7.0')\nax1.set_ylim(0,0.5)\nax1.set_xlim(2.8,3.8)\nplt.axvspan(3.0, 3.3, facecolor='black', alpha=0.2)\n\nd111 = np.genfromtxt('docc_111.dat', comments='#', usecols=(6,))\nd121 = np.genfromtxt('docc_121.dat', comments='#', usecols=(6,))\nd211 = np.genfromtxt('docc_211.dat', comments='#', usecols=(6,))\nd221 = np.genfromtxt('docc_221.dat', comments='#', usecols=(6,))\n\nax1 = f.add_subplot(325)\nax1.plot(np.linspace(2.8,3.8,11), d111[:11], 'r', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), d111[11:], 'r', lw='2', marker='v', ms='7.0')\nax1.plot(np.linspace(2.8,3.8,11), d121[:11], 'b', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), d121[11:], 'b', lw='2', marker='v', ms='7.0')\nax1.set_xlim(2.8,3.8)\nax1.set_ylim(0,0.008)\nplt.yticks([0,0.002,0.004,0.006,0.008])\nax1.set_xlabel(r\"$U'\\;[eV]$\", fontsize=14)\nax1.set_ylabel(r'$\\left\\langle \\hat{c}_{i,\\uparrow}^{\\dagger}\\hat{c}_{i,\\uparrow}\\hat{c}_{i,\\downarrow}^{\\dagger}\\hat{c}_{i,\\downarrow} \\right\\rangle$', fontsize=14)\nplt.axvspan(3.0, 3.3, facecolor='black', alpha=0.2)\n\nax1 = f.add_subplot(326)\nax1.plot(np.linspace(2.8,3.8,11), d211[:11], 'r', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), d211[11:], 'r', lw='2', marker='v', ms='7.0')\nax1.plot(np.linspace(2.8,3.8,11), d221[:11], 'b', lw='2', marker='^', ms='7.0')\nax1.plot(np.linspace(3.8,2.8,11), d221[11:], 'b', lw='2', marker='v', ms='7.0')\nax1.set_xlim(2.8,3.8)\nax1.set_ylim(0,0.008)\nplt.yticks([0,0.002,0.004,0.006,0.008])\nax1.set_xlabel(r\"$U'\\;[eV]$\", fontsize=14)\nplt.axvspan(3.0, 3.3, facecolor='black', alpha=0.2, zorder=0)\n\n\nplt.tight_layout()\n# use png because of the transparency which is not supported by eps\nplt.savefig('layer-b38-hysteresis.svg', format='svg', dpi=600)\nplt.show()\n","sub_path":"ch4/02_hyst.py","file_name":"02_hyst.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"68926092","text":"from math import *\na ,b ,c = input(\"Inserer la longuer des 3 cotés > A:\" ),input(\"B :\") , input(\"C :\")\na ,b ,c = int(a) , int(b) , int(c)\nAir =0\nperiM =a+b+c\nDPeriM =periM/2\nAir = sqrt(DPeriM*(DPeriM-a)*(DPeriM-b)*(DPeriM-c))\n\nprint(\"Le perimetre =\",periM ,\"m\")\nprint(\"L'air = \" , Air 
,\"m²\")\n\n","sub_path":"6_2.py","file_name":"6_2.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344904301","text":"#!/usr/bin/env python3\n\n\nimport gzip\nimport sys\nimport FisherExact as fisher\nimport numpy as np\n\n\ndef input_cluster(filename):\n f = open(filename, 'r')\n i = next(f)\n cluster = {}\n for i in f:\n i = i.split(',')\n if not(i[2][:-1] in cluster):\n cluster[i[2][:-1]] = [i[0].replace('\\\"','')]\n else:\n cluster[i[2][:-1]].append(i[0].replace('\\\"',''))\n f.close()\n return(cluster)\n\n\ndef position_cluster(col, cluster):\n cluster_pos ={}\n for k in cluster.keys():\n cluster_pos[k] = []\n for n,i in enumerate(col):\n for k in cluster.keys():\n if i in cluster[k]:\n cluster_pos[k].append(n)\n return(cluster_pos)\n\n\ndef allele_frequency(line, cluster_pos):\n alleles = line[2].split('/')\n table = np.zeros((len(cluster_pos),2))\n for k in cluster_pos.keys():\n k_int = int(k)-1\n for i in cluster_pos[k]:\n if line[i] == alleles[0]:\n table[k_int,0] += 1\n elif line[i] == alleles[1]:\n table[k_int,1] += 1\n elif line[i] == 'X':\n table[k_int,0] += 0.5\n table[k_int,1] += 0.5\n return(table)\n\n\nusuage = 'usuage: fisher_test.py <jvcf_file> <cluster_file>'\nif len(sys.argv) == 1:\n exit(usuage)\n\njvcf = gzip.open(sys.argv[1], 'rt')\ncluster = input_cluster(sys.argv[2])\n\ncol = next(jvcf).split()\ncluster_pos = position_cluster(col, cluster)\n\nfilename = sys.argv[1].replace('all_accessions.VQ50.554_lines.jvcf.gz','fisher_test.txt')\nf = open(filename, 'w')\nfor i in jvcf:\n line = i.split()\n table = allele_frequency(line, cluster_pos)\n f.write(f'{line[0]}_{line[1]}\\t{line[0]}\\t{line[1]}\\t{fisher.fisher_exact(table)}\\n')\nf.close()\n","sub_path":"fisher_test.py","file_name":"fisher_test.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"17474932","text":"from nfa import *\r\nimport utils\r\n\r\nVALIDOS = [\"A\", \"B\", \"C\", \"D\" ,\"E\", \"a\" ,\"b\" ,\"c\" ,\"d\" ,\"e\",\"ε\", \"0\",\"1\"]\r\nOPERADORES = [\"*\", \"|\", \"?\", \"+\", \"^\", \")\"]\r\nOPERADORESUNI = [\"*\", \"?\", \"+\"]\r\nABCEDARIO = [\"A\", \"B\", \"C\", \"D\" ,\"E\", \"F\",\"G\" ,\"H\", \"I\", \"J\"]\r\nCOUNT = [10]\r\nEPSILON = \"ε\"\r\n\r\ndef nfaToDfa(arbolito, expresion):\r\n #hacer automata\r\n auto = automataEsq.Machine(expresion)\r\n automata = automataEsq.Machine(expresion)\r\n nuevosEstados = []\r\n opciones = []\r\n for u in expresion:\r\n if u not in OPERADORES:\r\n if u != '(' and u not in opciones:\r\n opciones.append(u)\r\n automateichon.automataBuilder(arbolito, auto)\r\n primero = utils.cerraduraEpsilon(auto, [0])\r\n primero = set(primero)\r\n auto.states[-1].accept = True\r\n nuevosEstados.append(primero)\r\n primerito = automataEsq.State(len(automata.states))\r\n primerito.vaina.append(primero) \r\n automata.states.append(primerito)\r\n for i in automata.states:\r\n for opcion in opciones:\r\n tempstates = set()\r\n for state in auto.states:\r\n if state.id in nuevosEstados[i.id]:\r\n for trans in state.transitions: \r\n if trans.simbolo == opcion:\r\n tempstates.add(trans.destino)\r\n x = set()\r\n for temp in tempstates:\r\n x.add(temp)\r\n x.update(utils.cerraduraEpsilon(auto,[temp]))\r\n if x not in nuevosEstados:\r\n if len(x) != 0:\r\n nuevosEstados.append(x)\r\n estadox = automataEsq.State(len(automata.states))\r\n estadox.vaina.append(x)\r\n for y in x:\r\n if 
auto.states[y].accept == True:\r\n estadox.accept = True\r\n automata.states.append(estadox)\r\n i.transitions.append(automataEsq.Transition(opcion,estadox.id))\r\n elif x in nuevosEstados:\r\n if len(x) != 0:\r\n for h in automata.states:\r\n if h.vaina[0] == x:\r\n i.transitions.append(automataEsq.Transition(opcion,h.id))\r\n print(nuevosEstados)\r\n # imprimir automat\r\n\r\n dfatext = open(\"dfa.txt\",\"w\")\r\n for state in automata.states:\r\n identidad = \"estado: \" + str(state.id) + \"\\n\"\r\n aceptacion = \"Aceptacion: \"+str(state.accept) + \"\\n\" \r\n dfatext.write(identidad)\r\n dfatext.write(aceptacion)\r\n for trap in state.transitions:\r\n if trap.simbolo == EPSILON:\r\n z = \"E\"\r\n else:\r\n z = trap.simbolo\r\n texto = \"Transicion: \" + str(trap.destino) + \" \" + \"Con: \" + z + \"\\n\"\r\n dfatext.write(texto)\r\n dfatext.close\r\n\r\n return automata\r\n\r\n\r\ndef matchingDFA(automata, expresion, textillo):\r\n #matching\r\n aceptados = []\r\n for e in expresion:\r\n if e not in OPERADORES:\r\n if e != '(':\r\n aceptados.append(e)\r\n for g in textillo:\r\n if g not in aceptados:\r\n print(\"no aceptado\")\r\n return 0\r\n opts = list(textillo)\r\n est = [0]\r\n est = utils.cerraduraEpsilon(automata, est)\r\n q = 0\r\n while True:\r\n temporales = []\r\n for es in est:\r\n for trans in automata.states[es].transitions:\r\n if trans.simbolo == opts[q] and trans.destino not in temporales:\r\n temporales.append(trans.destino)\r\n q += 1\r\n temporales = utils.cerraduraEpsilon(automata, temporales)\r\n if not temporales and opts == EPSILON:\r\n break\r\n est = temporales.copy()\r\n if q > len(opts)-1:\r\n break\r\n for x in est:\r\n if automata.states[x].accept:\r\n return print(\"Aceptado\")\r\n return print(\"No Aceptado\") ","sub_path":"Proyecto1/Proyecto1/nfaToDfa.py","file_name":"nfaToDfa.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"390936649","text":"# -*- coding: utf8 -*-\nfrom requests import Session\nfrom time import sleep\ndef main(*args):\n # 数据\n like_url = 'https://tieba.baidu.com/mo/q/newmoindex?'\n sign_url = 'http://tieba.baidu.com/sign/add'\n tbs = '4fb45fea4498360d1547435295'\n head = {\n 'Accept': 'text/html, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Connection': 'keep-alive',\n 'Cookie': '填入你的百度cookie(获取方法参考http://pandownload.com/faq/cookie.html,将网盘的TOKEN换成贴吧的TOKEN)',# 2020.4.16补充:好人一生平安🙏由于pd被封,故无法查看教程,详细步骤可参阅本帖第61楼的方案:[url=https://www.52pojie.cn/forum.php?mod=redirect&goto=findpost&ptid=1155287&pid=31391633]https://www.52pojie.cn/forum.php ... 
155287&pid=31391633[/url]\n 'Host': 'tieba.baidu.com',\n 'Referer': 'http://tieba.baidu.com/i/i/forum',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.98 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'}\n s = Session()\n\n\n # 获取关注的贴吧\n bars = []\n dic = s.get(like_url, headers=head).json()['data']['like_forum']\n for bar_info in dic:\n bars.append(bar_info['forum_name'])\n\n\n # 签到\n already_signed_code = 1101\n success_code = 0\n need_verify_code = 2150040\n already_signed = 0\n succees = 0\n failed_bar = []\n n = 0\n retry_count = 0\n max_retry = 5 # 因需要验证码而重试的最大次数\n\n while n < len(bars):\n sleep(0.5)\n bar = bars[n]\n data = {\n 'ie': 'utf-8',\n 'kw': bar,\n 'tbs': tbs\n }\n try:\n r = s.post(sign_url, data=data, headers=head)\n except Exception as e:\n print(f'未能签到{bar}, 由于{e}。')\n failed_bar.append(bar)\n continue\n dic = r.json()\n msg = dic['no']\n if msg == already_signed_code: already_signed += 1; r = '已经签到过了!'\n elif msg == need_verify_code and retry_count <max_retry: n -= 1; retry_count += 1; r = f'需要验证码,即将重试!({retry_count}/{max_retry})'\n elif msg == need_verify_code: r = '验证码错误,跳过!'; retry_count = 0\n elif msg == success_code: r = f\"签到成功!你是第{dic['data']['uinfo']['user_sign_rank']}个签到的吧友,共签到{dic['data']['uinfo']['total_sign_num']}天。\"\n else: r = '未知错误!' + dic['error']\n print(f\"{bar}:{r}\")\n succees += 1\n n += 1\n l = len(bars)\n failed = \"\\n失败列表:\"+'\\n'.join(failed_bar) if len(failed_bar) else ''\n message = f'''共{l}个吧,其中{succees}个吧签到成功,{len(failed_bar)}个吧签到失败,{already_signed}个吧已经签到。失败列表:{failed}'''\n print(message)\n s.get(f\"https://sc.ftqq.com/你的SCKEY.send?text={message}\")# [未测试]Server酱推送,不需要则删除此行","sub_path":"test/tieba.py","file_name":"tieba.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572431240","text":"class Solution(object):\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n swaped = False\n for i in range(1, len(nums))[::-1]:\n if nums[i] > nums[i-1]:\n nums[i:] = nums[i:][::-1]\n for j in range(i, len(nums)):\n if nums[j] > nums[i-1]:\n nums[j], nums[i-1] = nums[i-1], nums[j]\n break\n swaped = True\n break\n if not swaped:\n for i in range(len(nums)//2):\n nums[i], nums[len(nums)-1-i] = nums[len(nums)-1-i], nums[i]\n\nsol = Solution()\nnums = [1,1,1]\nsol.nextPermutation(nums)\nprint(nums)","sub_path":"python/31. 下一个排列.py","file_name":"31. 
\nsol = Solution()\nnums = [1,1,1]\nsol.nextPermutation(nums)\nprint(nums)","sub_path":"python/31. 下一个排列.py","file_name":"31. 下一个排列.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"448855956","text":"import pygame\nimport os\nimport math\n\nos.system(\"clear\")\n\nprint('Room Triangulation System')\nprint('Scale: 1m = 100px')\nprint()\nroomWidth\t\t= int(input('Width of the room in meters: '))\nroomHeight\t\t= int(input('Height of the room in meters: '))\nscreenWidth\t\t= roomWidth*100+50\nscreenHeight\t= roomHeight*100+50\nbeaconDistance\t= screenWidth-100\n\nwhite\t\t\t= (255,\t\t255,\t255)\nblack\t\t\t= (0,\t\t0,\t\t0)\nblue\t\t\t= (0,\t\t63,\t\t255)\ngreen\t\t\t= (0,\t\t255,\t63)\nred\t\t\t\t= (255,\t\t0,\t\t0)\n\nangleLeft\t\t= 20\nangleRight\t\t= 20\n\nrobotX\t\t\t= 0\nrobotY\t\t\t= 0\n\npygame.init()\npygame.font.init()\npixel\t\t\t= pygame.font.Font('pixel.ttf', 8)\nos.system('clear')\n\nprint('Room Triangulation System')\n\nscreen\t\t\t= pygame.display.set_mode((screenWidth, screenHeight))\npygame.mouse.set_visible(1)\npygame.key.set_repeat(500, 1)\n\nframe\t\t\t= pygame.Surface((screenWidth, screenHeight))\npygame.draw.rect(frame, white, pygame.rect.Rect(25, 25, screenWidth-50, screenHeight-50), 1)\npygame.draw.circle(frame, green, (50, screenHeight-50), 15, 1)\npygame.draw.circle(frame, green, (screenWidth-50, screenHeight-50), 15, 1)\npygame.draw.line(frame, green, (50, screenHeight-50), (screenWidth-50, screenHeight-50), 1)\n\nclock\t\t\t= pygame.time.Clock()\nrunning\t\t\t= 1\nchecked\t\t\t= 1\ncount\t\t\t= 1\n\nwhile(running):\n\tclock.tick(60)\n\tscreen.fill((10,10,10))\n\tfor event in pygame.event.get():\n\t\tif(event.type == pygame.QUIT):\n\t\t\trunning = 0\n\t\tif(event.type == pygame.KEYDOWN):\n\t\t\tif(event.key == pygame.K_ESCAPE):\n\t\t\t\tpygame.event.post(pygame.event.Event(pygame.QUIT))\n\t\t\tif(event.key == pygame.K_DOWN):\n\t\t\t\tif(checked == 1 and angleLeft > 10):\n\t\t\t\t\tangleLeft -= 1\n\t\t\t\tif(checked == 2 and angleRight > 10):\n\t\t\t\t\tangleRight -=1\n\t\t\tif(event.key == pygame.K_UP):\n\t\t\t\tif(checked == 1 and angleLeft < 80):\n\t\t\t\t\tangleLeft += 1\n\t\t\t\tif(checked == 2 and angleRight < 80):\n\t\t\t\t\tangleRight +=1\n\t\tif(event.type == pygame.KEYUP):\n\t\t\tif(event.key == pygame.K_TAB):\n\t\t\t\tif(checked == 1):\n\t\t\t\t\tchecked = 2\n\t\t\t\telif(checked == 2):\n\t\t\t\t\tchecked = 1\n\n\tscreen.blit(frame, (0,0))\n\t# Calculate Robot Coordinates\n\tangleTop\t= 180 - angleLeft - angleRight\n\trobotX\t\t= ( math.cos(math.radians(angleLeft)) * math.sin(math.radians(angleRight)) * beaconDistance) / math.sin(math.radians(angleTop))\n\trobotY\t\t= ( math.sin(math.radians(angleLeft)) * math.sin(math.radians(angleRight)) * beaconDistance) / math.sin(math.radians(angleTop))\n\t# Beacon Signals\n\tpygame.draw.line(screen, green, (robotX+50, screenHeight-robotY-25), (50, screenHeight-50), 1)\n\tpygame.draw.line(screen, green, (robotX+50, screenHeight-robotY-25), (screenWidth-50, screenHeight-50), 1)\n\t# Cartesian Lines\n\tpygame.draw.line(screen, red, (robotX+50, screenHeight-robotY+30), (robotX+50, screenHeight-27), 1)\n\tpygame.draw.line(screen, red, (robotX+50, screenHeight-robotY-80), (robotX+50, 26), 1)\n\tpygame.draw.line(screen, red, (robotX-5, screenHeight-robotY-25), (26, screenHeight-robotY-25), 1)\n\tpygame.draw.line(screen, red, (robotX+105, screenHeight-robotY-25), (screenWidth-26, screenHeight-robotY-25), 1)\n\t# Robot\n\tpygame.draw.rect(screen, red, pygame.rect.Rect(robotX, screenHeight-robotY-75, 100, 100), 1)\n\t# Robot 
Circle\n\tpygame.draw.arc(screen, blue, pygame.rect.Rect(robotX-25, screenHeight-robotY-100, 150, 150), math.radians(count*8), math.radians(count*8+80), 4)\n\tpygame.draw.arc(screen, blue, pygame.rect.Rect(robotX-30, screenHeight-robotY-105, 160, 160), -math.radians(count*6+90), -math.radians(count*6+40), 4)\n\tpygame.draw.arc(screen, blue, pygame.rect.Rect(robotX-35, screenHeight-robotY-110, 170, 170), math.radians(count*16+40), math.radians(count*16+180), 4)\n\t# Beacon Circle\n\tif(checked == 1):\n\t\tpygame.draw.arc(screen, blue, pygame.rect.Rect(30, screenHeight-70, 40, 40), math.radians(count*8), math.radians(count*8+80), 1)\n\tif(checked == 2):\n\t\tpygame.draw.arc(screen, blue, pygame.rect.Rect(screenWidth-70, screenHeight-70, 40, 40), math.radians(count*8), math.radians(count*8+80), 1)\n\t# Text\n\trobotXY\t\t= pixel.render(str(round(robotX))+' | '+str(round(robotY)), 1, blue)\n\tangleText1\t= pixel.render(str(angleLeft)+'°', 1, blue)\n\tangleText2\t= pixel.render(str(angleRight)+'°', 1, blue)\n\tscreen.blit(robotXY, (robotX+2, screenHeight-robotY-75))\n\tscreen.blit(angleText1, (70, screenHeight-50))\n\tscreen.blit(angleText2, (screenWidth-90, screenHeight-50))\n\n\t#print(count)\n\tcount += 1\n\n\tpygame.display.flip()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"144096144","text":"import pysys\nfrom pysys.constants import *\nfrom pysys.basetest import BaseTest\nimport os, sys, re\n\nclass PySysTest(BaseTest):\n\n\tdef execute(self):\n\t\ttestsdir = os.path.normpath(self.input+'/../../../..')\n\t\tassert testsdir.endswith('pysys-examples'), testsdir\n\t\tself.log.info('printing tests from: %s', testsdir)\n\t\t\n\t\tl = {}\n\t\texec(open(self.input+'/../../../utilities/resources/runpysys.py').read(), {}, l) # define runPySys\n\t\trunPySys = l['runPySys']\n\t\trunPySys(self, 'basic', ['print'], workingDir=testsdir)\n\t\trunPySys(self, 'thistest', ['print', 'PySys_internal_073'], workingDir=testsdir)\n\t\trunPySys(self, 'full', ['print', '--full'], workingDir=testsdir)\n\t\trunPySys(self, 'groups', ['print', '--groups'], workingDir=testsdir)\n\t\trunPySys(self, 'modes', ['print', '--modes'], workingDir=testsdir)\n\t\trunPySys(self, 'nonexistent', ['print', 'non-existent'], workingDir=testsdir, ignoreExitStatus=True)\n\t\trunPySys(self, 'emptydir', ['print'], workingDir=self.mkdir('emptydir'), ignoreExitStatus=True)\n\t\t\t\n\tdef validate(self):\n\t\tfor t in ['basic', 'thistest', 'full', 'groups', 'modes']:\n\t\t\tself.assertGrep(t+'.err', expr='.*', contains=False) # no errors\n\n\t\tself.assertGrep('basic.out', expr='Fibonacci_test_001 *: *[^ ]+')\n\t\tself.assertGrep('full.out', expr='Test id *: *Fibonacci_test_001') # just pick one example\n\t\tself.assertGrep('modes.out', expr='mode1') # just pick one example\n\t\tself.assertLineCount('thistest.out', expr='.', condition='==1')\n\t\t\n\t\tself.assertGrep('groups.out', expr='examples') # just pick one example\n\n\t\tself.assertGrep('emptydir.err', expr='The supplied options did not result in the selection of any tests')\n\t\tself.assertGrep('nonexistent.err', expr='Unable to locate requested testcase')\n","sub_path":"pysys-examples/internal/testcases/PySys_internal_073/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328189780","text":"\nfrom django.contrib 
import admin\nfrom django.urls import path,include\nfrom core import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.home,name='home'),\n path('contact/',views.contact.as_view(),name='contact'),\n path('success',views.success,name='success'),\n path('serv/',include('serv.urls')),\n path('edu/',include('edu.urls')),\n]\n","sub_path":"resumeproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540267516","text":"#!/usr/bin/env python\n\n\"\"\"\n\n This file is part of Enbarr.\n\n Enbarr is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n Enbarr is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with Enbarr. If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"\n\nimport rclpy as rospy\nimport argparse\nfrom auv.msg import arbitrary_pca_commands\n\npublisher = rospy.Publisher('arbitrary_pca_commands', arbitrary_pca_commands, queue_size=3)\nrospy.init_node('unkill_thruster', anonymous=True)\n\nparser = argparse.ArgumentParser(\"Send a thruster un-kill command over the /surface_command topic.\")\nparser.add_argument('thruster', help='The name of the thruster to be used here.')\nargs = parser.parse_args(rospy.myargv()[1:])\n\ncommand = arbitrary_pca_commands()\ncommand.unkill_thruster = True\ncommand.thruster = args.thruster\n\npublisher.publish(command)\n","sub_path":"wurov/wurov/scripts/pca_unkill_thruster.py","file_name":"pca_unkill_thruster.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"242376357","text":"import webbrowser\r\nimport csv\r\nimport json\r\nimport itertools\r\nimport operator\r\nimport random\r\nfrom geopy.geocoders import Nominatim\r\n\r\ndef most_common(data):\r\n\tprint(data)\r\n\tsorted_list = sorted((x, i) for i, x in enumerate(data))\r\n\tgroups = itertools.groupby(sorted_list, key=operator.itemgetter(0))\r\n\tdef _auxfun(g):\r\n\t\titem, iterable = g\r\n\t\tcount = 0\r\n\t\tmin_index = len(data)\r\n\t\tfor _, where in iterable:\r\n\t\t\tcount += 1\r\n\t\t\tmin_index = (min(min_index, where))\r\n\t\treturn count, -min_index\r\n\treturn max(groups, key=_auxfun)[0]\r\n\r\ndef locationsBasic(data):\r\n\tdata_in_memory = []\r\n\teverywhere_tuples = []\r\n\tnighttimes_tuples = []\r\n\tworkhour_tuples = []\r\n\tdates_captured = []\r\n\r\n\tfor i in data[0][\"bundle\"][\"rumpel/locations/ios\"]:\r\n\t\tdata_in_memory.append(i)\r\n\tfor i in data_in_memory:\r\n\t\tif int(i['data']['dateCreatedLocal'][11:13]) < 6:\r\n\t\t\tnighttimes_tuples.append([i['data']['latitude'], i['data']['longitude']])\r\n\t\tif int(i['data']['dateCreatedLocal'][11:13]) > 8 and int(i['data']['dateCreatedLocal'][11:13]) < 19:\r\n\t\t\tworkhour_tuples.append([i['data']['latitude'], i['data']['longitude']])\r\n\t\tdates_captured.append(i['data']['dateCreatedLocal'][0:10])\r\n\t\teverywhere_tuples.append([i['data']['latitude'], i['data']['longitude']])\r\n\t\r\n\r\n\tgeolocator = 
Nominatim(user_agent=\"Mark_and_Maven\")\r\n\tlocationBigly = geolocator.reverse(str(most_common(everywhere_tuples)))\r\n\tlocationBiglyAddress = locationBigly.address\r\n\ttry: \r\n\t\tlocationBigNight = geolocator.reverse(str(most_common(nighttimes_tuples)))\r\n\t\tlocationBigNightAddress = locationBigNight.address\r\n\texcept ValueError:\r\n\t\tlocationBigNightAddress = ''\r\n\t\tprint('locationBigNight failed, possible nighttime data not found')\r\n\tlocationBigWork = geolocator.reverse(str(most_common(workhour_tuples)))\r\n\tlocationBigWorkAddress = locationBigWork.address\r\n\r\n\tlocationsBasic = [\r\n\t\t['locationBigly', locationBiglyAddress],\r\n\t\t['locationBigNight', locationBigNightAddress],\r\n\t\t['locationBigWork', locationBigWorkAddress],\r\n\t\t]\r\n\r\n\treturn locationsBasic","sub_path":"analyst.py","file_name":"analyst.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"31989035","text":"from ST3DNet import *\nfrom utils import *\nimport argparse\nimport configparser\nimport pickle\nimport os\nimport math\nimport time\nfrom keras.utils import plot_model\nfrom keras.optimizers import Adam\n\ndir = os.getcwd()\n\n# parser param\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--ctx\", default='0', type=str)\nargs = parser.parse_args()\nctx = args.ctx\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = ctx\n\n# read config file\nconfig_file = os.path.join(dir, 'config', 'nyc.conf')\nconfig = configparser.ConfigParser()\nprint('Read configuration file: %s' % config_file)\nprint('>>>>>>> configuration <<<<<<<')\nwith open(config_file, 'r') as f:\n print(f.read())\nprint('\\n')\nconfig.read(config_file)\ntraining_config = config['Training']\n\nlr = float(training_config['learning_rate'])\nbatch_size = int(training_config['batch_size'])\nnb_residual_unit = int(training_config['nb_residual_unit']) # number of residual units\n\nnb_epoch = int(training_config['nb_epoch']) # number of epoch at training stage\n# consider_external_info = bool(int(training_config['consider_external_info']))\n\nlen_closeness = int(training_config['len_closeness']) # length of closeness dependent sequence\nlen_period = int(training_config['len_period']) # length of peroid dependent sequence\nlen_trend = int(training_config['len_trend']) # length of trend dependent sequence\n\n# find best param\ncs = [True, False]\nlfun = 'mse'\n\nfor p in cs:\n consider_external_info = p\n filename = 'NYC_c%d_p%d_t%d' % (len_closeness, len_period, len_trend)\n hyperparams_name = lfun + 'lrNYC_c%d_p%d_t%d_r%d_b%d_lr%.1e' % (\n len_closeness, len_period, len_trend, nb_residual_unit, batch_size, lr)\n\n T = 24 # number of time intervals in one day\n nb_flow = 2 # there are two types of flows: new-flow and end-flow\n days_test = 10 # divide data into two subsets: Train & Test, of which the test set is the last 10 days\n len_test = T * days_test\n map_height, map_width = 16, 8 # grid size\n nb_area = 81\n m_factor = math.sqrt(1. * map_height * map_width / nb_area)\n m_factor_2 = 1. 
* map_height * map_width / nb_area\n\n if consider_external_info:\n filename = filename + '_ext'\n hyperparams_name = hyperparams_name + '_ext'\n else:\n filename = filename + '_noext'\n hyperparams_name = hyperparams_name + '_noext'\n\n filename = os.path.join(dir, \"data\", 'BikeNYC', filename)\n expdir = os.path.join(dir, \"experiment\", 'BikeNYC')\n fname_param = os.path.join(expdir, hyperparams_name + '.best.h5')\n print('filename:', filename)\n print('fname_param:', fname_param)\n\n # load data\n f = open(filename, 'rb')\n X_train = pickle.load(f)\n Y_train = pickle.load(f)\n X_test = pickle.load(f)\n Y_test = pickle.load(f)\n mmn = pickle.load(f)\n external_dim = pickle.load(f)\n timestamp_train = pickle.load(f)\n timestamp_test = pickle.load(f)\n\n for i in X_train:\n print(i.shape)\n\n Y_train = mmn.inverse_transform(Y_train) # X is MaxMinNormalized, Y is real value\n Y_test = mmn.inverse_transform(Y_test)\n\n c_conf = (len_closeness, nb_flow, map_height, map_width) if len_closeness > 0 else None\n t_conf = (len_trend, nb_flow, map_height, map_width) if len_trend > 0 else None\n\n model = ST3DNet(c_conf=c_conf, t_conf=t_conf, external_dim=external_dim,\n nb_residual_unit=nb_residual_unit)\n\n adam = Adam(lr=lr)\n model.compile(loss=lfun, optimizer=adam, metrics=[rmse])\n model.summary()\n plot_model(model, to_file=os.path.join(expdir, 'NYmodel.png'), show_shapes=True)\n\n from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n model_checkpoint = ModelCheckpoint(fname_param, monitor='val_rmse', verbose=1,\n save_best_only=True,\n mode='min')\n\n reduce_lr = ReduceLROnPlateau(monitor='val_rmse', factor=0.6, patience=50, min_lr=0.1 * lr)\n\n print('=' * 10)\n print(\"training model...\")\n start_time = time.time()\n history = model.fit(X_train, Y_train,\n nb_epoch=nb_epoch,\n batch_size=batch_size,\n validation_split=0.1,\n callbacks=[model_checkpoint, reduce_lr],\n verbose=2)\n end_time = time.time()\n print('cost %.f mins on training' % ((end_time - start_time) // 60))\n\n print('=' * 10)\n print('evaluating using the model that has the best loss on the valid set')\n model.load_weights(fname_param)\n score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[0] // 48, verbose=0)\n print('Train score: %.6f rmse (real): %.6f' % (score[0], score[1] * m_factor))\n score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)\n print('Test score: %.6f rmse (real): %.6f' % (score[0], score[1] * m_factor))\n\n Y_test_predict = model.predict(X_test, batch_size=Y_test.shape[0], verbose=0)\n test_rmse, test_mae, test_mape = compute(Y_test, Y_test_predict)\n print(hyperparams_name + ', rmse:%.6f, mae:%.6f, mape:%.6f' % (\n test_rmse * m_factor, test_mae * m_factor_2, test_mape))\n\n print('=' * 10)\n print(\"cont training model...\")\n start_time = time.time()\n adam = Adam(lr=0.1 * lr)\n model.compile(loss=lfun, optimizer=adam, metrics=[rmse])\n model.load_weights(fname_param)\n\n model_checkpoint = ModelCheckpoint(fname_param, monitor='rmse', verbose=1, save_best_only=True,\n mode='min')\n reduce_lr = ReduceLROnPlateau(monitor='rmse', factor=0.6, patience=50, min_lr=0.1 * 0.1 * lr)\n\n history = model.fit(X_train, Y_train,\n nb_epoch=800,\n batch_size=batch_size,\n callbacks=[model_checkpoint, reduce_lr],\n verbose=2)\n end_time = time.time()\n print('cont cost %.f mins on training' % ((end_time - start_time) // 60))\n\n print('=' * 10)\n print('cont evaluating...')\n model.load_weights(fname_param)\n score = model.evaluate(X_train, Y_train, 
batch_size=Y_train.shape[0] // 48, verbose=0)\n print('cont Train score: %.6f rmse (real): %.6f' % (score[0], score[1] * m_factor))\n score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)\n print('cont Test score: %.6f rmse (real): %.6f' % (score[0], score[1] * m_factor))\n\n Y_test_predict = model.predict(X_test, batch_size=Y_test.shape[0], verbose=0)\n test_rmse, test_mae, test_mape = compute(Y_test, Y_test_predict)\n print(hyperparams_name + ', rmse:%.6f, mae:%.6f, mape:%.6f' % (\n test_rmse * m_factor, test_mae * m_factor_2, test_mape))\n","sub_path":"trainNY.py","file_name":"trainNY.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"548619490","text":"from math import *\r\nimport numpy as np\r\nimport os\r\nfrom matplotlib import pyplot\r\nimport matplotlib.pyplot as plt\r\nfrom pandas import read_csv\r\nfrom pandas import DataFrame\r\nfrom pandas import concat\r\nfrom openpyxl.reader.excel import load_workbook\r\nfrom openpyxl import Workbook\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom keras import regularizers\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils import np_utils\r\nfrom keras.models import *\r\nfrom keras.models import Sequential, Model\r\nfrom keras.layers import *\r\nfrom keras.layers import Dense, Input, concatenate, LSTM, merge\r\nfrom keras.layers.core import *\r\nfrom attention_utils import get_activations, get_data_recurrent\r\nimport random\r\n\r\n#constant\r\nTOTAL_WEEK = 468\r\nTRAIN_WEEK = 370\r\nWEEK_LENGTH = 6\r\nTIME_STEPS = WEEK_LENGTH\r\nAREA_NUM = 10\r\n\r\ndef attention_3d_block(inputs):\r\n # inputs.shape = (batch_size, time_steps, input_dim)\r\n input_dim = int(inputs.shape[2])\r\n a = Permute((2, 1))(inputs)\r\n a = Reshape((input_dim, TIME_STEPS))(a) # this line is not useful. 
It's just to know which dimension is what.\r\n a = Dense(TIME_STEPS, activation='softmax')(a)\r\n a_probs = Permute((2, 1), name='attention_vec')(a)\r\n output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')\r\n return output_attention_mul\r\n\r\n#########################################\r\n#load content data\r\npre_XX = []\r\npath = ''\r\nfiles= os.listdir(path)\r\nfor file in files:\r\n filename = path + '\\\\' + file\r\n wb = load_workbook(filename = filename)\r\n ws = wb.active \r\n temp1 = []\r\n for row in ws.rows:\r\n temp2 = []\r\n for col in row:\r\n temp2.append(col.value)\r\n temp1.append(temp2)\r\n temp2 = temp1[1:]\r\n pre_XX += temp2\r\n\r\n#load weather data\r\npre_XX_wea = []\r\nfilename = ''\r\nwb = load_workbook(filename=filename)\r\nws = wb.active \r\ntemp1 = []\r\nfor row in ws.rows:\r\n temp2 = []\r\n for col in row:\r\n temp2.append(col.value)\r\n temp1.append(temp2)\r\ntemp2 = temp1[1:]\r\npre_XX_wea = temp2\r\n\r\n#load label\r\nfilename = ''\r\nwb = load_workbook(filename=filename)\r\nws = wb.active \r\n#load data\r\ntemp1 = []\r\nfor row in ws.rows:\r\n for col in row:\r\n temp2 = col.value\r\n temp1.append(temp2)\r\ntemp2 = temp1[1:]\r\nlabel = np.array(temp2)\r\n\r\n##############################################\r\n#normaliztion\r\npre_XX = np.array(pre_XX)\r\npre_XX_wea = np.array(pre_XX_wea)\r\nscaler = MinMaxScaler(feature_range=(0, 1))\r\npre_XX = scaler.fit_transform(pre_XX)\r\npre_XX_wea = scaler.fit_transform(pre_XX_wea)\r\n\r\n###############################\r\n#split train set and test set\r\ntrain_XX = []\r\ntest_XX = []\r\ntrain_XX_wea = []\r\ntest_XX_wea = []\r\ntrain_yy = []\r\ntest_yy = []\r\n\r\nfor i_file in range(len(files)):\r\n train_X = []\r\n test_X = []\r\n content = pre_XX[i_file * TOTAL_WEEK : (i_file + 1) * TOTAL_WEEK]\r\n for i_week in range(TRAIN_WEEK):\r\n train_X.append(content[i_week : i_week + WEEK_LENGTH])\r\n for i_week in range(TRAIN_WEEK, TOTAL_WEEK - WEEK_LENGTH):\r\n test_X.append(content[i_week : i_week + WEEK_LENGTH])\r\n train_XX.append(np.array(train_X))\r\n test_XX.append(np.array(test_X))\r\n\r\nweather = pre_XX_wea\r\nfor i_week in range(TRAIN_WEEK):\r\n train_XX_wea.append(weather[i_week : i_week + WEEK_LENGTH])\r\nfor i_week in range(TRAIN_WEEK, TOTAL_WEEK - WEEK_LENGTH):\r\n test_XX_wea.append(weather[i_week : i_week + WEEK_LENGTH])\r\ntrain_XX_wea = np.array(train_XX_wea)\r\ntest_XX_wea = np.array(test_XX_wea)\r\n\r\ntrain_yy = label[WEEK_LENGTH:TRAIN_WEEK + WEEK_LENGTH]\r\ntest_yy = label[TRAIN_WEEK + WEEK_LENGTH : ]\r\ntrain_yy = np.array(train_yy)\r\ntest_yy = np.array(test_yy)\r\n\r\n##############################################\r\n\r\n#disorder process\r\ncom_XX = [list(train_XX[i]) + list(test_XX[i]) for i in range(AREA_NUM)]\r\ncom_XX_wea = list(train_XX_wea) + list(test_XX_wea)\r\nlabel = list(train_yy) + list(test_yy)\r\n\r\ntrain_index = []\r\nrand_list = [j for j in range(TOTAL_WEEK - WEEK_LENGTH)]\r\nfor i in range(TRAIN_WEEK):\r\n jj = random.choice(rand_list)\r\n train_index.append(jj)\r\n rand_list.remove(jj)\r\ntest_index = []\r\nfor i in range(TOTAL_WEEK - WEEK_LENGTH):\r\n if i not in train_index:\r\n test_index.append(i)\r\n\r\n#iii = -1\r\ntrain_XX = [np.array([com_XX[i][k] for k in train_index]) for i in range(AREA_NUM)]\r\n#train_XX = train_XX[0:iii] + train_XX[iii+1:]\r\ntest_XX = [np.array([com_XX[i][k] for k in test_index]) for i in range(AREA_NUM)]\r\n#test_XX = test_XX[0:iii] + test_XX[iii+1:]\r\n\r\ntrain_XX_wea = np.array([com_XX_wea[k] for k in 
train_index])\r\ntest_XX_wea = np.array([com_XX_wea[k] for k in test_index])\r\n\r\ntrain_yy = np.array([label[k] for k in train_index])\r\ntest_yy = np.array([label[k] for k in test_index])\r\n\r\n\r\n###############################\r\n# design network\r\ninputs = [Input(shape=(train_XX[0].shape[1], train_XX[0].shape[2])) for i in range(AREA_NUM)]\r\ninput_wea = Input(shape=(train_XX_wea.shape[1], train_XX_wea.shape[2]))\r\n\r\nhl1s = [LSTM(32, return_sequences=True)(inputs[i]) for i in range(AREA_NUM)]\r\nhl1s_wea = LSTM(32)(input_wea)\r\n\r\nhl2hs = concatenate(hl1s)\r\nattention_mul = attention_3d_block(hl2hs)\r\nattention_mul = Flatten()(attention_mul)\r\n\r\nhl3s = concatenate([Dense(16, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(attention_mul)] + [(hl1s_wea)])\r\nhl4 = Dense(10, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(hl3s)\r\nhl5 = Dense(1, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(hl4)\r\n\r\nmodel = Model(inputs = inputs + [input_wea] , output = hl5)\r\nmodel.compile(loss='mape', optimizer='adam')\r\n\r\n# fit network\r\nhistory = model.fit(train_XX + [train_XX_wea], train_yy, epochs=250, batch_size=32, verbose=1)\r\n\r\n#predict\r\npre = model.predict(test_XX + [test_XX_wea])\r\npre_yy = pre.reshape(len(pre),)\r\nmape = np.mean(abs((test_yy - pre_yy)/test_yy))\r\nprint(mape)\r\n#plt.plot(test_yy, color = 'blue')\r\n#plt.plot(pre, color = 'red')\r\n\r\n#disorder prediction\r\ndis_ord_pre = model.predict(train_XX + [train_XX_wea])\r\ndis_ord_pre_yy = dis_ord_pre.reshape(len(dis_ord_pre),)\r\n\r\nm = []\r\nfor i in range(TOTAL_WEEK - WEEK_LENGTH):\r\n for j in range(len(train_index)):\r\n if i == train_index[j]:\r\n m.append(train_yy[j])\r\n break\r\n for j in range(len(test_index)):\r\n if i == test_index[j]:\r\n m.append(test_yy[j])\r\n break\r\nn = []\r\nfor i in range(TOTAL_WEEK - WEEK_LENGTH):\r\n for j in range(len(train_index)):\r\n if i == train_index[j]:\r\n n.append(dis_ord_pre_yy[j])\r\n break\r\n for j in range(len(test_index)):\r\n if i == test_index[j]:\r\n n.append(pre_yy[j])\r\n break\r\n#plt.plot(m[370:461], color = 'blue')\r\n#plt.plot(n[370:461], color = 'red')\r\n\r\n#div 10\r\np = np.array(m)\r\nq = np.array(n)\r\n#p = scaler.inverse_transform(p)\r\n#q = scaler.inverse_transform(q)\r\nfor i in range(len(m)):\r\n p[i] = p[i] / 10\r\nfor i in range(len(n)):\r\n q[i] = q[i] / 10\r\nplt.plot(p[370:461], color = 'blue')\r\nplt.plot(q[370:461], color = 'red')\r\n\r\nnp.savetxt('test_yy.csv', p, delimiter = ',')\r\nnp.savetxt('pre_yy.csv', q, delimiter = ',')\r\n","sub_path":"model/att_multi_channel_lstm.py","file_name":"att_multi_channel_lstm.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"172880876","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Test of common schema\n\n:Author: Saahith Pochiraju <saahith116@gmail.com>\n:Date: 2017-07-31\n:Copyright: 2017, Karr Lab\n:License: MIT\n\"\"\"\nimport unittest\nfrom datanator.core import common_schema, models\nfrom datanator.data_source import pax\nimport flask\nimport tempfile\nimport shutil\nimport random\nimport os\nfrom six.moves import reload_module\nfrom sqlalchemy_utils.functions import drop_database\n\n\n\nclass TestExistingDatabase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.flk = common_schema.CommonSchema()\n\n def test_repr_methods(self):\n\n for tablename in 
self.flk.base_model.metadata.tables.keys():\n for c in models.db.Model._decl_class_registry.values():\n if hasattr(c, '__tablename__') and c.__tablename__ == tablename:\n self.assertEqual(str(self.flk.session.query(c).first().__repr__()), str(self.flk.session.query(c).first()))\n\n\n def test_data_loaded(self):\n session = self.flk.session\n taxon = session.query(models.Taxon).filter_by(ncbi_id = 882).first()\n self.assertEqual(taxon.name, 'Desulfovibrio vulgaris str. Hildenborough')\n\n subunit = session.query(models.ProteinSubunit).filter_by(gene_name = 'TFAP2A').first()\n self.assertEqual(subunit.uniprot_id, 'P05549')\n self.assertEqual(subunit.class_name, 'Basic helix-span-helix factors (bHSH)')\n binding = session.query(models.DNABindingDataset).filter_by(subunit_id = subunit.subunit_id).first()\n data = session.query(models.DNABindingData).filter_by(dataset_id = binding.dataset_id).first()\n self.assertEqual(data.position, 1)\n self.assertEqual(data.frequency_g, 185)\n\n def test_size(self):\n session = self.flk.session\n\n subunits = session.query(models.ProteinSubunit).all()\n self.assertGreater(len(subunits), 20000)\n\n\n@unittest.skip('skip')\nclass TestLoadingDatabase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.cache_dirname = tempfile.mkdtemp()\n cls.cs = common_schema.CommonSchema(\n name = 'TestCommonSchema',\n cache_dirname = cls.cache_dirname,\n clear_content=True,\n load_content=True, max_entries=10,\n verbose=True, test=True)\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.cache_dirname)\n drop_database(cls.cs.engine.url)\n\n def test_ncbi(self):\n session = self.cs.session\n tax = session.query(models.Taxon).filter_by(name = 'Homo sapiens').all()\n self.assertGreaterEqual(len(tax), 1)\n self.assertEqual(tax[0].ncbi_id, 9606)\n\n\n taxon = session.query(models.Taxon).filter_by(ncbi_id = 882).first()\n self.assertEqual(taxon.name, 'Desulfovibrio vulgaris str. 
Hildenborough')\n\n\n def test_pax(self):\n session = self.cs.session\n\n pax_compare = pax.Pax(cache_dirname = self.cache_dirname, download_backups = True, load_content = False)\n pax_session = pax_compare.session\n\n dataset = session.query(models.AbundanceDataSet).first()\n comparison = pax_session.query(pax.Dataset).filter_by(file_name = dataset.file_name).first()\n self.assertEqual(dataset.score, comparison.score)\n self.assertEqual(dataset.weight, comparison.weight)\n self.assertEqual(dataset.coverage, comparison.coverage)\n\n metadata = session.query(models.Metadata).filter_by(name = dataset.file_name).first()\n taxon = session.query(models._metadata_taxon).filter_by(_metadata_id = metadata.id).first()\n self.assertEqual(taxon.taxon_id, comparison.taxon_ncbi_id)\n\n\n def test_jaspar(self):\n session = self.cs.session\n subunit = session.query(models.ProteinSubunit).filter_by(gene_name = 'TFAP2A').first()\n self.assertEqual(subunit.uniprot_id, 'P05549')\n self.assertEqual(subunit.class_name, 'Basic helix-span-helix factors (bHSH)')\n binding = session.query(models.DNABindingDataset).filter_by(subunit_id = subunit.subunit_id).first()\n data = session.query(models.DNABindingData).filter_by(dataset_id = binding.dataset_id).first()\n self.assertEqual(data.position, 1)\n self.assertEqual(data.frequency_g, 185)\n\n metadata = session.query(models.Metadata).filter_by(name = 'RUNX1 Binding Motif').first()\n self.assertEqual(metadata.resource[0]._id, '8413232')\n\n\n def test_ecmdb(self):\n session = self.cs.session\n metabolite = session.query(models.Metabolite).filter_by(name = 'Deoxyuridine').first()\n self.assertEqual(metabolite.description, \"2'-Deoxyuridine is a naturally occurring nucleoside. It is similar in chemical structure to uridine, but without the 2'-hydroxyl group. 
It is considered to be an antimetabolite that is converted to deoxyuridine triphosphate during DNA synthesis.\")\n structure = session.query(models.Structure).get(metabolite.structure_id)\n self.assertEqual(structure._value_inchi , 'InChI=1S/C9H12N2O5/c12-4-6-5(13)3-8(16-6)11-2-1-7(14)10-9(11)15/h1-2,5-6,8,12-13H,3-4H2,(H,10,14,15)/t5-,6+,8+/m0/s1')\n\n\n def test_arrayexpress(self):\n session = self.cs.session\n sample = session.query(models.RNASeqDataSet).filter_by(experiment_accession_number = 'E-MTAB-6272', sample_name='Sample 13').first()\n self.assertEqual(sample.assay, 'Sample 13')\n self.assertEqual(sample.ensembl_organism_strain, \"mus_musculus\")\n self.assertEqual(sample.read_type, \"single\")\n self.assertEqual(sample.full_strain_specificity, True)\n self.assertEqual(sample.reference_genome[0].download_url, \"ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.GRCm38.cdna.all.fa.gz\")\n #print(\"here:{}\".format(sample._metadata_id))\n self.assertEqual(len(sample._metadata.characteristic), 7)\n #print(\"here:{}\".format(sample._metadata_id))\n self.assertEqual(len(sample._metadata.variable), 2)\n\n\n experiment = session.query(models.RNASeqExperiment).filter_by(accession_number = 'E-MTAB-6454').first()\n self.assertEqual(len(experiment.samples), 36)\n self.assertEqual(experiment.accession_number, 'E-MTAB-6454')\n self.assertEqual(experiment.exp_name, \"Gene expression profiling across ontogenetic stages in wood white (Leptidea sinapis) reveals pathways linked to butterfly diapause regulation\")\n self.assertEqual(experiment._experimentmetadata.description[:12], \"In temperate\")\n #self.assertEqual(experiment.has_fastq_files\n\n\n def test_sabio(self):\n session = self.cs.session\n metabolite = session.query(models.Metabolite).filter_by(metabolite_name = 'Peptide').first()\n self.assertEqual(metabolite._is_name_ambiguous , 1)\n structure = session.query(models.Structure).get(metabolite.structure_id)\n self.assertEqual(structure._value_smiles, '[*]C(N)C(=O)NC([*])C(O)=O')\n\n metadata = session.query(models.Metadata).filter_by(name = 'Kinetic Law 1').first()\n cell_line = session.query(models._metadata_cell_line).filter_by(_metadata_id = metadata.id).first()\n name = session.query(models.CellLine).get(cell_line.cell_line_id)\n self.assertEqual(name.name , 'variant DSAI (N76D/N87S/S103A/V104I)')\n\n\n q = session.query(models.KineticLaw) \\\n .join((models.Metadata, models.KineticLaw._metadata)).join((models.Resource, models.Metadata.resource))\\\n .filter(models.Resource.namespace == 'ec-code').filter(models.Resource._id.in_(['3.4.21.62']))\n\n compare = session.query(models.KineticLaw).filter_by(enzyme_id = q.first().enzyme_id).all()\n\n self.assertEqual(set([n.kinetic_law_id for n in q.all()]),\n set([c.kinetic_law_id for c in compare]))\n\n\n resource = session.query(models.Resource).filter_by(namespace = 'ec-code').filter_by(_id = '3.4.21.62').all()\n self.assertEqual(len(resource), 1)\n\n\n def test_corum(self):\n session = self.cs.session\n subunit = session.query(models.ProteinSubunit).filter_by(subunit_name = 'Histone deacetylase 5').first()\n self.assertEqual(subunit.uniprot_id, 'Q9UQL6')\n self.assertEqual(subunit.entrez_id, 10014)\n complex_ = session.query(models.ProteinComplex).get(subunit.proteincomplex_id)\n self.assertEqual(complex_.complex_name, 'BCL6-HDAC5 complex')\n\n metadata = session.query(models.Metadata).filter_by(name = 'BCL6-HDAC7 complex').first()\n resource = session.query(models._metadata_resource).filter_by(_metadata_id = 
metadata.id).first()\n pubmed = session.query(models.Resource).get(resource.resource_id)\n self.assertEqual(pubmed._id, '11929873')\n\n\n def test_intact_interactions(self):\n session = self.cs.session\n\n interact = session.query(models.ProteinInteraction).filter_by(protein_a = 'P49418').all()\n self.assertEqual(set([c.loc_b for c in interact]), set([ '-']))\n\n\n for items in interact:\n self.assertTrue(items._metadata)\n\n interact = session.query(models.ProteinInteraction).filter_by(protein_a = 'vrptraada').first()\n self.assertEqual(interact._metadata.resource[0].namespace, 'pubmed')\n self.assertEqual(interact._metadata.resource[0]._id, '10542231')\n\n\n def test_intact_complex_added(self):\n session = self.cs.session\n\n plex = session.query(models.ProteinComplex).filter_by(complex_name = 'LSM1-7-PAT1 complex, variant LSM1A-LSM3B-LSM6B-PAT1').all()\n self.assertEqual(len(plex), 1)\n self.assertEqual(plex[0].go_id, '1990726|0000932|0003729|0000290')\n self.assertEqual(plex[0].su_cmt, 'Q0WPK4(1)|O22823(1)|Q9C6K5(1)|F4K4E3(1)|Q9SI54(1)|Q9FKB0(1)|Q1H595(1)|Q945P8(1)')\n self.assertEqual(len(plex[0].protein_subunit), 8)\n\n\n def test_uniprot_added(self):\n session = self.cs.session\n\n uni = session.query(models.ProteinSubunit).filter_by(uniprot_id = 'Q72DQ8').first()\n self.assertEqual(uni.subunit_name, 'PYRH_DESVH')\n self.assertEqual(uni.length, '238')\n","sub_path":"tests/core/test_common_schema.py","file_name":"test_common_schema.py","file_ext":"py","file_size_in_byte":10049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"593507971","text":"import sys\nfrom InstagramAPI import InstagramAPI\n\napi = InstagramAPI(sys.argv[1], sys.argv[2])\napi.login()\n\nfollowers = api.getTotalFollowers(api.username_id)\nfollowerString = str(str(followers).encode('utf-8'))\nfollowerString = followerString[2:len(followerString) - 1]\nf = open(\"followers.txt\", \"a\")\nf.write(followerString)","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"567734615","text":"# To change this template, choose Tools | Templates\n# and open the template in the editor.\n\nfrom plugins import plugin\n\nclass pluginmanager(plugin):\n def __init__(self, bot):\n plugin.__init__(self, bot)\n self.match = { (r\"!load .+\", \"#MinoBot\") : self.load,\n (r\"!unload .+\", \"#MinoBot\") : self.unload,\n (r\"!reload .+\", \"#MinoBot\") : self.reload,\n (r\"!reloadall\", \"#MinoBot\") : self.reload_all }\n\n def load(self, channel, nick, msg):\n if self.bot.load_plugin(msg.split()[1]):\n self.msg(channel, \"Plugin '{0}' has been successfully loaded.\"\n .format(msg.split()[1]))\n else:\n self.msg(channel, \"Plugin '{0}' has failed to load.\"\n .format(msg.split()[1]))\n\n def unload(self, channel, nick, msg):\n if self.bot.unload_plugin(msg.split()[1]):\n self.msg(channel, \"Plugin '{0}' has been successfully unloaded.\"\n .format(msg.split()[1]))\n else:\n self.msg(channel, \"Plugin '{0}' has failed to unload.\"\n .format(msg.split()[1]))\n\n def reload(self, channel, nick, msg):\n if self.bot.reload_plugin(msg.split()[1]):\n self.msg(channel, \"Plugin '{0}' has been successfully reloaded.\"\n .format(msg.split()[1]))\n else:\n self.msg(channel, \"Plugin '{0}' has failed to reload.\"\n .format(msg.split()[1]))\n\n def reload_all(self, channel, nick, msg):\n failed = \"\"\n for plugin in self.bot.plugins:\n if 
self.bot.reload_plugin(plugin):\n pass\n else:\n failed += plugin + \", \"\n if not failed:\n self.msg(channel, \"All plugins were reloaded successfully.\")\n else:\n self.msg(channel, \"The following plugins failed to load: {}\"\n .format(failed.rstrip(\", \")))","sub_path":"src/plugins/pluginmanager.py","file_name":"pluginmanager.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"643773575","text":"__author__ = 'tarek'\nimport pandas as pd\nimport urllib\nimport time\nimport sys\nfrom twitterSentiment.models import Company, CompanyQuoteHistory\n\n#script that loops over every company and then retrieve the start quote and end quote for the time of the tweets\n\n\ndef company_quote_history_data(company_id, stock, startDate, endDate):\n #function to query Yahoo YQL and return the financial financialdata for a stock based on stock symbol.\n #the variables are selected according to Altman Z-Score\n stock = stock.strip('$')\n baseurl = \"https://query.yahooapis.com/v1/public/yql?\"\n #extract financialdata from balance sheet\n yql_bs_query = \"select * from yahoo.finance.historicaldata where symbol = '\"+stock+\"' and startDate= '\"+startDate+\"' and endDate = '\"+endDate+\"'\"\n yql_bs_url = baseurl + urllib.parse.urlencode({'q':yql_bs_query}) + \"&format=json&diagnostics=true&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=\"\n as_json = pd.io.json.read_json(yql_bs_url)\n if as_json[\"query\"][\"results\"] is not None:\n if as_json[\"query\"][\"results\"][\"quote\"] is not None:\n for quote in as_json[\"query\"][\"results\"][\"quote\"]:\n print(quote)\n if quote is not None:\n try:\n aCompanyQuote = CompanyQuoteHistory(company_id = company_id,\n date = quote[\"Date\"],\n low = quote[\"Low\"],\n high = quote[\"High\"],\n close = quote[\"Close\"],\n open = quote[\"Open\"],\n volume = quote[\"Volume\"],\n adjs_close = quote[\"Adj_Close\"],\n symbol = quote[\"Symbol\"])\n aCompanyQuote.save()\n except:\n print (\"error with stock:\", stock, \"error:\",sys.exc_info())\n else:\n print('done')\n return True\n\n\ndef run():\n get_companies = Company.objects.all()\n for company in get_companies:\n company_quote_history_data(company.id, company.symbol, '2016-09-01', '2016-09-05')","sub_path":"scripts/company_historicalquotes_Yahoo_extract.py","file_name":"company_historicalquotes_Yahoo_extract.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336118213","text":"from flask import Flask, Blueprint, render_template, request\nfrom . 
import db\nfrom flask_login import login_required, current_user\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport warnings\nwarnings.filterwarnings('ignore')\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.text import Tokenizer\n\npath = \"ai.txt\"\nlist_sentence=[]\nwith open(path, encoding=\"utf8\") as f:\n train_lines = f.readlines()\n for line in train_lines:\n line = line.split('__eou__')\n for i in range(len(line)):\n line[i] = line[i].strip()\n list_sentence.append(line)\n\nget_Question = []\nfor i in list_sentence:\n get_Question.append(i[0])\n\nget_Answer = []\nfor i in list_sentence:\n get_Answer.append(i[1])\n\nlabels = []\nfor i in range(len(get_Answer)):\n labels.append(i)\n\ndf = pd.DataFrame({\"questions\": get_Question, \"answers\": get_Answer, \"label\": labels})\n\ndef predict_class(msg):\n model = keras.models.load_model('project/model.h5')\n\n with open('project/token.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n with open('project/label.pickle', 'rb') as enc:\n lbl_encoder = pickle.load(enc)\n\n max_len = 20\n\n result = model.predict(keras.preprocessing.sequence.pad_sequences(tokenizer.texts_to_sequences([msg]),\n truncating='post', maxlen=max_len))\n tag = lbl_encoder.inverse_transform([np.argmax(result)])\n new_tag = tag.astype(np.int)\n for i in range(len(df)):\n if i == new_tag:\n responses = df['answers'][i]\n return responses\n\n\ndef chatbot_response(msg):\n res = predict_class(msg)\n return res\n\nmain = Blueprint('main', __name__)\n\n@main.route('/')\ndef index():\n return render_template('index.html')\n\n@main.route('/chatbot')\n@login_required\ndef chatbot():\n return render_template('chatbot.html', name=current_user.name)\n\n@main.route(\"/get\")\ndef get_bot_response():\n userText = request.args.get('msg')\n return chatbot_response(userText)\n","sub_path":"Chatbot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"241340767","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\n##############################\n# U-NET\n##############################\n\nclass UNetDown(nn.Module):\n def __init__(self, in_size, out_size, bn=True, dropout=0.0):\n super(UNetDown, self).__init__()\n model = [ nn.Conv2d(in_size, out_size, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n\n if bn:\n model += [nn.InstanceNorm2d(out_size)]\n\n if dropout:\n model += [nn.Dropout(dropout)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n\nclass UNetUp(nn.Module):\n def __init__(self, in_size, out_size, dropout=0.0):\n super(UNetUp, self).__init__()\n model = [ nn.Upsample(scale_factor=2),\n nn.Conv2d(in_size, out_size, 3, stride=1, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.InstanceNorm2d(out_size) ]\n\n if dropout:\n model += [nn.Dropout(dropout)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x, skip_input):\n x = self.model(x)\n out = torch.cat((x, skip_input), 1)\n #out = torch.add(x, skip_input)\n return out\n\nclass GeneratorUNet(nn.Module):\n def __init__(self, in_channels=3, out_channels=3):\n super(GeneratorUNet, self).__init__()\n\n self.down1 = UNetDown(in_channels, 64, bn=False)\n self.down2 = UNetDown(64, 128)\n self.down3 = UNetDown(128, 256)\n self.down4 = UNetDown(256, 512, dropout=0.5)\n self.down5 = 
UNetDown(512, 512, dropout=0.5)\n self.down6 = UNetDown(512, 512, dropout=0.5)\n self.down7 = UNetDown(512, 512, dropout=0.5)\n\n self.up1 = UNetUp(512, 512, dropout=0.5)\n self.up2 = UNetUp(1024, 512, dropout=0.5)\n self.up3 = UNetUp(1024, 512, dropout=0.5)\n self.up4 = UNetUp(1024, 256)\n self.up5 = UNetUp(512, 128)\n self.up6 = UNetUp(256, 64)\n\n\n final = [ nn.Upsample(scale_factor=2),\n nn.Conv2d(128, out_channels, 3, 1, 1),\n nn.Tanh() ]\n self.final = nn.Sequential(*final)\n\n def forward(self, x):\n # U-Net generator with skip connections from encoder to decoder\n d1 = self.down1(x)\n d2 = self.down2(d1)\n d3 = self.down3(d2)\n d4 = self.down4(d3)\n d5 = self.down5(d4)\n d6 = self.down6(d5)\n d7 = self.down7(d6)\n u1 = self.up1(d7, d6)\n u2 = self.up2(u1, d5)\n u3 = self.up3(u2, d4)\n u4 = self.up4(u3, d3)\n u5 = self.up5(u4, d2)\n u6 = self.up6(u5, d1)\n\n return self.final(u6)\n\n\n##############################\n# RESNET\n##############################\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_features):\n super(ResidualBlock, self).__init__()\n\n conv_block = [ nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features) ]\n\n self.conv_block = nn.Sequential(*conv_block)\n\n def forward(self, x):\n return x + self.conv_block(x)\n\nclass GeneratorResNet(nn.Module):\n def __init__(self, in_channels=3, out_channels=3, resblocks=9):\n super(GeneratorResNet, self).__init__()\n\n # Initial convolution block\n model = [ nn.ReflectionPad2d(3),\n nn.Conv2d(in_channels, 64, 7),\n nn.InstanceNorm2d(64),\n nn.ReLU(inplace=True) ]\n\n # Downsampling\n in_features = 64\n out_features = in_features*2\n for _ in range(2):\n model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace=True) ]\n in_features = out_features\n out_features = in_features*2\n\n # Residual blocks\n for _ in range(resblocks):\n model += [ResidualBlock(in_features)]\n\n # Upsampling\n out_features = in_features//2\n for _ in range(2):\n model += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace=True) ]\n in_features = out_features\n out_features = in_features//2\n\n # Output layer\n model += [ nn.ReflectionPad2d(3),\n nn.Conv2d(64, out_channels, 7),\n nn.Tanh() ]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n\nclass Discriminator(nn.Module):\n def __init__(self, in_channels=3):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, stride, normalize):\n \"\"\"Returns layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = in_channels*2\n for out_filters, stride, normalize in [ (64, 2, False),\n (128, 2, True),\n (256, 2, True),\n (512, 2, True)]:\n layers.extend(discriminator_block(in_filters, out_filters, stride, normalize))\n in_filters = out_filters\n\n # Output layer\n layers.append(nn.Conv2d(out_filters, 1, 3, 1, 1))\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, img_A, img_B):\n # Concatenate image and condition image by channels to produce input\n img_input = torch.cat((img_A, 
img_B), 1)\n return self.model(img_input)\n","sub_path":"implementations/pix2pix/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"300195931","text":"from bs4 import BeautifulSoup\nimport requests\nquote = 'https://en.wikipedia.org/wiki/Machine_learning'\n\npage = requests.get(quote)\n\nc = page.content\n\nsoup = BeautifulSoup(c, 'html.parser')\n\n#table-responsive question-list-table\n\nlist_data = soup.find_all('div', {'class' : 'toc'})\nlist_cont = []\nfor data in list_data:\n\tanc = data.find_all('a')\n\tfor a in anc:\n\t\ttxt = a.text.strip()\n\t\tprint(txt)\n\t\tlist_cont.append(txt)\n\nprint(len(list_cont))","sub_path":"trydjango19/bsTrual.py","file_name":"bsTrual.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"144251821","text":"\"\"\"\nThis module consists of methods to process downloaded GEE data. The starting\npoint is a json file written out at the end of the downloading step. This \nmodule cleans, resamples, and reformats the data to make it ready for analysis.\n\n\"\"\"\n\nimport json\nimport math\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\n\nfrom pyveg.src.data_analysis_utils import write_to_json\n\n\ndef read_json_to_dataframes(filename):\n \"\"\"\n Read a json file and convert the result to a dict of DataFrame.\n\n Parameters\n ----------\n filename : str\n Full path to input json file.\n\n Returns\n ----------\n dict\n A dict of the saved results in a DataFrame format. Keys are\n names of collections and the values are DataFrame of results\n for that collection.\n \"\"\"\n\n # check file exists\n if not os.path.exists(filename):\n raise FileNotFoundError(f'Could not find file \"{os.path.abspath(filename)}\".')\n\n # json read\n json_file = open(filename)\n data = json.load(json_file)\n\n # start with empty output dataframes\n dfs = {}\n\n # loop over collections and make a DataFrame from the results of each\n for collection_name, coll_results in data.items():\n\n rows_list = []\n\n # loop over time series\n for date, time_point in coll_results['time-series-data'].items():\n\n # check we have data for this time point\n if time_point is None or time_point == {} or time_point == []:\n \n # add Null row if data is missing at this time point \n rows_list.append({'date': date})\n\n # if we are looking at veg data, loop over space points\n elif isinstance(list(time_point)[0], dict):\n for space_point in time_point:\n rows_list.append(space_point)\n\n # otherwise, just add the row\n else:\n # the key of each object in the time series is the date, and data\n # for this date should be the values. 
Here we just add the date\n # as a value to enable us to add the whole row in one go later.\n time_point['date'] = date\n rows_list.append(time_point)\n\n # make a DataFrame and add it to the dict of DataFrames\n df = pd.DataFrame(rows_list)\n df = df.drop(columns=['slope', 'offset', 'mean', 'std'], errors='ignore')\n df = df.sort_values(by='date')\n assert( df.empty == False )\n dfs[collection_name] = df\n\n return dfs\n\n\ndef make_time_series(dfs):\n \"\"\"\n Given a dictionary of DataFrames which may contian many rows per time point (corresponding\n to the network centrality values of different sub-locations), collapse this\n into a time series by calculating the mean and std of the different sub-\n locations at each date.\n\n Parameters\n ----------\n dfs : dict of DataFrame\n Input DataFrame read by `read_json_to_dataframes`.\n\n Returns\n ----------\n DataFrame\n The time-series results averaged over sub-locations.\n \"\"\"\n\n # the time series dataframe\n ts_df = pd.DataFrame(columns=['date'])\n\n # loop over collections\n for col_name, df in dfs.items():\n\n #  if vegetation data\n if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n # group by date to collapse all network centrality measurements\n groups = df.groupby('date')\n\n # get summaries\n means = groups.mean()\n stds = groups.std()\n\n # rename columns\n if 'COPERNICUS/S2' in col_name:\n s = 'S2_'\n elif 'LANDSAT' in col_name:\n s = 'L' + col_name.split('/')[1][-1] + '_'\n else: \n s = col_name + '_'\n means = means.rename(columns={c: s + c + '_mean' for c in means.columns})\n stds = stds.rename(columns={c: s + c + '_std' for c in stds.columns})\n\n # merge\n df = pd.merge(means, stds, on='date', how='inner')\n ts_df = pd.merge_ordered(ts_df, df, on='date', how='outer')\n\n # add climate data if availible\n elif 'ECMWF/ERA5/' in col_name:\n df = df.set_index('date')\n ts_df = pd.merge_ordered(ts_df, df, on='date', how='outer')\n\n # remove unneeded columns\n ts_df = ts_df.loc[:,~ts_df.columns.str.contains('latitude_std', case=False)] \n ts_df = ts_df.loc[:,~ts_df.columns.str.contains('longitude_std', case=False)]\n\n assert( ts_df.empty == False )\n return ts_df\n\n\ndef resample_time_series(series, period='MS'):\n \"\"\"\n Resample and interpolate a time series dataframe so we have one row\n per time period (useful for FFT)\n\n Parameters\n ----------\n df: DataFrame\n Dataframe with date as index\n col_name: string,\n Identifying the column we will pull out\n period: string\n Period for resampling\n \n Returns\n -------\n Series: \n pandas Series with datetime index, and one column, one row per day\n \"\"\"\n\n # give the series a date index if the DataFrame is not index by date already\n # if df.index.name != 'date':\n # series.index = df.date\n \n # just in case the index isn't already datetime type\n series.index = pd.to_datetime(series.index)\n\n # resample to get one row per time period\n rseries = series.resample(period).mean()\n new_series = rseries.interpolate()\n\n return new_series\n\n\ndef resample_dataframe(df, columns, period='MS'):\n \"\"\"\n Resample and interpolate a time series dataframe so we have one row\n per time period.\n\n Parameters\n ----------\n df: DataFrame\n Dataframe with date as index.\n columns: list\n List of column names to resample. 
\ndef resample_dataframe(df, columns, period='MS'):\n    \"\"\"\n    Resample and interpolate a time series dataframe so we have one row\n    per time period.\n\n    Parameters\n    ----------\n    df: DataFrame\n        Dataframe with date as index.\n    columns: list\n        List of column names to resample. Should contain numeric data.\n    period: string\n        Period for resampling.\n    \n    Returns\n    -------\n    DataFrame: \n        DataFrame with resampled time series in `columns`.\n    \"\"\"\n\n    # new empty df to deal with length mismatches after resampling\n    df_out = pd.DataFrame()\n\n    # for each column to resample\n    for column in columns:\n\n        # resample the column\n        series = df.set_index('date')[column]\n        df_out[column] = resample_time_series(series, period=period)\n\n    # generate a clean index\n    df_out = df_out.reset_index()\n\n    return df_out\n\n\ndef resample_data(dfs, period='MS'):\n    \"\"\"\n    Resample vegetation and rainfall DataFrames. Vegetation\n    DataFrames are resampled at the sub-image level.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n    period: string\n        Period for resampling.\n\n    Returns\n    ----------\n    dict of DataFrame\n        Resampled data.\n    \"\"\"\n\n    # loop over collections\n    for col_name, df in dfs.items():\n\n        #  if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n            \n            # specify veg columns to resample\n            columns = [c for c in df.columns if 'offset50' in c]\n\n            # group by (lat, long)\n            d = {}\n            for name, group in df.groupby(['latitude', 'longitude']):\n                d[name] = group\n\n            # for each sub-image\n            for key, df_ in d.items():\n                \n                # resample\n                df_ = resample_dataframe(df_, columns, period=period)\n\n                # replace df\n                d[key] = df_\n\n            # reconstruct the DataFrame\n            df = list(d.values())[0]\n            for df_ in list(d.values())[1:]:\n                df = df.append(df_)\n\n            # replace collection\n            dfs[col_name] = df\n\n        else: \n            # assume ERA5 data\n            columns = ['total_precipitation', 'mean_2m_air_temperature']\n\n            # resample\n            df = resample_dataframe(df, columns, period=period)\n\n            # replace collection\n            dfs[col_name] = df\n\n    return dfs\n\n
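# A minimal usage sketch of the pipeline above (illustrative; 'results_summary.json'\n# is a hypothetical filename for the json written by the download step):\n#\n#   dfs = read_json_to_dataframes('results_summary.json')\n#   dfs = resample_data(dfs, period='MS')  # one row per month\n#\n# Vegetation collections are resampled per (latitude, longitude) sub-image, so\n# gaps in each sub-image series are interpolated independently.\n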
\ndef drop_veg_outliers(dfs, column='offset50', sigmas=3.0):\n    \"\"\"\n    Loop over vegetation DataFrames and drop points in the\n    time series that are significantly far away from the mean\n    of the time series. Such points are assumed to be unphysical.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n    column : str\n        Name of the column to drop outliers on.\n    sigmas : float\n        Number of standard deviations a data point has to be\n        from the mean to be labelled as an outlier and dropped.\n\n    Returns\n    ----------\n    dict of DataFrame\n        Time series data for multiple sub-image locations with\n        some values in `column` potentially set to NaN.\n    \"\"\"\n\n    # loop over collections\n    for col_name, veg_df in dfs.items():\n\n        #  if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n            # group by (lat, long)\n            d = {}\n            for name, group in veg_df.groupby(['latitude', 'longitude']):\n                d[name] = group\n\n            # for each sub-image\n            for key, df_ in d.items():\n                # calculate residuals to the mean\n                res = (df_[column] - df_[column].mean()).abs()\n\n                # determine which are outliers\n                outlier = res > df_[column].std() * sigmas\n\n                # set to None\n                df_.loc[outlier, column] = None\n\n                # replace the df\n                d[key] = df_\n\n            # reconstruct the DataFrame\n            df = list(d.values())[0]\n            for df_ in list(d.values())[1:]:\n                df = df.append(df_)\n\n            # replace value in df\n            dfs[col_name] = df\n\n    return dfs\n\n\ndef smooth_veg_data(dfs, column='offset50', n=4):\n    \"\"\"\n    Loop over vegetation DataFrames and perform LOESS smoothing\n    on the time series of each sub-image.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n    column : str\n        Name of the column to drop outliers and smooth.\n    n : int\n        Number of neighbouring points to use in smoothing.\n\n    Returns\n    ----------\n    dict of DataFrame\n        Time series data for multiple sub-image locations with\n        new column for smoothed data and ci.\n    \"\"\"\n\n    # create a new dataframe to avoid overwriting input\n    dfs = dfs.copy()\n\n    # loop over collections\n    for col_name, df in dfs.items():\n\n        #  if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n            # remove outliers and smooth\n            df = smooth_all_sub_images(df, column=column, n=n)\n\n            # calculate ci\n            #df = get_confidence_intervals(df, column=column)\n\n            # replace DataFrame\n            dfs[col_name] = df\n\n    return dfs\n\n\ndef smooth_subimage(df, column='offset50', n=4, it=3):\n    \"\"\"\n    Perform LOWESS (Locally Weighted Scatterplot Smoothing) on the time\n    series of a single sub-image.\n\n    Parameters\n    ----------\n    df : DataFrame\n        Input DataFrame containing the time series for a single\n        sub-image.\n    column : string, optional\n        Name of the column in df to smooth.\n    n : int, optional\n        Size of smoothing window.\n    it : int, optional\n        Number of iterations of LOESS smoothing to perform.\n\n    Returns\n    ----------\n    DataFrame\n        The time-series DataFrame with a new column containing the\n        smoothed results.\n    \"\"\"\n    df.dropna(inplace=True)\n\n    # add a new column of datetime objects\n    df['datetime'] = pd.to_datetime(df['date'], format='%Y/%m/%d')\n\n    # extract data\n    xs = df['datetime']\n    ys = df[column]\n\n    # num_days_per_timepoint = (xs.iloc[1] - xs.iloc[0]).days\n    frac_data = min(n / len(ys), 1.0)\n\n    # perform smoothing\n    smoothed_y = lowess(ys, xs, is_sorted=True, return_sorted=False, frac=frac_data, it=it)\n\n    # add to df\n    df[column + '_smooth'] = smoothed_y\n    df[column + '_smooth_res'] = ys - smoothed_y\n\n    return df\n\n
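# A minimal sketch of the underlying statsmodels call used above, on made-up\n# data (illustrative only):\n#\n#   import numpy as np\n#   from statsmodels.nonparametric.smoothers_lowess import lowess\n#   ys = np.array([1.0, 2.0, 1.5, 3.0, 2.5])\n#   smoothed = lowess(ys, np.arange(5), frac=0.8, it=3, return_sorted=False)\n#\n# 'frac' is the fraction of the data used for each local fit (derived above from\n# the window size n), and 'it' is the number of robustifying iterations.\n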
def smooth_all_sub_images(df, column='offset50', n=4, it=3):\n    \"\"\"\n    Perform LOWESS (Locally Weighted Scatterplot Smoothing) on the time\n    series of a set of sub-images.\n\n    Parameters\n    ----------\n    df : DataFrame\n        DataFrame containing time series results for all sub-images,\n        with multiple rows per time point and (lat,long) point.\n    column : string, optional\n        Name of the column in df to smooth.\n    n : int, optional\n        Size of smoothing window.\n    it : int, optional\n        Number of iterations of LOESS smoothing to perform.\n\n    Returns\n    ----------\n    DataFrame\n        DataFrame of results with a new column containing a\n        LOESS smoothed version of the column `column`.\n    \"\"\"\n\n    # group by (lat, long)\n    d = {}\n    for name, group in df.groupby(['latitude', 'longitude']):\n        d[name] = group\n\n    # for each sub-image\n    for key, df_ in d.items():\n        # perform smoothing\n        d[key] = smooth_subimage(df_, column=column, n=n, it=it)\n\n    # reconstruct the DataFrame\n    df = list(d.values())[0]\n    for df_ in list(d.values())[1:]:\n        df = df.append(df_)\n\n    return df\n\n\ndef store_feature_vectors(dfs, output_dir):\n    \"\"\"\n    Write out all feature vector information to a csv file, to be read\n    later by the feature vector plotting script.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n    output_dir : str\n        Path to directory to save the csv.\n    \"\"\"\n\n    # loop over collections\n    for col_name, veg_df in dfs.items():\n\n        # if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n            # check the feature vectors are available\n            if 'feature_vec' not in veg_df.columns:\n                print('Could not find feature vectors.')\n                continue\n\n            # sort by date\n            veg_df = veg_df.sort_values(by='date').dropna()\n\n            # create a df to store the feature vectors\n            df = pd.DataFrame(value for value in veg_df.feature_vec)\n\n            # rename percentile columns\n            df = df.rename(columns={n: f'{(n+1)*5}th_percentile' for n in df.columns})\n\n            # reindex\n            df.index = veg_df.index\n\n            # add information\n            df.insert(0, 'date', veg_df['date'])\n            df.insert(1, 'latitude', veg_df['latitude'])\n            df.insert(2, 'longitude', veg_df['longitude'])\n\n            # save csv\n            if col_name == 'COPERNICUS/S2':\n                s = 'S2'\n            elif 'LANDSAT' in col_name:\n                s = 'L' + col_name.split('/')[1][-1] + '_'\n            else:\n                s = col_name\n\n            filename = os.path.join(output_dir, s+'_feature_vectors.csv')\n            df.to_csv(filename, index=False)\n\n\ndef fill_veg_gaps(dfs, missing):\n    \"\"\"\n    Loop through sub-image time series and replace any gaps with the mean\n    value of the same month in other years.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n\n    missing : dict of array\n        Missing time points where no sub-images were analysed for\n        each veg dataframe in `dfs`.\n    \"\"\"\n\n    # loop over collections\n    for col_name, veg_df in dfs.items():\n\n        # if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n            # group by (lat, long)\n            d = {}\n            for name, group in veg_df.groupby(['latitude', 'longitude']):\n                d[name] = group\n\n            # for each sub-image\n            for key, df_ in d.items():\n\n                # get lat, long of this sub-image\n                lats = df_.latitude.drop_duplicates().values\n                longs = df_.longitude.drop_duplicates().values\n                assert ( len(lats) == 1 )\n                assert ( len(longs) == 1 )\n                lat = lats[0]\n                long = longs[0]\n\n                # construct missing rows\n                missing_rows = [pd.Series({'date': date}) for date in missing[col_name]]\n\n                # add back in missing values if necessary\n                df_ = df_.append(missing_rows, 
ignore_index=True).sort_values(by='date')\n\n                # make a new 'month' column\n                df_['month'] = df_.date.str.split('-').str[1]\n\n                # group by month and get monthly means\n                monthly_means = df_.groupby('month').mean().offset50\n\n                # loop through dataframe\n                for index, row in df_.iterrows():\n\n                    # fill missing months with mean value\n                    if pd.isnull(row.offset50):\n                        this_month = row.month\n                        df_.loc[index, 'offset50'] = monthly_means.loc[this_month]\n                        df_.loc[index, 'latitude'] = lat\n                        df_.loc[index, 'longitude'] = long\n                        df_.loc[index, 'feature_vec'] = np.NaN\n\n                # drop month column and replace old df\n                df_ = df_.drop(columns='month')\n                d[key] = df_\n\n            # reconstruct the DataFrame\n            df = list(d.values())[0]\n            for df_ in list(d.values())[1:]:\n                df = df.append(df_)\n\n            dfs[col_name] = df\n\n    return dfs\n\n\ndef get_missing_time_points(dfs):\n    \"\"\"\n    Find missing time points for each vegetation dataframe in `dfs`,\n    and return a dict, with the same key as in `dfs`, but with values\n    corresponding to missing dates.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n\n    Returns\n    ----------\n    dict\n        Missing time points for each vegetation df.\n    \"\"\"\n\n    # determine missing vegetation time points\n    missing_points = {}\n\n    # loop over collections\n    for col_name, veg_df in dfs.items():\n\n        # if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n            # get the start of the vegetation time series\n            veg_start_date = veg_df.dropna().index[0]\n\n            # remove leading NaNs\n            veg_df = veg_df.loc[veg_start_date:]\n\n            # store missing time points\n            missing_points[col_name] = veg_df.drop_duplicates(subset='date', keep=False).date.values\n\n    return missing_points\n\n\ndef detrend_df(df, period='MS'):\n    \"\"\"\n    Remove seasonality from a DataFrame containing the time series\n    for a single sub-image.\n\n    Parameters\n    ----------\n    df : DataFrame\n        Time series data for a single sub-image location.\n    period : str, optional\n        Resample time series to this frequency and then infer\n        the lag to use for deseasonalizing.\n\n    Returns\n    ----------\n    DataFrame\n        Input with seasonality removed from time series columns.\n    \"\"\"\n\n    # infer lag from period\n    if period == 'MS':\n        lag = 12\n    else:\n        raise ValueError('Periods other than \"MS\" are not well supported yet!')\n\n    # new empty df to deal with length mismatches after resampling\n    df_out = pd.DataFrame()\n\n    # resample time series (in case not done already)\n    columns = [c for c in df.columns if any([s in c \n               for s in ['offset50', 'precipitation', 'temperature', 'ndvi']])]\n\n    df_out = resample_dataframe(df, columns, period=period)\n\n    # detrend veg and climate columns\n    for col in columns:\n        df_out[col] = df_out[col].diff(lag)\n\n    # need to keep this info for smoothing later\n    try:\n        df_out['latitude'] = df['latitude'].iloc[0]\n        df_out['longitude'] = df['longitude'].iloc[0]\n    except (KeyError, IndexError):\n        pass\n\n    return df_out\n\n\ndef detrend_data(dfs, period='MS'):\n    \"\"\"\n    Loop over each sub-image time series DataFrame and remove\n    time series seasonality by subtracting the previous year.\n    Remove seasonality from precipitation data in the same way.\n\n    Parameters\n    ----------\n    dfs : dict of DataFrame\n        Time series data for multiple sub-image locations.\n    period : str, optional\n        Resample time series to this frequency and then infer\n        the lag to use for deseasonalizing.\n\n    Returns\n    ----------\n    dict of DataFrame\n        Time series data for multiple sub-images with\n        seasonality removed.\n    \"\"\"\n\n    # don't 
overwrite input\n    dfs = dfs.copy()\n\n    for col_name, df in dfs.items():\n\n        # if vegetation data\n        if 'COPERNICUS/S2' in col_name or 'LANDSAT' in col_name:\n\n            # group by (lat, long)\n            d = {}\n            for name, group in df.groupby(['latitude', 'longitude'], as_index=False):\n                d[name] = group\n\n            # for each sub-image\n            for key, df_ in d.items():\n                d[key] = detrend_df(df_, period)\n\n            # reconstruct the DataFrame\n            df = list(d.values())[0]\n            for df_ in list(d.values())[1:]:\n                df = df.append(df_)\n\n            df.dropna(inplace=True)\n\n            dfs[col_name] = df\n\n        else:\n            # remove seasonality for weather data, this is a simpler time series\n            df = detrend_df(df, period)\n            df.dropna(inplace=True)\n            dfs[col_name] = df\n\n    return dfs\n\n\n
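# A worked note, added for clarity rather than taken from the original\n# module: the detrending above is plain seasonal differencing, i.e.\n# subtracting the value from one seasonal lag earlier. On a monthly\n# ('MS') series the lag is 12, so in pandas terms:\n#\n#     import pandas as pd\n#\n#     s = pd.Series(range(36),\n#                   index=pd.date_range('2000-01-01', periods=36, freq='MS'))\n#     deseasonalised = s.diff(12)   # the first 12 entries become NaN\n#\n# which is exactly what df_out[col].diff(lag) does column by column in\n# detrend_df().\n\n\n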
def preprocess_data(input_dir, drop_outliers=True, fill_missing=True,\n                    resample=True, smoothing=True, detrend=True,\n                    n_smooth=4, period='MS'):\n    \"\"\"\n    This function reads and processes data downloaded from GEE. Processing\n    can be configured by the function arguments. Processed data is\n    written to csv.\n\n    Parameters\n    ----------\n    input_dir : str\n        Path to the directory created during a GEE download job.\n    drop_outliers : bool, optional\n        Remove outliers in sub-image time series.\n    fill_missing : bool, optional\n        Fill missing points in the time series.\n    resample : bool, optional\n        Resample the time series using linear interpolation.\n    smoothing : bool, optional\n        Smooth the time series using LOESS smoothing.\n    detrend : bool, optional\n        Remove seasonal component by subtracting previous year.\n    n_smooth : int, optional\n        Number of time points to use for the smoothing window size.\n    period : str, optional\n        Pandas DateOffset string describing sampling frequency.\n\n    Returns\n    ----------\n    str\n        Path to the csv file containing processed data.\n    \"\"\"\n\n    # put processed data in a subdirectory of the input dir\n    output_dir = os.path.join(input_dir, 'processed_data')\n\n    # check input file exists\n    json_summary_path = os.path.join(input_dir, 'results_summary.json')\n    if not os.path.exists(json_summary_path):\n        raise FileNotFoundError(f'Could not find file \"{os.path.abspath(json_summary_path)}\".')\n\n    # make output subdir\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir, exist_ok=True)\n\n    # read all json files in the directory and produce a dataframe\n    print(f'Reading results from \"{os.path.abspath(json_summary_path)}\"...')\n\n    # read json file to dataframes\n    dfs = read_json_to_dataframes(json_summary_path)\n\n    # keep track of time points where data is missing (by default pandas\n    # groupby operations, which are used heavily in this module, drop NaNs)\n    missing = get_missing_time_points(dfs)\n    missing_json = {k : list(v) for k, v in missing.items()}\n    write_to_json(os.path.join(output_dir, 'missing_dates.json'), missing_json)\n\n    print('\\nPreprocessing data...')\n    print('-'*21)\n\n    # remove outliers from the time series\n    if drop_outliers:\n        print('- Dropping vegetation outliers...')\n        dfs = drop_veg_outliers(dfs, sigmas=3)\n\n    # use the same month in different years to fill gaps\n    if fill_missing:\n        print('- Filling gaps in sub-image time series...')\n        dfs = fill_veg_gaps(dfs, missing)\n\n    # LOESS smoothing on sub-image time series\n    if smoothing:\n        print('- Smoothing vegetation time series...')\n        dfs = smooth_veg_data(dfs, n=n_smooth)\n\n    # store feature vectors before averaging over sub-images\n    print('- Saving feature vectors...')\n    store_feature_vectors(dfs, output_dir)\n\n    # average over sub-images\n    ts_df = make_time_series(dfs)\n\n    # resample the averaged time series using linear interpolation\n    if resample:\n        print('- Resampling time series...')\n        columns = [c for c in ts_df.columns if any([s in c \n                   for s in ['offset50', 'precipitation', 'temperature']])]\n        ts_df = resample_dataframe(ts_df, columns, period=period)\n\n    # save as csv\n    ts_filename = os.path.join(output_dir, 'time_series.csv')\n    print(f'- Saving time series to \"{ts_filename}\".')\n    ts_df.to_csv(ts_filename, index=False)\n\n    # additionally save resampled & detrended time series\n    if detrend:\n        print('- Detrending time series...')\n\n        # remove seasonality from sub-image time series\n        dfs_detrended = detrend_data(dfs, period=period)\n\n        print('- Smoothing vegetation time series after removing seasonality...')\n        dfs_detrended_smooth = smooth_veg_data(dfs_detrended, n=12)\n\n        # combine over sub-images\n        ts_df_detrended_smooth = make_time_series(dfs_detrended_smooth)\n\n        # save output\n        ts_filename_detrended = os.path.join(output_dir, 'time_series_detrended.csv')\n        print(f'- Saving detrended time series to \"{ts_filename_detrended}\".')\n        ts_df_detrended_smooth.to_csv(ts_filename_detrended, index=False)\n\n    return output_dir, dfs  # for now return `dfs` for spatial plot compatibility\n","sub_path":"pyveg/src/analysis_preprocessing.py","file_name":"analysis_preprocessing.py","file_ext":"py","file_size_in_byte":25869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"322616574","text":"from rest_framework import status\nfrom rest_framework.test import APIRequestFactory\nfrom rest_framework.test import APITestCase\n\nfrom upday.models import Ticket, TicketBook\nfrom upday.modules.common.service.encrypt_service import EncryptService\nfrom upday.modules.ticket.views.ticket_views import BindView\nfrom 
upday.modules.ticket_book.views.ticket_book_views import CreateView, AddView, UpdateView, DeleteView\nfrom upday.test.tool.format import divide_line\n\nfrom upday.test import teacher_test_util, student_helper\nfrom upday.test.utils.channel_utils import ChannelUtils\nfrom upday.test.utils.project_utils import ProjectUtils\n\nproject_utils = ProjectUtils()\nchannel_utils = ChannelUtils()\n\n\nclass TicketTestCase(APITestCase):\n\n    def setUp(self):\n        print('ticket test start')\n        print('###########\\n###########\\n###########\\n###########\\n###########\\n###########\\n')\n\n    def test_ticket(self):\n        student_authorization = student_helper.init()\n        teacher_authorization, team_id = teacher_test_util.init()\n        ########################################################################################\n        # Create and update a project\n        ########################################################################################\n        project_id_1 = project_utils.create(teacher_authorization, team_id=team_id)\n        divide_line('Project 1 created successfully')\n        project_utils.update(teacher_authorization, team_id, project_id_1, 'Up Plan Name 1')\n        divide_line('Project 1 updated successfully')\n        ########################################################################################\n        # Create a ticket book\n        ########################################################################################\n        ticket_book_id = self.create_ticket_book(teacher_authorization, team_id=team_id)\n        divide_line('Ticket book 1 created successfully')\n        self.add_project(teacher_authorization, team_id=team_id, ticket_book_id=ticket_book_id, project_id=project_id_1,\n                         associator_id='', channel_id='')\n        divide_line('Project added to the ticket book successfully')\n        self.update_ticket_book(teacher_authorization, team_id=team_id, ticket_book_id=ticket_book_id, name='Ticket Book Name', coin=100)\n        divide_line('Ticket book updated successfully')\n        ########################################################################################\n        # Create a ticket\n        ########################################################################################\n        decrypted_ticket_book_id = EncryptService().decrypt_id('HASH_KEY_TICKET_BOOK_ID', ticket_book_id)\n        ticket_book = TicketBook.objects.get(id=decrypted_ticket_book_id)\n        ticket = Ticket.objects.create(ticket_book=ticket_book, index=1)\n        divide_line('Ticket created successfully')\n        ticket_id = EncryptService().encrypt_id('HASH_KEY_TICKET_ID', ticket.id)\n        self.bind_ticket(student_authorization, ticket_id=ticket_id)\n        divide_line('Ticket scanned and used successfully')\n        ########################################################################################\n        # Delete\n        ########################################################################################\n        self.delete_ticket_book(teacher_authorization, team_id=team_id, ticket_book_id=ticket_book_id)\n        divide_line('Ticket book deleted successfully')\n        project_utils.delete(teacher_authorization, team_id, project_id_1)\n        divide_line('Project 1 deleted successfully')\n        ########################################################################################\n        divide_line('Ticket module - test complete')\n\n    def create_ticket_book(self, authorization, team_id):\n        factory = APIRequestFactory()\n        request = factory.post(path='/api/ticket-book/create',\n                               data={'team_id': team_id},\n                               HTTP_AUTHORIZATION=authorization)\n        view = CreateView.as_view()\n        response = view(request)\n        print(response.data)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        ticket_book_id = response.data['ticket_book_id']\n        return ticket_book_id\n\n\n    def add_project(self, authorization, team_id, channel_id, project_id, ticket_book_id, associator_id):\n        factory = APIRequestFactory()\n        request = factory.post(path='/api/ticket-book/add',\n                               data={'team_id': team_id, 'channel_id': channel_id, 
'project_id': project_id,\n                                     'ticket_book_id': ticket_book_id, 'associator_id': associator_id},\n                               HTTP_AUTHORIZATION=authorization)\n        view = AddView.as_view()\n        response = view(request)\n        print(response.data)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\n    def update_ticket_book(self, authorization, team_id, ticket_book_id, name, coin):\n        factory = APIRequestFactory()\n        request = factory.post(path='/api/ticket-book/update',\n                               data={'team_id': team_id, 'ticket_book_id': ticket_book_id, 'name': name, 'coin': coin},\n                               HTTP_AUTHORIZATION=authorization)\n        view = UpdateView.as_view()\n        response = view(request)\n        print(response.data)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\n    def delete_ticket_book(self, authorization, team_id, ticket_book_id):\n        factory = APIRequestFactory()\n        request = factory.post(path='/api/ticket-book/delete',\n                               data={'team_id': team_id, 'ticket_book_id': ticket_book_id},\n                               HTTP_AUTHORIZATION=authorization)\n        view = DeleteView.as_view()\n        response = view(request)\n        print(response.data)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\n    def bind_ticket(self, authorization, ticket_id):\n        factory = APIRequestFactory()\n        request = factory.post(path='/api/ticket/bind',\n                               data={'ticket_id': ticket_id},\n                               HTTP_AUTHORIZATION=authorization)\n        view = BindView.as_view()\n        response = view(request)\n        print(response.data)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)","sub_path":"upday/test/ticket_test.py","file_name":"ticket_test.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"349493228","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n GetElevationDialog\n                                 A QGIS plugin\n Qgis 3.0 plugin for obtaining elevation data from the Google Maps API.\n                             -------------------\n        begin                : 2018-08-31\n        git sha              : $Format:%H$\n        copyright            : (C) 2018 by Rodrigo Sousa\n        email                : rodrigofrcs@hotmail.com\n ***************************************************************************/\n\n/***************************************************************************\n *                                                                         *\n *   This program is free software; you can redistribute it and/or modify  *\n *   it under the terms of the GNU General Public License as published by  *\n *   the Free Software Foundation; either version 2 of the License, or     *\n *   (at your option) any later version. 
*\n * *\n ***************************************************************************/\n\"\"\"\n\n\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom qgis.utils import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport os\nimport sys\nimport re\nfrom PyQt4 import QtCore, QtGui, uic\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'get_elevation_dialog_base.ui'))\n\n\nclass GetElevationDialog(QtGui.QDialog, FORM_CLASS):\n def __init__(self, parent=None):\n \"\"\"Constructor.\"\"\"\n super(GetElevationDialog, self).__init__(parent)\n # Set up the user interface from Designer.\n # After setupUI you can access any designer object by doing\n # self.<objectname>, and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n self.input_mode_default()\n self.outputSelectFileName.clicked.connect(self.browseSaveOutput)\n self.layerInputMode.toggled.connect(self.radio_input_layer)\n self.extentInputMode.toggled.connect(self.radio_input_extent)\n self.fileOutput.toggled.connect(self.radio_output)\n self.getExtent.clicked.connect(self.get_extent)\n self.oldPath = ''\n\n ###################\n #INPUT\n ###################\n\n def input_mode_default(self):\n self.labelLayer.setVisible(True)\n self.layersInput.setVisible(True)\n self.labelExtent.setVisible(False)\n self.labelExtentN.setVisible(False)\n self.labelExtentS.setVisible(False)\n self.labelExtentL.setVisible(False)\n self.labelExtentW.setVisible(False)\n self.labelExtentInterval.setVisible(False)\n self.extentInputInterval.setVisible(False)\n self.extentInputN.setVisible(False)\n self.extentInputS.setVisible(False)\n self.extentInputL.setVisible(False)\n self.extentInputW.setVisible(False)\n self.getExtent.setVisible(False)\n self.extentInputInterval.setText(\"0.010000\")\n\n def radio_input_layer(self):\n \"\"\"Choose between output as memory layer or as shapefile\"\"\"\n if self.layerInputMode.isChecked():\n self.labelLayer.setEnabled(True)\n self.layersInput.setEnabled(True)\n self.labelLayer.setVisible(True)\n self.layersInput.setVisible(True)\n else:\n self.labelLayer.setEnabled(False)\n self.layersInput.setEnabled(False)\n self.labelLayer.setVisible(False)\n self.layersInput.setVisible(False)\n\n def radio_input_extent(self):\n \"\"\"Choose between output as memory layer or as shapefile\"\"\"\n if self.extentInputMode.isChecked():\n self.labelExtent.setEnabled(True)\n self.labelExtentN.setEnabled(True)\n self.labelExtentS.setEnabled(True)\n self.labelExtentL.setEnabled(True)\n self.labelExtentW.setEnabled(True)\n self.labelExtentInterval.setEnabled(True)\n self.extentInputInterval.setEnabled(True)\n self.extentInputN.setEnabled(True)\n self.extentInputS.setEnabled(True)\n self.extentInputL.setEnabled(True)\n self.extentInputW.setEnabled(True)\n self.labelExtent.setVisible(True)\n self.labelExtentN.setVisible(True)\n self.labelExtentS.setVisible(True)\n self.labelExtentL.setVisible(True)\n self.labelExtentW.setVisible(True)\n self.labelExtentInterval.setVisible(True)\n self.extentInputInterval.setVisible(True)\n self.extentInputN.setVisible(True)\n self.extentInputS.setVisible(True)\n self.extentInputL.setVisible(True)\n self.extentInputW.setVisible(True)\n self.getExtent.setVisible(True)\n else:\n self.labelExtent.setEnabled(False)\n self.labelExtentN.setEnabled(False)\n self.labelExtentS.setEnabled(False)\n self.labelExtentL.setEnabled(False)\n self.labelExtentW.setEnabled(False)\n 
self.labelExtentInterval.setEnabled(False)\n self.extentInputInterval.setEnabled(False)\n self.extentInputN.clear()\n self.extentInputN.setEnabled(False)\n self.extentInputS.clear()\n self.extentInputS.setEnabled(False)\n self.extentInputL.clear()\n self.extentInputL.setEnabled(False)\n self.extentInputW.clear()\n self.extentInputW.setEnabled(False)\n self.labelExtent.setVisible(False)\n self.labelExtentN.setVisible(False)\n self.labelExtentS.setVisible(False)\n self.labelExtentL.setVisible(False)\n self.labelExtentW.setVisible(False)\n self.labelExtentInterval.setVisible(False)\n self.extentInputInterval.setVisible(False)\n self.extentInputN.setVisible(False)\n self.extentInputS.setVisible(False)\n self.extentInputL.setVisible(False)\n self.extentInputW.setVisible(False)\n self.getExtent.setVisible(False)\n\n def get_extent(self):\n xmin=iface.mapCanvas().extent().xMinimum()\n xmax=iface.mapCanvas().extent().xMaximum()\n ymin=iface.mapCanvas().extent().yMinimum()\n ymax=iface.mapCanvas().extent().yMaximum()\n self.extentInputW.setText(str(xmin))\n self.extentInputL.setText(str(xmax))\n self.extentInputN.setText(str(ymax))\n self.extentInputS.setText(str(ymin))\n\n\n ###################\n #OUTPUT\n ###################\n\n def browseSaveOutput(self):\n \"\"\"Opens a window to set the location of the output file.\"\"\"\n fileName0 = QtGui.QFileDialog.getSaveFileName(self, 'Salvar arquivo', self.oldPath, \"Shapefile (*.shp);;Todos os arquivos (*)\")\n fileName = os.path.splitext(str(fileName0))[0]+'.shp'\n if os.path.splitext(str(fileName0))[0] != '':\n self.oldPath = os.path.dirname(fileName)\n layername = os.path.splitext(os.path.basename(str(fileName)))[0]\n if (layername=='.shp'):\n return\n self.outputFileName.setText(fileName)\n\n def radio_output(self):\n \"\"\"Choose between output as memory layer or as shapefile\"\"\"\n if self.fileOutput.isChecked():\n self.labelOutputFileName.setEnabled(True)\n self.outputFileName.setEnabled(True)\n self.outputSelectFileName.setEnabled(True)\n else:\n self.labelOutputFileName.setEnabled(False)\n self.outputFileName.clear()\n self.outputFileName.setEnabled(False)\n self.outputSelectFileName.setEnabled(False)\n\n","sub_path":"get_elevation_dialog.py","file_name":"get_elevation_dialog.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"464536043","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 20:33:44 2019\n\n@author: stvyh\n\"\"\"\n\nfrom PyQt5.QtGui import QIcon #,QPainter, QPixmap, QPainterPath, QColor\nfrom PyQt5.QtCore import Qt #QObject, QPointF, QPropertyAnimation, pyqtProperty\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton\nimport sys\n\nfrom PyQt5.QtWidgets import (QWidget, QHBoxLayout, QFrame, \n QSplitter, QStyleFactory, QApplication)\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n \n self.title = \"PyQt5 Window\"\n self.top = 100\n self.left = 100\n self.width = 680\n self.height = 500\n \n self.InitWindow()\n \n def InitWindow(self):\n self.setWindowIcon(QIcon(\"icon.png\"))\n self.setWindowTitle(self.title)\n self.setGeometry(self.top, self.left, self.width, self.height)\n self.show()\n\nclass Example(QWidget):\n \n def __init__(self):\n super().__init__()\n \n self.initUI()\n \n \n def initUI(self): \n\n hbox = QHBoxLayout(self)\n\n topleft = QFrame(self)\n topleft.setFrameShape(QFrame.StyledPanel)\n \n topright = QFrame(self)\n topright.setFrameShape(QFrame.StyledPanel)\n\n 
button = QPushButton(\"Necum!\")\n\n bottom = QFrame(self)\n bottom.setFrameShape(QFrame.StyledPanel)\n\n splitter1 = QSplitter(Qt.Horizontal)\n splitter1.addWidget(topleft)\n splitter1.addWidget(topright)\n splitter1.addWidget(button)\n\n splitter2 = QSplitter(Qt.Vertical)\n splitter2.addWidget(splitter1)\n splitter2.addWidget(bottom)\n\n hbox.addWidget(splitter2)\n self.setLayout(hbox)\n \n self.setGeometry(300, 300, 300, 200)\n self.setWindowTitle('QSplitter')\n self.show()\n\n \nApp = QApplication(sys.argv)\n#window = Window()\nex = Example()\nsys.exit(App.exec())\n\n","sub_path":"Examples/_PyQt5_Base.py","file_name":"_PyQt5_Base.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"629145220","text":"import os\nimport logging\nimport threading\n\nloglevel = os.getenv('SYN_TEST_LOG_LEVEL', 'WARNING')\n_logformat = '%(asctime)s [%(levelname)s] %(message)s [%(filename)s:%(funcName)s:%(threadName)s:%(processName)s]'\nlogging.basicConfig(level=loglevel, format=_logformat)\n\n# import synapse.lib.scope as s_scope\n\ntestdir = os.path.dirname(__file__)\n\ndef getTestPath(*paths):\n return os.path.join(testdir, *paths)\n\nclass CallBack:\n '''\n An easy to use test helper for *synchronous* callbacks.\n '''\n def __init__(self, retval=None):\n self.args = None\n self.kwargs = None\n self.retval = retval\n self.event = threading.Event()\n\n def __call__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self.event.set()\n return self.retval\n\n def wait(self, timeout=None):\n return self.event.wait(timeout=timeout)\n","sub_path":"synapse/tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580950078","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Wei, Shuowen\n\nhttps://leetcode.com/problems/maximal-square/\n\nLC221, LC1277\n\"\"\"\nclass Solution(object):\n def maximalSquare(self, matrix):\n \"\"\"\n :type matrix: List[List[str]]\n :rtype: int\n \"\"\"\n row, col = len(matrix), len(matrix[0])\n res = 0\n for r in range(row):\n for c in range(col):\n if matrix[r][c] == '1':\n if r == 0 or c == 0:\n res = max(res, 1)\n else:\n new_cell_val = min(int(matrix[r-1][c]),\n int(matrix[r][c-1]), \n int(matrix[r-1][c-1])) + int(matrix[r][c])\n res = max(res, new_cell_val)\n matrix[r][c] = new_cell_val\n return res**2\n ","sub_path":"Medium/LC221.py","file_name":"LC221.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"63230927","text":"import deep_dream as dd\nfrom PIL import Image\n\n\nif __name__ == '__main__':\n\tcnn = dd.CNN(dd.GOOGLENET_BVLC, cpu_workers=64)\n\tinput_img = Image.open('kodim/img0022.jpg').resize((1538, 1024), Image.LANCZOS)\n\n\tguide_img = Image.open('example2_guide.jpg').resize((1538, 1024), Image.LANCZOS)\n\n\toutput_img = cnn.dream_guided(input_img, guide_img, {'inception_4a/pool_proj': 1})\n\t\n\tdd.to_image(output_img).save(\"./Meine_Bilder/standard_example_guided_dream_out.jpg\", quality=90)\n\n\n\n\n\n\n\n\n","sub_path":"GefuehrterTraum.py","file_name":"GefuehrterTraum.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"467255686","text":"# Coffin (AND example) program\n# programmed by Precilla\n# Program date 06/17/2018\n\ncoffinBtn= 17 # 
Pin for our tour guide button\ncoffinSensor = 22 # Pin for our proximity detector\ndoorSwing = 10 # Pin for our door solenoid\ntourReady = 1 # Input from our coffinBtn pin\ndoorClear = 1 # Input from our coffinSensor pin\nPRESSED = 0 # Readability 'button press happened'\n\n# libraries needed for GPIO access and timing\nimport RPi.GPIO as GPIO\nimport time\n\n# setup IO pins\nGPIO.setmode(GPIO.BCM) # Sets the GPIO mapped on the R-Pi\nGPIO.setup(coffinBtn, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n # Set GPIO pin for input from Tour Button\nGPIO.setup(coffinSensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n # Set GPIO pin for input from coffin door sensor\nGPIO.setup(doorSwing, GPIO.OUT) # GPIO pin for the door mech.\n\nwhile True:\n    tourReady = GPIO.input(coffinBtn) # Tour guide's button\n    doorClear = GPIO.input(coffinSensor) # proximity sensor\n    if tourReady == PRESSED and doorClear == PRESSED:\n        # if both are true\n        GPIO.output(doorSwing, GPIO.HIGH) # open the coffin door\n        time.sleep(10) # wait ten seconds\n        GPIO.output(doorSwing, GPIO.LOW) # close the coffin door","sub_path":"RaspberryPi/Boolean.py","file_name":"Boolean.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"632920588","text":"import testutils\nimport px_terminal\n\n\ndef test_to_screen_lines_unbounded():\n    procs = [testutils.create_process(commandline=\"/usr/bin/fluff 1234\")]\n    assert px_terminal.to_screen_lines(procs, None) == [\n        \" PID COMMAND USERNAME CPU RAM COMMANDLINE\",\n        \"47536 fluff root 0.03s 0% /usr/bin/fluff 1234\"\n    ]\n","sub_path":"px/px_terminal_test.py","file_name":"px_terminal_test.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"207489962","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"UiL Django Core\",\n    version=\"2.0.0\",\n    author=\"UiL OTS Labs\",\n    author_email=\"labbeheer.gw@uu.nl\",\n    description=\"A shared code library for UiL OTS Django projects\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/UiL-OTS-labs/django-shared-core\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'django>=2.0',\n        'pyscss',\n        'lesscpy',\n        'closure',\n        'vbuild',\n        'requests',\n        'PyJWT',\n        'djangorestframework',\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"Framework :: Django :: 2.2\",\n        \"License :: Other/Proprietary License\",\n        \"Operating System :: OS Independent\",\n    ],\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"653728415","text":"class TreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.right = None\n        self.left = None\n\nclass BinarySearchTree:\n    def __init__(self):\n        self.root = None\n\n    def insert(self, val):\n        treeNode = TreeNode(val)\n\n        if self.root == None:\n            self.root = treeNode\n            return self\n\n        runner = self.root\n        while True:\n            if runner.value > val:\n                if runner.left == None:\n                    runner.left = treeNode\n                    return self\n                runner = runner.left\n\n            elif runner.value < val:\n                if runner.right == None:\n                    runner.right = treeNode\n                    return self\n                runner = runner.right\n\n            else:\n                # value already present; do not insert duplicates\n                return self\n\n    def lookup(self, val):\n        if self.root == None:\n            return False\n\n        runner = self.root\n\n        while runner:\n            if runner.value < val:\n                runner = runner.right\n\n            elif runner.value > val:\n                runner = 
runner.left\n\n            else:\n                # match found\n                return runner\n        return False\n\n    def remove(self, val):\n        if self.root == None:\n            return False\n\n        runner = self.root\n        parentNode = None\n        while runner:\n            if runner.value > val:\n                parentNode = runner\n                runner = runner.left\n            elif runner.value < val:\n                parentNode = runner\n                runner = runner.right\n            elif runner.value == val:\n                # match found!\n\n                # option1: No right child:\n                if runner.right == None:\n                    if parentNode == None:\n                        self.root = runner.left\n                    else:\n                        # if parent > current value, make current left child a child of parent\n                        if runner.value < parentNode.value:\n                            parentNode.left = runner.left\n\n                        # if parent < current value, make left child a right child of a parent\n                        elif runner.value > parentNode.value:\n                            parentNode.right = runner.left\n\n                # option2: Right child with no left child\n                elif runner.right.left == None:\n                    # attach the removed node's left subtree to its right child,\n                    # then promote the right child into the removed node's place\n                    runner.right.left = runner.left\n                    if parentNode == None:\n                        self.root = runner.right\n                    else:\n                        # if parent > runner, make right child the parent's left child\n                        if runner.value < parentNode.value:\n                            parentNode.left = runner.right\n\n                        # if parent < runner, make right child the parent's right child\n                        elif runner.value > parentNode.value:\n                            parentNode.right = runner.right\n\n                # Option3: right child that has a left child\n                else:\n                    leftmost = runner.right.left\n                    leftmostParent = runner.right\n\n                    # find the right child's leftmost descendant\n                    while leftmost.left != None:\n                        leftmostParent = leftmost\n                        leftmost = leftmost.left\n\n                    # Parent's leftmost subtree is now leftmost's right subtree\n                    leftmostParent.left = leftmost.right\n                    leftmost.left = runner.left\n                    leftmost.right = runner.right\n\n                    if parentNode == None:\n                        self.root = leftmost\n                    else:\n                        if runner.value < parentNode.value:\n                            parentNode.left = leftmost\n                        elif runner.value > parentNode.value:\n                            parentNode.right = leftmost\n\n                return True\n\n        return False\n\n\ntree1 = BinarySearchTree()\ntree1.insert(9)\ntree1.insert(4)\ntree1.insert(6)\ntree1.insert(20)\ntree1.insert(170)\ntree1.insert(15)\ntree1.insert(1)\n\nprint(tree1.lookup(1))\ntree1.remove(4)\n","sub_path":"chapter4/BST(basics).py","file_name":"BST(basics).py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"587733015","text":"def get_weight(u,v,Edges):\r\n    for i,j,w in Edges:\r\n        if i == u and j == v:\r\n            return w\r\n    return float(\"Inf\")\r\n\r\n\r\n
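# A brief illustrative note, not from the original script: get_weight()\r\n# scans the whole edge list on every call, i.e. O(E) per lookup. If many\r\n# lookups were needed, a dictionary keyed by (u, v) would give O(1) access:\r\n#\r\n#     weight = {(i, j): w for i, j, w in Edges}\r\n#     w = weight.get((u, v), float(\"Inf\"))\r\n#\r\n# Dijkstra() below reads weights straight from the adjacency list G, so it\r\n# never calls get_weight().\r\n\r\n\r\n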
def Dijkstra(G, V):\r\n    n = len(G)\r\n    final = set()\r\n    not_final = set(G.keys())\r\n    dist = dict()\r\n    for i in G.keys():\r\n        dist[i] = float(\"Inf\")\r\n    dist[V] = 0\r\n\r\n    while len(final) < n:\r\n        # Getting the unvisited vertex with minimum distance\r\n        u = min(not_final, key = lambda i: dist[i])\r\n        # Making u final\r\n        not_final.remove(u)\r\n        final.add(u)\r\n        # Updating all the non final neighbours of u\r\n        for v,w in G[u]:\r\n            if v not in final:\r\n                dist[v] = min(dist[v], dist[u] + w)\r\n\r\n    for i, w in dist.items():\r\n        print(i, \"=>\", w)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    G = dict()\r\n    for i in range(1, int(input(\"Enter No. of vertices: \"))+1):\r\n        G[i] = []\r\n    for _ in range(int(input(\"Enter No. of Edges: \"))):\r\n        i, j, w = map(int, input().split())\r\n        G[i].append((j, w))\r\n    print(G)\r\n    # G = {1: [(3, 1), (4, 3), (2, 7)], 2: [(1, 7), (5, 3), (4, 8)], 3: [(1, 1), (4, 4)], 4: [(3, 4), (1, 3), (2, 8), (5, 5)], 5: [(2, 3), (4, 5)]}\r\n    Dijkstra(G, int(input(\"Enter Starting Vertex: \")))\r\n\r\n","sub_path":"ADSA/Lab/Lab6/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"54602562","text":"# -*- coding: utf-8 -*-\nimport json\nfrom collections import OrderedDict \n\nfrom flask import Flask, request, send_from_directory, jsonify, send_file\nfrom elasticsearch import Elasticsearch \nfrom gremlin_python import statics \nfrom gremlin_python.structure.graph import Graph \nfrom gremlin_python.process.graph_traversal import __ \nfrom gremlin_python.process.strategies import * \nfrom gremlin_python.driver.driver_remote_connection import DriverRemoteConnection \nfrom gremlin_python.driver import client \n\n\napplication = Flask(__name__, static_folder=\"local\")\n\n# Set special config based on environment \nif 
application.config['ENV'] == 'development':\n application.config.from_object('config.DevelopmentConfig')\nelse:\n application.config.from_object('config.ProductionConfig')\n\nes_host = application.config['ELASTICSEARCH_HOST']\nes = Elasticsearch(es_host, timeout=120, max_retries=10, retry_on_timeout=True)\nindex = 'transactions'\n\njanus_host = application.config['JANUS_HOST']\njanus_server_url = 'ws://%s:8182/gremlin' % janus_host\nstatics.load_statics(globals())\ngraph = Graph()\nconnection = DriverRemoteConnection(janus_server_url, 'g')\ng = graph.traversal().withRemote(connection)\n\n# # Create a low level client for Janus graph specific queries \njanus_client = client.Client(janus_server_url, 'g')\n\n\n@application.route(\"/\")\ndef home():\n return send_file('index.html')\n\n\n@application.route('/dist/<path:path>')\ndef send_static(path):\n return send_from_directory('dist', path)\n\n\n@application.route('/transactions', methods=[\"POST\"])\ndef get_transactions():\n \"\"\" Returns all the transactions of a given entity \"\"\"\n entities = request.get_json()['data']['entities']\n query = {\n 'size': 200,\n 'query': {\n 'bool': {\n 'must': [\n { 'terms': { 'ben_entity_id': entities } },\n { 'terms': { 'don_entity_id': entities } }\n ]\n }\n }\n }\n results = es.search(index=index, body=query)\n transactions = [hit['_source'] for hit in results['hits']['hits']]\n columns = [\n 'date_operation',\n 'valeur_euro',\n 'don_id',\n 'don_entity_id',\n 'don_prenom',\n 'don_nom',\n 'don_date_naissance',\n 'don_telephone',\n 'don_numero_piece_identite',\n 'don_pays',\n 'don_pays_code',\n 'don_code_postal',\n 'ben_id',\n 'ben_entity_id',\n 'ben_prenom',\n 'ben_nom',\n 'ben_date_naissance',\n 'ben_telephone',\n 'ben_numero_piece_identite',\n 'ben_pays',\n 'ben_pays_code',\n 'ben_code_postal',\n ]\n\n rows = []\n for transaction in transactions:\n row = OrderedDict()\n for column in columns:\n row[column] = transaction[column]\n rows.append(row)\n\n return json.dumps(rows)\n\ndef format_properties(vp):\n for k in vp.keys():\n p = vp[k]\n if len(p) >= 1:\n vp[k] = p[0]\n vp['prenom_nom'] = vp['prenomnom']\n del vp['prenomnom']\n return vp\n\n@application.route('/neighbors')\ndef get_neighbors():\n \"\"\" Returns the subgraph containing `node` and its neighbors \n :param node: the node in the center of neighbors\n :returns: the subgraph of node and its neighbors\n \"\"\"\n entity = request.args.get(\"node\")\n if entity:\n # Get all neighbors nodes\n query = \"g.V().has('entity', '%s')\\\n .bothE()\\\n .bothV()\\\n .dedup()\\\n .property('degree', __.both().dedup().count())\\\n .property('in_degree_weighted', __.inE().values('valeur_euro').sum())\\\n .property('out_degree_weighted', __.outE().values('valeur_euro').sum())\\\n .valueMap()\" % entity\n nodes = janus_client.submit(query).next()\n nodes = [format_properties(n) for n in nodes]\n # Get all links between the nodes and its neighbors \n query = \"g.V().has('entity', %s)\\\n .bothE()\\\n .as('source', 'target', 'date_operation', 'valeur_euro')\\\n .select('source', 'target', 'date_operation', 'valeur_euro')\\\n .by(__.outV().values('entity'))\\\n .by(__.inV().values('entity'))\\\n .by('date_operation')\\\n .by('valeur_euro')\" % entity\n links = janus_client.submit(query).next()\n subgraph = {\n \"nodes\": nodes,\n \"links\": links\n }\n return jsonify(subgraph)\n\n\n@application.route('/search')\ndef search():\n \"\"\" Search for a specific name containing pattern \"pattern\" in graph \"dataset\" \n and returns top 10 suggestions \n :param 
search_term: The pattern we are searching \n :param filters: A list of filters \n \"\"\"\n search_term = request.args.get(\"search_term\")\n matches = []\n if search_term:\n query = \"g.V().has('prenomnom', textContainsFuzzy('%s'))\\\n .property('degree', __.both().dedup().count())\\\n .order().by('degree', decr)\\\n .limit(10).valueMap()\" % search_term\n vertices = janus_client.submit(query).all().result() \n # vertices properties are array like {\"entity\": [12568]},\n # format them to {\"entity\": 12568}\n matches = [format_properties(vp) for vp in vertices]\n\n return jsonify(matches)\n\n\nif __name__ == \"__main__\":\n application.run(host='0.0.0.0')","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"255171266","text":"#MAEC Bundle Class\n\n#Copyright (c) 2013, The MITRE Corporation\n#All rights reserved.\n\n#Compatible with MAEC v4.0\n#Last updated 5/13/2013\n\nimport maec\nimport maec.bindings.maec_bundle as bundle_binding\nfrom cybox.objects.process_object import Process\nfrom maec.bundle.action_reference_list import ActionReferenceList\n \nclass ProcessTree(maec.Entity):\n def __init__(self, root_process = None):\n super(ProcessTree, self).__init__()\n self.root_process = root_process\n \n def set_root_process(self, root_process):\n self.root_process = root_process\n \n def to_obj(self):\n process_tree_obj = bundle_binding.ProcessTreeType()\n if self.root_process is not None:\n process_tree_obj.set_Root_Process(self.root_process.to_obj())\n return process_tree_obj\n\n def to_dict(self):\n process_tree_dict = {}\n if self.root_process is not None:\n process_tree_dict['root_process'] = self.root_process.to_dict()\n return process_tree_dict\n\n @staticmethod\n def from_dict(process_tree_dict):\n if not process_tree_dict:\n return None\n process_tree_ = ProcessTree()\n process_tree_.root_process = ProcessTreeNode.from_dict(process_tree_dict.get('root_process'))\n return process_tree_\n\n @staticmethod\n def from_obj(process_tree_obj):\n if not process_tree_obj:\n return None\n process_tree_ = ProcessTree()\n process_tree_.root_process = ProcessTreeNode.from_obj(process_tree_obj.get_Root_Process())\n return process_tree_\n \n \nclass ProcessTreeNode(Process):\n superclass = Process\n\n def __init__(self, id = None, parent_action_idref = None):\n super(ProcessTreeNode, self).__init__()\n self.id = id\n self.parent_action_idref = parent_action_idref\n self.initiated_actions = ActionReferenceList()\n self.spawned_processes = []\n self.injected_processes = []\n\n def add_spawned_process(self, process_node):\n self.spawned_processes.append(process_node)\n \n def add_injected_process(self, process_node):\n self.injected_processes.append(process_node)\n \n def add_initiated_action(self, action_id):\n self.initiated_actions.append(action_id)\n \n def set_id(self, id):\n self.id = id\n \n def set_parent_action(self, parent_action_id):\n self.parent_action_idref = parent_action_id\n \n def to_obj(self):\n process_tree_node_obj = super(ProcessTreeNode, self).to_obj(bundle_binding.ProcessTreeNodeType())\n if self.id is not None : process_tree_node_obj.set_id(self.id)\n if self.parent_action_idref is not None : process_tree_node_obj.set_parent_action_idref(self.parent_action_idref)\n if self.initiated_actions: process_tree_node_obj.set_Initiated_Actions(self.initiated_actions.to_obj())\n if self.spawned_processes: \n for spawned_process in 
self.spawned_processes:\n                process_tree_node_obj.add_Spawned_Process(spawned_process.to_obj())\n        if self.injected_processes: \n            for injected_process in self.injected_processes:\n                process_tree_node_obj.add_Injected_Process(injected_process.to_obj())\n        return process_tree_node_obj\n\n    def to_dict(self):\n        process_tree_node_dict = super(ProcessTreeNode, self).to_dict()\n        if self.id is not None : process_tree_node_dict['id'] = self.id\n        if self.parent_action_idref is not None : process_tree_node_dict['parent_action_idref'] = self.parent_action_idref\n        if self.initiated_actions: process_tree_node_dict['initiated_actions'] = self.initiated_actions.to_list()\n        if self.spawned_processes:\n            spawned_process_list = []\n            for spawned_process in self.spawned_processes:\n                spawned_process_list.append(spawned_process.to_dict())\n            process_tree_node_dict['spawned_processes'] = spawned_process_list\n        if self.injected_processes:\n            injected_process_list = []\n            for injected_process in self.injected_processes:\n                injected_process_list.append(injected_process.to_dict())\n            process_tree_node_dict['injected_processes'] = injected_process_list\n        return process_tree_node_dict\n\n    @staticmethod\n    def from_dict(process_tree_node_dict):\n        if not process_tree_node_dict:\n            return None\n        process_tree_node_ = Process.from_dict(process_tree_node_dict, ProcessTreeNode())\n        process_tree_node_.id = process_tree_node_dict.get('id')\n        process_tree_node_.parent_action_idref = process_tree_node_dict.get('parent_action_idref')\n        process_tree_node_.initiated_actions = ActionReferenceList.from_list(process_tree_node_dict.get('initiated_actions'))\n        process_tree_node_.spawned_processes = [ProcessTreeNode.from_dict(x) for x in process_tree_node_dict.get('spawned_processes', [])]\n        process_tree_node_.injected_processes = [ProcessTreeNode.from_dict(x) for x in process_tree_node_dict.get('injected_processes', [])]\n        return process_tree_node_\n\n    @staticmethod\n    def from_obj(process_tree_node_obj):\n        if not process_tree_node_obj:\n            return None\n        process_tree_node_ = Process.from_obj(process_tree_node_obj, ProcessTreeNode())\n        process_tree_node_.id = process_tree_node_obj.get_id()\n        process_tree_node_.parent_action_idref = process_tree_node_obj.get_parent_action_idref()\n        if process_tree_node_obj.get_Initiated_Actions() is not None:\n            process_tree_node_.initiated_actions = ActionReferenceList.from_obj(process_tree_node_obj.get_Initiated_Actions())\n        process_tree_node_.spawned_processes = [ProcessTreeNode.from_obj(x) for x in process_tree_node_obj.get_Spawned_Process()]\n        process_tree_node_.injected_processes = [ProcessTreeNode.from_obj(x) for x in process_tree_node_obj.get_Injected_Process()]\n        return process_tree_node_\n","sub_path":"maec/bundle/process_tree.py","file_name":"process_tree.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"233588301","text":"from itertools import count\n\nfor x in count(3):\n    if x > 10:\n        break\n    print(x)\n\ns = int(input(\"What is the largest number you want to print? 
:\"))\nfor x in count(0):\n    if x >= s:\n        break\n    print(x)\n\ns = int(input(\"Enter the number of repetitions: \"))\nfrom itertools import cycle\n\nc = 0\nfor el in cycle('Hello, World'):\n    if c >= s:\n        break\n    print(el)\n    c += 1\n","sub_path":"задание 6.py","file_name":"задание 6.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"51333125","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nfrom keras.preprocessing import sequence\nfrom keras.datasets import imdb\nfrom keras import layers, models\nfrom keras.models import Sequential\nfrom keras import layers\nimport os\nimport sys\nimport pickle\nimport numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.utils import to_categorical\nimport random\nfrom keras import optimizers\nfrom keras.layers import SimpleRNN, Dense\nfrom keras.layers import Bidirectional\nimport tensorflow as tf\nfrom numpy import argmax\nimport argparse\n\ndef load_data(dirname):\n\tlistfile=os.listdir(dirname)\n\tX = []\n\tY = []\n\tfor file in listfile:\n\t\tif \"_\" in file:\n\t\t\tcontinue\n\t\twordname=file\n\t\ttextlist=os.listdir(dirname+wordname)\n\t\tfor text in textlist:\n\t\t\tif \"DS_\" in text:\n\t\t\t\tcontinue\n\t\t\ttextname=dirname+wordname+\"/\"+text\n\t\t\tnumbers=[]\n            #print(textname)\n\t\t\twith open(textname, mode = 'r') as t:\n\t\t\t\tnumbers = [float(num) for num in t.read().split()]\n            #print(len(numbers[0]))\n\t\t\t\tfor i in range(len(numbers),12600):\n\t\t\t\t\tnumbers.extend([0.000]) # fix at 300 frames\n            #numbers=np.array(numbers)\n            #print(numbers[0])\n            #numbers=np.array(numbers)\n            #print(numbers)\n\t\t\trow=42*8 # remove the first 8 frames\n\t\t\tlandmark_frame=[]\n\t\t\tfor i in range(0,100): # remove the trailing 142 frames ==> fix at 150 frames in total\n                #print(numbers[row*42:(row*42)+41])\n\t\t\t\tlandmark_frame.extend(numbers[row:row+42])\n\t\t\t\trow += 42\n\t\t\tlandmark_frame=np.array(landmark_frame)\n\t\t\tlandmark_frame=list(landmark_frame.reshape(-1,42)) # convert to 2D (260*42)\n            #print(landmark_frame.shape)\n\t\t\tX.append(np.array(landmark_frame))\n\t\t\tY.append(wordname)\n\tX=np.array(X)\n\tY=np.array(Y)\n\ttmp = [[x,y] for x, y in zip(X,Y)]\n    #random.shuffle(tmp)\n\tX = [n[0] for n in tmp]\n\tY = [n[1] for n in tmp]\n    #print(Y)\n    #print(X.shape)\n    #t = Tokenizer()\n    #t.fit_on_texts(Y)\n    #encoded=t.texts_to_sequences(Y)\n\n\tx_train = X\n    #print(x_train[0])\n\tx_train=np.array(x_train)\n\treturn x_train\n\n\n#prediction\ndef load_label():\n\tlabel = {}\n\tcount = 1\n\tlistfile=['Apple','Bird','Sorry']\n\tfor l in listfile:\n\t\tif \"_\" in l:\n\t\t\tcontinue\n\t\tlabel[l] = count\n\t\tcount += 1\n\treturn label\n\n
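# An added illustrative note, not from the original script: load_data()\n# pads each landmark text file to 12600 values and then keeps 100 frames\n# of 42 coordinates each, so every sample has shape (100, 42). A minimal\n# sanity check (the directory name here is hypothetical):\n#\n#     x = load_data('output/')\n#     print(x.shape)   # expected: (num_samples, 100, 42)\n\n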
def main(input_data_path,output_data_path):\n\tcomp='bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \\\n    mediapipe/examples/desktop/multi_hand_tracking:multi_hand_tracking_cpu'\n    # compile command\n\tcmd='GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/multi_hand_tracking/multi_hand_tracking_cpu \\\n    --calculator_graph_config_file=mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop_live.pbtxt'\n    # store the mediapipe command\n\tlistfile=os.listdir(input_data_path)\n\toutput_dir=\"\"\n\tfor file in listfile:\n\t\tif \".DS_\" in file:\n\t\t\tcontinue\n\t\tword=file+'/'\n\t\tfullfilename=os.listdir(input_data_path+word)\n    # store the names of all videos in the subdirectory\n\t\tif not(os.path.isdir(output_data_path+\"_\"+word)):\n\t\t\tos.mkdir(output_data_path+\"_\"+word)\n\t\tif not(os.path.isdir(output_data_path+word)):\n\t\t\tos.mkdir(output_data_path+word)\n\t\tos.system(comp)\n\t\toutputfilelist=os.listdir(output_data_path+'_'+word)\n\t\tfor mp4list in fullfilename:\n\t\t\tif \".DS_Store\" in mp4list:\n\t\t\t\tcontinue \n\t\t\tinputfilen=' --input_video_path='+input_data_path+word+mp4list\n\t\t\toutputfilen=' --output_video_path='+output_data_path+'_'+word+mp4list\n\t\t\tcmdret=cmd+inputfilen+outputfilen\n\t\t\tos.system(cmdret)\n\n    # mediapipe processing finished:\n\toutput_dir=output_data_path\n\tx_test=load_data(output_dir)\n\tnew_model = tf.keras.models.load_model('simpleRNN.h5')\n\tnew_model.summary()\n\n\tlabels=load_label()\n\n    # use the model\n\n\txhat = x_test\n    #print()\n    #xhat=xhat[55:56]\n\tyhat = new_model.predict(xhat)\n    #print('## yhat ##')\n\n\tpredictions = np.array([np.argmax(pred) for pred in yhat])\n\trev_labels = dict(zip(list(labels.values()), list(labels.keys())))\n\tprint(\"----------result------------\\n\")\n\tfor i in predictions:\n\t\tprint(rev_labels[i])\n\t\tprint('\\n')\n\tprint(\"------------end-------------\\n\")\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='operating Mediapipe')\n\tparser.add_argument(\"--input_data_path\",help=\" \")\n\tparser.add_argument(\"--output_data_path\",help=\" \")\n\targs=parser.parse_args()\n\tinput_data_path=args.input_data_path\n\toutput_data_path=args.output_data_path\n    #print(input_data_path)\n\tmain(input_data_path,output_data_path)\n","sub_path":"util/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"132048341","text":"from collections import OrderedDict\nfrom logging import getLogger\n\nimport requests\nimport toml\nfrom django.conf import settings\nfrom rest_framework.exceptions import MethodNotAllowed, ValidationError, ParseError\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .exceptions import NotImplementedAPIError\nfrom .models import UserAccount\nfrom .permissions import AdapterGlobalPermission\nfrom .throttling import NoThrottling\n\nlogger = getLogger('django')\n\nSTELLAR_WALLET_DOMAIN = 'rehive.com'\n\n\ndef get_federation_details(address):\n    if '*' not in address:\n        raise TypeError('Invalid federation address')\n    user_id, domain = address.split('*')\n    stellar_toml = requests.get('https://' + domain + '/.well-known/stellar.toml')\n    url = toml.loads(stellar_toml.text)['FEDERATION_SERVER']\n    params = {'type': 'name',\n              'q': address}\n    federation = requests.get(url=url, params=params).json()\n    return federation\n\n\ndef address_from_domain(domain, code):\n    logger.info('Fetching address from domain.')\n    stellar_toml = requests.get('https://' + domain + '/.well-known/stellar.toml')\n    currencies = toml.loads(stellar_toml.text)['CURRENCIES']\n\n    for currency in currencies:\n        if currency['code'] == code:\n            logger.info('Address: %s' % (currency['issuer'],))\n            return currency['issuer']\n\n\nclass StellarFederationView(APIView):\n    allowed_methods = ('GET',)\n    throttle_classes = (NoThrottling,)\n    permission_classes = (AdapterGlobalPermission,)\n\n    def post(self, request, *args, **kwargs):\n        raise MethodNotAllowed('POST')\n\n    def get(self, request, *args, **kwargs):\n        if request.query_params.get('type') == 'name':\n            address = request.query_params.get('q')\n            if address:\n                account_id = address\n                operating_receive_address = getattr(settings, 'STELLAR_RECEIVE_ADDRESS')\n                if 
UserAccount.objects.filter(account_id=account_id):\n return Response(OrderedDict([('stellar_address', address),\n ('account_id', operating_receive_address),\n ('memo_type', 'text'),\n ('memo', address.split('*')[0])]))\n else:\n raise ValidationError('Stellar address does not exist.')\n else:\n raise ParseError('Invalid query parameter provided.')\n else:\n raise NotImplementedAPIError()","sub_path":"src/adapter/stellar_federation.py","file_name":"stellar_federation.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"399493639","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport cv2\n\n\ndef read():\n a = cv2.imread('t.jpg', cv2.IMREAD_COLOR)\n b = cv2.imread('t.jpg', cv2.IMREAD_GRAYSCALE)\n c = cv2.imread('t.jpg', cv2.IMREAD_UNCHANGED)\n\n cv2.imshow('a', a)\n cv2.imshow('b', b)\n cv2.imshow('c', c)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef bar():\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n img = cv2.imread('t.jpg', cv2.IMREAD_COLOR)\n cv2.imshow('image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef video():\n cap = cv2.VideoCapture(0)\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Display the resulting frame\n cv2.imshow('frame',gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef bitwise():\n img = cv2.imread('t.jpg')\n cv2.imshow('a', cv2.bitwise_not(img))\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\ndef geom():\n img = cv2.imread('t.jpg')\n rows,cols, _ = img.shape\n M = np.float32([[1,0,100],[0,1,50]])\n dst = cv2.warpAffine(img, M, (cols,rows))\n\n new_img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)\n cv2.imshow('a', dst)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\ndef main():\n geom()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"opencv/tutorials/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506353373","text":"from halo_mass_function import *\nfrom scipy.integrate import quad\n\n\ndef upcrossing(M1, M2, z1, z2, sig8=sigma8, h=h, kmax=30, window='TopHat', prec=1000, om0=om,\n ol0=oml, omb=omb, camb=False):\n \"\"\"\n Upcrossing rate between halos at masses M1 to M2 at redshifts from z1 to z2 with spherical collapse\n :param M1: float Initial mass\n :param M2: float Final mass\n :param z1: float initial redshift\n :param z2: float final redshift\n :param sig8: float sigma_8 cosmological parameter\n :param h: float H0/100 parameter\n :param kmax: int or float maximum wavenumber up to which to integrate the power spectrum\n :param window: str window function to choose\n :param prec: int how many power spectrum bins used for integration to calculate sigma(R)\n :param om0: float matter density parameter today\n :param ol0: float dark energy density parameter today\n :param omb: float baryon density today\n :param camb: bool whether to use the camb power spectrum\n :return: float conditional probability of going from M1,z1 to M2, s2\n \"\"\"\n w1 = delta_c(z1, om0, ol0) # critical overdensity at z1\n w2 = delta_c(z2, om0, ol0)\n S1 = sigma(M1, sig8, h, kmax, window, 'M', prec, om0, ol0, omb, camb) ** 2\n S2 = sigma(M2, sig8, h, kmax, window, 'M', prec, om0, ol0, omb, camb) 
** 2\n    dw = w1 - w2\n    dS = S1 - S2\n    return np.exp(-dw ** 2 / (2 * dS)) * dw / np.sqrt(2 * np.pi * dS ** 3)  # follows eq. 2.15 of lacey&cole93\n\n\ndef K(ds, dw, model='sheth', A=0.27, a=0.707, p=0.3):\n    \"\"\"\n    Conditional probability function form, for either the spherical or ellipsoidal collapse model\n    :param ds: float difference sigma(M1) - sigma(M2)\n    :param dw: float delta_c(z1) - delta_c(z2)\n    :param model: str if 'sheth' uses sheth&tormen function otherwise uses lacey&cole one\n    :param A: float sheth&tormen parameter\n    :param a: float sheth&tormen parameter\n    :param p: float sheth&tormen parameter\n    :return: float conditional probability\n    \"\"\"\n    if model == 'sheth':\n        ndw = np.sqrt(a) * dw\n        return A * (1 + (ds / ndw ** 2) ** p) * ndw * np.exp(-ndw ** 2 / (2 * ds)) / np.sqrt(2 * np.pi * ds ** 3)\n    else:\n        return dw * np.exp(-dw ** 2 / (2 * ds)) / np.sqrt(2 * np.pi * ds ** 3)\n\n\ndef mu(St, a):\n    return (1 + (2 ** a - 1) * St) ** (1 / a)\n\n\ndef f_ec(S1, S0, w1, w0):\n    \"\"\"\n    Ellipsoidal collapse multiplicity function.\n    :param S1: float sigma(M1) where M1 is the initial mass\n    :param S0: float sigma(M0) where M0 is the descendant mass\n    :param w1: float delta_c(z1) where z1 is the initial redshift\n    :param w0: float delta_c(z0) where z0 is the final redshift\n    :return: value of the EC multiplicity function\n    \"\"\"\n    dw = w1 - w0\n    dS = S1 - S0\n    nu0 = w0 ** 2 / S0\n    A0 = 0.8661 * (1 - 0.133 * nu0 ** (-0.615))\n    A1 = 0.308 * nu0 ** (-0.115)\n    A2 = 0.0373 * nu0 ** (-0.115)\n    Sbar = dS / S0\n    A3 = A0 ** 2 + 2 * A0 * A1 * np.sqrt(dS * Sbar) / dw\n    return A0 * (2 * np.pi) ** (-0.5) * dw * dS ** (-1.5) * \\\n           np.exp(-0.5 * A1 ** 2 * Sbar) * (np.exp(-0.5 * A3 * dw ** 2 / dS) + A2 * Sbar ** 1.5 * (\n            1 + 2 * A1 * np.sqrt(Sbar / np.pi)))  # equation 5 of Zhang et al. (2008)\n\n\ndef f_sc(S1, S0, w1, w0):\n    \"\"\"\n    Spherical collapse multiplicity function.\n    :param S1: float sigma(M1) where M1 is the initial mass\n    :param S0: float sigma(M0) where M0 is the descendant mass\n    :param w1: float delta_c(z1) where z1 is the initial redshift\n    :param w0: float delta_c(z0) where z0 is the final redshift\n    :return: value of the SC multiplicity function\n    \"\"\"\n    dw = w1 - w0\n    ds = S1 - S0\n    return (2 * np.pi) ** -0.5 * dw * ds ** (-1.5) * np.exp(-0.5 * dw ** 2 / ds)  # lacey&cole multiplicity function\n\n\ndef proba(M, zf, frac=0.5, acc=1000, zi=0.0, sig8=sigma8, h=h, kmax=30, window='TopHat', prec=300, om0=om,\n          ol0=oml, omb=omb, camb=False, model='EC', colos=False):\n    \"\"\"\n    Probability that a halo of mass M at redshift zi has had a fraction frac of its mass at z=zf.\n    :param M: float. mass of the halo considered\n    :param zf: float or ndarray. formation redshift(s) at which we want to calculate the probability\n    :param frac: float between 0 and 1. Fraction of mass to define formation redshift. Default :0.5\n    :param acc: int. Number of redshift steps. Default : 1000.\n    :param zi: float. Observed redshift of considered halo. Default :0.\n    :param sig8: float : sigma 8 cosmo parameter\n    :param h: float : H0/100 cosmo parameter\n    :param kmax: float or int : maximum wavenumber for CAMB power spectrum.\n    :param window: str : type of smoothing window function. 
either \"TopHat\", \"Gauss\" or k-Sharp'.\n :param prec: int : number of bins for integral calculations.\n :param om0: float : fraction matter density\n :param ol0: float : fraction dark energy density\n :param omb: float : fraction baryon density\n :param camb: boolean : if using camb spectrum or analytical version of Eisenstein and Hu.\n :param model: if Press&Schechter mass function \"press\" or ellipsoidal collapse \"EC\"\n :param colos: :param Colos : boolan : using Colossus halo mass function or not\n :return: Probability density function of redshift at which halos had x fraction of their mass\n \"\"\"\n S0 = sigma(M, sig8, h, kmax, window, 'M', prec, om0, ol0, omb, camb, colos) ** 2 # variance of the field at mass M\n w0 = delta_c(zi, om0, ol0) # critical density at observed redshift\n if type(zf) == np.ndarray: # for probability distribution. This is to have a parallel version with no for loops\n mass = np.logspace(np.log10(M * frac), np.log10(M), acc) # size (0, acc) masses to calculate the integral\n l = len(zf) # number of steps in PDF\n mat_zf = np.array([zf] * acc) # (acc, l)\n mat_mass = np.array([mass] * l).transpose() # duplicating mass array to vectoralize calculations (acc, l)\n mat_wf = delta_c(mat_zf, om0, ol0) - w0 # (acc, l)\n mat_S = sigma(mat_mass, sig8, h, kmax, window, 'M', prec, om0, ol0, omb, camb, colos) ** 2 - S0 # variance difference of all masses (acc, l)\n mat_S[-1, :] = 1e-10 # nonzero value to avoid numerical effects\n mat_nu = mat_wf / np.sqrt(mat_S) # (acc, l) peak height\n if model == 'EC': # Ellipsoidal collapse probability density function\n mat_f = f_ec(mat_S[:, :] + S0, S0, mat_wf[:, :] + w0, w0) # Computing the multiplicity function at each mass\n mat_ds = 0.5 * (mat_S[2:, :] - mat_S[:-2, :]) # differential to use to integrate over\n return -M * np.sum(mat_ds * mat_f[1:-1, :] / mat_mass[1:-1, :], axis=0)\n else:\n mat_f = fps(mat_nu[:-1, :]) / mat_nu[:-1, :] # (acc-1, l) # Press & Schechter multiplicity function\n mat_dnu = (mat_nu[2:-1, :] - mat_nu[:-3, :]) * 0.5 # (acc-3, l) # differential to integrate with\n return M * np.sum(mat_dnu * mat_f[1:-1, :] / mat_mass[1:-2, :], axis=0) # (acc-3, l)\n else: # case of only one value of redshift to get the probability distribution of.\n mass = np.logspace(np.log10(M * frac), np.log10(M), acc)\n wf = delta_c(zf, om0, ol0) - w0\n S = sigma(mass, sig8, h, kmax, window, 'M', prec, om0, ol0, omb, camb, colos) ** 2 - S0\n S[-1] = 1e-10\n nu = wf / np.sqrt(S)\n if model == 'EC':\n f = f_ec(S + S0, S0, wf + w0, w0)\n ds = 0.5 * (S[2:] - S[:-2])\n return -M * np.sum(ds * f[1:-1] / mass[1:-1])\n else:\n f = fps(nu) / nu\n dnu = (nu[2:] - nu[:-2]) * 0.5\n return M * np.sum(dnu * f[1:-1] / mass[1:-1])\n\n\ndef M_integ_proba(masses, weights=None, zf=np.linspace(0, 7, 20), frac=0.5, acc=1000, zi=0.0, sig8=sigma8, h=h,\n kmax=30, window='TopHat', prec=1000, om0=om, diff=False,\n ol0=oml, omb=omb, camb=False, model='EC', colos=False):\n \"\"\"\n Mass weighted cummulative probability of zf\n :param masses: list or np.array masses of halos to get the average zf\n :param weights: list, array or None weights of the masses\n :param zf: float or array redshifts where to give probability\n See proba() function for the rest of the parameters\n :return:\n \"\"\"\n res = []\n if not (type(weights) == np.ndarray or type(weights) == list):\n for mass in masses:\n if diff:\n prob = proba(mass, zf, frac, acc, zi, sig8, h, kmax, window, prec, om0, ol0, omb, camb,\n model, colos)\n dz = zf[2:] - zf[:-2]\n 
res.append((prob[2:]-prob[:-2])/dz)\n else:\n res.append(proba(mass, zf, frac, acc, zi, sig8, h, kmax, window, prec, om0, ol0, omb, camb,\n model, colos))\n ares = np.array(res)\n return np.sum(ares, axis=0) / len(masses)\n else:\n for i in range(len(masses)):\n mass = masses[i]\n w = weights[i] / np.sum(weights)\n if diff:\n prob = proba(mass, zf, frac, acc, zi, sig8, h, kmax, window, prec, om0, ol0, omb, camb,\n model, colos)\n dz = zf[2:] - zf[:-2]\n res.append(-(prob[2:]-prob[:-2])*w/dz)\n else:\n res.append(proba(mass, zf, frac, acc, zi, sig8, h, kmax, window, prec, om0, ol0, omb, camb,\n model, colos) * w)\n ares = np.array(res)\n return np.sum(ares, axis=0)\n\n\n\ndef median_formation(M, z, frac=0.5, acc=100, nzeds = 10000, sig8=sigma8, h=h, kmax=30, window='TopHat', prec=1000,\n om0=om, ol0=oml, omb=omb, camb=False, colos=True, outc=False):\n \"\"\"\n Calculates the median formation redshift of halos of mass M at redshift z, and gets the concentration if needed\n :param M: float M\n :param z: float redshift\n :param outc: bool if True outputs concentration parameter estimation\n :return: float : z50 or c(z50)\n \"\"\"\n if type(M) == list or type(M) == np.ndarray:\n raise TypeError(\"M should not be an array\")\n zs = np.linspace(z + 0.1, 6 + z, nzeds)\n res = []\n for red in zs:\n res.append(proba(M, red, frac, acc, z, sig8, h, kmax, window, prec, om0, ol0, omb, camb, 'EC', colos))\n res = np.array(res)\n zf = np.max(zs[res > 0.5])\n if outc:\n return 0.7 + 0.77 * np.log10(zf)\n else:\n return zf\n\n\ndef average_formation(M, z, frac=0.5, acc=100, sig8=sigma8, h=h, kmax=30, window='TopHat', prec=1000,\n om0=om, ol0=oml, omb=omb, camb=False, colos=True, outc=False):\n # Gets the average z50 of a population of halos at mass M and redshift z\n if type(M) == list or type(M) == np.ndarray:\n raise TypeError(\"M should not be an array\")\n\n zs = np.linspace(z + 1.2 * sig8 / (2.7 + 0.2 * np.log10(M) + 0.1 * om0), z + 8, acc)\n res = []\n for red in zs:\n res.append(proba(M, red, frac, acc, z, sig8, h, kmax, window, prec, om0, ol0, omb, camb, 'EC', colos))\n res = np.array(res)\n dens = (res[2:] - res[:-2]) / (zs[2:] - zs[:-2])\n dz = zs[1] - zs[0]\n lower = -dz * np.sum(zs[1:-1] * dens)\n deltap = (zs[0] - z) * dens[0]\n upper = lower - deltap\n if outc:\n return 0.7 + 0.77 * np.log10(lower)\n else:\n return [lower, upper]\n\n\ndef peak_formation(M, z, frac=0.5, acc=100, sig8=sigma8, h=h, kmax=30, window='TopHat', prec=1000,\n om0=om, ol0=oml, omb=omb, camb=False, colos=True, outc=False):\n # Gets the redshift at which the z50 probability distribution peaks\n\n if type(M) == list or type(M) == np.ndarray:\n raise TypeError(\"M should not be an array\")\n\n zs = np.linspace(z + 0.1, z + 6, acc)\n res = []\n for red in zs:\n res.append(proba(M, red, frac, acc, z, sig8, h, kmax, window, prec, om0, ol0, omb, camb, 'EC', colos))\n res = np.array(res)\n dens = -(res[2:] - res[:-2]) / (zs[2:] - zs[:-2])\n zf = zs[np.argmax(dens)]\n if outc:\n return 0.7 + 0.77 * np.log10(zf)\n else:\n return zf\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n zi = np.linspace(0, 1, 20)\n res1 = []\n res2 = []\n res3 = []\n for el in zi:\n res1.append(peak_formation(M=4e13, z=el, colos=True))\n res2.append(median_formation(M=4e13, z=el, colos=True))\n res3.append(average_formation(M=4e13, z=el, colos=True))\n plt.plot(zi, res1, label='Maximum probability')\n plt.plot(zi, res2, label='Mean')\n plt.plot(zi, res3, label='Average')\n plt.legend()\n plt.xlabel('$z_{0}$', size=15)\n 
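# the three curves compare the peak, median, and mean formation-redshift estimators defined above\n    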
plt.ylabel('$z_{formation}$', size=15)\n    plt.show()\n\n\n\n    # zs = np.linspace(0.1, 2, 50)\n    # masses = [1e8, 1e11, 1e14]\n    # for ms in masses:\n    #     res = proba(ms, zf=zs, acc=400, prec=400, colos=False, model=\"EC\")\n    #     res2 = proba(ms, zf=zs, acc=400, prec=400, colos=True, model=\"EC\")\n    #     #dpdw = (res[2:] - res[:-2])/(zs[2:] - zs[:-2])\n    #     #plt.plot(zs[1:-1], -dpdw, label='log M='+str(np.log10(ms)), linewidth=2.5)\n    #     plt.plot(zs, res, label='log M='+str(np.log10(ms)))\n    #     plt.plot(zs, res2, label='Colossus')\n    # plt.legend(fontsize='large', fancybox=True)\n    # plt.xlabel('z', size=25)\n    # plt.ylabel('$P(z_f>z)$', size=20)\n    # #plt.ylabel('$dP/dz$', size=20)\n    # plt.xticks(size=18)\n    # plt.yticks(size=18)\n    # plt.show()\n\n    # M = np.logspace(8, 14, 100)\n    # zfs = np.linspace(0.05, 2, 50)\n    # plt.plot(M, median_formation(1e13, z=0.1, colos=False))\n    #plt.plot(zfs, proba(1e13, zfs, prec=500, acc=400))\n    #print(proba(1e13, 0.3))","sub_path":"formation_time.py","file_name":"formation_time.py","file_ext":"py","file_size_in_byte":13626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"206075553","text":"import pickle\nimport os\nfrom absl import flags\nfrom tools.get_data import *\nfrom tools.statistics import *\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nflags = tf.app.flags\nFLAGS = tf.app.flags.FLAGS\nparams = FLAGS.flag_values_dict()\n\n# restore ckpt from i th model in model_dir and calculate the values of keys\ndef fetch(input_fn, model_fn, model_dir, keys, i, checkpoint_step=None):\n\n    model_dir = model_dir + str(i)\n    print('Evaluating eval samples for %s' % model_dir)\n    if checkpoint_step is None:\n        checkpoint_path = tf.train.latest_checkpoint(model_dir)\n        assert checkpoint_path is not None\n    else:\n        checkpoint_path = os.path.join(model_dir, 'model.ckpt-%d' % checkpoint_step)\n\n    estimator = tf.estimator.Estimator(\n        model_fn,\n        params=params)\n\n    batch_results_ = list(estimator.predict(\n        input_fn,\n        predict_keys=keys,\n        checkpoint_path=checkpoint_path,\n        yield_single_examples=False))\n\n    tuples = []\n\n    for key in keys:\n        if batch_results_[0][key].shape[-1] in [1, 3, 30]: #when fetching adversarial examples\n            each_key = np.concatenate([b[key] for b in batch_results_], axis=0)\n        else:\n            if key in ['approx_posterior_mean', 'approx_posterior_stddev']:\n                each_key = np.concatenate([b[key] for b in batch_results_], axis=0)\n            else:\n                each_key = np.concatenate([b[key].T for b in batch_results_], axis=0)\n                each_key = np.mean(each_key, axis=1)\n\n        tuples.append(each_key)\n    return tuples\n\n\n# Plot Rate-Distortion Curve for true data (good model minimizes them both)\ndef plot_rd(eval_input_fn, model_fn, model_dir):\n    keys = ['rate', 'distortion']\n    results = fetch(eval_input_fn, model_fn, model_dir, keys, 0)\n    plt.scatter(results[0], results[1])\n    plt.xlabel('Rate')\n    plt.ylabel('Distortion')\n\n    plt.show()\n\n\n# plot Ensemble ELBO Mean vs Ensemble ELBO Variance\ndef plot_ensemble_stats(eval_input_fn, model_fn, model_dir):\n    M = 5\n    keys = ['elbo']\n    f, axes = plt.subplots(1, len(keys), figsize=(15, 5))\n    for i,key in enumerate(keys):\n        ensemble = []\n\n        for j in range(M):\n            results = fetch(eval_input_fn, model_fn, model_dir, keys, j)\n            ensemble.append(results)\n\n        mean = np.mean(ensemble, axis=0)\n        var = np.var(ensemble, axis=0)\n        axes[i].scatter(mean, var)\n        axes[i].set_xlabel('Ensemble %s mean' % key)\n        axes[i].set_ylabel('Ensemble %s variance' % key)\n\n\n# adversarially perturb a normal/uniform noise and get the 
adversarial example from base_model\n# and calculate the values of keys for the adversarial example using apply_model\ndef adversarial_fetch(eval_input_fn, batch_size, model_fn, model_dir, keys, base_model, apply_model, checkpoint_step=None):\n adv_keys = ['adversarial_normal_noise', 'adversarial_uniform_noise']\n\n fetched = fetch(eval_input_fn, model_fn, model_dir, adv_keys, base_model, checkpoint_step)\n adversarial_normal_noise_results = fetched[0]\n adversarial_uniform_noise_results = fetched[1]\n adv_normal_eval_dataset = tf.data.Dataset.from_tensor_slices((adversarial_normal_noise_results))\n adv_normal_eval_dataset = adv_normal_eval_dataset.batch(batch_size)\n\n adv_uniform_eval_dataset = tf.data.Dataset.from_tensor_slices((adversarial_uniform_noise_results))\n adv_uniform_eval_dataset = adv_uniform_eval_dataset.batch(batch_size)\n\n adv_normal_eval_input_fn = lambda: adv_normal_eval_dataset.make_one_shot_iterator().get_next()\n adv_uniform_eval_input_fn = lambda: adv_uniform_eval_dataset.make_one_shot_iterator().get_next()\n\n adversarial_normal_noise_results = fetch(adv_normal_eval_input_fn, model_fn, model_dir, keys, apply_model, checkpoint_step)\n adversarial_uniform_noise_results = fetch(adv_uniform_eval_input_fn, model_fn, model_dir, keys, apply_model, checkpoint_step)\n\n return adversarial_normal_noise_results, adversarial_uniform_noise_results\n\n\n# adversarially perturb a normal/uniform noise and get the adversarial example from base_model\n# and use that adversarial example to calculate the values of keys for all 5 models.\ndef adversarial_ensemble_fetch(base, batch_size, model_fn, model_dir, keys, base_model, each_size=1000):\n eval_input_fn = get_eval_dataset(base, batch_size, each_size=each_size)\n\n # collect ensemble elbo for adversarial noise input\n adversarial_normal_noise_ensemble = []\n adversarial_uniform_noise_ensemble = []\n M = 5\n for i in range(M):\n adversarial_normal_noise_results, adversarial_uniform_noise_results = adversarial_fetch(eval_input_fn,\n batch_size, model_fn, model_dir, keys, base_model, i)\n\n adversarial_normal_noise_ensemble.append(adversarial_normal_noise_results)\n adversarial_uniform_noise_ensemble.append(adversarial_uniform_noise_results)\n\n return adversarial_normal_noise_ensemble, adversarial_uniform_noise_ensemble\n\n# plot elbo for each dataset and also plot single elbo vs. 
ensemble variance\ndef ensemble_analysis(datasets, expand_last_dim, noised_list, noise_type_list, batch_size,\n                      model_fn, model_dir, show_adv, adv_base, feature_shape=(28,28), each_size=1000):\n    from tools.statistics import analysis_helper\n    M = 5\n    f, axes = plt.subplots(1, 2, figsize=(12, 5))\n\n    keys = ['elbo'] # or rate\n    ensemble_elbos = []\n    for i in range(M):\n        single_results, datasets_names = analysis_helper(datasets, expand_last_dim, noised_list, noise_type_list, None,\n                                             model_fn,model_dir, i, i, keys, feature_shape, each_size)\n        single_elbo = single_results[0]\n        ensemble_elbos.append(single_elbo)\n\n    ensemble_elbos = np.array(ensemble_elbos)\n    ensemble_var = np.var(ensemble_elbos, axis=0)\n\n    # histogram of elbo of the last model on different datasets\n    if each_size==1000:\n        bin_range = (-2000, 1000)\n        bin_dict = {'single_elbo':(-2000, 1000,300), 'ensemble_elbo_mean':(-2000, 1000,300)}\n    else:\n        bin_range = (-100, 0)\n        bin_dict = {'single_elbo': (-100, 0, 300), 'ensemble_elbo_mean': (-100, 0, 300)}\n    bins = 300\n    for i in range(len(datasets)):\n        label = datasets_names[i]\n        axes[0].hist(single_elbo[each_size*i:each_size*(i+1)], label=label, alpha=0.5, bins=bins, range=bin_range)\n\n\n    # scatter plot of single elbo vs ensemble variance on each dataset\n    for i in range(len(datasets)):\n        label = datasets_names[i]\n        axes[1].scatter(single_elbo[each_size*i:each_size*(i+1)], ensemble_var[each_size*i:each_size*(i+1)], label=label, alpha=0.3)\n\n    if show_adv is not None:\n        adversarial_normal_noise_ensemble, adversarial_uniform_noise_ensemble = adversarial_ensemble_fetch(datasets[0],\n                                    batch_size, model_fn, model_dir, keys, adv_base, each_size=each_size)\n        # get ensemble statistics on adversarial noise\n        adversarial_normal_noise_ensemble = np.array(adversarial_normal_noise_ensemble)[:,0,:]\n        adversarial_uniform_noise_ensemble = np.array(adversarial_uniform_noise_ensemble)[:,0,:]\n\n        # elbo of the last model\n        single_adv_normal_elbo = adversarial_normal_noise_ensemble[-1]\n        single_adv_uniform_elbo = adversarial_uniform_noise_ensemble[-1]\n\n        # histogram of elbo of the last model on adversarial noise\n        axes[0].hist(single_adv_normal_elbo, label='adversarial normal noise', alpha=0.5, bins=100,\n                     range=bin_range)\n        axes[0].hist(single_adv_uniform_elbo, label='adversarial uniform noise', alpha=0.5, bins=100,\n                     range=bin_range)\n\n        # get ensemble var of elbos for adversarial examples\n        adv_normal_ensemble_var = np.var(adversarial_normal_noise_ensemble, axis=0)\n        adv_uniform_ensemble_var = np.var(adversarial_uniform_noise_ensemble, axis=0)\n\n        # scatter plot of single elbo vs ensemble variance on adversarial noise\n        axes[1].scatter(single_adv_normal_elbo, adv_normal_ensemble_var, label='adversarial normal noise', alpha=0.1)\n        axes[1].scatter(single_adv_uniform_elbo, adv_uniform_ensemble_var, label='adversarial uniform noise', alpha=0.1)\n\n        # add single elbo/ensemble statistics of adversarial examples for score analysis\n        single_elbo = np.concatenate([single_elbo, single_adv_normal_elbo, single_adv_uniform_elbo], axis=0)\n        ensemble_elbos = np.concatenate([ensemble_elbos, adversarial_normal_noise_ensemble,\n                                    adversarial_uniform_noise_ensemble], axis=1)\n\n        datasets_names += ['adv_normal_noise', 'adv_uniform_noise']\n\n    ensemble_mean = np.mean(ensemble_elbos, axis=0)\n    ensemble_var = np.var(ensemble_elbos, axis=0)\n    WAIC = ensemble_mean - ensemble_var\n    ensemble_keys = ['single_elbo', 'ensemble_elbo_mean', 'ensemble_elbo_var', 'WAIC']\n    ensemble_results = [single_elbo, ensemble_mean, ensemble_var, 
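\n                        # WAIC-style score computed above: ensemble mean ELBO penalized by ensemble variance\n                        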
WAIC]\n    if keys[0] == 'rate':\n        ensemble_keys = ['single_rate']\n        ensemble_results = [single_elbo]\n\n\n    from tools.statistics import plot_analysis\n    plot_analysis(ensemble_results, datasets_names, ensemble_keys, bins=bin_dict, each_size=each_size)\n\n    # adjust range\n    axes[0].set_xlabel('single ELBO of each dataset')\n    axes[0].set_ylabel('frequency')\n    axes[1].set_xlabel('ELBO of single model')\n    axes[1].set_ylabel('ensemble variance')\n    if each_size==1000:\n        top = 40000\n    else:\n        top = 500\n    axes[1].set_xlim(bin_range)\n    axes[1].set_ylim(bottom=0, top=top)\n    axes[1].legend(loc='center left', bbox_to_anchor=(1, 0.5))\n    plt.show()\n    f.savefig(os.path.join(FLAGS.model_dir,\"elbo.eps\"), bbox_inches=\"tight\", format='eps', dpi=1000)\n\ndef history_compare_elbo(datasets, expand_last_dim, noised_list, noise_type_list, batch_size,\n                      model_fn, model_dir, show_adv, adv_base, feature_shape=(28,28), each_size=1000):\n    \"\"\"Compute plt.scatter(elbo, auroc) for each checkpoint \"\"\"\n    from tools.statistics import analysis_helper\n    from tools.statistics import get_scores\n    M = 5\n    f, axes = plt.subplots(1, 2, figsize=(10, 5))\n    step_arr = np.arange(0, FLAGS.max_steps, FLAGS.viz_steps)\n\n    keys = ['elbo', 'approx_posterior_mean', 'approx_posterior_stddev']\n    full_results = {} \n    # tmp\n    # step_arr = [5000]\n    for step in step_arr:\n        print('step: %d' % step)\n\n        ensemble_elbos = []\n        ensemble_posterior_means = []\n        ensemble_posterior_vars = []\n\n        for i in range(M):\n            single_results, datasets_names = analysis_helper(datasets, expand_last_dim, noised_list, noise_type_list, None, model_fn,model_dir, i, i, keys, feature_shape, each_size, step)\n            single_elbo = single_results[0]\n            single_posterior_mean = single_results[1]\n            single_posterior_var = single_results[2]\n            ensemble_elbos.append(single_elbo)\n            ensemble_posterior_means.append(single_posterior_mean)\n            ensemble_posterior_vars.append(single_posterior_var)\n        ensemble_elbos = np.array(ensemble_elbos)\n        #ensemble_posterior_means = np.array(ensemble_posterior_means)\n        #ensemble_posterior_vars = np.array(ensemble_posterior_vars)\n        # analyze statistics\n        ensemble_var = np.var(ensemble_elbos, axis=0)\n        ensemble_mean = np.mean(ensemble_elbos, axis=0)\n        # Perform classification based on ensemble var, as a function of ensemble mean scores.\n        results = get_scores(ensemble_mean, ensemble_var, datasets_names, each_size, False)\n        full_results[step] = results\n    \n    with open(os.path.join(FLAGS.model_dir, 'train_history_scores.pkl'), 'wb') as f:\n        pickle.dump(full_results, f)\n","sub_path":"vae_wgan/tools/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"185990233","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.utils import timezone\n\nfrom wagtail.core.models import Site\n\nfrom dateutil.relativedelta import relativedelta\n\n\ndef path_without_leading_trailing_slashes(path):\n    return path.lstrip(\"/\").rstrip(\"/\")\n\n\nclass Command(BaseCommand):\n    help = \"Mark archivable pages within a filterable list as archived\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"url_path\",\n            nargs=\"+\",\n            type=path_without_leading_trailing_slashes,\n            help=(\n                \"The URL path (without leading /) of a filterable list page \"\n                \"whose content should be archived if it was published the \"\n                
\"specified number of years, months, and days ago. \"\n \"The filerable list page at the URL will not be archived.\"\n ),\n )\n parser.add_argument(\n \"--years\",\n type=int,\n default=2,\n help=(\n \"Archive content published this number of years ago. \"\n \"Combines with --months --days. Default: 2\"\n ),\n )\n parser.add_argument(\n \"--months\",\n type=int,\n default=0,\n help=(\n \"Archive content published this number of months ago. \"\n \"Combines with --years --days. Default: 0\"\n ),\n )\n parser.add_argument(\n \"--days\",\n type=int,\n default=0,\n help=(\n \"Archive content published this number of days ago. \"\n \"Combines with --years --months. Default: 0\"\n ),\n )\n parser.add_argument(\n \"--by-published-date\",\n choices=[\"first\", \"last\"],\n default=\"first\",\n help=(\n \"Archive based on either first or last published date. \"\n \"Default: first\"\n ),\n )\n\n def handle(self, *args, **options):\n url_paths = options[\"url_path\"]\n\n # Get the current date/time and then get our cutoff date for archiving\n # based on it.\n archived_at = timezone.now()\n cutoff_date = archived_at - relativedelta(\n years=options[\"years\"],\n months=options[\"months\"],\n days=options[\"days\"],\n )\n\n # Construct a Q object to filter on based on this command-line\n # argument.\n if options[\"by_published_date\"] == \"last\":\n published_date_filter = Q(last_published_at__lt=cutoff_date)\n else:\n published_date_filter = Q(first_published_at__lt=cutoff_date)\n\n # We'll use Wagtail's page routing to resolve the page at the given the\n # URL paths.\n default_site = Site.objects.get(is_default_site=True)\n root_page = default_site.root_page\n\n for path in url_paths:\n path_components = path.split(\"/\")\n\n # Get the filterable list page we're interested in.\n try:\n filterable_page = root_page.route(None, path_components).page\n except Http404:\n raise CommandError(\n f\"Unable to find a page at {path}. \"\n \"Ensure that the path is correct, and leave off any \"\n \"leading or trailing / characters.\"\n )\n\n # Get the filterable list QuerySet and filter it.\n filtered_pages = filterable_page.get_filterable_search().search(\n ).filter(\n published_date_filter,\n is_archived=\"no\",\n )\n\n # Archive the content, letting the user know the title of the\n # filterable list page, the cuttoff date and how many pages will be\n # archived.\n with transaction.atomic():\n update_count = filtered_pages.select_for_update().update(\n is_archived=\"yes\",\n archived_at=archived_at\n )\n self.stdout.write(\n f\"Found and archived {update_count} pages within \"\n f\"{filterable_page.title} older than \"\n f\"{cutoff_date:%Y-%m-%d %H:%M %Z}. 
\"\n )\n","sub_path":"cfgov/v1/management/commands/archive_pages.py","file_name":"archive_pages.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"484282352","text":"import glob\nimport random\nimport os\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport os\nimport os.path as osp\nimport sys\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\nclass ImageDataset(Dataset):\n def __init__(self, root = \"/media/arg_ws3/5E703E3A703E18EB/data/subt_real_ssd/\", transform_=None, mode='train'):\n self.transform = transforms.Compose(transform_)\n self.root = root\n self.files = []\n for line in open(osp.join(self.root, 'ImageSets/Main', mode + '.txt')):\n self.files.append(line.strip().split(' ')[0])\n #self.files = sorted(glob.glob(os.path.join(root, mode) + '/*.*'))\n #if mode == 'train':\n # self.files.extend(sorted(glob.glob(os.path.join(root, 'test') + '/*.*')))\n\n def __getitem__(self, index):\n file = self.files[index % len(self.files)]\n path = self.root + 'JPEGImages/' + file + '.png'\n img = Image.open(path)\n bbxs = self.get_bbx(file)\n '''target = []\n for i in bbxs[0]:\n target.append(torch.DoubleTensor([i]))'''\n img = np.array(img)/10000.\n #print(np.min(img), np.max(img))\n img = Image.fromarray(img)\n #img = Image.fromarray(img).convert('RGB')\n\n #if np.random.random() < 0.5:\n # img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], 'RGB')\n # img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], 'RGB')\n\n img = self.transform(img)\n return img, bbxs[0]\n\n def get_bbx(self, file, width=640., height=480.):\n path = self.root + 'Annotations/' + file + '.xml'\n target = ET.parse(path).getroot()\n res = []\n for obj in target.iter('object'):\n #difficult = int(obj.find('difficult').text) == 1\n #if not self.keep_difficult and difficult:\n # continue\n name = obj.find('name').text.lower().strip()\n if name != 'bb_extinguisher':\n continue\n bbox = obj.find('bndbox')\n if bbox is not None:\n pts = ['xmin', 'ymin', 'xmax', 'ymax']\n bndbox = []\n for i, pt in enumerate(pts):\n cur_pt = int(bbox.find(pt).text) - 1\n # scale height or width\n cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height\n bndbox.append(cur_pt)\n #label_idx = self.class_to_ind[name]\n bndbox.append(1)\n res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]\n else: # For LabelMe tool\n polygons = obj.find('polygon')\n x = []\n y = []\n bndbox = []\n for polygon in polygons.iter('pt'):\n # scale height or width\n x.append(int(polygon.find('x').text) / width)\n y.append(int(polygon.find('y').text) / height)\n bndbox.append(min(x))\n bndbox.append(min(y))\n bndbox.append(max(x))\n bndbox.append(max(y))\n bndbox.append(0)\n res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]\n return res # [[xmin, ymin, xmax, ymax, label_ind], ... 
]\n\n '''def __getitem__(self, index):\n img = Image.open(self.files[index % len(self.files)])\n w, h = img.size\n img_A = img.crop((0, 0, w/2, h))\n img_B = img.crop((w/2, 0, w, h))\n\n if np.random.random() < 0.5:\n img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], 'RGB')\n img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], 'RGB')\n\n img_A = self.transform(img_A)\n img_B = self.transform(img_B)\n\n return {'A': img_A, 'B': img_B}'''\n\n def __len__(self):\n return len(self.files)\n","sub_path":"pix2pix_datasets.py","file_name":"pix2pix_datasets.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"115641037","text":"#!/usr/bin/env/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n__author__ = 'raquel'\nimport collections\nimport pandas as pd\n\nfrom api_factory import api as API\nfrom geo_info import GeoInfo\nfrom map import Map\nfrom flask import Flask, request\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport json\n\n\ndef zoopla_list(area, listing_status, minimum_beds,maximum_beds, minimum_price, maximum_price):\n listings = []\n api = API(version=1, api_key='k4uew92e27kzs7nbrk93uguh')\n for listing in api.property_listings(area=str(area),\n listing_status=listing_status,\n max_results=None,\n minimum_beds=minimum_beds,\n maximum_beds=maximum_beds,\n minimum_price=minimum_price,\n maximum_price=maximum_price):\n listings.append(listing)\n return listings\n\n# get all of the listings, sort each by keys\n\n\ndef sort_listing_dic(area, listing_status, minimum_beds, maximum_beds, minimum_price, maximum_price):\n listings = zoopla_list(area, listing_status, minimum_beds, maximum_beds, minimum_price, maximum_price)\n for listing in listings:\n if \"new_home\" not in listing.__dict__:\n listing.__dict__.update({\"new_home\": None})\n collections.OrderedDict(sorted(listing.__dict__.items()))\n return listings\n\n\ndef property_keys(properties):\n keys = properties[0].__dict__.keys()\n return sorted(keys)\n\n\ndef property_df(properties):\n df = pd.DataFrame(p.__dict__ for p in properties)\n return df\n\n\ndef property_location(properties):\n geo_listings = []\n for listing in properties:\n geo_listings.append(GeoInfo(listing))\n return geo_listings\n\n\n\n# properties = sort_listing_dic(\"edinburgh\", \"sale\", \"2\", \"350000\")\n#\n# locations_info = property_location(properties)\n#\n# postcodes = [x.postcode for x in locations_info]\n# formatted_addresses = [x.address() for x in locations_info]\n#\n# prop_df = property_df(properties)\n# locations = [x.loc_info for x in locations_info]\n#\n# prop_df[\"Postcode\"] = postcodes\n# prop_df[\"formatted_address\"] = formatted_addresses\n#\n# dep_df = pd.ExcelFile(\"./Deprivation_Index_2016.xls\")\n#\n# dep_full = dep_df.parse(\"All postcodes\")\n#\n# zoopla_dep = prop_df.merge(dep_full, on=[\"Postcode\"])\n#\n# df3 = zoopla_dep[[\"formatted_address\", \"details_url\", \"latitude\", \"longitude\"]][zoopla_dep[\"SIMD16_Vigintile\"] > 17]\n#\n# map = Map()\n#\n# for i in range(len(df3)):\n# map.add_point((df3.iloc[i].latitude, df3.iloc[i].longitude))\n#\n#\n# with open(\"output1.html\", \"w\") as out:\n# print(map, file=out)\n\n\ndef makemap(listing_status = \"rent\", minmdi=1, min_price=50, max_price=150, loc=\"edinburgh\", min_bed=0, max_bed=999):\n app.logger.debug( \"makemap() has been called\" )\n ## for london\n app.logger.debug( \"retrieving listing via sort_listing()\" )\n properties = 
sort_listing_dic(loc, listing_status, min_bed, max_bed, min_price, max_price)\n    app.logger.debug( \"getting property location information for %d properties\" % len(properties) )\n    locations_info = property_location(properties)\n\n    postcodes = [x.complete_pc() for x in locations_info]\n    formatted_addresses = [x.address() for x in locations_info]\n\n    prop_df = property_df(properties)\n    locations = [x.loc_info for x in locations_info]\n\n    prop_df[\"Postcode\"] = postcodes\n    prop_df[\"formatted_address\"] = formatted_addresses\n\n    app.logger.debug( \"initialising deprivation index information\" )\n    zoopla_dep = prop_df.merge(mdi_df[loc], on=mdi_col[loc]['on'])\n    df3 = zoopla_dep.ix[:, ][zoopla_dep[mdi_col[loc]['col']] >= minmdi]\n    df3.rename(columns={mdi_col[loc]['col']: mdi_col[loc]['to']}, inplace=True)\n    #if loc == \"Sutton, London\":\n    #    dep_full = pd.read_pickle('sutton-deprivation-data.pkl')\n    #    zoopla_dep = prop_df.merge(dep_full, on=[\"Postcode\"])\n    #    df3 = zoopla_dep.ix[:, ][zoopla_dep[\"Index of Multiple Deprivation Decile\"] >= minmdi]\n    #    df3.rename(columns={'Index of Multiple Deprivation Decile': 'MDI Decile'}, inplace=True)\n    #elif loc == \"Edinburgh\":\n    #    dep_full = pd.read_pickle('edinburgh-deprivation-data.pkl')\n    #    zoopla_dep = prop_df.merge(dep_full, on=[\"Postcode\"])\n    #    df3 = zoopla_dep.ix[:,][zoopla_dep[\"SIMD16_Vigintile\"] >= minmdi]\n    #    df3.rename(columns={'SIMD16_Vigintile': 'MDI Vigintile'}, inplace=True)\n\n    app.logger.debug( \"creating HTML/JS for map\" )\n    map = Map()\n    if 'df3' in locals():\n        for i in range(len(df3)):\n            description = \"\"\"{addr}\n            £{price}\n            Bedrooms: {beds}\n            Deprivation: {mdi}\"\"\".format(addr=df3.iloc[i].formatted_address, price=df3.iloc[i].price, beds=df3.iloc[i].num_bedrooms,\n                                         mdi=df3.iloc[i][mdi_col[loc]['to']])\n            map.add_point((df3.iloc[i].latitude, df3.iloc[i].longitude, df3.iloc[i].details_url, json.dumps(description)))\n    app.logger.debug(\"map created, returning\")\n    return str(map)\n\n# Set up logging\nhandler = RotatingFileHandler( 'oofy-combined.log', maxBytes=500000, backupCount=10 )\nhandler.setLevel( logging.DEBUG )\nformatter = logging.Formatter( '%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s' )\nhandler.setFormatter( formatter )\n# Underlying WSGI component logger for access log\nlogger = logging.getLogger( 'werkzeug' )\nlogger.addHandler( handler )\napp = Flask( 'oofy' )\n# Also add flask logs to the same file\napp.logger.addHandler(handler)\n#app.config[\"APPLICATION_ROOT\"] = \"/oofy\"\n\n# Load deprivation index dataframes once, in advance\nlogger.debug(\"Loading deprivation index data from pickled files\")\nmdi_df = {}\nmdi_df['Sutton, London'] = pd.read_pickle( 'sutton-deprivation-data.pkl' )\nmdi_df['Edinburgh'] = pd.read_pickle( 'edinburgh-deprivation-data.pkl' )\nmdi_col = { 'Sutton, London': { 'on': 'Postcode', 'col': 'Index of Multiple Deprivation Decile', 'to': 'MDI Decile' },\n            'Edinburgh': { 'on': 'Postcode', 'col': 'SIMD16_Vigintile', 'to': 'MDI Vigintile' } }\n\n@app.route('/oofy/map', methods=['POST', 'GET'])\ndef map_page():\n    app.logger.debug( \"Oofy map (/oofy/map) requested\" )\n    #print(request.values.keys())\n    #print(makemap(minmdi=int(request.args.get('minmdi', 8))))\n    #print(\"koko\")\n    return makemap(listing_status=request.args.get('listing_status', \"rent\"),\n                   minmdi=int(request.args.get('min_mdi', 1)),\n                   min_price=int(request.args.get('min_price', 50)),\n                   max_price=int(request.args.get('max_price', 150)),\n                   loc=request.args.get('loc', 'Edinburgh'),\n                   
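# each query parameter falls back to a default when missing from the request\n                   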
min_bed=int(request.args.get('min_bed', 0)),\n                   max_bed=int(request.args.get('max_bed', 999)))\n\n@app.route('/oofy/form')\ndef form():\n    app.logger.debug( \"Oofy form (/oofy/form) requested\" )\n    return \"\"\"<html>\n<head></head><body><form action=\"/oofy/map\" style=\"display:inline\" method=\"get\" target=\"bottom\">\n<table border=\"0\" cellspacing=\"0\" cellpadding=\"0\"><tr>\n\n<tr><td>Location\n<select name=\"loc\">\n    <option value=\"Edinburgh\" selected>Edinburgh</option>\n    <option value=\"Sutton, London\">Sutton, London</option>\n  </select></td>\n\n<tr><td>Listing Status\n<select name=\"listing_status\">\n    <option value=\"rent\" selected >Rent</option>\n    <option value=\"sale\">Sale</option>\n  </select></td>\n\n<tr><td>Minimum Deprivation Index: <input type=\"range\" name=\"min_mdi\" min=\"1\" max=\"20\" value=\"1\"></td>\n  <td>Minimum Price<input type=\"text\" name=\"min_price\" value=\"50\"></td>\n<td>Maximum Price<input type=\"text\" name=\"max_price\" value=\"150\"></td>\n<tr><td>Min. Bedrooms\n<select name=\"min_bed\">\n    <option value=\"0\">Studio/None</option>\n    <option value=\"1\" selected>1</option>\n    <option value=\"2\">2</option>\n    <option value=\"3\">3</option>\n  </select></td>\n<td>Max. Bedrooms\n<select name=\"max_bed\">\n    <option value=\"0\">Studio/None</option>\n    <option value=\"1\">1</option>\n    <option value=\"2\">2</option>\n    <option value=\"3\">3</option>\n    <option value=\"999\" selected >4+</option>\n  </select></td>\n\n<td><input type=\"submit\" value=\"Update Map\"></td>\n</tr></table>\n</form></body>\"\"\"\n\n@app.route('/oofy/')\ndef index():\n    app.logger.debug( \"Oofy frameset (/oofy/) requested\" )\n    return \"\"\"<!DOCTYPE html>\n<html>\n<head>\n<title>Oofy</title>\n</head>\n<frameset rows=\"30%,70%\">\n  <frame src=\"/oofy/form\">\n  <frame src=\"/oofy/map\" name=\"bottom\">\n  <noframes>\n  <body>\n  Your browser does not support frames.\n  </body>\n  </noframes>\n</frameset>\n</html>\n\"\"\"\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", debug=True, threaded=True)\n","sub_path":"zoopla/flask_main.py","file_name":"flask_main.py","file_ext":"py","file_size_in_byte":8842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"549390354","text":"import numpy as np\n\n\nclass ReactionNetwork(object):\n    \"\"\"\n    Reaction indices grouped according to biological processes.\n    This is used for sensitivity analysis (target='reaction').\n    \"\"\"\n    reactions = {\n        \n    }\n\n    def _is_duplicate(self, biological_processes):\n        reaction_indices = np.sum(biological_processes, axis=0) \\\n            if len(self.reactions) > 1 else biological_processes[0]\n        duplicate_reaction = \\\n            [i for i in set(reaction_indices) if reaction_indices.count(i) > 1]\n        if not duplicate_reaction:\n            return False\n        else:\n            which_process = []\n            for reaction_index in duplicate_reaction:\n                for process, indices in self.reactions.items():\n                    if reaction_index in indices:\n                        which_process.append(process)\n            raise ValueError(\n                'Duplicate reaction: {:d} found in {}.'.format(\n                    reaction_index, which_process\n                )\n            )\n\n    def group(self):\n        \"\"\"\n        Group reactions according to biological processes\n        \"\"\"\n        for process, indices in self.reactions.items():\n            if not isinstance(indices, list):\n                raise TypeError(\n                    'Use list for reaction indices in {}'.format(process)\n                )\n        biological_processes = []\n        for process, indices in self.reactions.items():\n            biological_processes.append(indices)\n\n        if not self._is_duplicate(biological_processes):\n            return 
biological_processes","sub_path":"biomass/builder/model_dev/reaction_network.py","file_name":"reaction_network.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"447028752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 19 07:58:10 2013\n\n@author: matt\n\"\"\"\n\nimport array\nimport io\nimport logging\nimport unittest\nimport sys\n\nimport numpy\n\nfrom DM_IO import parse_dm3\nfrom DM_IO import dm3_image_utils\n\nfrom nion.data import Calibration\n\n\nclass TestDM3ImportExportClass(unittest.TestCase):\n\n def check_write_then_read_matches(self, data, func, _assert=True):\n # we confirm that reading a written element returns the same value\n s = io.BytesIO()\n header = func(s, outdata=data)\n s.seek(0)\n if header is not None:\n r, hy = func(s)\n else:\n r = func(s)\n if _assert:\n self.assertEqual(r, data)\n return r\n\n def test_dm_read_struct_types(self):\n s = io.BytesIO()\n types = [2, 2, 2]\n parse_dm3.dm_read_struct_types(s, outtypes=types)\n s.seek(0)\n in_types, headerlen = parse_dm3.dm_read_struct_types(s)\n self.assertEqual(in_types, types)\n\n def test_simpledata(self):\n self.check_write_then_read_matches(45, parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('long')])\n self.check_write_then_read_matches(2**30, parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('uint')])\n self.check_write_then_read_matches(34.56, parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('double')])\n\n def test_read_string(self):\n data = \"MyString\"\n ret = self.check_write_then_read_matches(data, parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('array')], False)\n self.assertEqual(data, dm3_image_utils.fix_strings(ret))\n\n def test_array_simple(self):\n dat = array.array('b', [0]*256)\n self.check_write_then_read_matches(dat, parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('array')])\n\n def test_array_struct(self):\n dat = parse_dm3.structarray(['h', 'h', 'h'])\n dat.raw_data = array.array('b', [0, 0] * 3 * 8) # two bytes x 3 'h's x 8 elements\n self.check_write_then_read_matches(dat, parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('array')])\n\n def test_tagdata(self):\n for d in [45, 2**30, 34.56, array.array('b', [0]*256)]:\n self.check_write_then_read_matches(d, parse_dm3.parse_dm_tag_data)\n\n def test_tagroot_dict(self):\n mydata = {}\n self.check_write_then_read_matches(mydata, parse_dm3.parse_dm_tag_root)\n mydata = {\"Bob\": 45, \"Henry\": 67, \"Joe\": 56}\n self.check_write_then_read_matches(mydata, parse_dm3.parse_dm_tag_root)\n\n def test_tagroot_dict_complex(self):\n mydata = {\"Bob\": 45, \"Henry\": 67, \"Joe\": {\n \"hi\": [34, 56, 78, 23], \"Nope\": 56.7, \"d\": array.array('I', [0] * 32)}}\n self.check_write_then_read_matches(mydata, parse_dm3.parse_dm_tag_root)\n\n def test_tagroot_list(self):\n # note any strings here get converted to 'H' arrays!\n mydata = []\n self.check_write_then_read_matches(mydata, parse_dm3.parse_dm_tag_root)\n mydata = [45, 67, 56]\n self.check_write_then_read_matches(mydata, parse_dm3.parse_dm_tag_root)\n\n def test_struct(self):\n # note any strings here get converted to 'H' arrays!\n mydata = tuple()\n f = parse_dm3.dm_types[parse_dm3.get_dmtype_for_name('struct')]\n self.check_write_then_read_matches(mydata, f)\n mydata = (3, 4, 56.7)\n self.check_write_then_read_matches(mydata, f)\n\n def test_image(self):\n im = array.array('h')\n if sys.version < '3':\n im.fromstring(numpy.random.bytes(64))\n else:\n 
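# array.frombytes is the Python 3 replacement for the removed fromstring\n            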
im.frombytes(numpy.random.bytes(64))\n im_tag = {\"Data\": im,\n \"Dimensions\": [23, 45]}\n s = io.BytesIO()\n parse_dm3.parse_dm_tag_root(s, outdata=im_tag)\n s.seek(0)\n ret = parse_dm3.parse_dm_tag_root(s)\n self.assertEqual(im_tag[\"Data\"], ret[\"Data\"])\n self.assertEqual(im_tag[\"Dimensions\"], ret[\"Dimensions\"])\n self.assert_((im_tag[\"Data\"] == ret[\"Data\"]))\n\n def test_data_write_read_round_trip(self):\n dtypes = (numpy.float32, numpy.float64, numpy.complex64, numpy.complex128, numpy.int16, numpy.uint16, numpy.int32, numpy.uint32)\n shapes = ((6, 4), (6, ), (6, 4, 2))\n for dtype in dtypes:\n for shape in shapes:\n s = io.BytesIO()\n data_in = numpy.ones(shape, dtype)\n dimensional_calibrations_in = list()\n for index, dimension in enumerate(shape):\n dimensional_calibrations_in.append(Calibration.Calibration(1.0 + 0.1 * index, 2.0 + 0.2 * index, \"µ\" + \"n\" * index))\n intensity_calibration_in = Calibration.Calibration(4, 5, \"six\")\n metadata_in = dict()\n dm3_image_utils.save_image(data_in, dimensional_calibrations_in, intensity_calibration_in, metadata_in, s)\n s.seek(0)\n data_out, dimensional_calibrations_out, intensity_calibration_out, _, _ = dm3_image_utils.load_image(s)\n self.assertTrue(numpy.array_equal(data_in, data_out))\n dimensional_calibrations_out = [Calibration.Calibration(*d) for d in dimensional_calibrations_out]\n self.assertEqual(dimensional_calibrations_in, dimensional_calibrations_out)\n self.assertEqual(intensity_calibration_in, Calibration.Calibration(*intensity_calibration_out))\n\n def test_rgb_data_write_read_round_trip(self):\n s = io.BytesIO()\n data_in = (numpy.random.randn(6, 4, 3) * 255).astype(numpy.uint8)\n dimensional_calibrations_in = [Calibration.Calibration(1, 2, \"nm\"), Calibration.Calibration(2, 3, u\"µm\")]\n intensity_calibration_in = Calibration.Calibration(4, 5, \"six\")\n metadata_in = {\"abc\": None, \"\": \"\", \"one\": [], \"two\": {}, \"three\": [1, None, 2]}\n dm3_image_utils.save_image(data_in, dimensional_calibrations_in, intensity_calibration_in, metadata_in, s)\n s.seek(0)\n data_out, dimensional_calibrations_out, intensity_calibration_out, title_out, metadata_out = dm3_image_utils.load_image(s)\n self.assertTrue(numpy.array_equal(data_in, data_out))\n # s = \"/Users/cmeyer/Desktop/EELS_CL.dm3\"\n # data_out, dimensional_calibrations_out, intensity_calibration_out, title_out, metadata_out = dm3_image_utils.load_image(s)\n\n def test_calibrations_write_read_round_trip(self):\n s = io.BytesIO()\n data_in = numpy.ones((6, 4), numpy.float32)\n dimensional_calibrations_in = [Calibration.Calibration(1.1, 2.1, \"nm\"), Calibration.Calibration(2, 3, u\"µm\")]\n intensity_calibration_in = Calibration.Calibration(4.4, 5.5, \"six\")\n metadata_in = dict()\n dm3_image_utils.save_image(data_in, dimensional_calibrations_in, intensity_calibration_in, metadata_in, s)\n s.seek(0)\n data_out, dimensional_calibrations_out, intensity_calibration_out, title_out, metadata_out = dm3_image_utils.load_image(s)\n dimensional_calibrations_out = [Calibration.Calibration(*d) for d in dimensional_calibrations_out]\n self.assertEqual(dimensional_calibrations_in, dimensional_calibrations_out)\n intensity_calibration_out = Calibration.Calibration(*intensity_calibration_out)\n self.assertEqual(intensity_calibration_in, intensity_calibration_out)\n\n def test_metadata_write_read_round_trip(self):\n s = io.BytesIO()\n data_in = numpy.ones((6, 4), numpy.float32)\n dimensional_calibrations_in = [Calibration.Calibration(1, 2, \"nm\"), 
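\n                                       # Calibration appears to take (offset, scale, units), judging from its use in these tests\n                                       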
Calibration.Calibration(2, 3, u\"µm\")]\n intensity_calibration_in = Calibration.Calibration(4, 5, \"six\")\n metadata_in = {\"abc\": 1, \"def\": \"abc\", \"efg\": { \"one\": 1, \"two\": \"TWO\", \"three\": [3, 4, 5] }}\n dm3_image_utils.save_image(data_in, dimensional_calibrations_in, intensity_calibration_in, metadata_in, s)\n s.seek(0)\n data_out, dimensional_calibrations_out, intensity_calibration_out, title_out, metadata_out = dm3_image_utils.load_image(s)\n self.assertEqual(metadata_in, metadata_out)\n\n def test_metadata_difficult_types_write_read_round_trip(self):\n s = io.BytesIO()\n data_in = numpy.ones((6, 4), numpy.float32)\n dimensional_calibrations_in = [Calibration.Calibration(1, 2, \"nm\"), Calibration.Calibration(2, 3, u\"µm\")]\n intensity_calibration_in = Calibration.Calibration(4, 5, \"six\")\n metadata_in = {\"abc\": None, \"\": \"\", \"one\": [], \"two\": {}, \"three\": [1, None, 2]}\n dm3_image_utils.save_image(data_in, dimensional_calibrations_in, intensity_calibration_in, metadata_in, s)\n s.seek(0)\n data_out, dimensional_calibrations_out, intensity_calibration_out, title_out, metadata_out = dm3_image_utils.load_image(s)\n metadata_expected = {\"one\": [], \"two\": {}, \"three\": [1, 2]}\n self.assertEqual(metadata_out, metadata_expected)\n\n def disabled_test_series_data_ordering(self):\n s = \"/Users/cmeyer/Downloads/NEW_7FocalSeriesImages_Def_50000nm.dm3\"\n data_out, dimensional_calibrations_out, intensity_calibration_out, title_out, metadata_out = dm3_image_utils.load_image(s)\n import pprint\n pprint.pprint(metadata_out)\n print(data_out.shape)\n\n# some functions for processing multiple files.\n# useful for testing reading and writing a large number of files.\nimport os\n\n\ndef process_dm3(path, mode):\n opath = path + \".out.dm3\"\n data = odata = None\n if mode == 0 or mode == 1: # just open source\n # path=opath\n with open(path, 'rb') as f:\n data = parse_dm3.parse_dm_header(f)\n if mode == 1: # open source, write to out\n with open(opath, 'wb') as f:\n parse_dm3.parse_dm_header(f, outdata=data)\n elif mode == 2: # open both\n with open(path, 'rb') as f:\n data = parse_dm3.parse_dm_header(f)\n with open(opath, 'rb') as f:\n odata = parse_dm3.parse_dm_header(f)\n # this ensures keys in root only are the same\n assert(sorted(odata) == sorted(data))\n return data, odata\n\n\ndef process_all(mode):\n for f in [x for x in os.listdir(\".\")\n if x.endswith(\".dm3\")\n if not x.endswith(\"out.dm3\")]:\n print(\"reading\", f, \"...\")\n data, odata = process_dm3(f, mode)\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.DEBUG)\n unittest.main()\n # process_all(1)\n","sub_path":"DM_IO/dm3parsertest.py","file_name":"dm3parsertest.py","file_ext":"py","file_size_in_byte":10362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"475945686","text":"import tweepy\n\nimport config\nimport tweetchecker\n\nauth = tweepy.OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\nauth.set_access_token(config.ACCESS_KEY, config.ACCESS_SECRET)\napi = tweepy.API(auth)\nsampleValue = 0\nclass StdOutListener(tweepy.StreamListener):\n ''' Handles data received from the stream. 
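Fetches the tweet being replied to and posts the checker's verdict as a reply. 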
'''\n    def on_status(self, status):\n        parent_id = status.in_reply_to_status_id\n        parent_status = api.get_status(parent_id)\n        tweet_text = tweetchecker.checker(parent_status)\n        api.update_status(tweet_text, parent_id)\n\n\n    def on_error(self, status_code):\n        print(\"on_error\")\n        print('Got an error with status code: ' + str(status_code))\n        return True # To continue listening\n\n\n    def on_timeout(self):\n        print('Timeout...')\n        return True # To continue listening\n\nif __name__ == '__main__':\n\tlistener = StdOutListener()\n\tprint(\"ping!\")\n\tstream = tweepy.Stream(auth, listener)\n\tstream.filter(track=['@hateishate_'])\n","sub_path":"src/hateishate.py","file_name":"hateishate.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"198269064","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom DinamicGraph import *\n\nG = nx.Graph()\n#Uncomment this and comment out lines 8 and 9 for the previous program\n#g = {\"C:\" : {c1 : { ca1: {}, ca2: {} }, c2: { qe: \"f\" }, a1: \"f\", a2: \"f\"}\ng = DynamicGraphGenerator()\ng = g.create()\n\nfor k,v in g.items():\n    G.add_node(\"%s\" % (k))\n    print(\"Processing vertex %s \" % (k))\n\n    for vertex,weight in v.items():\n        G.add_edge(k,vertex , weight = weight)\n        print(\"\\t Vertex %s has an edge with %s with weight %s\" % (k,vertex,weight))\n\nnx.draw(G,with_labels= True)\nplt.show()\n\ndef plot(self,j, parent=None):\n\n    for k,v in j.items():\n\n        if not parent: G.add_node(k)\n        else: G.add_edge(parent,k)\n        \n        if isinstance(v,dict):\n            for a,b in v.items():\n                if isinstance(b,dict):\n                    G.add_edge(k,a)\n                    self.plot(b,a)\n                else: G.add_edge(k,a)\n        \n    return True\n    \n    \n    ","sub_path":"Unidad II/III PAC/Grafo Dinamico/GraphDraw.py","file_name":"GraphDraw.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"282954451","text":"from typing import Callable, List, NamedTuple, Optional, Union, cast\n\nimport pendulum\nfrom dagster import check\nfrom dagster.core.definitions.sensor import (\n    PipelineRunReaction,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SkipReason,\n)\nfrom dagster.core.errors import PipelineSensorExecutionError, user_code_error_boundary\nfrom dagster.core.events import DagsterEvent, DagsterEventType\nfrom dagster.core.storage.pipeline_run import PipelineRun, PipelineRunsFilter\nfrom dagster.serdes import (\n    deserialize_json_to_dagster_namedtuple,\n    serialize_dagster_namedtuple,\n    whitelist_for_serdes,\n)\nfrom dagster.serdes.errors import DeserializationError\nfrom dagster.seven import JSONDecodeError\nfrom dagster.utils.error import serializable_error_info_from_exc_info\n\n\nclass PipelineFailureSensorContext(\n    NamedTuple(\n        \"_PipelineFailureSensorContext\",\n        [(\"sensor_name\", str), (\"pipeline_run\", PipelineRun), (\"failure_event\", DagsterEvent)],\n    )\n):\n    \"\"\"The ``context`` object available to a decorated function of ``pipeline_failure_sensor``.\n\n    Attributes:\n        sensor_name (str): the name of the sensor.\n        pipeline_run (PipelineRun): the failed pipeline run.\n        failure_event (DagsterEvent): the pipeline failure event.\n    \"\"\"\n\n    def __new__(cls, sensor_name, pipeline_run, failure_event):\n\n        return super(PipelineFailureSensorContext, cls).__new__(\n            cls,\n            sensor_name=check.str_param(sensor_name, \"sensor_name\"),\n            pipeline_run=check.inst_param(pipeline_run, \"pipeline_run\", PipelineRun),\n            
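# check.inst_param validates the argument's type before the namedtuple is built\n            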
failure_event=check.inst_param(failure_event, \"failure_event\", DagsterEvent),\n )\n\n\n@whitelist_for_serdes\nclass PipelineFailureSensorCursor(\n NamedTuple(\n \"_PipelineFailureSensorCursor\",\n [(\"record_id\", int), (\"update_timestamp\", str)],\n )\n):\n def __new__(cls, record_id, update_timestamp):\n\n return super(PipelineFailureSensorCursor, cls).__new__(\n cls,\n record_id=check.int_param(record_id, \"record_id\"),\n update_timestamp=check.str_param(update_timestamp, \"update_timestamp\"),\n )\n\n @staticmethod\n def is_valid(json_str: str) -> bool:\n try:\n obj = deserialize_json_to_dagster_namedtuple(json_str)\n return isinstance(obj, PipelineFailureSensorCursor)\n except (JSONDecodeError, DeserializationError):\n return False\n\n def to_json(self) -> str:\n return serialize_dagster_namedtuple(cast(NamedTuple, self))\n\n @staticmethod\n def from_json(json_str: str) -> tuple:\n return deserialize_json_to_dagster_namedtuple(json_str)\n\n\ndef pipeline_failure_sensor(\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n pipeline_selection: Optional[List[str]] = None,\n) -> Callable[\n [Callable[[PipelineFailureSensorContext], Union[SkipReason, PipelineRunReaction]]],\n SensorDefinition,\n]:\n \"\"\"\n Creates a sensor that reacts to pipeline failure events, where the decorated function will be\n run when a pipeline run fails.\n\n Takes a :py:class:`~dagster.PipelineFailureSensorContext`.\n\n Args:\n name (Optional[str]): The name of the pipeline failure sensor. Defaults to the name of the\n decorated function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n pipeline_selection (Optional[List[str]]): Names of the pipelines that will be monitored by\n this failure sensor. Defaults to None, which means the alert will be sent when any\n pipeline in the repository fails.\n \"\"\"\n\n from dagster.core.storage.event_log.base import RunShardedEventsCursor, EventRecordsFilter\n\n dagster_event_type = DagsterEventType.PIPELINE_FAILURE\n\n def inner(\n fn: Callable[[\"PipelineFailureSensorContext\"], Union[SkipReason, PipelineRunReaction]]\n ) -> SensorDefinition:\n check.callable_param(fn, \"fn\")\n if name is None or callable(name):\n sensor_name = fn.__name__\n else:\n sensor_name = name\n\n def _wrapped_fn(context: SensorEvaluationContext):\n # initiate the cursor to (most recent event id, current timestamp) when:\n # * it's the first time starting the sensor\n # * or, the cursor isn't in valid format (backcompt)\n if context.cursor is None or not PipelineFailureSensorCursor.is_valid(context.cursor):\n most_recent_event_records = list(\n context.instance.get_event_records(ascending=False, limit=1)\n )\n most_recent_event_id = (\n most_recent_event_records[0].storage_id\n if len(most_recent_event_records) == 1\n else -1\n )\n\n new_cursor = PipelineFailureSensorCursor(\n update_timestamp=pendulum.now(\"UTC\").isoformat(),\n record_id=most_recent_event_id,\n )\n context.update_cursor(new_cursor.to_json())\n yield SkipReason(f\"Initiating {sensor_name}. 
Set cursor to {new_cursor}\")\n                return\n\n            record_id, update_timestamp = PipelineFailureSensorCursor.from_json(context.cursor)\n\n            # Fetch failure events after the cursor id\n            # * we move the cursor forward to the latest visited event's id to avoid revisits\n            # * when the daemon is down, because we persist the cursor info, we can go back to where we\n            #   left and backfill alerts for the qualified events (up to 5 at a time) during the downtime\n            # Note: this is a cross-run query which requires extra handling in sqlite, see details in SqliteEventLogStorage.\n            event_records = context.instance.get_event_records(\n                EventRecordsFilter(\n                    after_cursor=RunShardedEventsCursor(\n                        id=record_id, run_updated_after=pendulum.parse(update_timestamp)\n                    ),\n                    event_type=dagster_event_type,\n                ),\n                ascending=True,\n                limit=5,\n            )\n\n            for event_record in event_records:\n                event_log_entry = event_record.event_log_entry\n                storage_id = event_record.storage_id\n\n                # get run info\n                run_records = context.instance.get_run_records(\n                    filters=PipelineRunsFilter(run_ids=[event_log_entry.run_id])\n                )\n                check.invariant(len(run_records) == 1)\n                pipeline_run = run_records[0].pipeline_run\n                update_timestamp = run_records[0].update_timestamp\n\n                # skip if any of the following happens:\n                pipeline_repo_name = (\n                    pipeline_run.external_pipeline_origin.external_repository_origin.repository_name\n                )\n\n                if any(\n                    [\n                        # the failed pipeline does not belong to the current repository\n                        pipeline_repo_name != context.repository_name,\n                        # if pipeline is not selected\n                        pipeline_selection and pipeline_run.pipeline_name not in pipeline_selection,\n                    ]\n                ):\n                    context.update_cursor(\n                        PipelineFailureSensorCursor(\n                            record_id=storage_id, update_timestamp=update_timestamp.isoformat()\n                        ).to_json()\n                    )\n                    continue\n\n                serializable_error = None\n\n                try:\n                    with user_code_error_boundary(\n                        PipelineSensorExecutionError,\n                        lambda: f'Error occurred during the execution of sensor \"{sensor_name}\".',\n                    ):\n                        # one user code invocation maps to one failure event\n                        fn(\n                            PipelineFailureSensorContext(\n                                sensor_name=sensor_name,\n                                pipeline_run=pipeline_run,\n                                failure_event=event_log_entry.dagster_event,\n                            )\n                        )\n                except PipelineSensorExecutionError as pipeline_sensor_execution_error:\n                    # When the user code errors, we report error to the sensor tick not the original run.\n                    serializable_error = serializable_error_info_from_exc_info(\n                        pipeline_sensor_execution_error.original_exc_info\n                    )\n\n                context.update_cursor(\n                    PipelineFailureSensorCursor(\n                        record_id=storage_id, update_timestamp=update_timestamp.isoformat()\n                    ).to_json()\n                )\n\n                # Yield PipelineRunReaction to indicate the execution success/failure.\n                # The sensor machinery would\n                # * report back to the original run if success\n                # * update cursor and job state\n                yield PipelineRunReaction(\n                    pipeline_run=pipeline_run,\n                    error=serializable_error,\n                )\n\n        return SensorDefinition(\n            name=sensor_name,\n            evaluation_fn=_wrapped_fn,\n            minimum_interval_seconds=minimum_interval_seconds,\n            description=description,\n        )\n\n    # This case is for when decorator is used bare, without arguments, i.e. 
@pipeline_failure_sensor\n if callable(name):\n return inner(name)\n\n return inner\n","sub_path":"python_modules/dagster/dagster/core/definitions/pipeline_sensor.py","file_name":"pipeline_sensor.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"98869379","text":"from datetime import datetime\n\nfrom sqlalchemy import (\n BigInteger,\n Boolean,\n Column,\n ForeignKey,\n Integer,\n String,\n TIMESTAMP\n)\nfrom sqlalchemy.dialects.postgresql.json import JSONB\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nBase = declarative_base()\n\n\nclass BaseInfo:\n __table__ = None\n\n def __init__(self, **kwargs):\n for column in self.__table__.columns.items():\n column_name = column[0]\n if column_name not in kwargs:\n default = getattr(self.__table__.c, column_name).default\n if default:\n kwargs[column_name] = default.arg if not hasattr(default.arg, '__call__') else None\n\n super(BaseInfo, self).__init__(**kwargs)\n\n id = Column(BigInteger, primary_key=True)\n created_at = Column(TIMESTAMP, default=datetime.utcnow, nullable=False)\n updated_at = Column(TIMESTAMP, default=datetime.utcnow, nullable=False)\n\n\nclass User(BaseInfo, Base):\n __tablename__ = 'user'\n\n profile_id = Column(BigInteger, ForeignKey('user_profile.id'), nullable=True)\n\n email = Column(String, nullable=False, unique=True)\n password = Column(String, nullable=False)\n admin = Column(Boolean, nullable=False, default=False)\n active = Column(Boolean, nullable=False, default=False)\n\n profile = relationship(\"UserProfile\", uselist=False, back_populates=\"user\")\n\n def as_dict(self):\n return {\n 'id': self.id,\n 'email': self.email,\n 'admin': self.admin,\n 'active': self.active,\n 'profile': self.profile.as_dict()\n }\n\n\nclass UserProfile(Base):\n __tablename__ = 'user_profile'\n\n id = Column(BigInteger, primary_key=True)\n first_name = Column(String, nullable=False)\n last_name = Column(String, nullable=False, default='')\n\n user = relationship(\"User\", uselist=False, back_populates=\"profile\")\n\n def as_dict(self):\n return {\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n }\n\n\nclass UserToken(Base):\n __tablename__ = 'user_token'\n\n user_id = Column(BigInteger, nullable=False, primary_key=True)\n token = Column(String, nullable=False)\n\n\nclass Project(BaseInfo, Base):\n __tablename__ = 'projects'\n\n creator_id = Column(BigInteger, nullable=False)\n workflow_id = Column(BigInteger, nullable=False)\n\n name = Column(String, nullable=False)\n key = Column(String, nullable=False, unique=True)\n description = Column(String, nullable=False, default='')\n\n def as_dict(self):\n return {\n 'key': self.key,\n 'name': self.name,\n 'description': self.description,\n }\n\n\nclass Item(BaseInfo, Base):\n __tablename__ = 'items'\n\n key = Column(String, nullable=False, unique=True)\n set_id = Column(BigInteger, nullable=False)\n item_type_id = Column(BigInteger, nullable=False)\n state_id = Column(BigInteger, nullable=False)\n\n summary = Column(String, nullable=False)\n description = Column(String, nullable=False, default='')\n\n position = Column(Integer, nullable=False, default='')\n is_header = Column(Boolean, nullable=False, default=False)\n creator_id = Column(BigInteger, nullable=False)\n\n def as_dict(self):\n return {\n 'key': self.key,\n 'summary': self.summary,\n 'description': self.description,\n 'position': self.position,\n 'is_header': 
self.is_header,\n }\n\n\nclass ItemsSet(BaseInfo, Base):\n __tablename__ = 'items_sets'\n\n creator_id = Column(BigInteger, nullable=False)\n project_id = Column(BigInteger, nullable=False)\n item_type_id = Column(BigInteger, nullable=False)\n workflow_id = Column(BigInteger, nullable=False)\n\n key = Column(String, nullable=False, unique=True)\n name = Column(String, nullable=False)\n description = Column(String, nullable=False, default='')\n\n def as_dict(self):\n return {\n 'key': self.key,\n 'name': self.name,\n 'description': self.description\n }\n\n\nclass ProjectMember(Base):\n __tablename__ = 'projects_members'\n\n id = Column(BigInteger, primary_key=True)\n\n project_id = Column(BigInteger, nullable=False)\n user_id = Column(BigInteger, nullable=False)\n\n\nclass MemberToSetPermissions(Base):\n __tablename__ = 'members_to_set_permissions'\n\n id = Column(BigInteger, primary_key=True)\n\n set_id = Column(BigInteger, nullable=False)\n member_id = Column(BigInteger, nullable=False)\n permissions_id = Column(BigInteger, nullable=False)\n\n\nclass SetPermissions(Base):\n __tablename__ = 'sets_permissions'\n\n id = Column(BigInteger, primary_key=True)\n\n read = Column(Boolean, nullable=False, default=False)\n edit = Column(Boolean, nullable=False, default=False)\n manage = Column(Boolean, nullable=False, default=False)\n review = Column(Boolean, nullable=False, default=False)\n\n def as_dict(self):\n return {\n 'read': self.read,\n 'edit': self.edit,\n 'manage': self.manage,\n 'review': self.review\n }\n\n\nclass MemberToProjectPermissions(Base):\n __tablename__ = 'members_to_project_permissions'\n\n id = Column(BigInteger, primary_key=True)\n\n project_id = Column(BigInteger, nullable=False)\n member_id = Column(BigInteger, nullable=False)\n permissions_id = Column(BigInteger, nullable=False)\n\n\nclass ProjectPermissions(Base):\n __tablename__ = 'projects_permissions'\n\n id = Column(BigInteger, primary_key=True)\n\n read = Column(Boolean, nullable=False, default=False)\n edit = Column(Boolean, nullable=False, default=False)\n manage = Column(Boolean, nullable=False, default=False)\n\n def as_dict(self):\n return {\n 'read': self.read,\n 'edit': self.edit,\n 'manage': self.manage,\n }\n\n\nclass ItemType(Base):\n __tablename__ = 'item_types'\n\n id = Column(BigInteger, primary_key=True)\n\n key = Column(String, nullable=False)\n name = Column(String, nullable=False)\n plural = Column(String, nullable=False)\n\n def as_dict(self):\n return {\n 'key': self.key,\n 'name': self.name,\n 'plural': self.plural\n }\n\n\nclass ItemsRelationsWorkflow(Base):\n __tablename__ = 'items_relations_workflows'\n\n id = Column(BigInteger, primary_key=True)\n\n name = Column(String, nullable=False)\n description = Column(String, nullable=False)\n system = Column(Boolean, nullable=False, default=False)\n default = Column(Boolean, nullable=False, default=False)\n\n def as_dict(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'system': self.system,\n 'default': self.default\n }\n\n\nclass ItemRelation(Base):\n __tablename__ = 'item_relations'\n\n id = Column(BigInteger, primary_key=True)\n workflow_id = Column(BigInteger, nullable=False)\n\n parent_id = Column(String, nullable=False)\n child_id = Column(String, nullable=False)\n\n\nclass ItemDependency(Base):\n __tablename__ = 'item_dependencies'\n\n id = Column(BigInteger, primary_key=True)\n project_id = Column(BigInteger, nullable=False)\n parent_id = Column(String, nullable=False)\n parent_type_id = Column(String, 
nullable=False)\n child_id = Column(String, nullable=False)\n child_type_id = Column(String, nullable=False)\n\n\nclass ItemState(Base):\n __tablename__ = 'item_states'\n\n id = Column(BigInteger, primary_key=True)\n workflow_id = Column(BigInteger, nullable=False)\n name = Column(String, nullable=False)\n start = Column(String, nullable=False, default=False)\n conditions = Column(JSONB, nullable=False, default='{}')\n\n def as_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'start': self.start,\n 'conditions': self.conditions\n }\n\n\nclass ItemStatesWorkflow(Base):\n __tablename__ = 'item_states_workflows'\n\n id = Column(BigInteger, primary_key=True)\n name = Column(String, nullable=False)\n description = Column(String, nullable=False)\n system = Column(Boolean, nullable=False, default=False)\n default = Column(Boolean, nullable=False, default=False)\n\n def as_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'system': self.system,\n 'default': self.default\n }\n\n\nclass ItemStatesTransition(Base):\n __tablename__ = 'item_states_transitions'\n\n id = Column(BigInteger, primary_key=True)\n workflow_id = Column(BigInteger, nullable=False)\n name = Column(String, nullable=False)\n start_state_id = Column(BigInteger, nullable=False)\n end_state_id = Column(BigInteger, nullable=False)\n auto = Column(Boolean, nullable=False, default=False)\n conditions = Column(JSONB, nullable=False, default={})\n\n def as_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'auto': self.auto,\n 'conditions': self.conditions\n }\n\n\nclass ItemReview(Base):\n __tablename__ = 'items_reviews'\n\n id = Column(BigInteger, primary_key=True)\n item_id = Column(BigInteger, nullable=False)\n archived = Column(BigInteger, nullable=False, default=False)\n\n\nclass ItemReviewResult(Base):\n __tablename__ = 'item_review_results'\n\n id = Column(BigInteger, primary_key=True)\n review_item_id = Column(BigInteger, nullable=False)\n approved_user_id = Column(BigInteger, nullable=False)\n rejected_user_id = Column(BigInteger, nullable=False)\n","sub_path":"src/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"555669728","text":"import os\nimport sys\nimport numpy as np\nimport cv2\nimages = []\nlabels = []\n\ndef read_path(path_name):\n for dir_item in os.listdir(path_name):\n # print(dir_item)\n # 从初始路径开始叠加,合并成可识别的操作路径\n full_path = os.path.abspath(os.path.join(path_name, dir_item))\n if os.path.isdir(full_path): # 如果是文件夹,继续递归调用\n read_path(full_path)\n\n if \".jpg\" in full_path:\n print(\"ok\")\n print(cv2.imread(full_path))\n\n\nread_path(\"./data\")","sub_path":"CNN_face recognition/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"125188164","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"\nDefine endpoint to connect to slack\n.. code:: ini\n [default]\n ; general configuration: default endpoint\n endpoint=dev\n [dev]\n ; configuration specific to 'dev' endpoint\n bot_id=id\n bot_token=token\nThe client will successively attempt to locate this configuration file in\n1. Current working directory: ``./louisebot.conf``\n2. Current user's home directory ``~/.louisebot.conf``\n3. 
System wide configuration ``/etc/louisebot.conf``\nThis lookup mechanism makes it easy to overload credentials for a specific\nproject or user.\n\"\"\"\n\nimport os\n\ntry:\n from ConfigParser import RawConfigParser, NoSectionError, NoOptionError\nexcept ImportError: # pragma: no cover\n # Python 3\n from configparser import RawConfigParser, NoSectionError, NoOptionError\n\n__all__ = ['config']\n\n#: Locations where to look for configuration file by *increasing* priority\nCONFIG_PATH = [\n '/etc/louisebot.conf',\n os.path.expanduser('~/.louisebot.conf'),\n os.path.realpath('./louisebot.conf'),\n]\n\n\nclass ConfigurationManager(object):\n '''\n Application wide configuration manager\n '''\n def __init__(self):\n '''\n Create a config parser and load config from environment.\n '''\n # create config parser\n self.config = RawConfigParser()\n self.config.read(CONFIG_PATH)\n\n def get(self, section, name):\n '''\n Load parameter ``name`` from configuration, respecting priority order.\n Most of the time, ``section`` will correspond to the current api\n ``endpoint``. ``default`` section only contains ``endpoint`` and\n general configuration.\n :param str section: configuration section or region name. Ignored when\n looking in environment\n :param str name: configuration parameter to lookup\n '''\n # try from specified section/endpoint\n try:\n return self.config.get(section, name)\n except (NoSectionError, NoOptionError):\n pass\n\n # not found, sorry\n return None\n\n def read(self, config_file):\n # Read an other config file\n self.config.read(config_file)\n\n\n#: System wide instance :py:class:`ConfigurationManager` instance\nconfig = ConfigurationManager()\n","sub_path":"louisebot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"152959225","text":"from __future__ import print_function\nimport itertools\nimport time\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom layer_definition import *\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.ndimage import rotate\n\n# Don't pre-allocate all GPU memory; allocate only as-needed\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\n\n# Confusion matrix plot function\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix (rotMNIST)',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig(\"confusion_matrix1.png\", dpi=300)\n\n\n# --------------------------------------------------------------------------------------------------------------------\n# Load ROT validation set\ndata = np.load('MNIST_RR_test.npz')\nx_test, y_test = data['x'], tf.keras.utils.to_categorical(data['y'], 10)\n\n# 
--------------------------------------------------------------------------------------------------------------------\nx_test = np.reshape(x_test, (-1, 28, 28, 1))\nx_test = x_test / 255.0\n# --------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------------------------------------------------------------------------------------\n# Load model\nmodel = tf.keras.models.load_model('RRT_REDNN_16.h5', custom_objects={'Rig2DConv': Rig2DConv,\n 'Periodic_Pad': Periodic_Pad})\n# --------------------------------------------------------------------------------------------------------------------\n\n# Print summary and learned weights\nprint(model.summary())\nweights = model.layers[1].get_weights()\nprint(f'Trained parameters: l={weights[0]}, alpha={weights[1]}, beta={weights[2]}')\n\n# Print accuracy and loss\nt1 = time.time()\nscore = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint('Test time:', time.time()-t1)\n\n# --------------------------------------------------------------------------------------------------------------------\n# Plot confusion matrix\nY_pred = model.predict(x_test)\nY_pred_classes = np.argmax(Y_pred, axis = 1)\nY_true = np.argmax(y_test, axis = 1)\n\nconfusion_mtx = confusion_matrix(Y_true, Y_pred_classes)\nplot_confusion_matrix(confusion_mtx, classes = range(10), normalize=False)\n# --------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------------------------------------------------------------------------------------\n# Print table P and angular indexes\nfor _ in range(8, -8, -1):\n input_image = x_test[1051]\n input_image = rotate(input_image, 22.50 * _, reshape=False)\n input_image = np.reshape(input_image, (1, 28, 28, 1))\n\n # Input image plot\n # plt.imshow(input_image[0, :, :, 0])\n # plt.show()\n\n layer_name = 'Output_table'\n int_output = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n\n intout = int_output.predict(input_image)\n out_row = np.unravel_index(intout.argmax(), intout.shape)\n\n # Print all the table\n # print(np.round(intout, 2))\n\n # Print the predicted index row\n print(f\"Predicted index row: {out_row[1]}\")\n# --------------------------------------------------------------------------------------------------------------------\n","sub_path":"load_model.py","file_name":"load_model.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"563531413","text":"# ==============================================================================\n# DAMAGES TABLE\n# ==============================================================================\n# STAB = KNIFES / SPEARS / PICKAXES / ARROW / ICEPICK / GRENADES(?)\n# SLASH = SALVAGE AXE / HATCHETS / BARRICADES / FLOOR SPIKES\n# BLUNT = TORCH / ROCK / SALVAGE HAMMER\n# BITE = ANIMALS / SNAP TRAP\n# BULLET = GUNS / BOW\n# EXPLOSION = C4 / ROCKET\n# ==============================================================================\n# METABOLISM\n# ==============================================================================\n# FALL | DROWNED | POISON | COLD | HEAT | RADIATION LEVEL/POISON\n# HUNGER | THIRST | BLEEDING |\n# ==============================================================================\n# ANIMALS\n# 
==============================================================================\n# HORSE | WOLF | BEAR | BOAR | STAG | CHICKEN\n# ==============================================================================\n\nimport re\nimport BasePlayer\nimport StringPool\nfrom UnityEngine import Random\nfrom UnityEngine import Vector3\n\n# GLOBAL VARIABLES\nDEV = False\nLATEST_CFG = 4.2\nLINE = '-' * 50\n\nclass deathnotes:\n\n # ==========================================================================\n # <>> PLUGIN\n # ==========================================================================\n def __init__(self):\n\n self.Title = 'Death Notes'\n self.Author = 'SkinN'\n self.Description = 'Broadcasts players/animals deaths to chat'\n self.Version = V(2, 5, 4)\n self.ResourceId = 819\n\n # ==========================================================================\n # <>> CONFIGURATION\n # ==========================================================================\n def LoadDefaultConfig(self):\n\n self.Config = {\n 'CONFIG_VERSION': LATEST_CFG,\n 'SETTINGS': {\n 'PREFIX': self.Title.upper(),\n 'BROADCAST TO CONSOLE': True,\n 'SHOW SUICIDES': True,\n 'SHOW METABOLISM DEATHS': True,\n 'SHOW EXPLOSION DEATHS': True,\n 'SHOW TRAP DEATHS': True,\n 'SHOW ANIMAL DEATHS': False,\n 'SHOW PLAYER KILLS': True,\n 'SHOW ANIMAL KILLS': True,\n 'MESSAGE IN RADIUS': False,\n 'MESSAGES RADIUS': 300.0\n },\n 'COLORS': {\n 'MESSAGE': '#FFFFFF',\n 'PREFIX': '#FF0000',\n 'ANIMAL': '#00FF00',\n 'BODYPART': '#00FF00',\n 'WEAPON': '#00FF00',\n 'VICTIM': '#00FF00',\n 'ATTACKER': '#00FF00',\n 'DISTANCE': '#00FF00'\n },\n 'MESSAGES': {\n 'RADIATION': ('{victim} died from radiation.', '{victim} did not know that radiation kills.'),\n 'HUNGER': ('{victim} starved to death.', '{victim} should learn how to hunt so he finds something to eat.'),\n 'THIRST': ('{victim} died of thirst.', '{victim} died dehydrated.'),\n 'DROWNED': ('{victim} drowned.', '{victim} thought he could swim.'),\n 'COLD': ('{victim} froze to death.', '{victim} is an ice cold dead man.'),\n 'HEAT': ('{victim} burned to death.',),\n 'FALL': ('{victim} died from a big fall.', '{victim} fell to his death.'),\n 'BLEEDING': ('{victim} bled to death.', '{victim} emptied in blood.'),\n 'EXPLOSION': ('{victim} died from a {weapon} explosion.', 'A {weapon} blew {victim} up.'),\n 'POISON': ('{victim} died poisoned.',),\n 'SUICIDE': ('{victim} committed suicide.', '{victim} has put an end to his life.'),\n 'GENERIC': ('{victim} died.', '{victim} has been killed by the gods.'),\n 'TRAP': ('{victim} died stuck on a {attacker}.',),\n 'STAB': ('{attacker} stabbed {victim} to death with a {weapon} and hit the {bodypart}.',),\n 'STAB SLEEP': ('{attacker} stabbed {victim}, while he slept.',),\n 'SLASH': ('{attacker} sliced {victim} into pieces with a {weapon} and hit the {bodypart}.',),\n 'SLASH SLEEP': ('{attacker} stabbed {victim}, while he slept.',),\n 'BLUNT': ('{attacker} killed {victim} with a {weapon} and hit the {bodypart}.', '{attacker} killed {victim} with a {weapon} causing a blunt trauma.'),\n 'BLUNT SLEEP': ('{attacker} killed {victim} with a {weapon}, while he slept.',),\n 'BULLET': ('{attacker} killed {victim} with a {weapon}, hitting the {bodypart} from {distance}m.', '{attacker} made {victim} eat some bullets with a {weapon} from {distance}m.'),\n 'BULLET SLEEP': ('{attacker} killed {victim}, while sleeping. 
(In the {bodypart} with a {weapon}, from {distance}m)', '{attacker} killed {victim} with a {weapon}, while sleeping.'),\n 'ARROW': ('{attacker} killed {victim} with an arrow {distance}m, hitting the {bodypart}.',),\n 'ARROW SLEEP': ('{attacker} killed {victim} with an arrow from {distance}m, while he slept.',),\n 'BITE': ('A {attacker} killed {victim}.',),\n 'BITE SLEEP': ('A {attacker} killed {victim}, while he slept.',),\n 'ANIMAL DEATH': ('{attacker} killed a {victim} with a {weapon} from {distance}m.',)\n },\n 'BODYPARTS': {\n 'SPINE': 'Spine',\n 'LIP': 'Lips',\n 'JAW': 'Jaw',\n 'NECK': 'Neck',\n 'TAIL': 'Tail',\n 'HIP': 'Hip',\n 'FOOT': 'Feet',\n 'PELVIS': 'Pelvis',\n 'LEG': 'Leg',\n 'HEAD': 'Head',\n 'ARM': 'Arm',\n 'JOINT': 'Joint',\n 'PENIS': 'Penis',\n 'WING': 'Wing',\n 'EYE': 'Eye',\n 'EAR': 'Ear',\n 'STOMACH': 'Stomach',\n 'MANE': 'Mane',\n 'CLAVICLE': 'Clavicle',\n 'FINGERS': 'Fingers',\n 'THIGH': 'Thigh',\n 'GROUP': 'Group',\n 'SHOULDER': 'Shoulder',\n 'CALF': 'Calf',\n 'TOE': 'Toe',\n 'HAND': 'Hand',\n 'KNEE': 'Knee',\n 'FOREARM': 'Forearm',\n 'UPPERARM': 'Upperarm',\n 'TONGUE': 'Tongue',\n 'SHIN': 'Shin',\n 'ULNA': 'Ulna',\n 'ROOTBONE': 'Chicken Rootbone',\n 'BROW': 'Brow'\n },\n 'WEAPONS': {\n 'WOODEN_SPEAR.WEAPON': 'Wooden Spear',\n 'STONE_SPEAR.WEAPON': 'Stone Spear',\n 'STONE_PICKAXE.WEAPON': 'Stone Pickaxe',\n 'HUNTING.WEAPON': 'Hunting Bow',\n 'AK47U.WEAPON': 'Assault Rifle',\n 'ROCK.WEAPON': 'Rock',\n 'HATCHET.WEAPON': 'Hatchet',\n 'PICKAXE.WEAPON': 'Pickaxe',\n 'BOLTRIFLE.WEAPON': 'Bolt Action Rifle',\n 'SALVAGED_HAMMER.WEAPON': 'Salvaged Hammer',\n 'SAWNOFFSHOTGUN.WEAPON': 'Pump Shotgun',\n 'SALVAGED_AXE.WEAPON': 'Salvaged Axe',\n 'BONEKNIFE.WEAPON': 'Bone Knife',\n 'WATERPIPE.WEAPON': 'Waterpipe Shotgun',\n 'HATCHET_STONE.WEAPON': 'Stone Hatchet',\n 'EOKA.WEAPON': 'EOKA Pistol',\n 'SALVAGED_ICEPICK.WEAPON': 'Salvaged Icepick',\n 'TORCH.WEAPON': 'Torch',\n 'THOMPSON.WEAPON': 'Thompson',\n 'REVOLVER.WEAPON': 'Revolver',\n 'ROCKET_BASIC': 'Rocket',\n 'GRENADE.F1.DEPLOYED': 'F1 Grenade',\n 'GRENADE.BEANCAN.DEPLOYED': 'Beancan Grenade',\n 'TIMED.EXPLOSIVE.DEPLOYED': 'Timed Explosive Charge',\n 'SMG.WEAPON': 'Custom SMG'\n },\n 'TRAPS': {\n 'BARRICADE.METAL': 'Metal Barricade',\n 'BARRICADE.WOOD': 'Wooden Barricade',\n 'BARRICADE.WOODWIRE': 'Barbed Wooden Barricade',\n 'FLOOR_SPIKES': 'Wooden Floor Spikes',\n 'BEARTRAP': 'Snap Trap'\n },\n 'ANIMALS': {\n 'STAG': 'Stag',\n 'CHICKEN': 'Chicken',\n 'WOLF': 'Wolf',\n 'BEAR': 'Bear',\n 'BOAR': 'Boar',\n 'HORSE': 'Horse'\n }\n }\n\n self.console('Loading default configuration file', True)\n\n # --------------------------------------------------------------------------\n def UpdateConfig(self):\n\n # IS OLDER CONFIG TOO OLD?\n if self.Config['CONFIG_VERSION'] <= LATEST_CFG - 0.2 or DEV:\n\n self.console('Current configuration file is two or more versions older than the latest (Current: v%s / Latest: v%s)' % (self.Config['CONFIG_VERSION'], LATEST_CFG), True)\n\n # RESET CONFIGURATION\n self.Config.clear()\n\n # LOAD DEFAULTS CONFIGURATION\n self.LoadDefaultConfig()\n\n else:\n\n self.console('Applying new changes to the configuration file (Version: %s)' % LATEST_CFG, True)\n\n # NEW VERSION VALUE\n self.Config['CONFIG_VERSION'] = LATEST_CFG\n\n # NEW CHANGES\n self.Config['MESSAGES']['GENERIC'] = ('{victim} died.', '{victim} has been killed by the gods.')\n\n # SAVE CHANGES\n self.SaveConfig()\n\n # ==========================================================================\n # <>> PLUGIN SPECIFIC\n # 
==========================================================================\n    def Init(self):\n\n        if self.Config['CONFIG_VERSION'] < LATEST_CFG or DEV:\n            self.UpdateConfig()\n\n        global MSG, PLUGIN, COLOR, PARTS, WEAPONS, TRAPS, ANIMALS\n        MSG = self.Config['MESSAGES']\n        TRAPS = self.Config['TRAPS']\n        COLOR = self.Config['COLORS']\n        PARTS = self.Config['BODYPARTS']\n        PLUGIN = self.Config['SETTINGS']\n        WEAPONS = self.Config['WEAPONS']\n        ANIMALS = self.Config['ANIMALS']\n\n        self.prefix = '<color=%s>%s</color>' % (COLOR['PREFIX'], PLUGIN['PREFIX']) if PLUGIN['PREFIX'] else None\n        self.title = '%s' % self.Title.upper()\n        self.metabolism = ('DROWNED', 'HEAT', 'COLD', 'THIRST', 'POISON', 'HUNGER', 'RADIATION', 'BLEEDING', 'FALL', 'GENERIC')\n\n        command.AddChatCommand('deathnotes', self.Plugin, 'plugin_CMD')\n\n    # ==========================================================================\n    # <>> MESSAGE FUNCTIONS\n    # ==========================================================================\n    def console(self, text, force=False):\n\n        if self.Config['SETTINGS']['BROADCAST TO CONSOLE'] or force:\n            print('[%s v%s] :: %s' % (self.Title, str(self.Version), text))\n\n    # --------------------------------------------------------------------------\n    def debug(self, text):\n\n        if DEV:\n            self.console(text)\n\n    # --------------------------------------------------------------------------\n    def say(self, text, color='white', userid=0):\n\n        if self.prefix:\n            rust.BroadcastChat('%s : <color=%s>%s</color>' % (self.prefix, color, text), None, str(userid))\n        else:\n            rust.BroadcastChat('<color=%s>%s</color>' % (color, text), None, str(userid))\n\n    # --------------------------------------------------------------------------\n    def tell(self, player, text, color='white', userid=0, force=True):\n\n        if self.prefix and force:\n            rust.SendChatMessage(player, '%s : <color=%s>%s</color>' % (self.prefix, color, text), None, str(userid))\n        else:\n            rust.SendChatMessage(player, '<color=%s>%s</color>' % (color, text), None, str(userid))\n\n    # --------------------------------------------------------------------------\n    def say_filter(self, text, raw, vpos, attacker):\n\n        color = COLOR['MESSAGE']\n        if PLUGIN['MESSAGE IN RADIUS']:\n            for player in BasePlayer.activePlayerList:\n                if self.distance(player.transform.position, vpos) <= float(PLUGIN['MESSAGES RADIUS']):\n                    self.tell(player, text, color)\n                elif attacker and player == attacker:\n                    self.tell(player, text, color)\n        else:\n            self.say(text, color)\n        if PLUGIN['BROADCAST TO CONSOLE']:\n            self.console(raw)\n\n    # ==========================================================================\n    # <>> MAIN HOOKS\n    # ==========================================================================\n    def OnEntityDeath(self, vic, hitinfo):\n\n        # IS ENTITY NOT A CORPSE?\n        if 'corpse' not in str(vic):\n\n            # DEATH INFOS\n            clr = {}\n            msg = None\n            dmg = str(vic.lastDamage).upper()\n            vps = vic.transform.position\n            att = hitinfo.Initiator if hitinfo else None\n\n            raw = {\n                'bodypart': self.bodypart(hitinfo.HitBone) if hitinfo else 'None',\n                'weapon': self.weapon(hitinfo.Weapon) if hitinfo else 'None',\n                'distance': '%.2f' % self.distance(vps, att.transform.position) if att else 'None'\n            }\n\n            # ATTACKER\n            if att:\n                if att.ToPlayer():\n                    raw['attacker'] = att.displayName\n                else:\n                    raw['attacker'] = str(att.LookupShortPrefabName()).upper()\n            else:\n                raw['attacker'] = 'None'\n\n            if vic:\n\n                # IS VICTIM A PLAYER OR NPC PLAYER?\n                if vic.ToPlayer():\n\n                    raw['victim'] = vic.displayName\n\n                    # IS VICTIM SLEEPING?\n                    sleep = vic.IsSleeping()\n\n                    # IS DEATH SUICIDE OR METABOLISM TYPE?\n                    if (dmg == 'SUICIDE' and PLUGIN['SHOW SUICIDES']) or (dmg in self.metabolism and PLUGIN['SHOW METABOLISM DEATHS']):\n\n                        msg = dmg\n\n                    # IS ATTACKER A PLAYER?\n                    elif att and att.ToPlayer() and dmg in ('SLASH', 'BLUNT', 'STAB', 'BULLET') and PLUGIN['SHOW PLAYER KILLS']:\n\n                        if 'hunting' in str(hitinfo.Weapon):\n                            msg = 'ARROW SLEEP' if sleep else 'ARROW'\n                        else:\n                            msg = '%s SLEEP' % dmg if sleep else dmg\n\n                    # IS ATTACKER AN EXPLOSIVE? (?)\n                    elif (dmg == 'EXPLOSION' or raw['attacker'].startswith('GRENADE')) and PLUGIN['SHOW EXPLOSION DEATHS']:\n\n                        raw['weapon'] = WEAPONS[raw['attacker']] if raw['attacker'] in WEAPONS else raw['attacker']\n                        msg = 'EXPLOSION'\n\n                    # IS ATTACKER A TRAP?\n                    elif (dmg in ('SLASH', 'STAB') or 'beartrap' in str(att)) and PLUGIN['SHOW TRAP DEATHS']:\n\n                        raw['attacker'] = TRAPS[raw['attacker']] if raw['attacker'] in TRAPS else raw['attacker']\n                        msg = 'TRAP'\n\n                    # IS ATTACKER AN ANIMAL?\n                    elif dmg == 'BITE' and PLUGIN['SHOW ANIMAL KILLS']:\n\n                        raw['attacker'] = ANIMALS[raw['attacker']] if raw['attacker'] in ANIMALS else raw['attacker']\n                        msg = 'BITE SLEEP' if sleep else dmg\n\n                # OTHERWISE IS ANIMAL (NPC?)\n                elif 'animals' in str(vic) and att and att.ToPlayer() and PLUGIN['SHOW ANIMAL DEATHS']:\n\n                    animal = str(vic.LookupShortPrefabName()).upper()\n                    raw['victim'] = ANIMALS[animal] if animal in ANIMALS else animal\n                    msg = 'ANIMAL DEATH'\n\n                # DEBUG REPORT\n                #self.debug(LINE)\n                #self.debug(' # REPORT')\n                #self.debug(LINE)\n                #self.debug('- DAMAGE : %s' % dmg)\n                #self.debug('- VICTIM : %s ( %s )' % (raw['victim'], vic))\n                #self.debug('- ATTACKER : %s ( %s )' % (raw['attacker'], att))\n                #self.debug('- WEAPON : %s' % raw['weapon'])\n                #self.debug('- BODY PART : %s' % raw['bodypart'])\n                #self.debug('- DISTANCE : %s' % raw['distance'])\n                #self.debug(LINE)\n\n                if msg:\n\n                    # MESSAGE STRING\n                    msg = MSG[msg]\n\n                    if isinstance(msg, tuple):\n                        msg = msg[Random.Range(0, len(msg))]\n\n                    if msg:\n\n                        # PLACE NAMES COLORS\n                        for n in raw:\n                            clr[n] = '<color=%s>%s</color>' % (COLOR[n.upper()], raw[n])\n\n                        # FILTER MESSAGE\n                        try:\n                            self.say_filter(msg.format(**clr), msg.format(**raw), vps, att)\n                        except:\n                            self.console('# NAME FORMAT ERROR')\n                            self.console(LINE)\n                            self.console('Unrecognized name format found in message:')\n                            self.console('\\'%s\\'' % msg)\n                            self.console(LINE)\n                            self.console('You may only use these name formats in messages:')\n                            self.console('{victim}, {attacker}, {weapon}, {bodypart}, {distance}')\n                            self.console(LINE)\n\n                #self.debug(LINE)\n\n    # ==========================================================================\n    # <>> SIDE FUNCTIONS\n    # ==========================================================================\n    def distance(self, p1, p2):\n\n        return Vector3.Distance(p1, p2)\n\n    # --------------------------------------------------------------------------\n    def bodypart(self, part):\n\n        if part:\n            part = StringPool.Get(part).upper()\n            for p in PARTS:\n                part = p if p in part else part\n            return PARTS[part] if part in PARTS else part\n        return 'None'\n\n    # --------------------------------------------------------------------------\n    def weapon(self, weapon):\n\n        if weapon:\n            x = str(weapon.LookupShortPrefabName()).upper()\n            return WEAPONS[x] if x in WEAPONS else x\n        return 'None'\n\n    # ==========================================================================\n    # <>> COMMANDS\n    # ==========================================================================\n    def plugin_CMD(self, player, cmd, args):\n\n        self.tell(player, LINE, force=False)\n        self.tell(player, '%s v%s by SkinN' % (self.title, self.Version), force=False)\n        
self.tell(player, self.Description, 'lime', force=False)\n self.tell(player, '| RESOURSE ID: %s | CONFIG: v%s |' % (self.ResourceId, self.Config['CONFIG_VERSION']), force=False)\n self.tell(player, LINE, force=False)\n self.tell(player, '<< Click the icon to contact me.', userid='76561197999302614', force=False)\n\n# ==============================================================================","sub_path":"Plugins/deathnotes.py","file_name":"deathnotes.py","file_ext":"py","file_size_in_byte":19210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163136635","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef ab(p):\n return render(p,\"countwords.html\");\ndef about(p):\n return render(p,\"about.html\")\ndef bc(p):\n mess=p.GET['message'];\n a=mess.split();\n print(a)\n le=len(a)\n wordscount={}\n for word in a:\n if word in wordscount:\n wordscount[word]+=1;\n else:\n wordscount[word]=1;\n\n\n return render(p,\"count.html\",{'msg':mess, 'length':le, 'abc':wordscount});\n\ndef nav(request):\n d={'vamsi':100,'naveen':200 }\n return render(request,\"url.html\", {'avatar':d.values()});\n","sub_path":"naveen/n.py","file_name":"n.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"608944261","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright © 2016-2017 Red Hat, Inc.\n# Copyright © 2014-2015 eNovance\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Timeseries cross-aggregation.\"\"\"\nimport collections\n\n\nimport daiquiri\nimport iso8601\nimport pandas\nimport six\n\nfrom gnocchi import storage as gnocchi_storage\n\n\nLOG = daiquiri.getLogger(__name__)\n\n\nclass UnAggregableTimeseries(Exception):\n \"\"\"Error raised when timeseries cannot be aggregated.\"\"\"\n def __init__(self, reason):\n self.reason = reason\n super(UnAggregableTimeseries, self).__init__(reason)\n\n\nclass MetricUnaggregatable(Exception):\n \"\"\"Error raised when metrics can't be aggregated.\"\"\"\n\n def __init__(self, metrics, reason):\n self.metrics = metrics\n self.reason = reason\n super(MetricUnaggregatable, self).__init__(\n \"Metrics %s can't be aggregated: %s\"\n % (\", \".join((str(m.id) for m in metrics)), reason))\n\n\ndef get_cross_metric_measures(storage, metrics, from_timestamp=None,\n to_timestamp=None, aggregation='mean',\n reaggregation=None,\n granularity=None, needed_overlap=100.0,\n fill=None, transform=None):\n \"\"\"Get aggregated measures of multiple entities.\n\n :param storage: The storage driver.\n :param metrics: The metrics measured to aggregate.\n :param from timestamp: The timestamp to get the measure from.\n :param to timestamp: The timestamp to get the measure to.\n :param granularity: The granularity to retrieve.\n :param aggregation: The type of aggregation to retrieve.\n :param reaggregation: The type of aggregation to compute\n on the retrieved measures.\n :param fill: The value to use to fill in missing data in series.\n :param transform: List of transformation to apply to the series\n \"\"\"\n for metric in metrics:\n if aggregation not in metric.archive_policy.aggregation_methods:\n raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation)\n if granularity is not None:\n for d in metric.archive_policy.definition:\n if d.granularity == granularity:\n break\n else:\n raise gnocchi_storage.GranularityDoesNotExist(\n metric, granularity)\n\n if reaggregation is None:\n reaggregation = aggregation\n\n if granularity is None:\n granularities = (\n definition.granularity\n for metric in metrics\n for definition in metric.archive_policy.definition\n )\n granularities_in_common = [\n g\n for g, occurrence in six.iteritems(\n collections.Counter(granularities))\n if occurrence == len(metrics)\n ]\n\n if not granularities_in_common:\n raise MetricUnaggregatable(\n metrics, 'No granularity match')\n else:\n granularities_in_common = [granularity]\n\n tss = storage._map_in_thread(storage._get_measures_timeserie,\n [(metric, aggregation, g,\n from_timestamp, to_timestamp)\n for metric in metrics\n for g in granularities_in_common])\n\n if transform is not None:\n tss = list(map(lambda ts: ts.transform(transform), tss))\n\n try:\n return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v)\n for timestamp, r, v\n in aggregated(tss, reaggregation, from_timestamp, to_timestamp,\n needed_overlap, fill)]\n except UnAggregableTimeseries as e:\n raise MetricUnaggregatable(metrics, e.reason)\n\n\ndef aggregated(timeseries, aggregation, from_timestamp=None,\n to_timestamp=None, needed_percent_of_overlap=100.0,\n fill=None):\n index = ['timestamp', 'granularity']\n columns = ['timestamp', 'granularity', 'value']\n dataframes = []\n\n if not timeseries:\n return []\n\n for timeserie in timeseries:\n timeserie_raw = list(timeserie.fetch(from_timestamp, to_timestamp))\n\n if timeserie_raw:\n dataframe = 
pandas.DataFrame(timeserie_raw, columns=columns)\n dataframe = dataframe.set_index(index)\n dataframes.append(dataframe)\n\n if not dataframes:\n return []\n\n number_of_distinct_datasource = len(timeseries) / len(\n set(ts.sampling for ts in timeseries)\n )\n\n left_boundary_ts = None\n right_boundary_ts = None\n if fill is not None:\n fill_df = pandas.concat(dataframes, axis=1)\n if fill != 'null':\n fill_df = fill_df.fillna(fill)\n single_df = pandas.concat([series for __, series in\n fill_df.iteritems()]).to_frame()\n grouped = single_df.groupby(level=index)\n else:\n grouped = pandas.concat(dataframes).groupby(level=index)\n maybe_next_timestamp_is_left_boundary = False\n\n left_holes = 0\n right_holes = 0\n holes = 0\n for (timestamp, __), group in grouped:\n if group.count()['value'] != number_of_distinct_datasource:\n maybe_next_timestamp_is_left_boundary = True\n if left_boundary_ts is not None:\n right_holes += 1\n else:\n left_holes += 1\n elif maybe_next_timestamp_is_left_boundary:\n left_boundary_ts = timestamp\n maybe_next_timestamp_is_left_boundary = False\n else:\n right_boundary_ts = timestamp\n holes += right_holes\n right_holes = 0\n\n if to_timestamp is not None:\n holes += left_holes\n if from_timestamp is not None:\n holes += right_holes\n\n if to_timestamp is not None or from_timestamp is not None:\n maximum = len(grouped)\n percent_of_overlap = (float(maximum - holes) * 100.0 /\n float(maximum))\n if percent_of_overlap < needed_percent_of_overlap:\n raise UnAggregableTimeseries(\n 'Less than %f%% of datapoints overlap in this '\n 'timespan (%.2f%%)' % (needed_percent_of_overlap,\n percent_of_overlap))\n if (needed_percent_of_overlap > 0 and\n (right_boundary_ts == left_boundary_ts or\n (right_boundary_ts is None\n and maybe_next_timestamp_is_left_boundary))):\n LOG.debug(\"We didn't find points that overlap in those \"\n \"timeseries. 
\"\n \"right_boundary_ts=%(right_boundary_ts)s, \"\n \"left_boundary_ts=%(left_boundary_ts)s, \"\n \"groups=%(groups)s\", {\n 'right_boundary_ts': right_boundary_ts,\n 'left_boundary_ts': left_boundary_ts,\n 'groups': list(grouped)\n })\n raise UnAggregableTimeseries('No overlap')\n\n # NOTE(sileht): this call the aggregation method on already\n # aggregated values, for some kind of aggregation this can\n # result can looks weird, but this is the best we can do\n # because we don't have anymore the raw datapoints in those case.\n # FIXME(sileht): so should we bailout is case of stddev, percentile\n # and median?\n agg_timeserie = getattr(grouped, aggregation)()\n agg_timeserie = agg_timeserie.dropna().reset_index()\n\n if from_timestamp is None and left_boundary_ts:\n agg_timeserie = agg_timeserie[\n agg_timeserie['timestamp'] >= left_boundary_ts]\n if to_timestamp is None and right_boundary_ts:\n agg_timeserie = agg_timeserie[\n agg_timeserie['timestamp'] <= right_boundary_ts]\n\n points = agg_timeserie.sort_values(by=['granularity', 'timestamp'],\n ascending=[0, 1])\n return six.moves.zip(points.timestamp, points.granularity,\n points.value)\n","sub_path":"gnocchi/rest/cross_metric.py","file_name":"cross_metric.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486815699","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\n\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\nbatchsize = 500\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\ntrainloader = DataLoader(trainset, batch_size=batchsize,\n shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\ntestloader = DataLoader(testset, batch_size=batchsize,\n shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n self.conv1 = nn.Conv2d(3, 10, 5) # (10,28,28)\n self.conv2 = nn.Conv2d(10, 20, 5) # (20,24,24)\n\n self.pool = nn.MaxPool2d(2, 2) # (20,12,12)\n\n self.conv3 = nn.Conv2d(20, 29, 3) # (29,10,10)\n\n # myconv1*1 = (14,10,10) [1x1conv on channels: (0,1,2), (0,3,4), (0,5,6), (0,7,8)]\n\n self.fc1 = nn.Linear(14*10*10, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n # x = 60 (20*3) --> 0,1,...59 -> 0,4,7 -> 1*1 -> 1 output\n def myconv1x1(self, x): # (9,10,10)\n x_in = torch.zeros((x.shape[0], 3, x.shape[2], x.shape[3])) # (3,10,10)\n x_out = torch.zeros(x.shape[0], int((x.shape[1]-1)/2), x.shape[2], x.shape[3]) # (4,10,10)\n x_out = x_out.to(device)\n for i in range(x_out.shape[1]): # 4\n x_in[:, 0, :, :] = x[:, 0, :, :]\n x_in[:, 1, :, :] = x[:, (i*2)+1, :, :]\n x_in[:, 2, :, :] = x[:, (i*2)+2, :, :]\n x_out[:, i, :, :] = torch.squeeze(nn.Conv2d(3, 1, kernel_size=1)(x_in))\n return x_out\n\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.myconv1x1(x)\n x = x.view(-1, 14*10*10)\n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n return x\n\n\ndevice = torch.device(\"cuda:0\" 
if torch.cuda.is_available() else \"cpu\")\n# Assuming that we are on a CUDA machine, this should print a CUDA device:\nprint(device)\n\nnet = Net()\nnet.to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\nfor epoch in range(500): # loop over the dataset multiple times\n # running_loss = 0.0\n print(\"Epoch\", epoch)\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data[0].to(device), data[1].to(device)\n torch.autograd.set_detect_anomaly(True)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # # print statistics\n # running_loss += loss.item()\n # if i % 2000 == 0: # print every 2000 mini-batches\n # print('[%d, %5d] loss: %.3f' %\n # (epoch, i, running_loss / 2000))\n # running_loss = 0.0\n","sub_path":"perm_conv_pytorch_discussion.py","file_name":"perm_conv_pytorch_discussion.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356916790","text":"from django.apps import AppConfig\nfrom django.conf import settings\n\n\ndef ensure_default_language():\n from .models import Language\n try:\n Language.objects.get(id=1)\n except Exception:\n try:\n Language.objects.create(name=\"English\", local_name=\"English\", iso_code=\"eng\")\n except Exception:\n print(\"Failed to create default language\")\n\n\ndef collect_public_state(state, global_constants, context_dict):\n from .models import Language, Country, TranslationKey, TranslationEntry\n\n state.add(Language.objects.all())\n state.add(TranslationEntry.objects.all())\n state.add(TranslationKey.objects.all())\n state.add(Country.objects.all())\n\n\nclass LocalizationAppConfig(AppConfig):\n name = \"establishment.localization\"\n\n def ready(self):\n ensure_default_language()\n settings.PUBLIC_STATE_COLLECTORS.append(collect_public_state)\n","sub_path":"localization/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"598345392","text":"import calendar\nimport datetime\nimport re\n\nfrom dateutil import parser\nfrom dateutil.relativedelta import relativedelta\nfrom django.views.generic.base import ContextMixin\n\n\ndef to_float(x):\n x = re.sub('[$,]', '', x)\n return float(x)\n\n\ndef to_date(x):\n if isinstance(x, str):\n x = parser.parse(x)\n if isinstance(x, datetime.datetime):\n x = x.date()\n return x\n\n\nclass LoadRequestDateMixin(ContextMixin):\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['dates'] = {\n 'date': self.date,\n 'start': self.start_date,\n 'end': self.end_date,\n 'next': self.date + relativedelta(months=1),\n 'prev': self.date - relativedelta(months=1),\n 'current': self.date.strftime('%Y-%m') == datetime.date.today().strftime('%Y-%m')\n }\n return context\n\n def load_request_date(self):\n date = self.request.GET.get('date', datetime.date.today())\n self.date = to_date(date)\n self.year = self.date.year\n self.month = self.date.month\n _, days = calendar.monthrange(self.year, self.month)\n self.start_date = self.date.replace(day=1)\n self.end_date = self.date.replace(day=days)\n if self.date < datetime.date.today():\n self.date = self.end_date\n self.next_date = self.date + 
relativedelta(months=1)\n        self.prev_date = self.date - relativedelta(months=1)\n","sub_path":"nerdbudget/nerdbudget/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"321668848","text":"import cv2\nimport DataPreProcessing\nimport numpy as np\nfrom scipy import misc\nimport matplotlib.pyplot as plt\nfrom skimage.transform import resize\n\nroi_gray = 0\nstopPresent = False\n\n#STOP Sign Detection\ndef stopDetection(img):\n    global roi_gray\n    global stopPresent\n    stopCascade = cv2.CascadeClassifier('stop_class.xml')\n    stop = stopCascade.detectMultiScale(img,\n                                        scaleFactor=1.1,\n                                        minNeighbors=2\n                                        )\n    for (x, y, w, h) in stop:\n        stopPresent = True\n        cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0),2)\n    print(\"STOP Detection Complete\")\n    print(stopPresent)\n\n\n#recv_img = misc.imread('/Volumes/TRANSCEND/RPi-Self-Driving-Car/Testing/STOP-testing/image.png')\n\n#img = np.array(recv_img, dtype='uint8')\nimg = cv2.imread('/Volumes/TRANSCEND/RPi-Self-Driving-Car/Testing/Traffic-Light-Testing/stopactual.png')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n#image_resized = resize(gray, (18,22), mode='reflect')\nstopDetection(gray)\nplt.imshow(gray, cmap='Greys')\nplt.show()\n\n'''\nimport numpy as np\nimport cv2\n\nstopCascade = cv2.CascadeClassifier('stop_class.xml')\n\nimg = cv2.imread('image.png')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nstop = stopCascade.detectMultiScale(gray, 1.3, 5)\n\nfor (x,y,w,h) in stop:\n    cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0),2)\n    roi_gray = gray[y:y+h, x:x+w]\n\ncv2.imshow('img',roi_gray)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n'''\n","sub_path":"Testing/STOP-testing/stopTesting.py","file_name":"stopTesting.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"245451886","text":"# coding:utf-8\nfrom __future__ import print_function\nfrom imp import reload\nimport maya.cmds as cmds\nimport os\nimport sys\nimport re\nimport glob\n# sys.path.append(r\"Y:\\users\\env\\maya\\scripts\\Python\\site-packages\")\n\ndef eulerfilter(attr_list):\n    for attr in attr_list:\n        anim_cv = map(lambda x: x.rstrip('.output'), attr)\n        try:\n            anim_cv = filter(lambda x: cmds.nodeType(x) in [\n                'animCurveTL', 'animCurveTU', 'animCurveTA', 'animCurveTT'], anim_cv)\n            cmds.filterCurve(anim_cv, f='euler')\n        except:\n            continue\n\n\ndef get_reference_file(obj):\n    return cmds.referenceQuery(obj, f=True)\n\n\ndef reference_ma(ma, ns):\n    # cmds.file(ma, reference=True, ns=ns, force=False, pmt=True)\n    cmds.file(ma, i=True, ns=ns, force=True)\n\n\ndef get_scene_ns_list():\n    namespaces = cmds.namespaceInfo(lon=True)\n    _nestedNS = []\n    for ns in namespaces:\n        try:\n            nestedNS = cmds.namespaceInfo(ns, lon=True)\n            if nestedNS != None:\n                _nestedNS += nestedNS\n        except:\n            continue\n    namespaces += _nestedNS\n    try:\n        namespaces.remove('UI')\n        namespaces.remove('shared')\n    except:\n        pass\n    return namespaces\n\n\ndef get_tg_ns_list(scene_ns_list, input_ns_list):\n    tg_ns_list = []\n    for scene_ns in scene_ns_list:\n        for input_ns in input_ns_list:\n            match = re.match(input_ns+'$', scene_ns)\n            print(input_ns, scene_ns, match)\n            if match != None:\n                tg_ns_list.append(scene_ns)\n    return tg_ns_list\n\n\ndef get_rec_sets(set):\n    set_items = cmds.sets(set, q=True)\n    result = []\n    for set_item in set_items:\n        if cmds.objectType(set_item) == 'objectSet':\n            result.extend(
result.extend(get_rec_sets(set_item))\n else:\n result.append(set_item)\n return result\n\n\ndef get_tg_nodes(ns_list, regex_list):\n short_nodes = []\n all_objs = cmds.ls('*:*')\n for ns in ns_list:\n nodes = []\n for regex in regex_list:\n nodes.extend([i for i in all_objs[:] if re.search(\n r\"{}:{}\".format(ns, regex), i) != None])\n nodes = list(set(nodes))\n for node in nodes[:]:\n if cmds.objectType(node) == 'objectSet':\n nodes.extend(get_rec_sets(node))\n for node in nodes:\n short_nodes.append(node.split('|')[-1])\n return short_nodes\n\n\ndef getConstraintAttributes(nodes):\n attrs = []\n for n in nodes:\n const = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='constraint')\n if const is None:\n continue\n for i in range(0, len(const), 2):\n attrs.append(const[i])\n return attrs\n\n\ndef getPairBlendAttributes(nodes):\n attrs = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='pairBlend')\n if pairblend is None:\n continue\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n const = cmds.listConnections(\n pairblend, s=True, d=False, p=False, c=True, t='constraint')\n if const is None:\n continue\n for i in range(0, len(const), 2):\n attrs.append(const[i])\n return attrs\n\n\ndef getMotionPathAttributes(nodes):\n attrs = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='motionPath')\n if pairblend is None:\n continue\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n return attrs\n\n\ndef getAddDoubleLinearAttributes(nodes):\n attrs = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='addDoubleLinear')\n if pairblend is None:\n continue\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n return attrs\n\n\ndef getTransformConnectionAttributes(nodes):\n attrs = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='transform')\n if pairblend is None:\n continue\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n return attrs\n\n\ndef getAnimLayerConnectionAttributes(nodes):\n attrs = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='animLayer')\n if pairblend is None:\n continue\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n return attrs\n\n\ndef getAnimCurveAttributes(nodes):\n attrs = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='animCurveTL')\n if pairblend is not None:\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n continue\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='animCurveTU')\n if pairblend is not None:\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n continue\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='animCurveTA')\n if pairblend is not None:\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n continue\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=True, t='animCurveTT')\n if pairblend is not None:\n for i in range(0, len(pairblend), 2):\n attrs.append(pairblend[i])\n continue\n return attrs\n\n\ndef getNoKeyAttributes(nodes):\n attrs = []\n for n in nodes:\n if '.' in n:\n n = n.split('.')[0]\n gAttrs = cmds.listAttr(n, keyable=True)\n if gAttrs is None:\n continue\n for attr in gAttrs:\n if '.' 
not in attr:\n if cmds.listConnections(n+'.'+attr, s=True, d=False) is None:\n attrs.append(n+'.'+attr)\n return attrs\n\n\ndef getKeyAttributes(nodes):\n attrs = []\n for n in nodes:\n if '.' in n:\n n = n.split('.')[0]\n gAttrs = cmds.listAttr(n, keyable=True)\n if gAttrs is None:\n continue\n for attr in gAttrs:\n if '.' not in attr:\n if cmds.listConnections(n+'.'+attr, s=True, d=False) is None:\n pass\n else:\n attrs.append(n+'.'+attr)\n return attrs\n\n\ndef getNodehasPairBlends(nodes):\n result_nodes = []\n for n in nodes:\n pairblend = cmds.listConnections(\n n, s=True, d=False, p=False, c=False, t='pairBlend')\n if pairblend is not None:\n result_nodes.append(n)\n return result_nodes\n\n\ndef getPairBlend(node):\n pairblends = cmds.listConnections(\n node, s=True, d=False, p=False, c=False, t='pairBlend')\n if pairblends is not None:\n for pairblend in pairblends:\n if \"pairBlend\" in pairblend:\n return pairblend\n return pairblend\n\n\ndef replacePairBlendstoLocator(nodes, sframe, eframe):\n for node in nodes:\n if \"Constraint\" in node:\n continue\n blend_attrs = [\"outTranslateX\", \"outTranslateY\",\n \"outTranslateZ\", \"outRotateX\", \"outRotateY\", \"outRotateZ\"]\n nml_attrs = [\"translateX\", \"translateY\",\n \"translateZ\", \"rotateX\", \"rotateY\", \"rotateZ\"]\n blend = getPairBlend(node)\n loc = cmds.spaceLocator(n=\"tmp\")[0]\n for blend_attr, attr in zip(blend_attrs, nml_attrs):\n cmds.connectAttr(\"{}.{}\".format(blend, blend_attr),\n \"{}.{}\".format(loc, attr))\n cmds.bakeResults(loc, t=(sframe, eframe))\n cmds.delete(blend)\n connect_nodes = cmds.listConnections(node, p=True, s=True)\n for connect_node in connect_nodes:\n if connect_node.split(\".\")[-1] == \"output\":\n try:\n cmds.delete(connect_node.split(\".\")[0])\n except:\n pass\n for attr in nml_attrs:\n cmds.connectAttr(\"{}.{}\".format(loc, attr),\n \"{}.{}\".format(node, attr))\n cmds.bakeResults(node, t=(sframe, eframe))\n cmds.delete(loc)\n\n\ndef unlockAttributes(nodes):\n for node in nodes:\n if cmds.getAttr(node, lock=True):\n try:\n cmds.setAttr(node, lock=False)\n except Exception as e:\n pass\n\n\ndef unmuteAttributes(nodes):\n for node in nodes:\n try:\n cmds.mute(\"NursedesseiShip:root_ctrl\", d=True)\n except Exception as e:\n pass\n\n\ndef export_anim_main(**kwargs):\n if kwargs['debug'] == False:\n TOOLNAME = 'ND_AssetExporter'\n else:\n TOOLNAME = 'ND_AssetExporter_dev'\n sys.path.append(r\"Y:\\tool\\ND_Tools\\DCC\\{}\\pycode\\maya_lib\".format(TOOLNAME))\n import ndPyLibAnimIOExportContain\n reload(ndPyLibAnimIOExportContain)\n\n import pprint\n pprint.pprint(kwargs)\n\n # cacheをハイドしてみる\n top_nodes = cmds.ls(assemblies=True)\n cache_nodes = cmds.ls(type='cacheFile')\n hidden_objs = []\n # hidden_objs.extend(cmds.hide(top_nodes, rh=True))\n hidden_objs.extend(cmds.hide(cache_nodes, rh=True))\n ignore_attrs = []\n if hidden_objs is not None:\n for obj in hidden_objs:\n ignore_attrs.append('{}Shape.visibility'.format(obj.lstrip('|')))\n ignore_attrs.append('{}.visibility'.format(obj.lstrip('|')))\n\n output_files = []\n all_nodes = []\n node_and_attrs = []\n\n frame_handle = kwargs['frame_handle']\n publish_ver_anim_path = kwargs['publish_ver_anim_path']\n\n sframe = cmds.playbackOptions(q=True, min=True) - float(frame_handle)\n eframe = cmds.playbackOptions(q=True, max=True) + float(frame_handle)\n\n if 'frame_range' in kwargs.keys():\n frame_range = kwargs['frame_range']\n else:\n frame_range = [sframe, eframe]\n if 'on_maya' in kwargs.keys():\n frame_range = [sframe, sframe+1]\n\n 
with open(os.path.dirname(os.path.dirname(os.path.dirname(publish_ver_anim_path))) + '/sceneConf.txt', 'w') as f:\n f.write(str(sframe)+'\\n')\n f.write(str(eframe)+'\\n')\n with open(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(publish_ver_anim_path))), \"resolutionConf.txt\"), \"w\") as f:\n f.write(str(cmds.getAttr(\"defaultResolution.width\"))+\"\\n\")\n f.write(str(cmds.getAttr(\"defaultResolution.height\"))+\"\\n\")\n\n input_ns_list = kwargs['namespace'][0].replace(\n ' ', '').rstrip(',').split(',')\n regex_list = [i for i in kwargs['export_item']['anim'].replace(\n ' ', '').split(',') if not '.' in i] # 通常のエクスポート対象\n regex_attr_list = [i for i in kwargs['export_item']\n ['anim'].split(',') if '.' in i] # アトリビュートを直接指定\n\n scene_ns_list = get_scene_ns_list()\n tg_ns_list = get_tg_ns_list(scene_ns_list, input_ns_list)\n\n all_nodes += get_tg_nodes(tg_ns_list, regex_list)\n\n all_nodes = list(set(all_nodes))\n\n character_set = cmds.ls(type='character')\n if len(character_set) != 0:\n cmds.delete(character_set)\n\n for node in all_nodes:\n try:\n cmds.select(node, add=True)\n except Exception as e:\n print(e)\n baseAnimationLayer = cmds.animLayer(q=True, r=True)\n if baseAnimationLayer != None and len(cmds.ls(sl=True)) != 0:\n animLayers = cmds.ls(type='animLayer')\n for al in animLayers:\n cmds.animLayer(al, e=True, sel=False)\n cmds.animLayer(baseAnimationLayer, e=True, sel=True)\n cmds.bakeResults(t=(sframe, eframe), sb=True,\n ral=True, dic=True, pok=True, sm=True)\n\n attrs = getNoKeyAttributes(all_nodes)\n if len(node_and_attrs) != 0:\n attrs.extend(getNoKeyAttributes(node_and_attrs))\n for tg_ns in tg_ns_list:\n for regex_obj_and_attr in regex_attr_list:\n obj_and_attr = tg_ns+':' + regex_obj_and_attr\n if cmds.objExists(obj_and_attr):\n attrs.append(obj_and_attr)\n\n if len(attrs) != 0:\n attrs = list(set(attrs)-set(ignore_attrs))\n cmds.setKeyframe(attrs, t=sframe, insertBlend=True)\n\n attrs += getConstraintAttributes(all_nodes)\n attrs += getMotionPathAttributes(all_nodes)\n attrs += getAddDoubleLinearAttributes(all_nodes)\n attrs += getTransformConnectionAttributes(all_nodes)\n\n attrs += getKeyAttributes(all_nodes)\n attrs += getAnimLayerConnectionAttributes(all_nodes)\n # attrs += getPairBlendAttributes(all_nodes)\n attrs = list(set(attrs)-set(ignore_attrs))\n unlockAttributes(attrs)\n unmuteAttributes(attrs)\n\n '''\n 関連するアトリビュートの追加\n '''\n for node in all_nodes:\n if cmds.listConnections(node, s=True, type=\"constraint\") is not None:\n attrs.extend(\n list(set(cmds.listConnections(node, s=True, type=\"constraint\"))))\n\n if kwargs['scene_timewarp'] == True:\n time_value_set_list = []\n ref_files = []\n ref_attrs = []\n for scene_ns in scene_ns_list:\n top_obj = '{}:root'.format(scene_ns)\n if not cmds.objExists(top_obj):\n continue\n if cmds.referenceQuery(top_obj, inr=True):\n ref_file = get_reference_file(top_obj)\n try:\n reference_ma(ref_file, \"tmp_\"+scene_ns)\n ref_files.append([scene_ns, ref_file])\n\n for obj in cmds.listRelatives(top_obj, ad=True):\n try:\n if cmds.listAttr(obj, keyable=True) != None:\n for attr in cmds.listAttr(obj, keyable=True):\n ref_attrs.append(obj+\".\"+attr)\n except Exception as e:\n print(e)\n\n except Exception as e:\n print(e)\n # continue\n cmds.setAttr(\"time1.enableTimewarp\", 0)\n # store timewarp\n for t in range(int(sframe), int(eframe+1)):\n cmds.currentTime(t)\n warp_time = cmds.getAttr(\"time1.outTime\", time=t)\n for attr in ref_attrs:\n obj = attr.split(\".\")[0]\n try:\n if attr in attrs:\n value = 
cmds.getAttr(attr, time=warp_time)\n                        time_value_set_list.append([t, attr, value])\n                except Exception as e:\n                    print(attr)\n                    print(e)\n        for ref in ref_files:\n            ns = ref[0]\n            ref_file = ref[1]\n            try:\n                cmds.file(ref_file, rr=True)\n            except Exception:\n                pass\n        for ns_obj in cmds.ls(\"tmp_*:*\"):\n            try:\n                cmds.rename(ns_obj, ns_obj.replace(\"tmp_\", \":\"))\n            except Exception:\n                pass\n        # restore timewarp\n        cmds.setAttr(\"time1.enableTimewarp\", 0)\n        current_f = 0\n        for time_list in time_value_set_list:\n            frame = time_list[0]\n            attr = time_list[1]\n            value = time_list[2]\n            if current_f != frame:\n                cmds.currentTime(frame)\n                current_f = frame\n            try:\n                cmds.setAttr(attr, value)\n                cmds.setKeyframe(attr)\n            except Exception as e:\n                print(e)\n    else:\n        attrs = list(set(attrs)-set(ignore_attrs))\n        for obj_and_attr in attrs:\n            if cmds.objExists(obj_and_attr):\n                cmds.select(obj_and_attr, add=True)\n        cmds.select(attrs, add=True)\n        print(sframe, eframe)\n        cmds.bakeResults(at=attrs, t=(sframe, eframe), dic=True)\n    eulerfilter(attrs)\n    cmds.showHidden(hidden_objs)\n    if 'on_maya' in kwargs.keys():\n        return\n\n    for ns in tg_ns_list:\n        pick_nodes = []\n        pick_node_and_attrs = []\n        for node in all_nodes:\n            if ns+':' in node:\n                pick_nodes.append(node)\n        # collect the attribute-style entries for this namespace\n        for node in node_and_attrs:\n            if ns + ':' in node:\n                pick_node_and_attrs.append(node)\n        if len(pick_nodes) != 0 or len(pick_node_and_attrs) != 0:\n            argsdic = {}\n            argsdic['is_filter'] = True\n            argsdic['inPfxInfo'] = ['3', '']\n            argsdic['anim_file_name'] = 'anim_'+ns+'.ma'\n            argsdic['publish_ver_anim_path'] = kwargs['publish_ver_anim_path']\n            argsdic['pick_nodes'] = pick_nodes\n            argsdic['pick_node_and_attrs'] = pick_node_and_attrs\n            argsdic['frame_range'] = frame_range\n            argsdic['scene_timewarp'] = kwargs['scene_timewarp']\n            argsdic['is_check_constraint'] = True\n            argsdic['is_check_anim_curve'] = True\n            ndPyLibAnimIOExportContain.ndPyLibAnimIOExportContain_main(\n                **argsdic)\n    return output_files\n\n\ndef ndPyLibExportAnim_caller(args):\n    export_anim_main(**args)\n    import pprint\n    pprint.pprint(args)\n    print(\"ndPylibExportAnim End\")\n\n\nif __name__ == '__main__':\n    sys.path.append(r\"Y:\\tool\\ND_Tools\\DCC\\ND_AssetExporter_dev\\pycode\\maya\")\n    import ndPyLibExportAnim\n    reload(ndPyLibExportAnim)\n    argsdic = {'namespace': ['PR2022_BG_LO'],\n               'anim_item': 'ctrl_set, root',\n               'export_item': {'abc': 'abc_Root', 'anim': 'animSets, treeSet'},\n               'frame_handle': False,\n               'frame_range': False,\n               'publish_ver_anim_path': 'C:/Users/k_ueda/Desktop/temp/v001/anim',\n               'scene_timewarp': False,\n               'step_value': False,\n               'on_maya': True}\n    # ndPyLibExportAnim.ndPyLibExportAnim_caller(argsdic)\n","sub_path":"pycode/maya_lib/ndPyLibExportAnim.py","file_name":"ndPyLibExportAnim.py","file_ext":"py","file_size_in_byte":18029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537180776","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2005 onwards University of Deusto\n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n#\n# This software consists of contributions made by many individuals,\n# listed below:\n#\n# Author: Jaime Irurzun \n#\n\nfrom voodoo import log\nfrom voodoo.gen.caller_checker import caller_check\nfrom voodoo.log import logged\nfrom voodoo.override import Override\nfrom voodoo.sessions.checker import check_session\nfrom weblab.data import server_type as ServerType\nfrom weblab.data.command import 
Command\nimport weblab.translator.exc as TranslatorErrors\nimport weblab.translator.translator as Translator\nimport weblab.experiment.util as ExperimentUtil\n\n\ncheck_session_params = (\n    TranslatorErrors.InvalidTranslatorSessionIdError,\n    \"Translator Server\"\n    )\n\n\nclass AddsATrippleAAtTheBeginingTranslator(Translator.Translator):\n    \"\"\"This Translator exists only for testing purposes. It's used with two aims:\n    1. To test that ProxyServer really calls the methods in Translator (it acts as a fake logger).\n    2. To have a first example (executed when tests are run) of a Translator that stores info in a session_manager.\n    Since this Translator uses the SessionManager provided by ProxyServer, it cannot be instantiated as a stand-alone WebLab server.\"\"\"\n\n    @Override(Translator.Translator)\n    @logged(log.level.Info)\n    @caller_check(ServerType.Proxy)\n    @check_session(*check_session_params)\n    def do_on_start(self, session):\n        session['log'] = \"on_start \"\n\n    @Override(Translator.Translator)\n    @logged(log.level.Info)\n    @caller_check(ServerType.Proxy)\n    @check_session(*check_session_params)\n    def do_before_send_command(self, session, command):\n        session['log'] += \"before_send_command \"\n        return Command(\"AAA%s\" % command.commandstring)\n\n    @Override(Translator.Translator)\n    @logged(log.level.Info)\n    @caller_check(ServerType.Proxy)\n    @check_session(*check_session_params)\n    def do_after_send_command(self, session, response):\n        session['log'] += \"after_send_command \"\n        return Command(\"AAA%s\" % response.commandstring)\n\n    @Override(Translator.Translator)\n    @logged(log.level.Info)\n    @caller_check(ServerType.Proxy)\n    @check_session(*check_session_params)\n    def do_before_send_file(self, session, file):\n        session['log'] += \"before_send_file \"\n        file_content = ExperimentUtil.deserialize(file.commandstring)\n        return Command(ExperimentUtil.serialize(\"AAA%s\" % file_content))\n\n    @Override(Translator.Translator)\n    @logged(log.level.Info)\n    @caller_check(ServerType.Proxy)\n    @check_session(*check_session_params)\n    def do_after_send_file(self, session, response):\n        session['log'] += \"after_send_file \"\n        return Command(\"AAA%s\" % response.commandstring)\n\n    @Override(Translator.Translator)\n    @logged(log.level.Info)\n    @caller_check(ServerType.Proxy)\n    @check_session(*check_session_params)\n    def do_on_finish(self, session):\n        session['log'] += \"do_on_finish \"\n        return Command(session['log'])\n","sub_path":"server/src/test/unit/weblab/proxy/adds_triple_translator.py","file_name":"adds_triple_translator.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"587318702","text":"import pika\r\n#In fanout (broadcast) mode every consumer gets its own queue; the queue name has to be generated randomly by the server\r\n#Note: in fanout mode a consumer does not receive messages the producer published before it connected\r\ncredentials = pika.PlainCredentials('newnew', 'rabbitmq123')\r\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\r\n    '182.61.17.151',credentials=credentials))\r\nchannel = connection.channel()\r\n\r\n\r\nchannel.exchange_declare(exchange='logs', type='fanout')\r\n\r\n#If no queue name is given, RabbitMQ assigns a random one; exclusive=True deletes the queue automatically once its consumer disconnects\r\nresult = channel.queue_declare(exclusive=True) \r\n\r\nqueue_name = result.method.queue\r\n#Bind the randomly generated queue to the declared exchange (the fanout router)\r\nchannel.queue_bind(exchange='logs', queue=queue_name)\r\n\r\nprint(' [*] Waiting for logs. 
To exit press CTRL+C')\r\n\r\n\r\ndef callback(ch, method, properties, body):\r\n    print(\" [x] %r\" % body)\r\n\r\n\r\nchannel.basic_consume(callback, queue=queue_name,no_ack=True)\r\n\r\nchannel.start_consuming()\r\n","sub_path":"demo_rabbitmq/fanout_receive.py","file_name":"fanout_receive.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"543669820","text":"from dolfin import *\nimport rbs\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprofile = False\n\nNf = 80\npenalization = Constant(1e8)\ndamping = Constant(1e8)\n\ndomain = Rectangle(0, 0, 1, 1) - Circle(0.25, 0.25, 0.1) - Circle(0.25, 0.75, 0.1) - Circle(0.75, 0.25, 0.1) - Circle(0.75, 0.75, 0.1)\nMCell = Mesh(domain, Nf)\n\ndef boundary(x, on_boundary):\n    return on_boundary\n\n\n# P1 elements for the density\nVCell = FunctionSpace(MCell, \"P\", 1)\n\n# Vector P1 elements for the convection term\nVCellV = VectorFunctionSpace(MCell, \"P\", 1)\n\nbc = DirichletBC(VCell, Constant(0.0), boundary)\n\nbuildings = Expression('(pow((c*(x[0]+shift_x) - floor(c*(x[0]+shift_x))-0.5),2) + pow(c*(x[1]+shift_y) - floor(c*(x[1]+shift_y))-0.5, 2)) > r', r=0.04, c=10, shift_x=0, shift_y=0)\nstreets = 1-buildings\nwind = Expression(['ws', 'c*ws'], ws=1, c=5, element=VCellV.ufl_element())\n\nwind.ws = 2\nwind.c = 1\nbuildings.c=2\n\nf = Expression('exp(-l*(x[0]+shift_x))', l=1, shift_x=0, shift_y=0)\nf.l=1\n\nu = TrialFunction(VCell)\nv = TestFunction(VCell)\n\na0 = inner(grad(u), grad(v))*dx\na1 = u*v*dx\na2 = u.dx(0)*v*dx\na3 = u.dx(1)*v*dx\nforms = [a0, a1, a2, a3]\n\n[c0, c1, c2, c3] = [Constant(0.0) for i in range(4)]\ncoeffs = [c0, c1, c2, c3]\n\nf0 = lambda p:1\nf1 = lambda p:1\nf2 = lambda p:p[0]*cos(p[1])\nf3 = lambda p:p[0]*sin(p[1])\nfactors = [f0, f1, f2, f3]\n\nfull_form = sum(map(lambda i:i[0]*i[1], zip(coeffs, forms)))\nfull_form = c0*a0 + c1*a1 + c2*a2 + c3*a3\n\nsource = f*v*dx\n\nparam_space = [\n    np.linspace(0, 5000, 200),\n    np.linspace(0, 2*pi, 200),\n    ]\n\nsolvr = rbs.AffineReducedBasisSolver(full_form, coeffs, factors, source, VCell, param_space, bcs=bc)\n\nif profile:\n    import cProfile\n    cProfile.run(\"solvr.reduce(1e-10, do_ortho=True, basis_size=50, progress_plots=False, method='ap')\")\nelse:\n    errs, exception = solvr.reduce(1e-10, do_ortho=True, basis_size=50, progress_plots=False, method='ap')\n\n    plt.semilogy(errs)\n    plt.show()\n\n    if exception:\n        raise exception\n","sub_path":"convection_rb.py","file_name":"convection_rb.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"334050058","text":"# Filename: q12_find_factors.py\r\n# Author: Ang Yong Loong\r\n# Class: 5C23\r\n# Created: 05022013\r\n# Modified: 05022013\r\n# Description: Program that reads an integer and\r\n#              displays its prime factors, smallest first\r\n\r\n# main\r\n\r\n#prompt for an integer\r\nx = int(input(\"Enter an integer: \"))\r\n\r\n#initialise i and an empty list of factors\r\ni = 2\r\nfactors = []\r\n\r\n#loop while x is not 1\r\nwhile (x != 1):\r\n    if(x % i == 0):\r\n        #save the factor into the list, and divide the integer by it\r\n        factors.append(i)\r\n        x = x // i  #integer division keeps x an int\r\n    else:\r\n        #increase by 1 if it is not a factor\r\n        i = i + 1\r\n\r\n#display result\r\nprint (factors)\r\n\r\n\r\ninput(\"\\n\\nPlease hit the enter key to 
exit.\")\r\nexit()\r\n","sub_path":"practical02/q12_find_factors.py","file_name":"q12_find_factors.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"292297407","text":"from smartnet.grpcpy import smartnet_pb2_grpc, smartnet_pb2\nfrom smartnet.impl.DataAccessor import create_community, admin_login, get_list_of_communities\nfrom elixir import session\nfrom smartnet.utils.utils import to_java_date\n\nclass GrpcHandler(smartnet_pb2_grpc.SmartnetServicer):\n    \n    \n    def createCommunity(self, request, context):\n        res = create_community(request)\n        return smartnet_pb2.BooleanResponse(result=res)\n    \n    def getCommunityById(self, request, context):\n        pass\n    \n    def getListOfCommunities(self, request, context):\n        community_list = get_list_of_communities(request)\n        l = smartnet_pb2.CommunityList()\n        r = []\n        for c in community_list:\n            r.append(smartnet_pb2.Community(id=c.id,name=c.name, city=c.city ,type= c.type ,state=c.state, created_timestamp=to_java_date(c.created_timestamp)))\n        l.community.extend(r)\n        return l\n    \n    def adminLogin(self,request, context):\n        try:\n            res = admin_login(request)\n            if not res:\n                raise\n            return smartnet_pb2.Admin(id=res.id, role=res.role, email = res.email, first_name = res.first_name, last_name = res.last_name)\n        finally:\n            session.close()","sub_path":"PyProj/src/smartnet/impl/GrpcHandler.py","file_name":"GrpcHandler.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14719062","text":"import numpy as np\nimport pandas as pd\n\ndef gen_data():\n    points,labels = [],[]\n    tags = ['green','blue','red','yellow']\n    x = [np.random.rand()*100,np.random.rand()*10]\n    for i in range(60):\n        points.append([np.random.rand()*100,np.random.rand()*100])\n        labels.append(tags[np.random.randint(0,4)])\n    return x,np.array(points),np.array(labels)\n\ndef visualization(points,labels):\n    import matplotlib.pyplot as plt\n\n    for i in range(len(points)):\n        plt.scatter(points.T[0, i], points.T[1, i], marker='^', c=labels[i])\n    plt.axis([0, 100, 0, 100])\n    plt.show()\n\ndef visualization1(points,labels,x,L):\n    import matplotlib.pyplot as plt\n\n    for i in range(len(points)):\n        plt.scatter(points.T[0, i], points.T[1, i], marker='^', c=labels[i])\n    for e in L:\n        plt.plot([x[0],e[0]],[x[1],e[1]],label=distance(x,e))\n    plt.axis([0, 100, 0, 100])\n    plt.show()\n\n\nclass kdtree(object):\n    def __init__(self,points,dim):\n        self.left = None #left child\n        self.right = None #right child\n        self.points = points #data split at this node\n        self.dim = dim #dimension this node splits on\n        self.feature = [0,0]\n        if len(self.points)==1:\n            self.feature = self.points.values[0]\n        else:\n            self.calcMid()\n            self.departtree()\n\n    #take the median point\n    def calcMid(self):\n        self.points.sort_values(self.dim, inplace=True,ignore_index=True)\n        l = self.points.shape[0]\n        self.feature = self.points.values[l//2]\n        self.points = self.points.drop(index=l//2,axis=0)\n\n    # build the child nodes\n    def departtree(self):\n        # there are still points left to split\n        left = []\n        right = []\n        for e in self.points.values:\n            if e[self.dim] < self.feature[self.dim]:\n                left.append(e)\n            else:\n                right.append(e)\n        if len(left):\n            self.left = kdtree(pd.DataFrame(left),(self.dim+1)%self.points.shape[1])\n        if len(right):\n            self.right = kdtree(pd.DataFrame(right),(self.dim+1)%self.points.shape[1])\n        return\n\ndef distance(x,y):\n    d = 0\n    for i in range(len(x)):\n        d+=(x[i]-y[i])**2\n    return d\n\n\ndef argmaxdis(x,L):\n    dis = []\n    for e in L:\n        dis.append(distance(x,e))\n    
return np.argmax(dis),np.max(dis)\n\ndef insertL(x,p,L,maxl):\n if len(L) 0:\n name_request = ses.get('https://peeringdb.com/api/net?asn=' + str(asn)).json()\n peer[name][asn] = dict()\n peer[name][asn]['description'] = name_request['data'][0]['name']\n if name_request['data'][0]['irr_as_set'] is not None and len(name_request['data'][0]['irr_as_set']) > 0:\n peer[name][asn]['import'] = name_request['data'][0]['irr_as_set']\n else:\n peer[name][asn]['import'] = \"AS\" + str(asn)\n peer[name][asn]['export'] = \"AS-GITOYEN\"\n peer[name][asn]['peerings'] = []\n if name_request['data'][0]['info_prefixes4'] is not None:\n peer[name][asn]['limit_ipv4'] = int(name_request['data'][0]['info_prefixes4'])\n if name_request['data'][0]['info_prefixes6'] is not None:\n peer[name][asn]['limit_ipv6'] = int(name_request['data'][0]['info_prefixes6'])\n delete = True\n for routeur in result:\n if routeur['ipaddr4'] is not None:\n peer[name][asn]['peerings'].append(routeur['ipaddr4'])\n print(\n \"Generating configuration at \" + name + \" for the router \" + str(routeur['ipaddr4']) + \" of the AS \" + str(\n asn) + \" \" + peer[name][asn]['description'])\n if routeur['ipaddr6'] is not None:\n peer[name][asn]['peerings'].append(routeur['ipaddr6'])\n print(\n \"Generating configuration at \" + name + \" for the router \" + str(routeur['ipaddr6']) + \" of the AS \" + str(\n asn) + \" \" + peer[name][asn]['description'])\n delete = False\n if delete:\n peer[name].pop(asn, None)\n\nfor gix in peer:\n with open(\"peers/\" + gix + '.yml', 'w') as outfile:\n yaml.dump(peer[gix], outfile, default_flow_style=False)\n outfile.close()\n","sub_path":"generate_peers.py","file_name":"generate_peers.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"63266273","text":"from pathlib import Path\nfrom NaClProfile import NaClProfile\nimport Profile\nimport ds_client\nfrom OpenWeather import OpenWeather\nfrom LastFM import LastFM\n\ndef file_search(dirpath, filename=None, extension=None, onlyfiles=False, recursive=False): \n '''\n Lists files in given directory that match the specified requirements.\n If recursive = True, all subdirectories contents are displayed.\n ''' \n dir_list = []\n for child in dirpath.iterdir():\n if child.is_file():\n if file_meets_criteria(child, filename, extension):\n print(child)\n else:\n dir_list.append(child)\n \n for d in dir_list:\n if not onlyfiles:\n print(d)\n if recursive:\n file_search(d, filename, extension, onlyfiles, recursive)\n\n\n\ndef file_meets_criteria(file, filename, extension):\n '''\n Returns True if the file satisfies a filename or extension requirement if they exist.\n '''\n crit_met = True\n if filename != None:\n if file.name != filename:\n crit_met = False\n\n if extension != None:\n if file.suffix != '.' 
+ extension:\n            crit_met = False\n\n    return crit_met\n\n\n\ndef list_files(path, command_tokens):\n    '''\n    Assigns specific variables for command requirements and calls file_search().\n    '''    \n    recursive = False\n    only_files = False\n    extension = None\n    filename = None\n\n    #whether the next token is an input\n    get_input = False\n    input_type = \"\"\n\n    for token in command_tokens[2:]:\n        if get_input:\n            if input_type == '-s':\n                filename = token\n            elif input_type == '-e':\n                extension = token\n            get_input = False\n            input_type = \"\"\n\n        elif token == \"-r\":\n            recursive = True\n        elif token == '-f':\n            only_files = True\n        elif token == '-s':\n            only_files = True\n            get_input = True\n            input_type = \"-s\"\n        elif token == '-e':\n            only_files = True\n            get_input = True\n            input_type = \"-e\"\n        else:\n            print(\"ERROR\")\n            return\n    \n    if get_input:\n        print(\"ERROR\")\n        return\n\n    file_search(path, filename, extension, only_files, recursive)\n\n\n\ndef create_file(path, command_tokens, profile) -> str:\n    '''\n    -Creates new .dsu file in given directory.\n    -Prompts user for Profile input and populates the profile object along with the created file.\n    -Returns path of created file\n    '''\n    if len(command_tokens) != 4 or command_tokens[2] != '-n':\n        print(\"ERROR\")\n        return\n\n    if not path.is_dir():\n        print(\"ERROR\")\n        return\n    \n    file_path = path / (command_tokens[3] + \".dsu\")\n    file_path.touch()\n    print(file_path)\n\n    profile.dsuserver = input(\"Enter dsu server:\\n\")\n    profile.username = input(\"Enter username:\\n\")\n    profile.password = input(\"Enter password:\\n\")\n    \n    try:\n        profile.save_profile(file_path)\n    except Profile.DsuFileError:\n        print(\"ERROR: Couldn't save profile to file\")\n        return\n\n    prompt_bio(profile)\n    prompt_post(profile)\n\n    return file_path\n    \n\n\ndef delete_file(path, command_tokens):\n    '''\n    Deletes file specified in path.\n    '''\n    if error_found(path, command_tokens):\n        print(\"ERROR\")\n        return\n    \n    path.unlink()\n    print(path, \"DELETED\")\n\n    \n\ndef read_file(path, command_tokens):\n    '''\n    Prints contents of .dsu file or EMPTY if it is empty.\n    '''\n    if error_found(path, command_tokens):\n        print(\"ERROR\")\n        return\n\n    if path.read_text() == \"\":\n        print(\"EMPTY\")\n        return\n\n    print(path.read_text(), end=\"\")\n    \n\n\ndef error_found(path, command_tokens):\n    '''\n    Returns true if the command for read_file(), delete_file(), or load_file() is incorrect.\n    '''\n    error = False\n    if len(command_tokens) != 2:\n        error = True\n\n    if not path.is_file():\n        error = True\n\n    if path.suffix != \".dsu\":\n        error = True\n\n    return error\n\n\n\ndef load_file(path, command_tokens, profile) -> str:\n    '''\n    Populates the profile object with information in the specified .dsu file.\n    Returns path of specified file.\n    '''\n    if error_found(path, command_tokens):\n        print(\"ERROR\")\n        return\n\n    try:\n        profile.load_profile(str(path))\n    except Profile.DsuProfileError:\n        print(\"ERROR: Couldn't load file\")\n        return\n    \n    prompt_bio(profile)\n    prompt_post(profile)\n\n    return str(path)\n\n\n\ndef command_info():\n    '''\n    prints available commands \n    '''\n    print(\"COMMANDS:\")\n    print(\"list files -- [L [DIRECTORY] [[-]OPTION] [INPUT]]\")\n    print(\"create file -- [C [DIRECTORY] -n [FILE]]\")\n    print(\"delete file -- [D [FILE]]\")\n    print(\"read file -- [R [FILE]]\")\n    print(\"load file -- [O [FILE]]\")\n    print(\"write post -- [P]\")\n    print(\"write bio -- [B]\")\n    print(\"quit -- [Q]\")\n    print()\n\n    \n\ndef prompt_post(profile):\n    '''\n    Prompts the user if they would like to write a post.\n    '''\n    if 
input(\"Would you like to write a post? (Y/N)\\n\") == \"Y\":\n write_post(profile)\n\n\n \ndef write_post(profile):\n '''\n Saves post to profile object.\n Sends to server if indicated by the user.\n '''\n if not profile_is_loaded(profile):\n print(\"ERROR: No profile is currently loaded.\")\n return\n \n print()\n print(\"*TRANSCLUDE OPTIONS*\")\n print(\"@weather -- short description of current weather\")\n print(\"@lastfm -- current top artist along with their play count\")\n print()\n\n msg = input(\"Enter message:\\n\")\n msg = transclusion_check(msg)\n post = Profile.Post()\n post.set_entry(msg)\n \n profile.add_post(post)\n\n if input(\"Would you like to post your message online? (Y/N)\\n\") == \"Y\":\n token = ds_client.join(profile.dsuserver, 2021, profile.username, profile.password, profile.public_key)\n\n if token == None:\n print(\"ERROR: There was an error when trying to join the server\")\n return\n \n encrypted_post = profile.encrypt_entry(profile.get_posts()[-1].get_entry(), token).decode(encoding='UTF-8')\n if ds_client.send_post(profile.dsuserver, 2021, encrypted_post, post.get_time(), profile.public_key) == False:\n print(\"ERROR: There was an error when trying to post your message\")\n return\n\n\ndef transclusion_check(msg:str) ->str:\n '''\n Checks whether msg has any predefined transclusion keywords.\n Returns msg with the keywords replaced with their transcluded information.\n '''\n try:\n if \"@weather\" in msg:\n open_weather = OpenWeather(\"92660\", \"US\", \"965cf2e5bf9db105ac06164bddce42d6\")\n msg = open_weather.transclude(msg)\n\n if \"@lastfm\" in msg:\n lastfm = LastFM(\"c4a1c7835447a0cd7870d45c5bc416da\")\n msg = lastfm.transclude(msg)\n\n except AttributeError:\n print(\"Your message failed to transclude\\n\")\n \n return msg\n \n \n\ndef prompt_bio(profile):\n '''\n Prompts the user if they would like to write a bio.\n '''\n if input(\"Would you like to create a bio? (Y/N)\\n\") == \"Y\":\n write_bio(profile)\n\ndef write_bio(profile):\n '''\n Saves bio to profile object.\n Posts bio to dsuserver if indicated by user.\n '''\n if not profile_is_loaded(profile):\n print(\"ERROR: No profile is currently loaded.\")\n return\n \n bio = input(\"Enter bio:\\n\")\n profile.bio = bio\n\n if input(\"Would you like to post your bio online? 
(Y/N)\\n\") == \"Y\":\n        token = ds_client.join(profile.dsuserver, 2021, profile.username, profile.password, profile.public_key)\n\n        if token is None:\n            print(\"ERROR: There was an error when trying to join the server\")\n            return\n\n        encrypted_bio = profile.encrypt_entry(profile.bio , token).decode(encoding='UTF-8')\n        if ds_client.send_bio(profile.dsuserver, 2021, encrypted_bio, profile.public_key) is False:\n            print(\"ERROR: There was an error when trying to post your bio\")\n            return\n\n\n\ndef profile_is_loaded(profile) -> bool:\n    '''\n    returns True if a profile was already created or loaded\n    '''\n    return profile.dsuserver is not None\n\n\n    \ndef main():\n    \n    profile = NaClProfile()\n    profile_path = \"\"\n\n    command_info()\n    \n    while True:\n        if profile_is_loaded(profile):\n            try:\n                profile.save_profile(profile_path)\n            except Profile.DsuFileError:\n                print(\"ERROR: Couldn't save profile to file\")\n        \n        command = input().strip()\n        if command == 'Q':\n            break\n        elif command == \"P\":\n            write_post(profile)\n            continue\n        elif command == \"B\":\n            write_bio(profile)\n            continue\n        \n        command_tokens = command.split()\n        if len(command_tokens) < 2:\n            print(\"ERROR\")\n            continue\n        \n        path = Path(command_tokens[1])\n        if not path.exists():\n            print(\"ERROR\")\n            continue\n        if command_tokens[0] == 'L':\n            list_files(path, command_tokens)\n        elif command_tokens[0] == 'C':\n            profile = NaClProfile() #resets profile to remove any existing profile \n            profile_path = create_file(path, command_tokens, profile)\n        elif command_tokens[0] == 'D':\n            delete_file(path, command_tokens)\n        elif command_tokens[0] == 'R':\n            read_file(path, command_tokens)\n        elif command_tokens[0] == 'O':\n            profile = NaClProfile() #resets profile to remove any existing profile\n            profile_path = load_file(path, command_tokens, profile)\n        else:\n            print(\"ERROR\")\n            continue\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n    \n    \n","sub_path":"a5/a5.py","file_name":"a5.py","file_ext":"py","file_size_in_byte":9874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"208650543","text":"import pygame\nimport game_sprite\n\nID_SHOW_ENEMY_SPRITE = pygame.USEREVENT\nID_SHOW_BULLET_SPRITE = pygame.USEREVENT + 1\n\n\nclass PlaneWar(object):\n\n    def __init__(self):\n        pygame.init()\n        self.screen = pygame.display.set_mode(game_sprite.SCREEN_RECT.size)\n        self.clock = pygame.time.Clock()\n        self.background_sprite = game_sprite.BackgroundSprite(\"./images/background.png\")\n        self.background_sprite_alt = game_sprite.BackgroundSprite(\"./images/background.png\", is_alt=True)\n        self.plane_sprite = game_sprite.PlaneSprite(\"./images/me1.png\")\n        self.bg_group = pygame.sprite.Group()\n        self.player_group = pygame.sprite.Group()\n        self.enemy_group = pygame.sprite.Group()\n        self.bg_group.add(self.background_sprite, self.background_sprite_alt)\n        self.player_group.add(self.plane_sprite)\n        pygame.time.set_timer(ID_SHOW_ENEMY_SPRITE, 1000)\n        pygame.time.set_timer(ID_SHOW_BULLET_SPRITE, 200)\n\n    def start_game(self):\n        print(\"Game started\")\n        while self.plane_sprite.is_survive:\n            self.handle_key_event()\n            self.check_collide()\n            self.update_screen()\n            self.clock.tick(60)\n        print(\"Game over\")\n        PlaneWar.end_game()\n\n    @staticmethod\n    def end_game():\n        pygame.quit()\n        exit()\n\n    def handle_key_event(self):\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                PlaneWar.end_game()\n            elif event.type == ID_SHOW_ENEMY_SPRITE:\n                enemy_sprite = game_sprite.EnemySprite(\"./images/enemy1.png\")\n                self.enemy_group.add(enemy_sprite)\n            elif 
event.type == ID_SHOW_BULLET_SPRITE:\n bullet_sprite = game_sprite.BulletSprite(\"./images/bullet1.png\", self.plane_sprite)\n self.player_group.add(bullet_sprite)\n\n pressed_key = pygame.key.get_pressed()\n self.plane_sprite.control_direction(pressed_key)\n\n def update_screen(self):\n self.bg_group.update()\n self.bg_group.draw(self.screen)\n\n self.player_group.update()\n self.player_group.draw(self.screen)\n\n self.enemy_group.update()\n self.enemy_group.draw(self.screen)\n\n pygame.display.update()\n\n def check_collide(self):\n pygame.sprite.groupcollide(self.enemy_group, self.player_group, True, True)\n\n\nif __name__ == \"__main__\":\n PlaneWar().start_game()\n","sub_path":"game_main.py","file_name":"game_main.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"16848684","text":"button_dict = {\n \"header\": (3500, 1688),\n \"trailer\": (465, 75050),\n \"zero\": (465, 1252),\n \"one\": (465, 385),\n \"margin\": 100,\n \"word_length\":56,\n \"PRE_DATA\": 0x4004011200,\n \"KEY_POWER1\": 0xBCAF, # Was: Power\n \"Theater_Room\": 0x7162,\n \"Living_Room\": 0xF1E2,\n \"KEY_FAVORITES\": 0x8192, # Was: Favorite\n \"Aspect\": 0x7B68,\n \"Picture_Adj\": 0x7E6D,\n \"KEY_ENTER\": 0x091A, # Was: Return\n \"KEY_MENU\": 0x5E4D, # Was: Menu\n \"KEY_UP\": 0x5A49, # Was: Up\n \"KEY_DOWN\": 0xDAC9, # Was: Down\n \"KEY_LEFT\": 0x3A29, # Was: Left\n \"KEY_RIGHT\": 0xBAA9, # Was: Right\n \"KEY_ENTER1\": 0x4E5D, # Was: Enter\n \"Default\": 0x8695,\n \"Freeze\": 0x4053,\n \"KEY_SLEEP\": 0x998A, # Was: Sleep\n \"Input_Select\": 0xB6A5,\n \"KEY_POWER2\": 0x7C6F, # Was: Power_On\n \"KEY_POWER3\": 0xFCEF\n}\n","sub_path":"buttons_panasonic_proj.py","file_name":"buttons_panasonic_proj.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"137731892","text":"import scrapy\nfrom ..utils import helpers, state_farm_logic\nfrom ..items import StateFarmItem\n\n\nclass StateFarmSpider(scrapy.Spider):\n name = 'state-farm-spider'\n allowed_domains = ['statefarm.com']\n start_urls = ['https://www.statefarm.com/agent/US/']\n\n def __init__(self):\n self.base_url = 'https://www.statefarm.com'\n self.agent_url = 'https://www.statefarm.com/agent/US'\n\n def start_requests(self):\n yield scrapy.Request(url=self.agent_url, callback=self.parse)\n\n def parse(self, response):\n links = [href.extract() for href in response.css('a::attr(href)') if 'agent/us/' in href.extract().lower()]\n url_depth = len(response.url.split('/'))\n if url_depth < 6:\n callback = self.parse\n else:\n callback = self.parse_agent_site\n\n for link in links:\n next_page = self.base_url + link\n next_page = response.urljoin(next_page)\n yield scrapy.Request(\n url=next_page,\n callback=callback\n )\n\n def parse_agent_site(self, response):\n links = [href.extract() for href in response.css('a::attr(href)') if 'agent/us/' in href.extract().lower()]\n agent_links = (link for link in links if 'https://www.statefarm.com' in link)\n for next_page in agent_links:\n yield scrapy.Request(\n url=next_page,\n callback=self.parse_agent\n )\n\n def parse_agent(self, response):\n self.logger.info('Scraping ' + response.url)\n item = StateFarmItem()\n try:\n item['report_dt'] = helpers.get_run_date()\n item['process_ts'] = helpers.get_gmt_utc()\n item['license_nbr'] = state_farm_logic.get_license_nbr(response)\n item['agent_nm'], item['agency_nm'] = 
state_farm_logic.get_agent_name(response)\n item['address1'], item['address2'] = state_farm_logic.get_address(response)\n item['city_nm'] = state_farm_logic.get_city(response)\n item['state_cd'] = state_farm_logic.get_state(response)\n item['zip_cd'] = state_farm_logic.get_postalcode(response)\n item['phone_nbr'] = state_farm_logic.get_phone_nbr(response)\n item['fax_nbr'] = state_farm_logic.get_fax_nbr(response)\n item['office_hours'] = state_farm_logic.get_office_hours(response)\n item['products_offered'] = state_farm_logic.get_products_offered(response)\n item['languages_spoken'] = state_farm_logic.get_languages_spoken(response)\n except Exception as e:\n self.logger.warning('Failed to scrape ' + response.url + ': ', e)\n yield item\n","sub_path":"insurance/insurance/spiders/state_farm_spider.py","file_name":"state_farm_spider.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107167432","text":"\"\"\"\nDefines arm gestures, controlling how the arms should move\n\"\"\"\nimport time\n\nimport rospy\nfrom std_msgs.msg import String\n\n\nclass DrawArm():\n \"\"\"\n This is the code to move the UR5 arms for the Fall 18 HIRo project to draw portraits\n of celebrities using OCR.\n This module draws the portraits by moving the arm with a sharpie in the gripper to\n discrete xyz positions\n \"\"\"\n\n def __init__(self):\n # rospy.init_node(\"draw_arm\", anonymous=True)\n\n # --------CASTOR----------\n # self.name = 'castor'\n # self.coordinates_pub_castor = rospy.Publisher(\"/coordinates_cmd_castor\", String, queue_size=10)\n # self.joints_pub_castor = rospy.Publisher(\"/behaviors_cmd_castor\", String, queue_size=10)\n\n # --------POLLOX----------\n self.name = 'pollux'\n self.coordinates_pub_pollux = rospy.Publisher(\"/coordinates_cmd_pollux\", String, queue_size=10)\n self.joints_pub_pollux = rospy.Publisher(\"/behaviors_cmd_pollux\", String, queue_size=10)\n\n time.sleep(1)\n\n def move_gesture(self, msg):\n print(\"Sending: \", msg)\n # self.joints_pub_castor.publish(msg)\n self.joints_pub_pollux.publish(msg)\n print('Sent gesture')\n\n def move_coord(self, msg):\n print(\"Sending: \", msg)\n # self.coordinates_pub_castor.publish(msg)\n self.coordinates_pub_pollux.publish(msg)\n\n def draw_line(self, p1, p2):\n x1 = p1[0] / 1000.0\n y1 = p1[1] / 1000.0\n x2 = p2[0] / 1000.0\n y2 = p2[1] / 1000.0\n\n self.move_coord(str(x1) + ' ' + str(y1) + ' 0.305')\n self.move_coord(str(x2) + ' ' + str(y2) + ' 0.305')\n time.sleep(1)\n\n def run(self):\n print(\"Draw Arm running\")\n while not rospy.is_shutdown():\n try:\n self.move_gesture('portrait_hover')\n time.sleep(5)\n return\n except KeyboardInterrupt:\n break\n\n\nif __name__ == \"__main__\":\n draw = DrawArm()\n # draw.run()\n","sub_path":"hiro_archive/Fall_2018/text_to_portrait/move_arm.py","file_name":"move_arm.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203977065","text":"import re\nfrom os import path\nfrom fabric.api import cd, task, sudo, abort\n\nfrom braid import info\nfrom braid.utils import fails\n\npypyVersion = \"5.10.0\"\n\npypyURLs = {\n 'x86_64': 'https://bitbucket.org/pypy/pypy/downloads/pypy2-v{version}-linux64.tar.bz2',\n 'x86': 'https://bitbucket.org/pypy/pypy/downloads/pypy2-v{version}-linux.tar.bz2',\n }\npypyDirs = {\n 'x86_64': '/opt/pypy2-v{version}-linux64',\n 'x86': '/opt/pypy2-v{version}-linux',\n }\n\n@task\ndef 
install():\n    arch = info.arch()\n    # i386/i686-style values are normalised to the 'x86' key\n    if re.match('i.86', arch):\n        arch = 'x86'\n    # .get() returns None for unsupported architectures, so check before formatting\n    pypyURL = pypyURLs.get(arch)\n    pypyDir = pypyDirs.get(arch)\n    if pypyURL is None or pypyDir is None:\n        abort(\"Can't install pypy on unknown architecture.\")\n    pypyURL = pypyURL.format(version=pypyVersion)\n    pypyDir = pypyDir.format(version=pypyVersion)\n\n    sudo('/bin/mkdir -p /opt')\n    if fails('/usr/bin/id {}'.format('pypy')):\n        sudo('/usr/sbin/useradd --home-dir {} --gid bin '\n             '-M --system --shell /bin/false '\n             'pypy'.format(pypyDir))\n    else:\n        sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))\n\n    with cd('/opt'):\n\n        sudo('/usr/bin/wget -nc {}'.format(pypyURL))\n        sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))\n","sub_path":"braid/pypy.py","file_name":"pypy.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163368916","text":"import asyncio\nimport inspect\nimport typing\n\nfrom vkwave.api import API, APIOptionsRequestContext\nfrom vkwave.api.token.token import (\n    BotSyncPoolTokens,\n    UserSyncSingleToken,\n    Token,\n    BotSyncSingleToken,\n)\nfrom vkwave.bots.core import BaseFilter\nfrom vkwave.bots import (\n    Dispatcher,\n    BaseEvent,\n    UserEvent,\n    BotEvent,\n    BotLongpollExtension,\n    UserLongpollExtension,\n    ChatActionFilter,\n    CommandsFilter,\n    EventTypeFilter,\n    PayloadFilter,\n    RegexFilter,\n    TextFilter,\n    FromMeFilter,\n    DefaultRouter,\n    TokenStorage,\n    UserTokenStorage,\n    UserId,\n    GroupId,\n    BotType,\n    FwdMessagesFilter,\n    MessageArgsFilter,\n    MessageFromConversationTypeFilter,\n    TextContainsFilter\n)\nfrom vkwave.bots.core.dispatching.dp.middleware.middleware import BaseMiddleware, MiddlewareResult\nfrom vkwave.bots.core.dispatching.filters.extension_filters import VBMLFilter\nfrom vkwave.bots.fsm.filters import StateFilter\nfrom vkwave.bots.core.dispatching.handler.callback import BaseCallback\nfrom vkwave.bots.core.dispatching.router.router import BaseRouter\nfrom vkwave.client import AIOHTTPClient\nfrom vkwave.longpoll import BotLongpoll, BotLongpollData, UserLongpoll, UserLongpollData\nfrom vkwave.types.bot_events import BotEventType\nfrom vkwave.types.objects import BaseBoolInt\nfrom vkwave.types.user_events import EventId\n\n\nclass _APIContextManager:\n    def __init__(self, tokens: typing.Union[str, typing.List[str]], bot_type: BotType):\n        self.client = AIOHTTPClient()\n        if bot_type.USER:\n            self.tokens = (\n                UserSyncSingleToken(Token(tokens))\n                if isinstance(tokens, str)\n                else BotSyncPoolTokens([Token(token) for token in tokens])\n            )\n        else:\n            self.tokens = (\n                BotSyncSingleToken(Token(tokens))\n                if isinstance(tokens, str)\n                else BotSyncPoolTokens([Token(token) for token in tokens])\n            )\n        self.api = API(clients=self.client, tokens=self.tokens)\n\n    async def __aenter__(self):\n        return self.api.get_context()\n\n    async def __aexit__(self, exc_type, exc_val, exc_tb):\n        await self.close()\n\n    async def close(self):\n        await self.client.close()\n\n\ndef create_api_session_aiohttp(token: str, bot_type: BotType = BotType.BOT) -> _APIContextManager:\n    return _APIContextManager(token, bot_type)\n\n\nclass SimpleUserEvent(UserEvent):\n    def __init__(self, event: UserEvent):\n        super().__init__(event.object, event.api_ctx)\n        self.user_data = event.user_data\n\n    def __setitem__(self, key: typing.Any, item: typing.Any) -> None:\n        self.user_data[key] = item\n\n    def __getitem__(self, key: typing.Any) -> typing.Any:\n        return self.user_data[key]\n\n    async def answer(\n        self,\n        message: typing.Optional[str] = None,\n        keyboard: 
typing.Optional[str] = None,\n attachment: typing.Optional[BaseBoolInt] = None,\n payload: typing.Optional[str] = None,\n forward_messages: typing.Optional[typing.List[int]] = None,\n dont_parse_links: typing.Optional[bool] = None,\n disable_mentions: typing.Optional[bool] = None,\n sticker_id: typing.Optional[int] = None,\n domain: typing.Optional[str] = None,\n lat: typing.Optional[BaseBoolInt] = None,\n long: typing.Optional[BaseBoolInt] = None,\n reply_to: typing.Optional[int] = None,\n group_id: typing.Optional[int] = None,\n ):\n await self.api_ctx.messages.send(\n domain=domain,\n lat=lat,\n long=long,\n attachment=attachment,\n reply_to=reply_to,\n forward_messages=forward_messages,\n sticker_id=sticker_id,\n group_id=group_id,\n keyboard=keyboard,\n payload=payload,\n dont_parse_links=dont_parse_links,\n disable_mentions=disable_mentions,\n peer_id=self.object.object.peer_id,\n message=message,\n random_id=0,\n )\n\n\nclass SimpleBotEvent(BotEvent):\n def __init__(self, event: BotEvent):\n super().__init__(event.object, event.api_ctx)\n self.user_data = event.user_data\n\n def __setitem__(self, key: typing.Any, item: typing.Any) -> None:\n self.user_data[key] = item\n\n def __getitem__(self, key: typing.Any) -> typing.Any:\n return self.user_data[key]\n\n async def answer(\n self,\n message: typing.Optional[str] = None,\n keyboard: typing.Optional[str] = None,\n attachment: typing.Optional[BaseBoolInt] = None,\n payload: typing.Optional[str] = None,\n forward_messages: typing.Optional[typing.List[int]] = None,\n dont_parse_links: typing.Optional[bool] = None,\n disable_mentions: typing.Optional[bool] = None,\n sticker_id: typing.Optional[int] = None,\n domain: typing.Optional[str] = None,\n lat: typing.Optional[BaseBoolInt] = None,\n long: typing.Optional[BaseBoolInt] = None,\n reply_to: typing.Optional[int] = None,\n group_id: typing.Optional[int] = None,\n template: typing.Optional[str] = None,\n ):\n await self.api_ctx.messages.send(\n domain=domain,\n lat=lat,\n long=long,\n attachment=attachment,\n reply_to=reply_to,\n forward_messages=forward_messages,\n sticker_id=sticker_id,\n group_id=group_id,\n keyboard=keyboard,\n payload=payload,\n dont_parse_links=dont_parse_links,\n disable_mentions=disable_mentions,\n peer_id=self.object.object.message.peer_id,\n message=message,\n random_id=0,\n template=template\n )\n\n\nclass BaseSimpleLongPollBot:\n def __init__(\n self,\n tokens: typing.Union[str, typing.List[str]],\n bot_type: BotType,\n router: typing.Optional[BaseRouter] = None,\n group_id: typing.Optional[int] = None,\n uvloop: bool = False\n ):\n if uvloop:\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n self.bot_type = bot_type\n self.api_session = create_api_session_aiohttp(tokens, bot_type)\n self.api_context: APIOptionsRequestContext = self.api_session.api.get_context()\n if self.bot_type is BotType.USER:\n if not isinstance(tokens, str):\n raise RuntimeError(\"Only one str token\")\n\n self.SimpleBotEvent = SimpleUserEvent\n self._lp = UserLongpoll(self.api_context, UserLongpollData())\n self._token_storage = UserTokenStorage[UserId](tokens)\n self.dispatcher = Dispatcher(self.api_session.api, self._token_storage)\n self._lp = UserLongpollExtension(self.dispatcher, self._lp)\n else:\n self.SimpleBotEvent = SimpleBotEvent\n self._lp = BotLongpoll(self.api_context, BotLongpollData(group_id))\n self._token_storage = TokenStorage[GroupId]()\n self.dispatcher = Dispatcher(self.api_session.api, self._token_storage)\n self._lp = 
BotLongpollExtension(self.dispatcher, self._lp)\n\n        self.middleware_manager = self.dispatcher.middleware_manager  # auf\n\n        self.router = router or DefaultRouter()\n        self.handler = self.router.registrar.with_decorator\n        self.dispatcher.add_router(self.router)\n\n        self.text_filter = TextFilter\n        self.event_type_filter = EventTypeFilter\n        self.payload_filter = PayloadFilter\n        self.chat_action_filter = ChatActionFilter\n        self.command_filter = CommandsFilter\n        self.regex_filter = RegexFilter\n        self.state_filter = StateFilter\n        self.vbml_filter = VBMLFilter\n        self.args_filter = MessageArgsFilter\n        self.fwd_filter = FwdMessagesFilter\n        self.conversation_type_filter = MessageFromConversationTypeFilter\n        self.text_contains_filter = TextContainsFilter\n        if self.bot_type is BotType.USER:\n            self.from_me_filter = FromMeFilter\n\n    class SimpleBotCallback(BaseCallback):\n        def __init__(\n            self,\n            func: typing.Callable[[BaseEvent], typing.Awaitable[typing.Any]],\n            bot_type: BotType,\n        ):\n            self.bot_type = bot_type\n            self.func = func\n\n        async def execute(self, event: typing.Union[UserEvent, BotEvent]) -> typing.Any:\n            if self.bot_type is BotType.BOT:\n                new_event = SimpleBotEvent(event)\n            else:\n                new_event = SimpleUserEvent(event)\n            if inspect.iscoroutinefunction(self.func):\n                return await self.func(new_event)\n            return self.func(new_event)\n\n    class SimpleBotMiddleware(BaseMiddleware):\n        async def pre_process_event(self, event: BaseEvent) -> MiddlewareResult:\n            pass\n\n    def handler(self, *filters: BaseFilter):\n        \"\"\"\n        Handler for all events\n        \"\"\"\n\n        def decorator(func: typing.Callable[..., typing.Any]):\n            record = self.router.registrar.new()\n            record.with_filters(*filters)\n            record.handle(self.SimpleBotCallback(func, self.bot_type))\n            self.router.registrar.register(record.ready())\n            return func\n\n        return decorator\n\n    def message_handler(self, *filters: BaseFilter):\n        \"\"\"\n        Handler only for message events\n        \"\"\"\n\n        def decorator(func: typing.Callable[..., typing.Any]):\n            record = self.router.registrar.new()\n            record.with_filters(*filters)\n            if self.bot_type is BotType.BOT:\n                record.filters.append(EventTypeFilter(BotEventType.MESSAGE_NEW))\n            else:\n                record.filters.append(EventTypeFilter(EventId.MESSAGE_EVENT.value))\n            record.handle(self.SimpleBotCallback(func, self.bot_type))\n            self.router.registrar.register(record.ready())\n            return func\n\n        return decorator\n\n    def middleware(self):\n        def decorator(func: typing.Callable[[typing.Union[UserEvent, BotEvent]], MiddlewareResult]):\n            middleware = self.SimpleBotMiddleware()\n            middleware.pre_process_event = func\n            self.middleware_manager.add_middleware(middleware)\n\n            return func\n\n        return decorator\n\n    async def run(self, ignore_errors: bool):\n        if self.bot_type is BotType.BOT:\n            await self.dispatcher.cache_potential_tokens()\n        await self._lp.start(ignore_errors)\n\n    def run_forever(self, ignore_errors: bool = False, loop: typing.Optional[asyncio.AbstractEventLoop] = None):\n        loop = loop or asyncio.get_event_loop()\n        loop.create_task(self.run(ignore_errors))\n        loop.run_forever()\n","sub_path":"vkwave/bots/addons/easy/base_easy_bot.py","file_name":"base_easy_bot.py","file_ext":"py","file_size_in_byte":10799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330138942","text":"##!/usr/bin/python\n#10 November 2016\n#keeps sequences with an average quality score of at least 25\n\nimport sys\nimport gzip\n\nargvs = sys.argv \n\n#print the usage message and exit if the wrong number of arguments was given\n#(checked before argvs[1] is used, so a missing argument does not raise IndexError)\nif len(argvs) != 3:\n\tprint (\"usage : python hq_qual_filter.py file-after-length_filter ascii-table\")\n\tsys.exit(0)\n\ntitle = str(argvs[1].split(\".length.filter\")[0])\nin1 = argvs[1]\n\nif 
in1.endswith('.gz'): \n\toutSuffix='.fq.gz'\nelse:\n\toutSuffix='.fq'\n\nascii_dict={}\nasciifile = open(sys.argv[2],\"r\")\nfor line in asciifile:\n    ascii_dict[line.split(\"\\t\")[0]] = int(line.split(\"\\t\")[1]) - 33\n\n#defining functions\n#open file function\ndef myopen(infile, mode='r'):\n\tif infile.endswith('.gz'):\n\t\treturn gzip.open(infile,mode=mode)\n\telse:\n\t\treturn open(infile,mode=mode)\n\nwith myopen(title+'.qual.filter'+outSuffix,'w') as f2:\n\twith myopen(in1) as f1:\n\n\t\twhile True:\n\t\t\t\n\t\t\tline1=f1.readline()\n\t\t\tif not line1: break\n\t\t\tline2=f1.readline()\n\t\t\tline3=f1.readline()\t\n\t\t\tline4=f1.readline()\t\n\t\t\tqc_sum=float(0)\n\t\t\tqc_num=float(0)\n\t\t\tqc_val=float(0)\n\t\t\tfor Q in list(line4[:-1]):\n\t\t\t#\tif Q in ascii_dict:\n\t\t\t\t\t#qc_sum += float(ascii_dict[Q])\n\t\t\t\tqc_sum += float(ord(Q)-33)\n\t\t\t\tqc_num += float(1)\n\t\t\t\tqc_val =float(qc_sum/qc_num)\n\t\t\t\t\n\t\t\t#FOR WRITING UP THE FILE\n\t\t\t#keep the read if its average quality is at least 25\n\t\t\tif qc_val>=25.00 :\n\t\t\t\tf2.write(line1)\n\t\t\t\tf2.write(line2)\n\t\t\t\tf2.write(line3)\n\t\t\t\tf2.write(line4)\n\n#last edited 11 May 2016 --> using ASCII table rather than ord function\n\n\n","sub_path":"High_quality_filtering/hq_qual_filter.py","file_name":"hq_qual_filter.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"364891005","text":"from dataclasses import dataclass\nfrom typing import List, Dict\n\nfrom immolate.cpu import Cpu\nfrom immolate.instructions import Instruction, _assert_left_arrow, _parse_register\n\n\n@dataclass\nclass Add(Instruction):\n    register_a: int  # 2\n    register_b: int  # 2\n    destination_register: int  # 2\n\n    def execute(self, cpu: Cpu):\n        cpu.add(cpu.registers[self.register_a], cpu.registers[self.register_b], self.destination_register)\n\n    @staticmethod\n    def decode(b: bytes):\n        register_a = (b[0] & 0b00110000) >> 4\n        register_b = (b[0] & 0b00001100) >> 2\n        destination_register = b[0] & 0b00000011\n        return Add(register_a, register_b, destination_register)\n\n    def __bytes__(self) -> bytes:\n        return bytes([(self.register_a << 4) + (self.register_b << 2) + self.destination_register, 0])\n\n    @staticmethod\n    def decode_assembly(tokens: List[str], labels: Dict[str, int]):\n        destination_register = _parse_register(tokens[1])\n        _assert_left_arrow(tokens[2])\n        register_a = _parse_register(tokens[3])\n        register_b = _parse_register(tokens[4])\n        return Add(register_a, register_b, destination_register)\n\n    def __str__(self):\n        return f\"{self.assembly_name()} r{self.destination_register} <- r{self.register_a} r{self.register_b}\"\n\n    @staticmethod\n    def assembly_name() -> str:\n        return \"ADD\"\n","sub_path":"immolate/instructions/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"98197802","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'Topics: inheritance and polymorphism'\n\nfrom enum import Enum #enum class\nMonth = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\nfor name, member in Month.__members__.items():\n    print(name, '=>', member, ',', member.value)\n\n\nclass Human(object):\n\n    
__slots__ = ('name', 'age', '__weight') #restrict which attributes can be set on instances\n    def run(self):\n        print(\"I'm a human\")\n\nclass Student(Human): #Student inherits from Human\n    country = \"China\" #class attribute\n\n    #__call__ lets the instance itself be called like a function\n    def __call__(self):\n        print(\"I'm a student\")\n\n    def run(self):\n        print(\"I'm a good student\")\n\n    def __init__(self, name, age, weight): #the special method __init__ initialises the instance; self is the instance itself and is not passed explicitly\n        self.name = name #instance attribute\n        self.__age = age #instance attribute\n        self.__weight = weight #make the weight a private variable\n\n    def set_weight(self, weight): #a setter method lets us validate the value\n        if 0 <= weight <= 200:\n            self.__weight = weight\n        else:\n            raise ValueError('wrong weight')\n\n    def get_weight(self):\n        return self.__weight\n\n    #the property decorator turns a getter into attribute access: a.age is equivalent to a.get_age()\n    @property #define a getter that can be called as a plain attribute\n    def age(self):\n        return self.__age\n\n    #the @func.setter decorator turns a setter into attribute assignment: a.age = ... is equivalent to a.set_age(...)\n    @age.setter #define a setter that can be used as a plain attribute assignment\n    def age(self, value):\n        if not isinstance(value, int):\n            raise ValueError('age must be an integer')\n        if 0 <= value <= 100:\n            self.__age = value\n        else:\n            raise ValueError('wrong age scope')\n\n    #__getattr__ is called when a missing attribute is accessed\n    def __getattr__(self, attr):\n        print(\"Are you looking for '%s'?Can't find it\" % attr)\n\n    #customise what print() shows, e.g. 'print(alice)'\n    def __str__(self):\n        return 'Student object (name: %s)' % self.name\n    #the interactive prompt displays objects via __repr__, e.g. typing 'alice'\n    __repr__ = __str__\n\n\nclass Child(Human):\n    \n    def run(self):\n        print(\"I'm a little child\")\n\nbob = Human()\nbob.run()\n#bob.height = \"168\" #raises an error because of the __slots__ restriction\n\nalice = Student(\"alice\", 17, 50) #calls __init__\nprint(alice) #calls __str__\nalice() #invokes __call__ directly on the instance\nalice.run()\n\nprint(isinstance(alice, Student), isinstance(alice, Human), isinstance(bob, Student))\nprint(dir(alice)) #dir lists all attributes and methods of the object\nprint(alice.name)\n\n#private attributes cannot be accessed directly\n#print(alice.__weight) #raises an error: no such attribute\nprint(alice._Student__weight) #Python actually renames it to _Student__weight (name mangling); do not access it this way\n\n#access a non-existent attribute\nprint(alice.asdfas)\n\n#set an instance attribute through the instance's method\nalice.set_weight(77)\nprint(alice.get_weight())\n\n#property decorator\nalice.age = 27\nprint(alice.age)\n\n#polymorphism: the method is chosen based on the actual instance\ndef test_duotai(human):\n    human.run()\n    human.run()\n\ntest_duotai(bob)\ntest_duotai(alice)\ntest_duotai(Child())\n\n#create a class dynamically with type()\ndef fn(self, name='world'):\n    print('Hello, %s' % name)\n\nSay = type('Say', (object,), dict(sayIt=fn))\ns = Say()\ns.sayIt(\"boy~\")\n\n#metaclass\n","sub_path":"basic/02对象.py","file_name":"02对象.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"618342552","text":"import os\nimport time\nimport pyperclip\nimport hashlib\nimport json\nimport random\nimport requests\nfrom aip import AipOcr\n\ndef init():\n\n    \"\"\"Translate Servlet\"\"\"\n\n    global url, appid, secretKey, salt\n    url = \"http://api.fanyi.baidu.com/api/trans/vip/translate\"\n    appid = '20200709000515966'\n    secretKey = 'zkBZZvSCzzIILP96StRh'\n    salt = random.randint(32768, 65536)\n\n    \"\"\"OCR Servlet\"\"\"\n    APP_ID = \"21197258\"\n    API_KEY = \"2nh0SuB0Z1zbZFtR6mfATYVb\"\n    SECRET_KEY = \"rwXvvdP1iufWGhWGWFfwp0oSPE2gDTfN\"\n\n    aipOcr = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n    return aipOcr\n\n\ndef get_file_content(filePath):\n    with open(filePath, 'rb') as fp:\n        return fp.read()\n\n\ndef result_(filePath, options):\n    result = \"\"\n    try:\n        data = aipOcr.basicGeneral(get_file_content(filePath), options)\n    except Exception:\n        raise ConnectionError(\"Network connection error!\")\n    words_result = data['words_result']\n    for i in range(len(words_result)):\n        result += words_result[i]['words']\n    return 
result\n\n\ndef translate_(q):\n    \"\"\"'auto' detects the source language automatically\"\"\"\n\n    # build the signature\n    sign = appid + q + str(salt) + secretKey\n    sign = hashlib.md5(sign.encode()).hexdigest()\n    # POST request parameters\n    data = {\n        \"appid\": appid,\n        \"q\": q,\n        \"from\": fromLang,\n        \"to\": toLang,\n        \"salt\": str(salt),\n        \"sign\": sign,\n    }\n    # the response is JSON\n    trans_result = json.loads(requests.post(url, data=data).content).get('trans_result')[0].get(\"dst\")\n    return trans_result\n\n\nif __name__ == '__main__':\n\n    \"\"\"Default\"\"\"\n    options = {\n        'detect_direction': 'true',\n        'language_type': 'CHN_ENG',\n    }\n\n    \"\"\"Directory holding the screenshots to recognise\"\"\"\n    fileDir = r\"/Users/jeffrey/Desktop/screen\"\n\n    \"\"\"File types to check\"\"\"\n    fileType = [\"png\", \"jpg\", \"bmp\"]\n\n    \"\"\"Whether to translate the recognised text\"\"\"\n    translate = True\n\n    \"\"\"Whether to beep after translation or on failure: one beep for success, two for failure\"\"\"\n    sound_success = True\n    sound_failed = True\n\n    \"\"\"Translation languages; 'auto' means autodetect\"\"\"\n    fromLang = 'auto'\n    toLang = 'auto'\n\n    \"\"\"Polling interval in seconds\"\"\"\n    waitTime = 2\n\n    \"\"\"List the files already in the directory; they are not processed on the first run\"\"\"\n    oldPhotoList = os.listdir(fileDir)\n    print(\"Script started\")\n    while True:\n        time.sleep(waitTime)\n        try:\n            newPhotoList = os.listdir(fileDir)\n            \"\"\"Use set symmetric difference to find the files that changed\"\"\"\n            diffList = list((set(oldPhotoList) ^ set(newPhotoList)))\n            if len(diffList) >= 1:\n                # iterate over a copy because entries are removed from diffList inside the loop\n                for i in list(diffList):\n                    oldPhotoList.append(i)\n                    diffList.remove(i)\n                    target = fileDir + os.sep + i\n                    if not os.path.exists(target):\n                        print(\"File does not exist \" + str(len(diffList)))\n                        print(\"\\a\\a\")\n                        continue\n                    if os.path.splitext(target)[-1].replace(\".\", \"\") not in fileType:\n                        print(\"This is not an image\")\n                        print(\"\\a\\a\")\n                        continue\n                    aipOcr = init()\n                    result = result_(target, options)\n                    if len(result) == 0:\n                        print(\"No text was found in this screenshot\")\n                        print(\"\\a\\a\")\n                        continue\n                    if translate:\n                        result = translate_(result)\n                    pyperclip.copy(result)\n                    if sound_success:\n                        print(\"\\a\")\n                    print(\"Result copied to the clipboard\")\n        except Exception as e:\n            if sound_failed:\n                print(\"\\a\\a\")\n            print(e)\n","sub_path":"OCR_image_recognition_and_Translation.py","file_name":"OCR_image_recognition_and_Translation.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"207499470","text":"import requests\n\n\ndef main():\n    url = \"http://0.0.0.0:8103/respond\"\n\n    request_data = [\n        {\"sentences\": [[\"what is the capital of russia?\"]]},\n        {\"sentences\": [[\"let's talk about politics.\"]]},\n    ]\n\n    gold_results = [\n        [\n            {\n                \"entities\": [\"capital\", \"russia\"],\n                \"labelled_entities\": [\n                    {\"finegrained_label\": [[\"misc\", 0.871]], \"label\": \"misc\", \"offsets\": [12, 19], \"text\": \"capital\"},\n                    {\n                        \"finegrained_label\": [[\"loc\", 0.9927]],\n                        \"label\": \"location\",\n                        \"offsets\": [23, 29],\n                        \"text\": \"russia\",\n                    },\n                ],\n            }\n        ],\n        [\n            {\n                \"entities\": [\"politics\"],\n                \"labelled_entities\": [\n                    {\"finegrained_label\": [[\"misc\", 0.9984]], \"label\": \"misc\", \"offsets\": [17, 25], \"text\": \"politics\"}\n                ],\n            }\n        ],\n    ]\n\n    count = 0\n    for data, gold_result in zip(request_data, gold_results):\n        result = requests.post(url, json=data).json()\n        if result == gold_result:\n            count += 1\n    assert count == len(request_data)\n    print(\"Success\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"annotators/entity_detection/test_entity_detection.py","file_name":"test_entity_detection.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246833834","text":"import requests\nfrom models.letter import 
Letter\n\nclass User:\n    def __init__(self, _id, username, full_name, role, disabled, access_token, hashed_password):\n        self._id = _id\n        self.username = username\n        self.full_name = full_name\n        self.role = role\n        self.disabled = disabled\n        self.access_token = access_token\n\n    def __str__(self):\n        return \"\\n\\tUsername: {0}\\n\\tFull name: {1}\\n\\tRole: {2}\\n\".format(self.username, self.full_name, self.role)\n\n    def get_letters(self):\n        headers = { 'Authorization': 'Bearer {0}'.format(self.access_token) }\n        resp = requests.get('http://127.0.0.1:8000/letters', headers=headers)\n        if resp.status_code == 200:\n            result = resp.json()\n            letters = []\n            for i in result:\n                letters.append(Letter(**i))\n            return letters\n        else:\n            return False\n\n    def create_letter(self, recipient, address, track=None, express=None):\n        headers = { 'Authorization': 'Bearer {0}'.format(self.access_token) }\n        data = { 'recipient': recipient, 'address': address, 'track':track, 'express': express }\n        resp = requests.post('http://127.0.0.1:8000/letters', headers=headers, json=data)\n        if resp.status_code == 201:\n            return Letter(**resp.json())\n        else:\n            return False\n    \n    def delete_letter(self, letter_id ):\n        headers = { 'Authorization': 'Bearer {0}'.format(self.access_token) }\n        resp = requests.delete('http://127.0.0.1:8000/letters/{}'.format(letter_id), headers=headers)\n        if resp.status_code == 204:\n            return True\n        else:\n            return False\n","sub_path":"client/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392886856","text":"#encoding: utf-8\n\"\"\"\n@project = importSpecification\n@file = mergeOfferGroupMain\n@author = Think\n@Create_time = 2019/3/13 10:57\n\"\"\"\n\nimport time, os, sys\ncurPath = os.path.dirname(os.path.abspath(__file__))\ndirName = os.path.dirname(curPath)\nsys.path.append(dirName)\n\nfrom reader.mergeOfferGroupProc import *\n\nif __name__ == '__main__':\n    print(\"------------------begin time:\" + time.strftime('%Y-%m-%d %H:%M:%S',\n                                                          time.localtime(time.time())) + \"----------------------\")\n    info = mergeOfferGroupProc()\n    info.mergeOfferGroupProcMain()\n    print(\"------------------end time:\" + time.strftime('%Y-%m-%d %H:%M:%S',\n                                                        time.localtime(time.time())) + \"----------------------\")","sub_path":"src/reader/mergeOfferGroupMain.py","file_name":"mergeOfferGroupMain.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"648115983","text":"__author__ = 'ptrigo'\n\nfrom pyswip import Prolog\nimport math\nimport os\n\n\nclass PrologAA:\n    def __init__(self):\n        self.prolog = Prolog()\n        self.prolog.assertz(\":-use_module(library(lists))\")\n        self.ficheiro = None\n\n    def assercao(self,texto):\n        if self.ficheiro is None:\n            self.ficheiro = open(r\"f:\\temp\\pee.pl\",\"w\")\n        self.ficheiro.write(texto+ '.\\n')\n\n    def query(self, texto):\n        if self.ficheiro is None:\n            self.ficheiro = open(r\"f:\\temp\\factos_pee.pl\",\"w\")\n        self.ficheiro.write(texto + '.\\n')\n        self.ficheiro.close()\n        return dict(V=['a','b','c'])\n\n    def procurar(self, problema):\n        prlg = self.prolog\n\n        objectivo = [ (x,y) for (x,y) in problema.modelo_mundo._elementos if problema.modelo_mundo._elementos[(x,y)] == 'alvo'][0]\n\n        self.assercao( \"final({}/{}/G/H/V,V,C) :- C is G+H.\".format( objectivo[0], objectivo[1]))\n        posicoes = [ (x,y) for (x,y) in problema.modelo_mundo._elementos if 
problema.modelo_mundo._elementos[(x,y)] != 'obst']\n        for (x,y) in posicoes:\n            self.assercao(\"posicao({},{})\".format(x,y))\n\n        accoes=[(1,0,1), (1,1,2), (0,1,3),(-1,1,4),(-1,0,5),(-1,-1,6),(0,-1,7),(1,-1,8)]\n        for (x,y, z) in accoes:\n            self.assercao(\"accao({},{},{},{})\".format(x,y, z/8.0 * 2.0* math.pi, math.sqrt( x**2 + y**2)))\n\n        self.problema = problema\n        (xi, yi) = problema.estado_inicial() #(0,1)\n        caminho = list(self.query(\"teste( V, _) :- aStar( {}/{},V, C )\".format(xi, yi)))\n        input(\"Stop here; this was only used to obtain the world representation in a file of facts to be used in Prolog\")\n","sub_path":"src/lib/pee/procura_melhor_prim/prolog_aa.py","file_name":"prolog_aa.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"651664775","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nimport time\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://www.qcc.com\")\ndriver.maximize_window()\n\n\ntime.sleep(6)\n# close the pop-up dialog\ndriver.find_element_by_xpath(\"//*[@id='addfavorModal']/div/div/div[1]\").click()\n\ndriver.find_element_by_link_text(\"登录 | 注册\").click()\ntime.sleep(5)\ntag = driver.find_element_by_xpath(\"//*[@id='nc_1_n1z']\")\n\nac = ActionChains(driver)\nfor i in range(0,35):\n    ac.click_and_hold(tag).move_by_offset(i*10,0).perform()","sub_path":"autotest02/企查查.py","file_name":"企查查.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"583533073","text":"from __future__ import division, absolute_import, print_function\n\n__copyright__ = \"Copyright (C) 2016 Matt Wala\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom loopy.kernel.data import temp_var_scope\nfrom loopy.schedule import (BeginBlockItem, CallKernel, EndBlockItem,\n RunInstruction, Barrier)\n\nfrom pytools import memoize_method\n\n\n# {{{ block boundary finder\n\ndef get_block_boundaries(schedule):\n \"\"\"\n Return a dictionary mapping indices of\n :class:`loopy.schedule.BlockBeginItem`s to\n :class:`loopy.schedule.BlockEndItem`s and vice versa.\n \"\"\"\n block_bounds = {}\n active_blocks = []\n for idx, sched_item in enumerate(schedule):\n if isinstance(sched_item, BeginBlockItem):\n active_blocks.append(idx)\n elif isinstance(sched_item, EndBlockItem):\n start = active_blocks.pop()\n block_bounds[start] = idx\n block_bounds[idx] = start\n return block_bounds\n\n# }}}\n\n\n# {{{ instruction query utility\n\nclass InstructionQuery(object):\n\n def __init__(self, kernel):\n self.kernel = kernel\n block_bounds = get_block_boundaries(kernel.schedule)\n subkernel_slices = {}\n from six import iteritems\n for start, end in iteritems(block_bounds):\n sched_item = kernel.schedule[start]\n if isinstance(sched_item, CallKernel):\n subkernel_slices[sched_item.kernel_name] = slice(start, end + 1)\n self.subkernel_slices = subkernel_slices\n\n @memoize_method\n def subkernels(self):\n return frozenset(self.subkernel_slices.keys())\n\n @memoize_method\n def insns_reading_or_writing(self, var):\n return frozenset(insn.id for insn in self.kernel.instructions\n if var in insn.read_dependency_names()\n or var in insn.assignee_var_names())\n\n @memoize_method\n def insns_in_subkernel(self, subkernel):\n return frozenset(sched_item.insn_id for sched_item\n in self.kernel.schedule[self.subkernel_slices[subkernel]]\n if isinstance(sched_item, RunInstruction))\n\n @memoize_method\n def temporaries_read_in_subkernel(self, subkernel):\n return frozenset(\n var\n for insn in self.insns_in_subkernel(subkernel)\n for var in self.kernel.id_to_insn[insn].read_dependency_names()\n if var in self.kernel.temporary_variables)\n\n @memoize_method\n def temporaries_written_in_subkernel(self, subkernel):\n return frozenset(\n var\n for insn in self.insns_in_subkernel(subkernel)\n for var in self.kernel.id_to_insn[insn].assignee_var_names()\n if var in self.kernel.temporary_variables)\n\n @memoize_method\n def temporaries_read_or_written_in_subkernel(self, subkernel):\n return (\n self.temporaries_read_in_subkernel(subkernel) |\n self.temporaries_written_in_subkernel(subkernel))\n\n @memoize_method\n def inames_in_subkernel(self, subkernel):\n subkernel_start = self.subkernel_slices[subkernel].start\n return frozenset(self.kernel.schedule[subkernel_start].extra_inames)\n\n @memoize_method\n def pre_and_post_barriers(self, subkernel):\n subkernel_start = self.subkernel_slices[subkernel].start\n subkernel_end = self.subkernel_slices[subkernel].stop\n\n def is_global_barrier(item):\n return isinstance(item, Barrier) and item.kind == \"global\"\n\n try:\n pre_barrier = next(item for item in\n self.kernel.schedule[subkernel_start::-1]\n if is_global_barrier(item)).originating_insn_id\n except StopIteration:\n pre_barrier = None\n\n try:\n post_barrier = next(item for item in\n self.kernel.schedule[subkernel_end:]\n if is_global_barrier(item)).originating_insn_id\n except StopIteration:\n post_barrier = 
None\n\n return (pre_barrier, post_barrier)\n\n @memoize_method\n def hw_inames(self, insn_id):\n \"\"\"\n Return the inames that insn runs in and that are tagged as hardware\n parallel.\n \"\"\"\n from loopy.kernel.data import HardwareParallelTag\n return set(iname for iname in self.kernel.insn_inames(insn_id)\n if isinstance(self.kernel.iname_to_tag.get(iname),\n HardwareParallelTag))\n\n @memoize_method\n def common_hw_inames(self, insn_ids):\n \"\"\"\n Return the common set of hardware parallel tagged inames among\n the list of instructions.\n \"\"\"\n # Get the list of hardware inames in which the temporary is defined.\n if len(insn_ids) == 0:\n return set()\n return set.intersection(*(self.hw_inames(id) for id in insn_ids))\n\n# }}}\n\n\n# {{{ add extra args to schedule\n\ndef add_extra_args_to_schedule(kernel):\n \"\"\"\n Fill the `extra_args` fields in all the :class:`loopy.schedule.CallKernel`\n instructions in the schedule with global temporaries.\n \"\"\"\n new_schedule = []\n\n insn_query = InstructionQuery(kernel)\n\n for sched_item in kernel.schedule:\n if isinstance(sched_item, CallKernel):\n subrange_temporaries = (insn_query\n .temporaries_read_or_written_in_subkernel(sched_item.kernel_name))\n more_args = set(tv\n for tv in subrange_temporaries\n if\n kernel.temporary_variables[tv].scope == temp_var_scope.GLOBAL\n and\n kernel.temporary_variables[tv].initializer is None\n and\n tv not in sched_item.extra_args)\n new_schedule.append(sched_item.copy(\n extra_args=sched_item.extra_args + sorted(more_args)))\n else:\n new_schedule.append(sched_item)\n\n return kernel.copy(schedule=new_schedule)\n\n# }}}\n","sub_path":"loopy/schedule/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"433431957","text":"'''WAP in python to create following functions:\r\n\r\n1) average(a,b,c,d)\r\n2) sum(a,b,c)\r\n3) product(a,b,c)\r\n4) max(a,b)\r\n5) min(a,b,c)\r\n6) percentage(a,b,c)\r\n'''\r\n\r\n\r\n\r\n\r\na=int(input(\"Enter 1st number:\"))\r\nb=int(input(\"enter 2nd number:\"))\r\nc=int(input(\"Enter 3rd number:\"))\r\nd=int(input(\"enter 4th number:\"))\r\n\r\n\r\ndef average():\r\n avg=a+b+c+d;\r\n print(\"Average=\",(avg/4))\r\n\r\naverage();\r\n\r\n\r\n","sub_path":"01.10.2020 python/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"636608198","text":"import matplotlib.pyplot as plt\r\nage=[12,2,3,4,5,56,43,2,34,56,12,23]\r\nrange=(0,50)\r\nbins=10\r\n#plotting the histogram\r\nplt.hist(age,bins,range,color='red',label='Age',histtype= 'bar',rwidth= 0.8)\r\nplt.xlabel('x axis')\r\nplt.ylabel('Y axis')\r\nplt.title('Histogram graph')\r\nplt.legend()\r\nplt.show()","sub_path":"histogram graph.py","file_name":"histogram graph.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"353848766","text":"def readpara(iterable):\n para = \"\"\n for line in iterable:\n if line.strip() == '' and para:\n yield para[1:]\n para = \"\"\n else:\n para += \" \" + line\n if para:\n yield para[1:]\n\n\ntext = \"\"\"This is text and\nit contains 3 paragraphs.\n\nEach paragraph contains multiple lines.\n\nAfter every paragraph,\nthere is line.\"\"\".split('\\n')\n\nfor paraline in readpara(text):\n print(paraline)\n print('-' * 
42)\n\n","sub_path":"03Functions/03Generators/06Read_Paragraphs.py","file_name":"06Read_Paragraphs.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354234690","text":"# coding=utf-8\n\"\"\"\nCreated on 08/05/2018\nAuthor: Ciarán\n\"\"\"\n\nimport nltk\nfrom nltk.corpus import wordnet\n\n\nclass NLTKFormatter:\n\n def __init__(self):\n self.stopwords = set(nltk.corpus.stopwords.words('english'))\n\n def format_sentence(self, sentence):\n tokenised_sentence = nltk.word_tokenize(sentence.lower())\n\n sentence_no_stopwords = []\n for word in tokenised_sentence:\n if (word not in self.stopwords) and (len(word) > 2):\n sentence_no_stopwords.append(word)\n\n sentence_no_stopwords = ' '.join(sentence_no_stopwords)\n\n return sentence_no_stopwords\n\n '''\n sentences = nltk.sent_tokenize(sentence)\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n sentences = [nltk.pos_tag(sent) for sent in sentences]\n\n synonyms = []\n synonym_list = ()\n for word in tokenised_sentence:\n for synonym in wordnet.synsets(word):\n synonym_list += synonym\n\n synonyms.append(synonym_list)\n '''\n\n\ndef main():\n nltk_formatter = NLTKFormatter()\n formatted_sentence = nltk_formatter.format_sentence(\"Is it possible to do mathematics inside CSS?\")\n print(formatted_sentence)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Ciaran's Workspace/src/NLTK/NLTKFormatter.py","file_name":"NLTKFormatter.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"169744521","text":"from fastapi import FastAPI\nfrom fastapi.routing import APIRouter\nfrom fastapi.responses import HTMLResponse\nfrom libs.routing import ResponseTyp\n\ndef __set_routes(app, routes):\n for route in routes:\n if route.typ == ResponseTyp.json:\n app.add_api_route(\n path=route.path,\n endpoint=route.endpoint,\n methods=[route.method]\n )\n else:\n app.add_api_route(\n path=route.path,\n endpoint=route.endpoint,\n methods=[route.method],\n response_class=HTMLResponse\n )\n\ndef run_server(routes, debug=False):\n app = FastAPI(debug=debug)\n __set_routes(app, routes)\n return app\n","sub_path":"src/application/libs/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323456300","text":"\"\"\"\nTest Tree\n=========\n\nChecks that your tree performs basic functionality.\n\"\"\"\n\nimport unittest\n\nfrom colours import Colours\nfrom node import Node\nfrom tree import Tree\n\n\nclass TestTree(unittest.TestCase):\n \"\"\"\n Checks super basic tree functionality\n \"\"\"\n\n def test_put(self):\n \"\"\"\n Can we insert into tree?\n \"\"\"\n\n root = Node(Colours.CYAN)\n\n t = Tree(root)\n\n a = Node(Colours.CYAN)\n\n t.put(root, a)\n\n assert len(root.children) == 1, \\\n \"[tree.put] should add child to node.\"\n\n assert root.children[0] == a, \\\n \"[tree.put] should add the correct node, yours did not.\"\n\n t.put(a, Node(Colours.YELLOW))\n\n assert len(root.children) == 1, \\\n \"[tree.put] should add child to node.\"\n\n assert root.children[0] == a, \\\n \"[tree.put] should add the correct node, yours did not.\"\n\n assert len(a.children) == 1, \\\n \"[tree.put] should add child to node.\"\n\n def test_put_propagate(self):\n \"\"\"\n Does the colour propagate?\n \"\"\"\n\n root = Node(Colours.CYAN)\n\n t = Tree(root)\n a 
= Node(Colours.BLUE)\n\n # Nothing should propagate yet\n assert Colours.CYAN.cmp(root.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n t.put(root, a)\n\n # It should now be blue!\n assert Colours.BLUE.cmp(root.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n t.put(a, Node(Colours.RED))\n\n # It should now be red!\n assert Colours.RED.cmp(root.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n assert Colours.RED.cmp(a.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n def test_update_colour_propagates(self):\n \"\"\"\n Does the colour propagate when changed?\n \"\"\"\n\n root = Node(Colours.CYAN)\n\n t = Tree(root)\n a = Node(Colours.BLUE)\n\n t.put(root, a)\n t.put(a, Node(Colours.RED))\n\n # It should now be red!\n assert Colours.RED.cmp(root.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n assert Colours.RED.cmp(a.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n t.update_node_colour(a.children[0], Colours.NYAN)\n\n assert Colours.NYAN.cmp(root.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n assert Colours.NYAN.cmp(a.propagated_colour) == 0, \\\n \"[propagate] Your colour didn't propagate correctly.\"\n\n def test_can_rm(self):\n \"\"\"\n Can we remove a child?\n \"\"\"\n\n root = Node(Colours.CYAN)\n\n t = Tree(root)\n\n a = Node(Colours.GREEN)\n b = Node(Colours.RED)\n\n t.put(root, a)\n t.put(root, b)\n\n assert len(root.children) == 2\n\n t.rm(b)\n\n assert len(root.children) == 1, \\\n \"[tree.rm] did not remove the node.\"\n\n assert b not in root.children, \\\n \"[tree.rm] did not remove the correct child.\"\n\n def test_rm_propagate(self):\n \"\"\"\n Can we remove a child and the colour propagates?\n \"\"\"\n\n root = Node(Colours.CYAN)\n\n t = Tree(root)\n\n a = Node(Colours.GREEN)\n b = Node(Colours.RED)\n\n t.put(root, a)\n t.put(root, b)\n\n assert Colours.RED.cmp(root.propagated_colour) == 0, \\\n \"Colour did not propagate with .put\"\n\n assert Colours.GREEN.cmp(a.propagated_colour) == 0, \\\n \"Colour of sibling changed?\"\n\n t.rm(b)\n\n assert Colours.GREEN.cmp(root.propagated_colour) == 0, \\\n \"Colour did not propagate when removing a child!\"\n\n def test_can_swap_example(self):\n \"\"\"\n Can you perform the swap in the comments?\n \"\"\"\n\n A = Node(Colours.GREEN)\n\n B = Node(Colours.RED)\n C = Node(Colours.BLUE)\n\n D = Node(Colours.CYAN)\n J = Node(Colours.CYAN)\n K = Node(Colours.YELLOW)\n\n t = Tree(A)\n\n t.put(A, B)\n t.put(A, C)\n t.put(B, D)\n t.put(C, J)\n t.put(C, K)\n\n # Let's swap\n t.swap(D, C)\n\n # Let's check if it worked!\n assert D.parent == A, \\\n \"[tree.swap] Did not change parent.\"\n\n assert C.parent == B, \\\n \"[tree.swap] Did not change parent.\"\n\n assert D not in B.children, \\\n \"[tree.swap] Did not remove child from old parent.\"\n\n assert C not in A.children, \\\n \"[tree.swap] Did not remove child from old parent.\"\n\n assert C in B.children, \\\n \"[tree.swap] child incorrectly swapped to children list.\"\n\n assert D in A.children, \\\n \"[tree.swap] child incorrectly swapped to children list.\"\n\n def test_depth_example(self):\n \"\"\"\n Can you perform the is_coloured function?\n\n\n (start)---> G\n / \\\n (A) G G (B)\n /| \\\n (A1) G R(A2) G (B1)\n |\n R (A21)\n \"\"\"\n\n root = Node(Colours.GREEN)\n\n A = Node(Colours.GREEN)\n B = 
Node(Colours.GREEN)\n\n A1 = Node(Colours.GREEN)\n A2 = Node(Colours.RED)\n A21 = Node(Colours.RED)\n\n B1 = Node(Colours.GREEN)\n\n t = Tree(root)\n\n t.put(root, A)\n t.put(root, B)\n\n t.put(A, A1)\n t.put(A, A2)\n t.put(A2, A21)\n t.put(B, B1)\n\n assert t.is_coloured_to_depth_k(root, Colours.GREEN, 0), \\\n \"[is_coloured] Returned false, should be true!\"\n\n assert not t.is_coloured_to_depth_k(root, Colours.RED, 0), \\\n \"[is_coloured] Returned true, should be false!\"\n\n assert not t.is_coloured_to_depth_k(root, Colours.GREEN, 2), \\\n \"[is_coloured] Returned true, should be false!\"\n\n assert t.is_coloured_to_depth_k(root, Colours.GREEN, 1), \\\n \"[is_coloured] Returned false, should be true!\"\n","sub_path":"tests/test_sample_tree.py","file_name":"test_sample_tree.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"276549368","text":"\"\"\"MDEntropy: Analyze correlated motions in MD trajectories with only a few\n lines of Python code.\n\nMDEntropy is a python library that allows users to perform\n information-theoretic analyses on molecular dynamics (MD) trajectories.\n\"\"\"\nimport sys\nfrom setuptools import setup, find_packages\n\nNAME = \"mdentropy\"\nVERSION = \"0.2\"\n\n\ndef read(filename):\n import os\n BASE_DIR = os.path.dirname(__file__)\n filename = os.path.join(BASE_DIR, filename)\n with open(filename, 'r') as fi:\n return fi.read()\n\n\ndef readlist(filename):\n rows = read(filename).split(\"\\n\")\n rows = [x.strip() for x in rows if x.strip()]\n return list(rows)\n\n\nextra = {}\nif sys.version_info >= (3, 0):\n extra.update(\n use_2to3=True,\n )\n\nsetup(\n name=NAME,\n version=VERSION,\n scripts=['./scripts/dmutinf', './scripts/dtent'],\n platforms=[\"Windows\", \"Linux\", \"Mac OS-X\", \"Unix\"],\n classifiers = (\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n ),\n author=\"Carlos Xavier Hernandez\",\n author_email=\"cxh@stanford.edu\",\n url = 'https://github.com/cxhernandez/%s' % NAME,\n download_url = 'https://github.com/cxhernandez/%s/tarball/master' % NAME,\n description=(\"Analyze correlated motions in MD trajectories with only \"\n \"a few lines of Python code.\"),\n license=\"MIT\",\n packages = find_packages('mdentropy'),\n package_dir = {'': 'mdentropy'},\n include_package_data = True,\n package_data = {\n '': ['README.md',\n 'requirements.txt'],\n },\n keywords=\"molecular dynamics entropy analysis\",\n zip_safe=True,\n install_requires=readlist('requirements.txt'),\n **extra\n)\n","sub_path":"pypi_install_script/mdentropy-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246799890","text":"# -*- coding: utf-8 -*-\n# © 2017 Ibrohim Binladin | ibradiiin@gmail.com | +62-838-7190-9782 | 
http://ibrohimbinladin.wordpress.com\n##########################################################################################################\nfrom openerp.osv import fields, osv\nfrom openerp import tools\n\n\nclass hr_payslip_custom_view(osv.osv):\n    _name = \"payslip.custom.list\"\n    _auto = False\n    _rec_name = 'number'\n    _columns = {\n        'number': fields.char('Payslip Ref#', readonly=True), #payslip\n        'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),\n        'date_from': fields.date('Date From', readonly=True),\n        'date_to': fields.date('Date To', readonly=True),\n        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True),\n        'contract_id': fields.many2one('hr.contract', 'Contract', readonly=True),\n\n        'description': fields.char('Description', readonly=False),#payslip_lines\n        'code': fields.char('Code', size=64, readonly=True),\n        'category_id': fields.many2one('hr.salary.rule.category', 'Category', readonly=True),\n        'total_salary': fields.float('Total Salary', readonly=True),\n        'rate': fields.float('Rate (%)', readonly=True),\n        'amount': fields.float('Amount', readonly=True),\n        'quantity': fields.float('Quantity', readonly=True),\n    }\n    _order = 'payslip desc'\n\n    def init(self, cr):\n        tools.drop_view_if_exists(cr, 'payslip_custom_list')\n        cr.execute(\"\"\"\n            create or replace view payslip_custom_list as (\n                select\n                    min(l.id) as id,\n                    l.name as description,\n                    l.code as code, \n                    l.category_id as category_id,\n                    l.total as total_salary,\n                    l.rate as rate, \n                    l.amount as amount, \n                    l.quantity as quantity,\n                    ps.number as number,\n                    ps.employee_id as employee_id,\n                    ps.date_from as date_from,\n                    ps.date_to as date_to,\n                    ps.struct_id as struct_id,\n                    ps.contract_id as contract_id\n                from\n                    hr_payslip_line l\n                    join hr_payslip ps on (l.slip_id=ps.id)\n                group by\n                    l.id, \n                    l.category_id, \n                    ps.id\n            )\n        \"\"\")","sub_path":"ib_immobi_reports/models/hr_payslip_list.py","file_name":"hr_payslip_list.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"337033346","text":"import pandas as pd\nimport requests\nimport urllib3\nurllib3.disable_warnings()\n\n# Endpoint URLs\nURL_login = 'https://granito.ime.eb.br/Granito/SisGradWeb/login.asp'\nURL_Alunos = 'https://granito.ime.eb.br/Granito/SisGradWeb/AlunosConsultaResult.asp'\nURL_Notas = 'https://granito.ime.eb.br/Granito/SisGradWeb/BolGrausAluno.asp'\n\ndef Login (Usuario, Senha):\n    # use the given credentials instead of hard-coded values\n    payload = { 'txtUser': Usuario, 'txtSenha': Senha }\n\n    session = requests.session()\n    r = session.post(URL_login, data=payload, verify=False)\n    return session\n\ndef DF_Alunos(AnoCivil, session):\n    # Fetch the student list\n    payload = {\n        'selCurso':'0',\n        'txtAnoCivil':AnoCivil,\n        'txtPeriodo':'',\n        'txtAnoEscolar':'',\n        'selEspecialidade':'0'\n    }\n\n    r = session.post(URL_Alunos, data=payload, verify=False)\n\n    return pd.read_html(r.text,header = 0)[0]\n\ndef DF_Notas(CodigoAluno, Ano, Periodo, session):\n    # Fetch the grades\n    payload = {'txtCodigo': CodigoAluno,'txtAno': Ano,'txtPeriodo': Periodo }\n\n    r = session.post(URL_Notas, data=payload, verify=False)\n\n    return pd.read_html(r.text)[2]\n\n\n# Log in\nsession = Login('17407','17407')\n\n# Students DataFrame\nAlunos = pd.DataFrame()\n\n\nfor ano in list(range(2016,2020)):\n    Aux = DF_Alunos(ano,session)\n    Aux['Ano Consulta'] = ano\n    \n    Alunos = pd.concat((Alunos,Aux))\n\nAlunos.to_csv(\"Alunos.csv\", header=False)\n\nNotas = pd.DataFrame()\n\nfor index, Aluno in Alunos.iterrows():\n    for Periodo in list(range(1,3)):\n\n        
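# query each student's grades once per semester (Periodo 1 and 2) of every year scraped above\n        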
Columns = ['Código', 'Disciplina', 'VE', 'VC', 'VF', 'RecEscrita','RecOral', 'Média']\n\n        try:\n            Aux = DF_Notas(Aluno['Código'], Aluno['Ano Consulta'], Periodo, session)\n            Aux.columns = Columns\n            \n            Aux.insert (0, \"Codigo Aluno\", Aluno['Código'])\n            Aux.insert (1, \"Semestre\", Periodo)\n            Aux.insert (2, \"Ano Consulta\", Aluno['Ano Consulta'])\n            \n            Aux[['VE','VC','VF','RecEscrita','RecOral']] = Aux[['VE','VC','VF','RecEscrita','RecOral']]/10\n            Aux[['Média']] = Aux[['Média']]/100\n            \n            Notas = pd.concat((Notas,Aux))\n            print(\"Grades added, student: \"+str(Aluno['Código']))\n\n        except Exception:\n            print(\"Error for student: \" + Aluno['Nome'])\n\n# strip commas from the 'Disciplina' column (index 4) so they do not break the CSV; iloc[4] alone would select a row\nNotas.iloc[:, 4] = Notas.iloc[:, 4].str.replace(\",\",\"\")\nNotas.to_csv(\"Notas.csv\", header=False, index = False,encoding='utf-8-sig')","sub_path":"Webscraping.py","file_name":"Webscraping.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"43703074","text":"# write a simple converter for pounds to kilograms\nimport math # This imports math module\n\nlbs = raw_input(\"Enter weight in pounds: \")\n\ndef poundsTokg(lbs):\n\tkgFloat = float(lbs) * 0.453592\n\tkgString = str(kgFloat)\n\tprint(\"That's about \" + kgString + \"kgs\")\n\ntry:\n\tval = int(lbs)\n\tpoundsTokg(val)\n\t\nexcept ValueError:\n\tprint(\"Please enter a number!\")\n\tlbs2 = raw_input(\"Do it again!\")\n\tpoundsTokg(lbs2)\n\n\n\n\n# need a way to re-loop on user input error without writing it out again and again!!","sub_path":"python/playground/lb-kg.py","file_name":"lb-kg.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"573694081","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n'''\r\ntest and prediction results\r\n'''\r\n\r\nimport argparse, os, math,glob\r\nimport numpy\r\nfrom PIL import Image\r\nimport piexif\r\nimport cv2\r\nimport chainer\r\nfrom chainer import cuda\r\nfrom chainer import serializers\r\nimport network\r\nimport scipy.io as scio\r\nimport datetime\r\n\r\nparser = argparse.ArgumentParser(description='')\r\nparser.add_argument('-i', help='File path of input image.', default='./testing_samples')\r\nparser.add_argument('-o', help='Output directory.', default='./testing_samples')\r\nparser.add_argument('-gpu', help='GPU device specifier. 
Two GPU devices must be specified, such as 0,1.', default='0')\r\nparser.add_argument('-dm', help='File path of a downexposure model.', default='./models/downexposure_model_2.chainer')\r\nparser.add_argument('-um', help='File path of an upexposure model.', default='./models/upexposure_model_2.chainer')\r\nparser.add_argument('-al', help='Blending weight between the log-estimated and Debevec-merged HDR results.', default='0.6')\r\nargs = parser.parse_args()\r\n\r\nstart = datetime.datetime.now()\r\nalpha = numpy.array(args.al).astype(numpy.float32)\r\ndir_path_list = glob.glob(args.i+'/*')\r\ndir_path_list = dir_path_list[:]\r\ndir_outpath = glob.glob(args.o)\r\nmodel_path_list = [args.dm, args.um] \r\nbase_outdir_path = args.o \r\ngpu_list = []\r\nif args.gpu != '-1':\r\n    for gpu_num in (args.gpu).split(','):\r\n        gpu_list.append(int(gpu_num))\r\n\r\n'Estimate up-/down-exposed images'\r\nmodel_list = [network.CNNAE3D512(), network.CNNAE3D512()] \r\nxp = cuda.cupy if len(gpu_list) > 0 else numpy\r\nif len(gpu_list) > 0:\r\n    cuda.check_cuda_available()\r\n    cuda.get_device().use()\r\n    for i in range(2):\r\n        model_list[i].to_gpu()\r\n        serializers.load_npz(model_path_list[i], model_list[i]) \r\n    \r\nelse:\r\n    for i in range(2):\r\n        serializers.load_npz(model_path_list[i], model_list[i]) \r\n\r\ndef estimate_images(input_img, model): \r\n    # \r\n    model.train_dropout = False\r\n    input_img_ = (input_img.astype(numpy.float32)/255.).transpose(2,0,1) \r\n    input_img_ = chainer.Variable(xp.array([input_img_]))\r\n    res = model(input_img_).data[0]\r\n    if len(gpu_list)>0:\r\n        res = cuda.to_cpu(res)\r\n\r\n    out_img_list = list()\r\n    for i in range(res.shape[1]):\r\n        if i ==0:\r\n            out_img = (res[:,i,:,:].transpose(1,2,0)).astype(numpy.float) \r\n        else:\r\n            out_img = (255.*res[:,i,:,:].transpose(1,2,0)).astype(numpy.uint8)\r\n        out_img_list.append(out_img)\r\n\r\n    return out_img_list\r\n\r\nprint('\\nStarting prediction...\\n\\n')\r\nN = len(dir_path_list)\r\nfor i in range (N):\r\n    dir_path = dir_path_list[i]\r\n    frames = [glob.glob(dir_path + '/LDR/1.png')[0], glob.glob(dir_path + '/LDR/4.png')[0], glob.glob(dir_path + '/LDR/7.png')[0]]\r\n    frame_H = [glob.glob(dir_path + '/HDR/1.hdr')[0]]\r\n    HDR_Ground = cv2.imread(frame_H[0], flags=cv2.IMREAD_ANYDEPTH)\r\n    filename_root = os.path.basename(dir_path) \r\n    print('filename',filename_root)\r\n    save_path = dir_outpath[0] + '/' + filename_root + '/result'\r\n    if not os.path.exists(save_path):\r\n        os.makedirs(save_path)\r\n    cv2.imwrite(save_path + '/HDR_Ground.hdr', HDR_Ground)\r\n    print('\\tReading...')\r\n    for ii in range (len(frames)):\r\n        img = cv2.imread(frames[ii])\r\n        img_zeros = numpy.zeros(numpy.shape(img)).astype(numpy.float)\r\n        out_img_list = list()\r\n        if len(gpu_list)>0:\r\n            cuda.get_device().use()\r\n            for i in range(2):\r\n                out_img_list.extend(estimate_images(img, model_list[i]))\r\n                if i == 0:\r\n                    out_img_list.reverse() \r\n            out_img_list.append(img)\r\n        else:\r\n            for i in range(2):\r\n                out_img_list.extend(estimate_images(img, model_list[i]))\r\n                if i == 0:\r\n                    out_img_list.reverse()\r\n            out_img_list.append(img)\r\n        out_img_list[8] = ((numpy.array(out_img_list[6], dtype=float)+numpy.array(out_img_list[10], dtype=float))/2).astype(numpy.uint8)\r\n\r\n        prev_img_log_mean = (out_img_list[7].astype(numpy.float32)+out_img_list[9].astype(numpy.float32))*3-5 \r\n        pre_img_hdr = numpy.power(10, prev_img_log_mean)\r\n\r\n        'Select and Merge'\r\n        del out_img_list[9]\r\n        del out_img_list[7]\r\n\r\n        threshold = 32 \r\n        stid = 0\r\n        \r\n        prev_img = out_img_list[7].astype(numpy.float32) \r\n        out_img_list.reverse() \r\n        
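# scan outward from the middle exposure: 'stid' counts frames whose pixels stay within 'threshold' of the previous frame; a larger jump suggests clipping\r\n        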
for out_img in out_img_list[8:]:\r\n img = out_img.astype(numpy.float32)\r\n if (img>(prev_img+threshold)).sum() > 0: \r\n break\r\n prev_img = img[:,:,:]\r\n stid+=1\r\n\r\n edid = 0\r\n prev_img = out_img_list[7].astype(numpy.float32)\r\n out_img_list.reverse() \r\n for out_img in out_img_list[8:]:\r\n img = out_img.astype(numpy.float32)\r\n if (img<(prev_img-threshold)).sum() > 0: \r\n break\r\n prev_img = img[:,:,:]\r\n edid+=1\r\n\r\n\r\n out_img_list_ = out_img_list[7-stid:8+edid] \r\n exposure_times = list()\r\n lowest_exp_time = 1/32. \r\n for i in range(len(out_img_list_)):\r\n exposure_times.append(lowest_exp_time*math.pow(math.sqrt(2.),i))\r\n exposure_times = numpy.array(exposure_times).astype(numpy.float32)\r\n print('exposure_times.len',len(exposure_times))\r\n merge_debvec = cv2.createMergeDebevec()\r\n hdr_debvec = merge_debvec.process(out_img_list_, times=exposure_times.copy())\r\n\r\n merge_final_debvec = alpha*pre_img_hdr+(1-alpha)*hdr_debvec/numpy.max(hdr_debvec)*numpy.max(pre_img_hdr)\r\n \r\n if ii == 0:\r\n cv2.imwrite(save_path+'/HDR_Log_1.hdr', pre_img_hdr)\r\n scio.savemat(save_path+'/HDR_Log_1.mat',{'HDR_Log_1':pre_img_hdr})\r\n cv2.imwrite(save_path+'/HDR_Debvec_1.hdr', hdr_debvec)\r\n scio.savemat(save_path+'/HDR_Debvec_1.mat',{'HDR_Debvec_1':hdr_debvec})\r\n cv2.imwrite(save_path + '/HDR_HybridNet_1.hdr', merge_final_debvec)\r\n scio.savemat(save_path+'/HDR_HybridNet_1.mat',{'HDR_HybridNet_1':merge_final_debvec})\r\n elif ii == 1:\r\n cv2.imwrite(save_path+'/HDR_Log_4.hdr', pre_img_hdr)\r\n scio.savemat(save_path+'/HDR_Log_4.mat',{'HDR_Log_4':pre_img_hdr})\r\n cv2.imwrite(save_path+'/HDR_Debvec_4.hdr', hdr_debvec)\r\n scio.savemat(save_path+'/HDR_Debvec_4.mat',{'HDR_Debvec_4':hdr_debvec})\r\n cv2.imwrite(save_path + '/HDR_HybridNet_4.hdr', merge_final_debvec)\r\n scio.savemat(save_path+'/HDR_HybridNet_4.mat',{'HDR_HybridNet_4':merge_final_debvec})\r\n elif ii == 2:\r\n cv2.imwrite(save_path+'/HDR_Log_7.hdr', pre_img_hdr)\r\n scio.savemat(save_path+'/HDR_Log_7.mat',{'HDR_Log_7':pre_img_hdr})\r\n cv2.imwrite(save_path+'/HDR_Debvec_7.hdr', hdr_debvec)\r\n scio.savemat(save_path+'/HDR_Debvec_7.mat',{'HDR_Debvec_7':hdr_debvec})\r\n cv2.imwrite(save_path + '/HDR_HybridNet_7.hdr', merge_final_debvec)\r\n scio.savemat(save_path+'/HDR_HybridNet_7.mat',{'HDR_HybridNet_7':merge_final_debvec})\r\n # print('\\tDone\\n')\r\n del out_img_list\r\n\r\n","sub_path":"test_network.py","file_name":"test_network.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"499624290","text":"from flask import Blueprint,render_template,redirect,url_for\nfrom myproject import db\nfrom myproject.models import Owner\nfrom myproject.owners.forms import AddForm\n\nowners_blueprints = Blueprint('owners', __name__, template_folder='templates/owners')\n\n@owners_blueprints.route('/add', methods=['GET','POST'])\ndef add():\n form = AddForm()\n\n if form.validate_on_submit():\n new_owner = Owner(form.name_owner.data, form.id_pup.data)\n db.session.add(new_owner)\n db.session.commit()\n\n return redirect(url_for('puppies.list'))\n\n return render_template('add_owner.html', form=form)\n","sub_path":"DatabaseWithViews/myproject/owners/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"51238576","text":"import matplotlib.pyplot as plt\nfrom pylab import 
mpl\nmpl.rcParams['font.sans-serif'] = ['SimHei']\n\n\ndef main():\n price = []\n week = []\n infile = open(\"1994_Weekly_Gas_Averages.txt\", \"r\")\n line = infile.readline()\n while line != '':\n amount = float(line)\n price.append(amount)\n week.append(len(week) + 1)\n line = infile.readline()\n infile.close()\n plt.plot(week, price)\n plt.title(\"1994年间每周天然气的平均价格\")\n plt.xlabel(\"周数\")\n plt.ylabel(\"平均价格\")\n plt.show()\n\n\nmain()\n","sub_path":"diagram/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"463052388","text":"import json, math\nfrom scrapy.spiders import Spider\nfrom scrapy import Request\n\n\nclass CitySpider(Spider):\n name = 'city'\n allowed_domains = ['m.ctrip.com']\n redis_key = 'city:start_urls'\n url = \"https://m.ctrip.com/restapi/soa2/16593/json/getCityList?_fxpcqlniredt=09031157111978003970\" \\\n \"&__gw_appid=99999999&__gw_ver=1.0&__gw_from=10320666865&__gw_platform=H5\"\n headers = {'content-type': 'application/json'}\n\n def get_payload(self):\n payload = {\n \"args\": \"{\\\"parameter\\\":{\\\"version\\\":\\\"1\\\"}}\",\n \"head\": {\"cid\": \"09031157111978003970\", \"ctok\": \"\", \"cver\": \"1.0\", \"lang\": \"01\", \"sid\": \"8888\",\n \"syscode\": \"09\", \"auth\": None,\n \"extension\": [\n {\"name\": \"terminaltype\", \"value\": \"20\"},\n {\"name\": \"devicetype\", \"value\": \"Macintosh\"},\n {\"name\": \"devicebrand\", \"value\": \"undefined\"},\n {\"name\": \"devicephone\", \"value\": \"Mac\"},\n {\"name\": \"browsername\", \"value\": \"Safari\"},\n {\"name\": \"browserver\", \"value\": \"605.1.15\"},\n {\"name\": \"os\", \"value\": \"IOS\"},\n {\"name\": \"osver\", \"value\": \"10.146\"},\n {\"name\": \"channelid\", \"value\": \"2\"},\n {\"name\": \"page\", \"value\": \"10320666865\"},\n {\"name\": \"refpage\", \"value\": \"\"},\n {\"name\": \"currentpage\",\n \"value\": \"feb85487-ea6f-811d-20ae-1731ec2d34dc\"},\n {\"name\": \"pagename\", \"value\": \"citylist\"},\n {\"name\": \"refpagename\", \"value\": \"\"},\n {\"name\": \"refpageid\", \"value\": \"\"},\n {\"name\": \"vid\", \"value\": \"\"},\n {\"name\": \"la\", \"value\": \"\"},\n {\"name\": \"lo\", \"value\": \"\"},\n {\"name\": \"geoType\", \"value\": \"\"}, {\"name\": \"traceid\",\n \"value\": \"899371e7-ea7d-7c28-35dd-f263725e8d8b\"},\n {\"name\": \"protocal\", \"value\": \"https\"}]},\n \"contentType\": \"json\"\n }\n\n return payload\n\n\n def start_requests(self):\n payload = json.dumps(self.get_payload())\n yield Request(self.url, self.parse, method=\"POST\", body=payload,\n headers=self.headers, dont_filter=True)\n\n def parse(self, response):\n body = response.body.decode('utf8')\n _body = json.loads(body)\n cities = []\n try:\n _result = _body['result']\n result = json.loads(_result)\n data = result['data']\n _cities = data['cityGroup']['cities']\n for cities_letter_group in _cities.values():\n for city in cities_letter_group:\n cities.append({'id': city['cityId'], 'name': city['cityName']})\n cities_js = json.dumps(cities)\n a = cities_js\n except Exception as e:\n self.logger.exception(e)\n\n","sub_path":"ctrip_scrapy/ctrip_scrapy/ctrip/build/lib/ctrip/spiders/city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"208445035","text":"from allennlp.data.tokenizers.word_splitter import SimpleWordSplitter\nfrom tqdm import tqdm\nimport 
unicodedata\nimport json\nimport re\nimport sys\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--in_file\", type=str, required=True)\nparser.add_argument(\"--out_dir\", type=str, required=True)\nparser.add_argument(\"--split\", help=\"which part of dataset\", type=str, required=True)\nparser.add_argument(\"--max_sent_len\", help=\"maximum sentence length\", type=int, default=200)\nargs = parser.parse_args()\nin_file = args.in_file\nout_dir = args.out_dir\nsplit = args.split\nmax_sent_len = args.max_sent_len\n\njsondecoder = json.JSONDecoder()\n\ntokenizer = SimpleWordSplitter()\n\npremise_fp = open(out_dir + \"/\" + split + \".premise\", \"w\")\nhypothesis_fp = open(out_dir + \"/\" + split + \".hypothesis\", \"w\")\nlabel_fp = open(out_dir + \"/\" + split + \".label\", \"w\")\nindex_fp = open(out_dir + \"/\" + split + \".index\", \"w\")\n\nwith open(in_file, \"r\") as in_fp:\n    for line in tqdm(in_fp.readlines()):\n        struct = jsondecoder.decode(line)\n\n        hypothesis = struct[\"claim\"]\n\n        premise_idx = 0\n        for sentence in struct[\"predicted_sentences\"]:\n            underlined_title = sentence[0]\n            label = 0 # placeholder, but must be a valid index\n            premise = sentence[3]\n\n            # Prefix the premise sentence with [ TITLE ] (from source article)\n            title = underlined_title.replace(\"_\", \" \")\n            title_words = tokenizer.split_words(title)\n            tokenized_title = \" \".join(map(lambda x: x.text, title_words))\n            premise = \"[ \" + tokenized_title + \" ] \" + premise\n\n            premise_words = premise.split(\" \")\n            if(len(premise_words) > max_sent_len):\n                premise = \" \".join(premise_words[0:max_sent_len])\n\n            info = str(struct[\"id\"]) + \"\\t\" + str(premise_idx) + \"\\t\"\n            info = info + str(sentence[0]) + \"\\t\" + str(sentence[1])\n\n            premise_fp.write(premise + \"\\n\")\n            hypothesis_fp.write(hypothesis + \"\\n\")\n            label_fp.write(str(label) + \"\\n\")\n            index_fp.write(info + \"\\n\")\n\n            premise_idx = premise_idx + 1\n\npremise_fp.close()\nhypothesis_fp.close()\nlabel_fp.close()\nindex_fp.close()\n\n","sub_path":"test-entailment-title-input.py","file_name":"test-entailment-title-input.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"19048653","text":"from twisted.internet import protocol,reactor\r\nfrom urllib3 import *\r\nhost = 'localhost'\r\nport = 1234\r\nbuffersize = 1024\r\nclass MyProtocol(protocol.Protocol):\r\n    def sendData(self):\r\n        data = input(\">\")\r\n        if data:\r\n            print(\"..... sending\",data)\r\n            self.transport.write(data.encode(encoding='utf-8'))\r\n        else:\r\n            self.transport.loseConnection()\r\n    def connectionMade(self):\r\n        self.sendData()\r\n    def dataReceived(self, data):\r\n        print(data.decode('utf-8'))\r\n        self.sendData()\r\nclass MyFactory(protocol.ClientFactory):\r\n    protocol = MyProtocol\r\n    clientConnectionLost = clientConnectionFailed = lambda self,connector,reason:reactor.stop()\r\n# connectTCP expects a factory instance, not the class itself\r\nreactor.connectTCP(host,port,MyFactory())\r\nreactor.run()","sub_path":"Python学习基础知识/高级python篇/第16章:网络高级编程/Twisted框架编写时间戳客户端.py","file_name":"Twisted框架编写时间戳客户端.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"4090193","text":"\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nhostName = \"0.0.0.0\"\nhostPort = 8084\n\nclass MyServer(BaseHTTPRequestHandler):\n    def do_GET(self):\n        self.send_response(200)\n        self.send_header(\"Content-type\", \"text/html\")\n        
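# the header section must be finished before any body bytes are written\n        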
self.end_headers()\n        self.wfile.write(bytes(\"<html><head><title>DEMO WEB SERVER</title></head>\", \"utf-8\"))\n        self.wfile.write(bytes(\"<body><p>HELLO WORLD!!</p></body>
\", \"utf-8\"))\n self.wfile.write(bytes(\"\", \"utf-8\"))\n\nmyServer = HTTPServer((hostName, hostPort), MyServer)\nmyServer.serve_forever()\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"649613630","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright (c) 2019 Galib F. Arrieta\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of \nthis software and associated documentation files (the \"Software\"), to deal in \nthe Software without restriction, including without limitation the rights to \nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies \nof the Software, and to permit persons to whom the Software is furnished to do \nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all \ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \nSOFTWARE.\n\"\"\"\nimport bpy\nimport os\n\n#The modules of lumbermixalot\nif __package__ is None or __package__ == \"\":\n # When running as a standalone script from Blender Text View \"Run Script\"\n import actormixalot\n import motionmixalot\n import commonmixalot\n from commonmixalot import Status\nelse:\n # When running as an installed AddOn, then it runs in package mode.\n from . import actormixalot\n from . import motionmixalot\n from . import commonmixalot\n from .commonmixalot import Status\n\nif \"bpy\" in locals():\n from importlib import reload\n if \"actormixalot\" in locals():\n reload(actormixalot)\n if \"motionmixalot\" in locals():\n reload(motionmixalot)\n if \"commonmixalot\" in locals():\n reload(commonmixalot)\n\n\n#Exports the current scene with the right settings for Lumberayrd.\n#@fbxFilePath (string) a fully qualified file path, suitable for file\n# exporting.\ndef _ExportFbxInternal(fbxFilePath):\n bpy.ops.export_scene.fbx(filepath=fbxFilePath, check_existing=False, axis_forward='Y', axis_up='Z', path_mode='COPY')\n print(\"{} exported successfully\".format(fbxFilePath))\n\n\n# Looks across first level children and see if at least one of them is of type\n# 'MESH'\n# @obj (bpy.types.Object). 
Object.type is assumed to be 'ARMATURE'\ndef _CheckArmatureContainsMesh(obj):\n children = obj.children\n for childObj in children:\n if childObj.type == 'MESH':\n return True\n return False\n\n\n# Returns a tuple (basePath, lastSubdir)\n# @fbxOutputPath (string) A directory path.\n# Example:\n# if @fbxOutputPath == \"C:\\\\Some\\\\Cool\\\\Path\"\n# returns (\"C:\\\\Some\\\\Cool\", \"Path\")\n# if @fbxOutputPath == \"C:\\\\Some\\\\Cool\\\\Path\\\\\"\n# returns (\"C:\\\\Some\\\\Cool\", \"Path\")\ndef _SplitPathByLastSubDir(fbxOutputPath):\n if fbxOutputPath[-1] == os.path.sep:\n fbxOutputPath = fbxOutputPath[0:-1]\n return os.path.split(fbxOutputPath)\n\n\n#Returns a fully qualified file path, suitable for file exporting.\n#@isActor (bool) If True the path is expected to be for an Actor.\n# If False the path is expected to be for a Motion.\n# This parameter is only relevant if @appendActorOrMotionPath is True.\n#@fbxFilename (string). File name (no path). '.fbx' extension is optional.\n# If Empty or None, automatic FBX exporting won't be done upon\n# conversion.\n#@fbxOutputPath (string). Output directory. Only relevant if @fbxFilename\n# is valid. CAVEAT: \n#@appendActorOrMotionPath (bool). If True, If an Actor is being converted\n# then the 'Actor/' path will be appended to @fbxOutputPath. If a Motion\n# is being converted then the 'Motions/' path will be appended.\n# If False, no path is appended to @fbxOutputPath.\ndef _GetOutputFilename(isActor, fbxFilename, fbxOutputPath,\n appendActorOrMotionPath):\n fbxFilename = \"\" if (fbxFilename is None) else fbxFilename.strip()\n fbxOutputPath = \"\" if (fbxOutputPath is None) else fbxOutputPath.strip()\n\n if fbxFilename == \"\":\n return None\n #Clean the fbxFilename.\n name, ext = os.path.splitext(fbxFilename)\n fbxFilename = \"{}.fbx\".format(name)\n if fbxOutputPath == \"\":\n fbxOutputPath = \".\"\n if appendActorOrMotionPath:\n baseDirs, lastDir = _SplitPathByLastSubDir(fbxOutputPath)\n if isActor:\n dirToAppend = \"Actor\"\n else:\n dirToAppend = \"Motions\"\n if (lastDir == \"Actor\") or (lastDir == \"Motions\"):\n fbxOutputPath = os.path.join(baseDirs, dirToAppend)\n else:\n fbxOutputPath = os.path.join(fbxOutputPath, dirToAppend)\n #Make sure the output directory exists. If not, create it.\n if not os.path.exists(fbxOutputPath):\n try:\n os.makedirs(fbxOutputPath)\n except:\n msg = \"Failed to create output dir:{}.\\n \\\n Will convert without exporting.\".format(fbxOutputPath)\n print(msg)\n return None\n return os.path.join(fbxOutputPath, fbxFilename)\n \n\ndef Convert(sceneObj, armatureObj, hipBoneName=\"\", rootBoneName=\"\",\n extractTranslationX=True, zeroOutTranslationX=False,\n extractTranslationY=True, zeroOutTranslationY=False,\n extractTranslationZ=True, zeroOutTranslationZ=False,\n extractRotationZ=False, zeroOutRotationZ=False,\n fbxFilename=\"\", fbxOutputPath=\"\",\n appendActorOrMotionPath=True,\n dumpCSVs=False):\n \"\"\"\n Main function to bake hipmotion to RootMotion in Mixamo Rigs.\n If this function finds at least one 'MESH' type of child object, then\n it will assume it is converting an Actor type if asset. If no 'MESH' type\n of child object is found, then it will assume it is converting a Motion\n type of asset.\n\n @sceneObj (bpy.types.Scene)\n @armatureObj (bpy.types.Object). Object.type is assumed to be 'ARMATURE'\n @hipBoneName (string). Name of the \"Hips\" bone as originated by Mixamo.\n If Empty or None, the value will be assumed to tbe \"Hips\".\n @rootBoneName (string). 
Name of the root motion bone that will be added to\n the armature. If Empty or None, the value will be assumed to be \"root\".\n @extractTranslationX,Y,Z (bool). Extract X,Y,Z Axis Translation.\n @zeroOutTranslationX,Y,Z (bool). Zero Out X,Y,Z Axis Translation upon\n extraction.\n @extractRotationZ (bool). Extract Rotation around Z Axis.\n @zeroOutRotationZ (bool). Zero Out Rotation around Z Axis upon extraction.\n @fbxFilename (string). File name (no path). '.fbx' extension is optional.\n If Empty or None, automatic FBX exporting won't be done upon\n conversion.\n @fbxOutputPath (string). Output directory. Only relevant if @fbxFilename\n is valid. CAVEAT: \n @appendActorOrMotionPath (bool). If True, If an Actor is being converted\n then the 'Actor/' path will be appended to @fbxOutputPath. If a Motion\n is being converted then the 'Motions/' path will be appended.\n If False, no path is appended to @fbxOutputPath.\n @dumpCSVs (bool) DEBUG Only. Dump motion vector data as CSV files\n \"\"\"\n yield Status(\"starting Convert\")\n \n hipBoneName = \"\" if (hipBoneName is None) else hipBoneName.strip()\n rootBoneName = \"\" if (rootBoneName is None) else rootBoneName.strip()\n if hipBoneName == \"\":\n hipBoneName = \"Hips\"\n if rootBoneName == \"\":\n rootBoneName = \"root\"\n\n isActor = _CheckArmatureContainsMesh(armatureObj)\n \n yield Status(\"Checked Asset Type. isActor={}\".format(isActor))\n \n outputFilename = _GetOutputFilename(isActor, fbxFilename, fbxOutputPath,\n appendActorOrMotionPath)\n\n yield Status(\"Processed output path strings\")\n\n if isActor:\n conversion_iterator = actormixalot.ProcessActor(armatureObj, rootBoneName)\n else:\n conversion_iterator = motionmixalot.ProcessMotion(sceneObj, armatureObj,\n hipBoneName, rootBoneName,\n extractTranslationX, zeroOutTranslationX,\n extractTranslationY, zeroOutTranslationY,\n extractTranslationZ, zeroOutTranslationZ,\n extractRotationZ, zeroOutRotationZ, dumpCSVs)\n\n for status in conversion_iterator:\n yield Status(str(status))\n\n yield Status(\"Completed Asset Conversion.\")\n\n if outputFilename is not None:\n _ExportFbxInternal(outputFilename)\n yield Status(\"FBX Assert exported.\")\n\n return 1\n\n\ndef ExportFBX(armatureObj, fbxFilename, fbxOutputPath, appendActorOrMotionPath):\n \"\"\"\n Convenience function to export the current scene as FBX per the required\n Lumberyard configuration. \n\n @armatureObj (bpy.types.Object). Object.type is assumed to be 'ARMATURE'\n @fbxFilename (string). File name (no path). '.fbx' extension is optional.\n @fbxOutputPath (string). Output directory. Only relevant if @fbxFilename\n is valid. CAVEAT: \n @appendActorOrMotionPath (bool). If True, If an Actor is being converted\n then the 'Actor/' path will be appended to @fbxOutputPath. 
If a Motion\n is being converted then the 'Motions/' path will be appended.\n If False, no path is appended to @fbxOutputPath.\n \"\"\"\n isActor = _CheckArmatureContainsMesh(armatureObj)\n outputFilename = _GetOutputFilename(isActor, fbxFilename,\n fbxOutputPath,\n appendActorOrMotionPath)\n if outputFilename is None:\n raise Exception(\"Undefined output filename\")\n _ExportFbxInternal(outputFilename)\n return outputFilename","sub_path":"mainmixalot.py","file_name":"mainmixalot.py","file_ext":"py","file_size_in_byte":9616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"212656252","text":"import matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport math\nfrom matplotlib.animation import FuncAnimation\n\n\"\"\"\nThis is a file to build an example\nsee the other python file!!\n\"\"\"\n\n# this code is inspired by an excellent blog post. Please refer to\n# https://apmonitor.com/me575/index.php/Main/SimulatedAnnealing\n\n# define objective function\n# in the case of machine learning this would be a loss function\ndef objective_function(x,y):\n res = 0.2 + x**2 + y**2 - 0.1*math.cos(6.0*3.1415*x) - 0.1*math.cos(6.0*3.1415*y)\n return(res)\n\n\"\"\"\nsimulated annealing: optimize a function through simulated annealing!\n--------------------------------\n\ninputs:\n objective: function, the objective function\n\n p_start: the probability of accepting a worse solution at the start\n\n p_end: the probability of accepting a worse solution at the end\n\n initial_try: starting points, list\n\n n_cycles: the number of tries. Defaults to 50\n\n n_rounds: number of tries per cycle. Defaults to 50\n\noutputs:\n best_try: list\n\n cycle_results: list of best tries per cycle\n\"\"\"\n# objective = thing to minimize\ndef simulated_annealing(objective, p_start, p_end, initial_try, n_cycles=50, n_rounds=50):\n # initial and final temperatures\n t_initial = -1.0/math.log(p_start)\n t_final = -1.0/math.log(p_end)\n # temperature reduction every cycle\n frac = (t_final/t_initial)**(1.0/(n_cycles-1.0))\n # change in energy\n DeltaE_avg = 0.0\n # number of accepted solutions:\n na = 0.0 # one because we are accepting the first solution\n # list of tries, per cycle:\n tries = [initial_try]\n # objective function results\n results = []\n # initialize\n xi = tries[0][0]\n yi = tries[0][1]\n # calculate current result with the initial solution\n x_current = xi\n y_current = yi\n f_current = objective(xi, yi)\n results.append(f_current)\n na = na + 1.0\n # current temperature\n t = t_initial\n for i in range(n_cycles):\n print('cycle: ' + str(i) + ' Temperature: ' + str(t))\n for j in range(n_rounds):\n # generate random new trial points\n xi, yi = (w + random.random() - 0.5 for w in [x_current, y_current])\n # clip the trial points\n xi, yi = (max(min(w, 1.0), -1.0) for w in [xi, yi])\n # calculate local change of energy\n DeltaE = abs(objective(xi, yi) - f_current)\n if (objective(xi, yi) > f_current):\n # update average energy change if we found a worse solution on\n # fisrst iteration\n if(i == 0 and j == 0):\n DeltaE_avg = DeltaE\n # probability of accepting the worse solution\n p = math.exp(-DeltaE/(DeltaE_avg * t))\n # determine whether to accept worse point\n if (random.random() < p):\n accept = True\n else:\n accept = False\n else:\n # if objective function is better, automatically accept\n accept = True\n if (accept == True):\n # update current best solution\n x_current = xi\n y_current = yi\n f_current = 
objective(x_current, y_current)\n # update number of accepted solutions\n na = na + 1.0\n # update average energy change\n DeltaE_avg = (DeltaE_avg * (na-1.0) + DeltaE) / na\n # record best values at end of every cycle\n tries = tries + [[x_current, y_current]]\n results.append(f_current)\n # lower_bound the temperature for next cycle\n t = frac*t\n # best solution\n best_try = [x_current, y_current, f_current]\n # cycle results, a list of lists where each list is a value\n # there are definitely better ways to do this\n cycle_results = [[t[0] for t in tries]] + [[t[1] for t in tries]] + [results]\n return(best_try, cycle_results)\n\nbest, cyc_results = simulated_annealing(objective_function, 0.9, 0.0001, [0.5, 0.5], 50, 1000)\n\nprint(best)\n\n# make sure initial try is good\nprint([c[0] for c in cyc_results])\n\n# visualization\n# create the meshes\n# function to create a mesh. This is used not for the simulated annealing, but\n# for the visualization. Do not worry too much about it, I will point out the\n# code you actually need to do simulated annealing on your own\n\ndef mesh(lower_bound, upper_bound, step, objective = objective_function):\n xi = np.arange(lower_bound, upper_bound, step)\n yi = np.arange(lower_bound, upper_bound, step)\n # create mesh coords\n mesh_x, mesh_y = np.meshgrid(xi,yi)\n # create mesh of objective\n # first allocate the space\n mesh_objective = np.zeros(mesh_x.shape)\n # next we populate\n for i in range(0, mesh_x.shape[0]):\n for j in range(0, mesh_y.shape[1]):\n mesh_objective[i,j] = objective(mesh_x[i,j], mesh_y[i,j])\n return(mesh_x, mesh_y, mesh_objective)\n\nxmesh, ymesh, fmesh = mesh(-1.0, 1.0, 0.01, objective_function)\n\n# code for animation\ndef animate(i):\n line.set_xdata(cyc_results[0][0:i])\n line.set_ydata(cyc_results[1][0:i])\n\nfig, ax = plt.subplots()\n# Specify contour lines\n#lines = range(2,52,2)\n# Plot contours\nCS = plt.contour(xmesh, ymesh, fmesh)#,lines)\n# Label contours\nplt.clabel(CS, inline=1, fontsize=10)\n# Add some text to the plot\nplt.title('Non-Convex Function')\nplt.xlabel('x')\nplt.ylabel('y')\n\nline = ax.plot(cyc_results[0][0], cyc_results[1][0], 'r-o')[0]\nanim = FuncAnimation(\n fig, animate, interval = 500, frames = 50\n)\nplt.draw()\nanim.save('sa.mp4')\n\nplt.show()\n\n\n# show our progress\nfig = plt.figure()\nax1 = fig.add_subplot(211)\nax1.plot(cyc_results[2],'r.-')\nax1.legend(['Objective'])\nax2 = fig.add_subplot(212)\nax2.plot(cyc_results[0],'b.-')\nax2.plot(cyc_results[1],'g--')\nax2.legend(['x','y'])\nplt.savefig('iterations.png')\nplt.show()\n\n\"\"\"\nThings to think about\n------------------------\n\nHow can we update this for any number of dimensions?\n\nThis is a situation where OOP makes a lot of sense, how can we improve this code\nusing that?\n\nWhat other optimization functions can we try/discuss (metropolis hastings?)?\n\"\"\"\n","sub_path":"anneal_tutorial.py","file_name":"anneal_tutorial.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"338166696","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass Teachers(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, teacher_id: int=None, 
full_name: str=None, email: str=None, phone: str=None, address: str=None, grade: str=None): # noqa: E501\n \"\"\"Teachers - a model defined in Swagger\n\n :param teacher_id: The teacher_id of this Teachers. # noqa: E501\n :type teacher_id: int\n :param full_name: The full_name of this Teachers. # noqa: E501\n :type full_name: str\n :param email: The email of this Teachers. # noqa: E501\n :type email: str\n :param phone: The phone of this Teachers. # noqa: E501\n :type phone: str\n :param address: The address of this Teachers. # noqa: E501\n :type address: str\n :param grade: The grade of this Teachers. # noqa: E501\n :type grade: str\n \"\"\"\n self.swagger_types = {\n 'teacher_id': int,\n 'full_name': str,\n 'email': str,\n 'phone': str,\n 'address': str,\n 'grade': str\n }\n\n self.attribute_map = {\n 'teacher_id': 'teacher_id',\n 'full_name': 'full_name',\n 'email': 'email',\n 'phone': 'phone',\n 'address': 'address',\n 'grade': 'grade'\n }\n self._teacher_id = teacher_id\n self._full_name = full_name\n self._email = email\n self._phone = phone\n self._address = address\n self._grade = grade\n\n @classmethod\n def from_dict(cls, dikt) -> 'Teachers':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The teachers of this Teachers. # noqa: E501\n :rtype: Teachers\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def teacher_id(self) -> int:\n \"\"\"Gets the teacher_id of this Teachers.\n\n\n :return: The teacher_id of this Teachers.\n :rtype: int\n \"\"\"\n return self._teacher_id\n\n @teacher_id.setter\n def teacher_id(self, teacher_id: int):\n \"\"\"Sets the teacher_id of this Teachers.\n\n\n :param teacher_id: The teacher_id of this Teachers.\n :type teacher_id: int\n \"\"\"\n\n self._teacher_id = teacher_id\n\n @property\n def full_name(self) -> str:\n \"\"\"Gets the full_name of this Teachers.\n\n\n :return: The full_name of this Teachers.\n :rtype: str\n \"\"\"\n return self._full_name\n\n @full_name.setter\n def full_name(self, full_name: str):\n \"\"\"Sets the full_name of this Teachers.\n\n\n :param full_name: The full_name of this Teachers.\n :type full_name: str\n \"\"\"\n if full_name is None:\n raise ValueError(\"Invalid value for `full_name`, must not be `None`\") # noqa: E501\n\n self._full_name = full_name\n\n @property\n def email(self) -> str:\n \"\"\"Gets the email of this Teachers.\n\n\n :return: The email of this Teachers.\n :rtype: str\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email: str):\n \"\"\"Sets the email of this Teachers.\n\n\n :param email: The email of this Teachers.\n :type email: str\n \"\"\"\n\n self._email = email\n\n @property\n def phone(self) -> str:\n \"\"\"Gets the phone of this Teachers.\n\n\n :return: The phone of this Teachers.\n :rtype: str\n \"\"\"\n return self._phone\n\n @phone.setter\n def phone(self, phone: str):\n \"\"\"Sets the phone of this Teachers.\n\n\n :param phone: The phone of this Teachers.\n :type phone: str\n \"\"\"\n\n self._phone = phone\n\n @property\n def address(self) -> str:\n \"\"\"Gets the address of this Teachers.\n\n\n :return: The address of this Teachers.\n :rtype: str\n \"\"\"\n return self._address\n\n @address.setter\n def address(self, address: str):\n \"\"\"Sets the address of this Teachers.\n\n\n :param address: The address of this Teachers.\n :type address: str\n \"\"\"\n\n self._address = address\n\n @property\n def grade(self) -> str:\n \"\"\"Gets the grade of this Teachers.\n\n\n :return: The grade of this Teachers.\n :rtype: 
str\n \"\"\"\n return self._grade\n\n @grade.setter\n def grade(self, grade: str):\n \"\"\"Sets the grade of this Teachers.\n\n\n :param grade: The grade of this Teachers.\n :type grade: str\n \"\"\"\n\n self._grade = grade\n","sub_path":"done_get_all_id/swagger_server/models/teachers.py","file_name":"teachers.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"637972217","text":"import os\r\nimport SimpleITK as sitk\r\nimport numpy as np\r\n\r\n# read 3d image. it works only with tiff/tif\r\ndef Read3DImage(image_file_dir):\r\n \r\n # get target image list\r\n file_list = os.listdir(image_file_dir)\r\n file_list_tiff = [file for file in file_list if file.endswith(\".tiff\") or file.endswith(\".tif\")]\r\n\r\n # initialize file reader itk\r\n file_reader = sitk.ImageFileReader()\r\n file_reader.SetImageIO('TIFFImageIO')\r\n \r\n # set first image\r\n file_reader.SetFileName(image_file_dir + \"\\\\\" + file_list_tiff[0])\r\n first_img = file_reader.Execute()\r\n #img3D = sitk.Image(first_img.GetSize()[0], first_img.GetSize()[1], len(file_list_tiff), sitk.sitkFloat32)\r\n img3D = sitk.Image(first_img.GetSize()[0], first_img.GetSize()[1], len(file_list_tiff), sitk.sitkUInt16)\r\n img3D[:, :, 0] = first_img\r\n \r\n # read other images\r\n for slice in range(1 , len(file_list_tiff)):\r\n #print(file_list[slice])\r\n file_reader.SetFileName(image_file_dir + \"\\\\\" + file_list_tiff[slice])\r\n current_img = file_reader.Execute()\r\n img3D[:, :, slice] = current_img\r\n \r\n return img3D\r\n\r\n# read 3d displacement from image\r\ndef Read3DDisplacement(displacement_file_dir):\r\n \r\n # get target displacement image list\r\n file_list = os.listdir(displacement_file_dir)\r\n file_list_tiff = [file for file in file_list if file.endswith(\".tiff\") or file.endswith(\".tif\")]\r\n \r\n # sort displacement image list to u,v,w\r\n file_u=[]\r\n file_v=[]\r\n file_w=[]\r\n for file_name in file_list_tiff:\r\n if file_name.find(\"u_\") != -1:\r\n file_u.append(file_name) \r\n elif file_name.find(\"v_\") != -1:\r\n file_v.append(file_name) \r\n elif file_name.find(\"w_\") != -1:\r\n file_w.append(file_name) \r\n \r\n assert len(file_u) == len(file_v) == len(file_w), \"input u,v,w files are not matching\"\r\n\r\n # initialize file reader itk\r\n file_reader = sitk.ImageFileReader()\r\n file_reader.SetImageIO('TIFFImageIO')\r\n \r\n # read first image\r\n file_reader.SetFileName(displacement_file_dir + \"\\\\\" + file_list_tiff[0])\r\n dis_img = file_reader.Execute()\r\n \r\n # create displacement array\r\n # displacement3D = np.zeros((dis_img.GetSize()[0], dis_img.GetSize()[1], len(file_u), 3))\r\n displacement3D = np.empty((len(file_u), dis_img.GetSize()[1], dis_img.GetSize()[0], 3))\r\n # read other images\r\n slice = 0\r\n for w in file_w:\r\n file_reader.SetFileName(displacement_file_dir + \"\\\\\" + w)\r\n current_img = file_reader.Execute()\r\n displacement3D[slice,:,:,2] = -sitk.GetArrayFromImage(current_img)\r\n slice += 1\r\n slice = 0\r\n for v in file_v:\r\n file_reader.SetFileName(displacement_file_dir + \"\\\\\" + v)\r\n current_img = file_reader.Execute()\r\n displacement3D[slice,:,:,1] = -sitk.GetArrayFromImage(current_img)\r\n slice += 1\r\n slice = 0\r\n for u in file_u:\r\n file_reader.SetFileName(displacement_file_dir + \"\\\\\" + u)\r\n current_img = file_reader.Execute()\r\n displacement3D[slice,:,:,0] = -sitk.GetArrayFromImage(current_img) \r\n slice += 1\r\n return 
displacement3D\r\n\r\n# create test image\r\n\r\n# #test test test test\r\n# testdp = np.zeros((180,210,210,3))\r\n# for i in range(0,210):\r\n# testdp[:,i,:,0] = (i-105)/10\r\n \r\n# testdp = testdp.reshape(-1)\r\n# dpf.SetParameters(testdp)\r\n# test_resample = sitk.Resample(next_img3D, dpf)\r\n\r\n# # testdp_restore = -testdp\r\n# # dpf.SetParameters(testdp_restore)\r\n# # test_resample_restored = sitk.Resample(test_resample, dpf)\r\n\r\n# # sitk.Show(test_resample)\r\n# # sitk.Show(test_resample_restored)\r\n# # sitk.Show(next_img3D)\r\n\r\n\r\n# #export image files\r\n\r\n# for slice_number in range(0, 180):\r\n# output_file_name_3D = os.path.join(Path.cwd(), folder_name_output, \r\n# ('img_from_sikt_' + str('{0:03d}'.format(slice_number)) + '.tiff'))\r\n# sitk.WriteImage(test_resample[:,:,slice_number], output_file_name_3D)\r\n\r\n# #test test test test\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"SImpleITK_myutil.py","file_name":"SImpleITK_myutil.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"481770559","text":"'''\r\nDQN Agent\r\n\r\nImplements: Double DQN (DDQN) and Dueling DQN \r\n\r\nNote: Vanilla DQN is not implemented as it is not very stable, therefore DDQN is selected as the standard\r\n\r\nSam Tukra\r\n\r\nAcknowledgements:\r\n\r\n'''\r\nfrom datetime import datetime\r\nfrom time import gmtime, strftime\r\nimport os\r\nimport copy\r\nimport random\r\nimport numpy as np\r\nimport torch\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport gym\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom memory import Replay_Buffer\r\nfrom models import DQNbn, DQN\r\nfrom utils import PreProcess\r\n\r\nclass DQN_Agent():\r\n\r\n\t\"\"\" The following are the steps of implementation of what the agent does:\r\n\t1. First random actions are computed for collecting some sample data from the environment. These are stored in replay memory\r\n\t2. for every N step iteration, a batch (user defined size) of samples is taken from the replay memory\r\n\t3. For each example in the batch / or for each batch as a whole, we calculate the target value (estimate of the state and action):\r\n\t\tQ_value = r_t + gamma * argmax_a Q(s', a)\r\n\t and we also calculate the current Q estimate of the state and action (this is the prediction from the NN model):\r\n\t\tQ* = model.predict(Q(s,a))\r\n\t4. Calculate the loss i.e. the Temporal difference error: delta:\r\n\t\tdelta= Q* - Q_value\r\n\t and using the above: huber loss (L): L(delta)\r\n\t5. Calculate the gradient of L w.r.t all parameters and update the DQN model\r\n\t6. Repeat the steps above till optimal action convergence\r\n\t\"\"\"\r\n\r\n
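\t# a minimal usage sketch (the env id below is just an assumed example, not\r\n\t# something this file pins down):\r\n\t#   agent = DQN_Agent(env_name='Breakout-v0', env_class='atari')\r\n\t#   agent.train_agent()\r\n\r\n\tdef __init__(self, env_name, model='default', env_class= 'atari', discount_rate = 0.99, Lr = 0.001, epsilon_max = 1.0, epsilon_decay = 0.999,\r\n\t\t\t\t\t\tepsilon_min = 0.01, batch_size = 64, episodes = 4000, max_t= 100, memory_size = 10000, device_type=\"cuda:0\", \r\n\t\t\t\t\t\toptimizer_fn= 'Adam', plotting= False):\r\n\t\t\r\n\t\t# the following are the input arguments:\r\n\t\tself.model=model\r\n\t\tself.env_name= env_name\r\n\t\tself.env= gym.make(self.env_name) # the environment in which you want to test the agent in. i.e. 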
(gym; 'Atari-breakoutV1')\r\n\t\tself.discount_rate = discount_rate # the gamma function for discounting future reward\r\n\t\tself.Lr = Lr # the learning rate for optimising the neural network\r\n\t\tself.epsilon_max = epsilon_max # the probability of choosing a random action in the beginning\r\n\t\tself.epsilon_decay = epsilon_decay # the decay (to attain a balance between exploration and exploitation)\r\n\t\tself.epsilon_min = epsilon_min # decay towards this value, we still want some exploration in the end \r\n\t\tself.batch_size = batch_size # defines the number of samples we want to randomly take for experience replay\r\n\t\tself.episodes = episodes \r\n\t\tself.memory_size = memory_size # the maximum capacity of the replay buffer\r\n\t\tself.device= device_type # the device on which you want the network to train on, can also be: cpu:0\r\n\t\tself.optimizer_fn=optimizer_fn # the optimizer function you want to use\r\n\t\tself.env_class= env_class\r\n\t\tself.nb_actions= self.env.action_space.n\r\n\t\tself.nb_states= self.env.observation_space.shape\r\n\t\tself.tau= 1e-3\t\r\n\t\tself.plotting=plotting\r\n\t\t# define time step for update, (initialise it at 0)\r\n\t\tself.t_step= 0\r\n\t\tself.max_t = max_t #maximum number of steps in the environment before the episode restarts\r\n\t\t# Declaring Memory\r\n\t\t# initialise memory as zeros:\r\n\t\tself.memory= Replay_Buffer(self.memory_size, self.batch_size, self.device)\r\n\t\tself.frames= 4 # for the atari environment that's the number of images we need to stack together\r\n\t\t# if you stack more than 4 frames... you will have to change that in the training loop too.\r\n\r\n\t\t# implementing the Default DQN model & Declaring the Q network\r\n\t\tif self.model == 'default':\r\n\t\t\tif self.env_class == 'discrete':\r\n\t\t\t\tself.model= DQNbn(nb_inputs= int(self.nb_states[0]), nb_actions= self.nb_actions).to(self.device)# the prediction Q net (the one we want to learn from)\r\n\r\n\t\t\telif self.env_class == 'atari':\r\n\t\t\t\tself.model= DQN(nb_inputs=self.frames , nb_actions= self.nb_actions).to(self.device)# the prediction Q net (the one we want to learn from)\r\n\t\t\r\n\t\t# declare the Q networks; the target net must be a separate copy of the\r\n\t\t# policy net, otherwise the soft updates below would have no effect:\r\n\t\tself.Q_policy_net= self.model\r\n\t\tself.Q_target_net= copy.deepcopy(self.model)\r\n\r\n\t\t# define optimizer for training:\r\n\t\tif self.optimizer_fn == 'Adam':\r\n\t\t\tself.optimizer= optim.Adam(self.Q_policy_net.parameters(), lr=Lr)\r\n\t\telif self.optimizer_fn == 'RMSprop':\r\n\t\t\tself.optimizer= optim.RMSprop(self.Q_policy_net.parameters(), lr=Lr)\r\n\t\telif self.optimizer_fn == 'SGD':\r\n\t\t\tself.optimizer= optim.SGD(self.Q_policy_net.parameters(), lr= Lr)\r\n\r\n\r\n\t# define how actions will be performed initially (uses Epsilon Greedy method): i.e. 
the Policy\r\n\tdef select_action(self, state):\r\n\t\t\"\"\" We sometimes use the model for selecting an action and sometimes we sample one uniformly, typical method= EpsilonGreedy\r\n\t\tEpsilon Greedy method:\r\n\t\t- Take a random action with probability epsilon\r\n\t\t- Take current best action with probability (1- epsilon)\r\n\t\t\"\"\"\r\n\t\t# first convert state to tensor (if it is not one already):\r\n\t\tif isinstance(state, torch.Tensor):\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tstate= torch.tensor(state, dtype= torch.float32).unsqueeze(0).to(self.device) \r\n\r\n\t\tif np.random.random() > self.epsilon_max:\r\n\t\t\t# if the random value is bigger than the epsilon probability then we use the network to predict\r\n\t\t\tself.Q_policy_net.eval()\r\n\t\t\twith torch.no_grad():\r\n\t\t\t\taction_values= self.Q_policy_net(state)\r\n\t\t\tself.Q_policy_net.train()\r\n\t\t\t# return the greedy (highest Q value) action:\r\n\t\t\treturn torch.argmax(action_values)\r\n\r\n\t\telse:\r\n\t\t\t# otherwise we select a random action:\r\n\t\t\treturn torch.tensor(random.randrange(self.nb_actions)).to(self.device)\r\n\r\n\r\n\t# agent learning process, this function performs a single step of the optimization, later on I define a train function\r\n\t# which optimises the model over the training loop.\r\n\t# this is where we get the TD error\r\n\tdef optimise_learn(self, states, actions, rewards, next_states, dones):\r\n\r\n\t\t# the online network predicts the actions and the target network estimates the Q value\r\n\r\n\t\t# getting the predicted values(next_state) i.e. V(next_state), from target model:\r\n\t\tQ_next_target= self.Q_target_net(next_states)\r\n\t\tQ_next_target= Q_next_target.gather(1,torch.max(Q_next_target,1)[1].unsqueeze(1)).squeeze(1)\r\n\t\t# current state targets:\r\n\t\tValue_expected= rewards + (self.discount_rate * Q_next_target * (1-dones))\r\n\r\n
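\t\t# note: the two lines above select and evaluate the best next action with the\r\n\t\t# target network alone, which is the plain DQN target; a full Double DQN target\r\n\t\t# would select the action with the policy net instead, roughly (sketch only):\r\n\t\t#   next_actions= self.Q_policy_net(next_states).argmax(1, keepdim=True)\r\n\t\t#   Q_next_target= self.Q_target_net(next_states).gather(1, next_actions).squeeze(1)\r\n\r\n\t\t# get the predicted Q values from the policy net (i.e. expected Q values) i.e. 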
Q(s,a):\r\n\t\tQ_value_predicted= self.Q_policy_net(states).gather(1,actions.unsqueeze(1)).squeeze(1)\r\n\r\n\t\t# now compute huber loss:\r\n\t\tloss= F.smooth_l1_loss(Q_value_predicted, Variable(Value_expected, requires_grad= False)) # huber loss between predicted and target Q values\r\n\r\n\t\tself.optimizer.zero_grad()\r\n\t\tloss.backward()\r\n\t\tself.optimizer.step()\r\n\r\n\t\t# Update the target network for each 1 pass:\r\n\t\tself.update_soft(self.Q_policy_net, self.Q_target_net, self.tau)\r\n\r\n\t\treturn loss.detach().cpu().numpy()\r\n\r\n\t# store transitions & perform the optimisation:\r\n\tdef step_transition(self, state, action, reward, next_state, done):\r\n\t\t# adding the step to our memory (use write_to_memory function from Replay_buffer class)\r\n\t\tself.memory.write_to_memory(state, action, reward, next_state, done) # transition = \r\n\r\n\t\t# update time step:\r\n\t\tself.t_step+=1\r\n\t\tloss_val= 0 \r\n\t\t# if we have enough transition samples in the memory, extract a random batch:\r\n\t\tif len(self.memory) > self.batch_size:\r\n\t\t\tstates, actions, rewards, next_states, dones= self.memory.sample() #using the sample function from the memory class\r\n\t\t\t\r\n\t\t\t# then we train with this experience every timestep:\r\n\t\t\tloss_val= self.optimise_learn(states, actions, rewards, next_states, dones)\r\n\r\n\t\treturn loss_val\r\n\r\n\tdef update_soft(self, Policy_net, Target_net, tau):\r\n\t\t\"\"\"Soft update model parameters.\r\n\t\tθ_target = τ*θ_local + (1 - τ)*θ_target\r\n\t\tParams\r\n\t\t======\r\n\t\tPolicy_net: PyTorch model (weights will be copied from)\r\n\t\tTarget_net: PyTorch model (weights will be copied to)\r\n\t\ttau (float): interpolation parameter \r\n\t\t\"\"\"\r\n\t\tfor target_parameters, policy_parameters in zip(Target_net.parameters(), Policy_net.parameters()):\r\n\t\t\ttarget_parameters.data.copy_(tau*policy_parameters.data + (1.0-tau)*target_parameters.data)\r\n\r\n\t# hard update simply copies the parameters of the policy directly onto the target net, whereas soft update allows some variability\r\n\tdef update_hard(self, Policy_net, Target_net):\r\n\t\tfor target_parameters, policy_parameters in zip(Target_net.parameters(), Policy_net.parameters()):\r\n\t\t\ttarget_parameters.data.copy_(policy_parameters.data)\r\n\r\n \r\n\tdef plot(self, i_episode, reward, loss):\r\n\t\tplt.figure(figsize= (20,10))\r\n\t\t\r\n\t\tplt.subplot(121)\r\n\t\tplt.title('Cumulative Reward vs Episodes')\r\n\t\tplt.plot(reward)\r\n\t\tplt.xlabel('Episode number (#)')\r\n\t\tplt.ylabel('Cumulative Reward')\r\n\r\n\t\tplt.subplot(122)\r\n\t\tplt.title(\"Model Loss (Q(s,a)-Q'(s,a))\")\r\n\t\tplt.plot(loss)\r\n\t\tplt.xlabel('Episode number')\r\n\t\tplt.ylabel('Loss Value')\r\n\r\n\t\tplt.show()\r\n\r\n\tdef train_agent(self):\r\n\t\t\"\"\" in this loop, the following is being done:\r\n\t\t1. Reset the environment\r\n\t\t2. initialise state tensor\r\n\t\t3. sample an action, execute it, observe the next screen and the reward and then optimise the model\r\n\t\t4. 
at the end of the episode, the loop restarts.\r\n\t\t\"\"\"\r\n\r\n\t\t# for atari: store the scores from each episode:\r\n\t\tRewards= [] \t\t\t\t\t\t\t\t# list containing rewards from each episode\r\n\t\tLosses=[] \t\t\t\t\t\t\t\t# recording the loss value for each episode\r\n\r\n\t\teps= self.epsilon_max \t\t\t\t\t\t# epsilon initial value\r\n\t\t# training loop over each episode\r\n\t\tfor i_episode in range(self.episodes):\r\n\t\t\t\r\n\t\t\tstate= self.env.reset()\r\n\t\t\t# pre process the state here: (i.e. gray scale + resize the images)\r\n\t\t\tif self.env_class == 'atari':\r\n\t\t\t\t# stack the 4 images observations\r\n\t\t\t\tstate= PreProcess(state)\r\n\t\t\t\t# stack 4 images together... in first instance... it will be the first image stacked 4 times and then we will remove one\r\n\t\t\t\t# and replace it with the next state every step/episode\r\n\t\t\t\tstate= np.stack((state, state, state, state), axis=0)\r\n # input into a convolution: [Batch, Channels, height, width] \r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\t\t\r\n\t\t\t# initialise episode reward at 0\r\n\t\t\tepi_reward= 0\r\n\t\t\t# can set a for loop for maximum time steps i.e. for t in range(max_t):\r\n\t\t\tfor i in range(self.max_t):\t\r\n\t\t\t\t\"\"\"\r\n\t\t\t\t# visualise the environment:\r\n\t\t\t\tif self.visualise == True:\r\n\t\t\t\t\tself.env.render()\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\t\t\t\t\"\"\"\r\n\t\t\t\t# select an action:\r\n\t\t\t\taction= self.select_action(state)\r\n\t\t\t\t# convert action to cpu and numpy for it to act on the environment:\r\n\t\t\t\taction= action.detach().cpu().numpy()\r\n\r\n\t\t\t\t# implement the above action in the environment and get the observations:\r\n\t\t\t\tnext_state, reward, done, _= self.env.step(action) # don't want the info variable\r\n\t\t\t\t\r\n\t\t\t\t# now we move to the next state:\t\t\t\r\n\t\t\t\t\r\n\t\t\t\tif self.env_class =='atari':\r\n\t\t\t\t\t# preprocess the next state accordingly for assignment to the state variable\r\n\t\t\t\t\tnext_state= PreProcess(next_state)\r\n\t\t\t\t\t# concatenate the states together:\r\n\t\t\t\t\tstate_mat= state[0:3,:,:].copy() # take the first 3 frames\r\n\t\t\t\t\tnext_state= np.expand_dims(next_state,0) # unsqueeze the next_state for allowing manipulation\r\n\t\t\t\t\t# now concatenate the new frame and the previous 3 frames (newest first):\r\n\t\t\t\t\tnext_state= np.concatenate((next_state, state_mat), axis=0)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\r\n\t\t\t\t# store the above experience to replay memory:\r\n\t\t\t\t# in this function we also end up learning\r\n\t\t\t\tloss_value= self.step_transition(state, action, reward, next_state, done)\r\n\r\n\r\n\t\t\t\t# get the new rewards:\r\n\t\t\t\tepi_reward+= reward\r\n\r\n\t\t\t\tif done:\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tstate= next_state\r\n\r\n\r\n\t\t\tLosses.append(loss_value)\r\n\t\t\tRewards.append(epi_reward)\r\n\t\t\t# decay epsilon after every episode (balance exploration vs exploitation):\r\n\t\t\tself.epsilon_max= max(self.epsilon_min, self.epsilon_max * self.epsilon_decay)\r\n\t\t\t\r\n\t\t\t# printing loop:\r\n\t\t\tprint('[{}], Episodes: {}, Frames: {}, Loss: {}, Reward:{}'. 
format(datetime.now(), i_episode, (i_episode+1)*self.max_t, loss_value, epi_reward))\r\n\r\n\t\t\t\r\n\t\t\t# every episode, save the model:\t\r\n\t\t\tself.save_model(name='DQN_{}_{}_{}'.format(self.env_name, strftime(\"%Y-%m-%d_%H-%M-%S\", gmtime()), i_episode))\r\n\r\n\t\t\tif self.plotting==True:\r\n\t\t\t\tif i_episode % 200 == 0:\r\n\t\t\t\t\tself.plot(i_episode, Rewards, Losses)\r\n\r\n\t\tself.save_to_csv([Rewards, Losses], name='DQN_{}_{}'.format(self.env_name, strftime(\"%Y-%m-%d_%H-%M-%S\", gmtime())))\r\n\t\tprint('data saved as csv :)')\r\n\t\r\n\tdef save_to_csv(self, matrix, name):\r\n\t\t\t# right now the data arrays are stacked in rows... turn them into columns\r\n\t\t\tmatrix= np.transpose(matrix)\r\n\t\t\t# save to csv:\r\n\t\t\tnp.savetxt(\"{}.csv\".format(name), matrix, delimiter=\",\", header= \"Rewards, Losses\")\r\n\r\n\tdef save_model(self, name):\r\n\t\t# add in dict name + file type:\r\n\t\tcurrent_directory = os.getcwd()\r\n\t\tmodel_directory = os.path.join(current_directory, r'DQN_model')\r\n\t\tmodel_dict_directory= os.path.join(current_directory, r'DQN_model_dict')\r\n\t\tif not os.path.exists(model_directory):\r\n\t\t\tos.makedirs(model_directory)\r\n\t\tif not os.path.exists(model_dict_directory):\r\n\t\t\tos.makedirs(model_dict_directory)\r\n\t\tdict_name= name + '_dict.pth'\r\n\t\tmodel_name= name+'.pth'\r\n\t\tmodel_dictP= os.path.join(model_dict_directory, dict_name)\r\n\t\tmodel_P=os.path.join(model_directory, model_name)\r\n\t\t# add in model name + file type:\r\n\t\ttorch.save(self.Q_policy_net.state_dict(), model_dictP)\r\n\t\ttorch.save(self.Q_policy_net,model_P)\r\n\r\n\r\n\tdef test_agent(self, model_path, test_episodes = 50):\r\n\t\tnet_model = torch.load(model_path)\r\n\t\tself.Q_policy_net.load_state_dict(net_model)\r\n\t\t# put the model in eval mode so that gradients aren't calculated:\r\n\t\tself.Q_policy_net.eval()\r\n\t\t\r\n\t\tstep=0\r\n\r\n\t\tfor i_episode in range(test_episodes):\r\n\t\t\tstate= self.env.reset()\r\n\t\t\tepi_rewards= 0\r\n\r\n\t\t\tfor t in range(self.max_t):\r\n\t\t\t\taction=self.select_action(state).detach().cpu().numpy()\r\n\t\t\t\tself.env.render()\r\n\t\t\t\tnext_state, reward, done, info= self.env.step(action)\r\n\t\t\t\tepi_rewards += reward\r\n\t\t\t\tstep +=1\r\n\t\t\t\t# render as an RGB array so the frame can be annotated and saved:\r\n\t\t\t\tsaved_frame=self.env.render(mode='rgb_array')\r\n\t\t\t\tcv2.putText(saved_frame, 'Episode #: {}'.format(i_episode), (20,20), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,0,255), 1)\r\n\t\t\t\tcv2.imwrite('saved_frames/'+str(step).zfill(4) + '.png', np.uint8(saved_frame))\r\n\r\n\t\t\t\tif done: \r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tstate= next_state\r\n\t\t\t\t# for each episode print the reward value:\r\n\t\t\tprint('Episode Reward:{}'. format(epi_rewards))\r\n\r\n\t\tself.env.close()\r\n\t\tprint('Testing complete! :)')\r\n\t\r\n\t# If Dueling_DQN enabled:\r\n\t\r\n\t# Double DQN = true (stable)\r\n
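\t# sketch for the Dueling_DQN note above (names are illustrative only): a\r\n\t# dueling architecture splits the network head into a state-value stream V(s)\r\n\t# and an advantage stream A(s,a), recombined as\r\n\t#   Q(s,a) = V(s) + A(s,a) - mean_a A(s,a)\r\n\t# which would live in models.py rather than in this agent class.\r\n","sub_path":"Agents/DQN_agent.py","file_name":"DQN_agent.py","file_ext":"py","file_size_in_byte":14328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"448902995","text":"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. 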
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\nimport sys\nfrom io import open\nimport pandas as pd\nimport numpy as np\nimport torch\nimport gc\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nimport tqdm\nfrom sklearn.metrics import f1_score\nfrom model_bert import BertForSequenceClassification\nfrom transformers import BertTokenizer, BertConfig, AdamW, get_linear_schedule_with_warmup\nfrom sklearn.model_selection import KFold\nfrom adv_train import FGM\n\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\n\nlogger = logging.getLogger(__name__)\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\nfilemap = {\n 'train': '/train_pse_4.csv',\n 'test': '/test.csv',\n 'validation': '/dev.csv'\n}\n\ndef open_file(path, mode = 'train'):\n filename =path + filemap[mode]\n texts = []\n with open(filename, encoding='utf-8') as f:\n f_csv = csv.reader(f, delimiter='\\t')\n for line in f_csv:\n texts.append(line)\n return texts\n\n\ndef read_data(texts, mode='train'):\n queries, replies, labels = [], [], []\n for text in tqdm.tqdm(texts, desc='loading ' + mode):\n queries.append(text[2])\n replies.append(text[3])\n if mode == 'test':\n labels.append(0)\n else:\n labels.append(int(text[4]))\n return queries, replies, labels\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, mode, texts):\n self.tokenizer = BertTokenizer.from_pretrained(\"model/chinese-bert-wwm-ext\")\n self.queries, self.replies, self.labels = read_data(texts=texts, mode=mode)\n\n def __getitem__(self, index):\n token = _convert_to_transformer_inputs(self.queries[index], self.replies[index], self.tokenizer, max_sequence_length=100)\n label = self.labels[index]\n return [torch.tensor(token[0]), torch.tensor(token[1]), torch.tensor(token[2]), torch.tensor(label)]\n\n def __len__(self):\n return len(self.queries)\n\n\ndef _convert_to_transformer_inputs(question, answer, tokenizer, max_sequence_length):\n \"\"\"Converts tokenized input to ids, masks and segments for transformer (including bert)\"\"\"\n\n def return_id(str1, str2, truncation_strategy, length):\n inputs = tokenizer.encode_plus(str1, str2,\n add_special_tokens=True,\n max_length=length,\n truncation=True,\n # truncation=True\n )\n\n input_ids = inputs[\"input_ids\"]\n input_masks = [1] * len(input_ids)\n input_segments = inputs[\"token_type_ids\"]\n padding_length = length - len(input_ids)\n padding_id = tokenizer.pad_token_id\n input_ids = input_ids + ([padding_id] * padding_length)\n input_masks = input_masks + ([0] * padding_length)\n input_segments = input_segments + ([0] * padding_length)\n\n return [input_ids, input_masks, 
input_segments]\n\n input_ids_q, input_masks_q, input_segments_q = return_id(\n question, answer, True, max_sequence_length)\n\n return [input_ids_q, input_masks_q, input_segments_q]\n\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return f1_score(labels, outputs)\n\n\ndef main():\n output_dir_o = './out_model/bert_wwm_ext_adv_pse_4_epoch5/'\n train_batch_size = 64\n eval_batch_size = 128\n setup_seed(324)\n path = 'model/chinese-bert-wwm-ext'\n dir = './data/data_KFold/'\n epochs = 5\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n test_data = open_file('./data/data_KFold/data_origin_0', 'test')\n test_dataset = Dataset('test', test_data)\n\n # Setup logging\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\n try:\n os.makedirs(output_dir_o)\n except:\n pass\n config = BertConfig.from_pretrained('model/chinese-bert-wwm-ext')\n res_proba = np.zeros((len(test_dataset), 2))\n for i in range(5):\n name = 'data_origin_{}'.format(i)\n data_dir = dir + name\n output_dir = output_dir_o + 'bert_{}'.format(i)\n weight_decay = 0.01\n learning_rate = 3e-5\n nb_tr_examples, nb_tr_steps = 0, 0\n try:\n os.makedirs(output_dir)\n except:\n pass\n\n train_texts = open_file(data_dir, mode=\"train\")\n train_data = Dataset('train', train_texts)\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=train_batch_size, shuffle=False,\n num_workers=0)\n epoch_steps = len(train_data) / train_batch_size\n total_steps = epoch_steps * epochs\n\n model = BertForSequenceClassification(path=path, config=config)\n fgm = FGM(model)\n model.to(device)\n param_optimizer = list(model.named_parameters())\n param_optimizer = [n for n in param_optimizer]\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=total_steps)\n\n global_step = 0\n if i != 0:\n logger.info(\"*\" * 80)\n logger.info(\"*\" * 80)\n logger.info(\"*\" * 80)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_data))\n logger.info(\" Batch size = %d\", train_batch_size)\n logger.info(\" Num steps = %d\", total_steps)\n logger.info(\" Num Fold = %d\", i)\n for epoch in range(epochs):\n loader_t = tqdm.tqdm(train_dataloader, desc='epoch:{}/{}'.format(epoch, epochs))\n for step, (batch_input_ids, batch_input_mask, segment_ids, batch_label) in enumerate(loader_t):\n batch_input_ids = batch_input_ids.to(device)\n batch_input_mask = batch_input_mask.to(device)\n segment_ids = segment_ids.to(device)\n batch_label = batch_label.to(device)\n model.train()\n loss, _ = model(input_ids=batch_input_ids, attention_mask=batch_input_mask,\n token_type_ids=segment_ids, labels=batch_label)\n nb_tr_examples += batch_input_ids.size(0)\n nb_tr_steps += 1\n loss.backward()\n \n fgm.attack() \n loss_adv = model(input_ids=batch_input_ids, attention_mask=batch_input_mask,\n token_type_ids=segment_ids, labels=batch_label)[0]\n loss_adv.backward() \n fgm.restore() \n \n del batch_input_ids, batch_input_mask, 
batch_label\n                optimizer.step()\n                scheduler.step()\n                optimizer.zero_grad()\n                loader_t.set_postfix(training=\"loss:{:.6f}\".format(loss.item()))\n                global_step += 1\n            model_to_save = model.module if hasattr(model,\n                                                    'module') else model # Only save the model itself\n            output_model_file = os.path.join(output_dir, \"pytorch_model_{}.bin\".format(i))\n            torch.save(model_to_save.state_dict(), output_model_file)\n            for file, flag in [('dev.csv', 'validation'), ('test.csv', 'test')]:\n                inference_labels = []\n                gold_labels = []\n                eval_data = open_file(data_dir, flag)\n                eval_dataset = Dataset(flag, eval_data)\n                eval_sampler = SequentialSampler(eval_dataset)\n                eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=eval_batch_size,\n                                             shuffle=False,\n                                             num_workers=0)\n                model.eval()\n                for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:\n                    input_ids = input_ids.to(device)\n                    input_mask = input_mask.to(device)\n                    segment_ids = segment_ids.to(device)\n                    label_ids = label_ids.to(device)\n\n                    with torch.no_grad():\n                        logits = model(input_ids=input_ids, attention_mask=input_mask,\n                                       token_type_ids=segment_ids).detach().cpu().numpy()\n                    label_ids = label_ids.to('cpu').numpy()\n                    inference_labels.append(logits)\n                    gold_labels.append(label_ids)\n                if flag == 'test':\n                    logits = np.concatenate(inference_labels, 0)\n                    res_proba += logits\n                    df = pd.read_csv(os.path.join(data_dir, file), encoding=\"utf-8\", sep='\\t', header=None)\n                    df.columns = ['query_id', 'reply_id', 'query', 'reply']\n                    df['logits_0'] = logits[:, 0]\n                    df['logits_1'] = logits[:, 1]\n                    df['label'] = np.argmax(logits, axis=1)\n                    df[['query_id', 'reply_id', 'label', 'logits_0', 'logits_1']].to_csv(\n                        os.path.join(output_dir, \"sub_{}.csv\".format(i)), index=False,\n                        sep=\"\\t\", header=None)\n                if flag == 'validation':\n                    gold_labels = np.concatenate(gold_labels, 0)\n                    logits = np.concatenate(inference_labels, 0)\n                    print(flag, accuracy(logits, gold_labels))\n                    df = pd.read_csv(os.path.join(data_dir, file), encoding=\"utf-8\", sep='\\t', header=None)\n                    df.columns = ['query_id', 'reply_id', 'query', 'reply', 'label']\n                    df['pred'] = np.argmax(logits, axis=1)\n                    df['logits_0'] = logits[:, 0]\n                    df['logits_1'] = logits[:, 1]\n                    df[['query_id', 'reply_id', 'pred', 'label', 'logits_0', 'logits_1']].to_csv(\n                        os.path.join(output_dir, \"logits_dev_{}.csv\".format(i)), index=False,\n                        sep=\"\\t\", header=None)\n                del input_ids, input_mask, label_ids, segment_ids\n        torch.save(model_to_save.state_dict(), output_model_file)\n    df = pd.read_csv('./data/data_KFold/data_origin_0/test.csv', encoding=\"utf-8\", sep='\\t', header=None)\n    df.columns = ['query_id', 'reply_id', 'query', 'reply']\n    res_proba = res_proba / 5\n    df['logits_0'] = res_proba[:,0]\n    df['logits_1'] = res_proba[:,1]\n    df['label'] = np.argmax(res_proba, axis=1)\n    df[['query_id', 'reply_id', 'label']].to_csv( os.path.join(output_dir, \"submission.csv\"), index=False, sep=\"\\t\", header=None)\n    df[['query_id', 'reply_id', 'logits_0', 'logits_1']].to_csv( os.path.join(output_dir, \"logits_test.csv\"), index=False, sep=\"\\t\", header=None)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"run_bert_pse_adv.py","file_name":"run_bert_pse_adv.py","file_ext":"py","file_size_in_byte":12226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"572171124","text":"import json\n\nfrom cosrlib.sources import Source\nfrom cosrlib.url import URL\n\n\nclass CorpusSource(Source):\n    \"\"\" Source that yields documents from a static corpus. 
Mostly used in tests \"\"\"\n\n def iter_items(self):\n\n if self.args.get(\"path\"):\n with open(self.args[\"path\"], \"r\") as f:\n docs = json.load(f)\n else:\n docs = self.args[\"docs\"]\n\n for doc in docs:\n\n url = URL(doc[\"url\"].encode(\"utf-8\"))\n\n do_parse, index_level = self.filter_url(url)\n\n if do_parse:\n\n yield (\n url,\n {\"Content-Type\": \"text/html\"},\n \"html\",\n index_level,\n doc[\"content\"].encode(\"utf-8\")\n )\n","sub_path":"cosrlib/sources/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160512133","text":"from PIL import Image\nfrom math import ceil\nimport os\nimport sys\n\nos.chdir(os.path.dirname(sys.argv[0]))\n\n\nimg = Image.open(\"fonts.png\").convert('L')\nfonts = [\n\t(2, 1, 4, 4, False),\n\t(77, 5, 4, 4, True),\n\t(2, 16, 3, 5, True),\n\t(79, 21, 5, 5, True)]\n\nresult = \"\"\nstatic_def_fonts = \"\"\nstatic_get = \"\"\nelseif = \"\"\nfor x, y, w, h, a in fonts:\n\tif not a:\n\t\tcontinue\n\tstatic_def_fonts += f\"\\tstatic uint8_t Font{w}x{h}[];\\n\"\n\tfont = []\n\tsize = ceil(w*h/8)\n\tfor k in range(26):\n\t\tc = [0]*size\n\t\tfor j in range(h):\n\t\t\tfor i in range(w):\n\t\t\t\tif img.getpixel((x+(k%13)*(w+1)+i, y+(k//13)*(h+1)+j)) == 0:\n\t\t\t\t\tc[(j*w+i)//8] += 2**((j*w+i)%8)\n\t\tfont.append(c)\n\tname = f\"Font{w}x{h}\"\n\tresult += f\"uint8_t Fonts::{name}[] = {{ \" + \", \".join(f\"0x{h:02x}\" for c in font for h in c) + \" };\\n\\n\"\n\tstatic_get += f\"\\t{elseif}if (width == {w} && height == {h}) {{ return {{ {name}, width, height, {size} }}; }}\\n\"\n\telseif = \"else \"\n\nprint(result)\nprint(static_def_fonts)\ncontent_h = \"\"\"\\\n#ifndef __FONT_H__\n#define __FONT_H__\n\n#include \"utils/types.h\"\n\n\nstruct Font {\n\tuint8_t* stencils;\n\tuint8_t width;\n\tuint8_t height;\n\tuint8_t size;\n\n\tuint8_t* get(char letter);\n};\n\n\nclass Fonts {\nprivate:\n\tstatic uint8_t FUnknown[];\n\"\"\" + static_def_fonts + \"\"\"\npublic:\n\tFont static get(uint8_t width, uint8_t height);\n};\n\n\n#endif\n\"\"\"\n\n\ncontent_cpp = \"\"\"\\\n#include \"font.h\"\n\n\nuint8_t* Font::get(char letter) {\n\tif ('A' <= letter && letter <= 'Z') {\n\t\treturn stencils + (letter-'A')*size;\n\t}\n\tif ('a' <= letter && letter <= 'z') {\n\t\treturn stencils + (letter-'a')*size;\n\t}\n\treturn 0;\n}\n\n\nuint8_t Fonts::FUnknown[256] = { 0xFF };\n\n\"\"\" + result + \"\"\"\nFont Fonts::get(uint8_t width, uint8_t height) {\n\"\"\" + static_get + \"\"\"\n\treturn { FUnknown, width, height, 0 };\n}\n\"\"\"\n\n\nwith open(\"font.h\", \"w\") as file_h:\n\tfile_h.write(content_h)\n\nwith open(\"font.cpp\", \"w\") as file_cpp:\n\tfile_cpp.write(content_cpp)\n","sub_path":"src/gui/fonts/font.py","file_name":"font.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"390844033","text":"# Django-CIM-Forms\n# Copyright (c) 2012 CoG. 
All rights reserved.\n#\n# Developed by: Earth System CoG\n# University of Colorado, Boulder\n# http://cires.colorado.edu/\n#\n# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].\n\nfrom django.db import models\nfrom django import forms\nimport django.forms.models\nimport django.forms.widgets\nimport django.forms.fields\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\n\nfrom django_cim_forms.controlled_vocabulary import *\nfrom django_cim_forms.helpers import *\n\n############################################################\n# the types of fields that a model can have #\n# (these are specified as needed in the models themselves) #\n############################################################\n\nclass FieldType(EnumeratedType):\n pass\n\n#######################################################\n# the ways that (relationship) fields can be added to #\n#######################################################\n\nclass FieldAddMode(EnumeratedType):\n pass\n\nFieldAddModes = EnumeratedTypeList([\n FieldAddMode(\"INLINE\",\"add field in-line only\"),\n FieldAddMode(\"REMOTE\",\"add field remote only\"),\n FieldAddMode(\"BOTH\",\"add field in-line or remote\"),\n])\n\n\n##########################################################################\n# this is a way to customise _any_ widgets used by metadata fields #\n# without having to hard-code _all_ of them; it inserts some css classes #\n# and the template knows what to do with that (currently JQuery stuff) #\n##########################################################################\n\n\ndef customize_metadata_widgets(field):\n formfield = field.formfield()\n\n try:\n # some fields have the isFixed method, which means they should be readonly\n if field.isFixed():\n formfield.widget.attrs.update({\"readonly\" : \"readonly\"})\n except AttributeError:\n pass\n\n # only customize custom fields (ie: only apply the following logic to subclasses of MetadataFields)...\n if isinstance(field,MetadataField):\n \n if field.isReadOnly():\n formfield.widget.attrs.update({\"readonly\":\"readonly\"})\n\n if field.isEnabler():\n\n # turn the content of field._enables into an associative array that can be passed to the\n # javascript function that controls enabling/disabling of fields\n java_string = \"\"\n for (key,value) in field._enables.iteritems():\n java_string += \"\\'\" + key + \"\\':[\" + \",\".join(u'\\'%s\\''%fieldName for fieldName in value) + \"],\"\n java_string = \"toggleStuff(this,{%s})\" % java_string[:-1]\n\n # TODO: MOVE THIS LOGIC TO THE END OF THESE BLOCKS\n # (JUST SET newAttrs HERE)\n if isinstance(formfield.widget,MetadataBoundWidget):\n # unfortunately, this has to be called on the widget (or formfield) and not the field\n # b/c this is the instance that gets rendered in the template\n formfield.widget.updateBoundAttrs({\"class\":\"enabler\",\"onchange\":java_string})\n else:\n newAttrs = {\"class\":\"enabler\",\"onchange\":java_string}\n for (key,value) in newAttrs.iteritems():\n try:\n currentAttrs = formfield.widget.attrs[key]\n formfield.widget.attrs[key] = \"%s %s\" % (currentAttrs,value)\n except KeyError:\n formfield.widget.attrs[key] = value\n # this is commented out b/c it doesn't take into account potential pre-existing attributes\n #formfield.widget.attrs.update({\"class\":\"enabler\",\"onchange\":java_string})\n\n\n if isinstance(field,MetadataEnumerationField):\n # BoundFields are a bit different, b/c their 
formfields are MultiValueFields,\n # so I have to specify which corresponding widget I wish to modify...\n if field.isReadOnly():\n # Select widgets use the keyword \"disabled\" instead of \"readonly\"... go figure\n## formfield.widget.widgets[0].attrs.update({\"disabled\":\"disabled\",})\n##\n## currentClasses = formfield.widget.widgets[0].attrs[\"class\"]\n## formfield.widget.widgets[0].attrs.update({\"class\": currentClasses + \" disabled\"})\n\n# THIS IS A HACK, I DON'T _REALLY_ WANT TO REPLACE THE WIDGETS\n# I'D RATHER BE ABLE TO USE THE DISABLED WIDGETS, BUT STILL SAVE A VALUE\n# (SEE http://groups.google.com/group/django-users/browse_thread/thread/8710ceea619b0e9d or http://stackoverflow.com/questions/7743208/making-a-text-input-field-look-disabled-but-act-readonly FOR A DESCRIPTION OF THE PROBLEM)\n formfield.widget.widgets[0] = django.forms.fields.TextInput()\n formfield.widget.widgets[1] = django.forms.fields.HiddenInput() \n \n if isinstance(field,MetadataAtomicField):\n\n newAttrs = {\"class\" : \"atomic\"}\n for (key,value) in newAttrs.iteritems():\n try:\n currentAttrs = formfield.widget.attrs[key]\n formfield.widget.attrs[key] = \"%s %s\" % (currentAttrs,value)\n except KeyError:\n formfield.widget.attrs[key] = value\n\n if isinstance(field,models.DateField):\n newAttrs = {\"class\":\"datepicker\"}\n for (key,value) in newAttrs.iteritems():\n try:\n currentAttrs = formfield.widget.attrs[key]\n formfield.widget.attrs[key] = \"%s %s\" % (currentAttrs,value)\n except KeyError:\n formfield.widget.attrs[key] = value\n\n### if isinstance(field,MetadataDocumentField):\n### formfield.widget.attrs.update({\"class\" : \"adder\"})\n### formfield.widget.attrs.update({\"title\": u'%s/%s'%(field.getAppName(),field.getModelName())})\n###\n### if isinstance(field,MetadataAbstractField):\n### java_string = \"toggleForm($(this).val(),[%s])\" % \",\".join([u'\"%s\"' % choice[0] for choice in field.getChoices()])\n### formfield.widget.attrs.update({\"onclick\" : java_string})\n### formfield.widget.attrs.update({\"class\":\"abstract-choice\"})\n###\n### # TODO: other if branches for other field types?\n###\n\n return formfield\n\n########################################################\n# the set of customizable fields for metadata models #\n# each item consists of a name, a corresponding class, #\n# and a set of default kwargs required for that class. 
#\n########################################################\n\nMODELFIELD_MAP = {\n \"booleanfield\" : [models.BooleanField, {}],\n \"charfield\" : [models.CharField, { \"max_length\" : BIG_STRING}],\n \"datefield\" : [models.DateField, {}],\n \"datetimefield\" : [models.DateTimeField, {}],\n \"decimalfield\" : [models.DecimalField, { \"null\" : True}],\n \"emailfield\" : [models.EmailField, {}],\n \"integerfield\" : [models.IntegerField, { \"null\" : True}],\n \"nullbooleanfield\" : [models.NullBooleanField, {}],\n \"positiveintegerfield\" : [models.PositiveIntegerField, {}],\n \"textfield\" : [models.TextField, {}],\n \"timefield\" : [models.TimeField, {}],\n \"urlfield\" : [models.URLField, {}],# DEPRECATED IN DJANGO V1.5 { \"verify_exists\" : False}],\n\n}\n\n\n################################\n# the base class of all fields #\n################################\n\nclass MetadataField(models.Field):\n class Meta:\n abstract = True\n\n _required = False # by default, fields are not required\n _readonly = False # a field can be readonly\n _unique = False # a field can be unique (but I have to deal w/ this manually; I can't change the db tables)\n _enables = {} # a field can toggle (enable) other fields\n \n @classmethod\n def comparator(cls,fieldName,fieldOrderList):\n if fieldName in fieldOrderList:\n return fieldOrderList.index(fieldName)\n return len(fieldOrderList)+1\n\n def isRequired(self):\n return self._required\n\n def isUnique(self):\n return self._unique\n \n def enables(self,enablingDictionary):\n # enablingDictionary lists fields to enable based on value:\n # { val1 : [\"f1\",\"f2\",\"f3\"], val2 : [\"f4\",\"f5\",\"f6\"] }\n self._enables = enablingDictionary\n\n def isEnabler(self):\n return bool(self._enables)\n\n def isReadOnly(self):\n return self._readonly\n\n def getVerboseName(self):\n verbose_name = self.verbose_name\n if verbose_name != self.name:\n return verbose_name\n return pretty_string(verbose_name)\n\n def __init__(self,*args,**kwargs):\n kwargs[\"blank\"] = kwargs.pop(\"blank\",True)\n required = not kwargs[\"blank\"]\n readonly = kwargs.pop(\"readonly\",False)\n super(MetadataField,self).__init__(*args,**kwargs)\n self._required = required\n self._readonly = readonly\n\n### def south_field_triple(self):\n### from south.modelsinspector import introspector\n### field_class = \"django_cim_forms.fields.\" + self.__class__.__name__\n### args, kwargs = introspector(self)\n### return (field_class, args, kwargs)\n\n \nclass MetadataAtomicField(MetadataField):\n\n def __init__(self,*args,**kwargs):\n super(MetadataAtomicField,self).__init__(**kwargs)\n \n\n @classmethod\n def Factory(cls,modelFieldClassName,**kwargs):\n modelFieldClassInfo = MODELFIELD_MAP[modelFieldClassName.lower()]\n modelFieldClass = modelFieldClassInfo[0]\n modelFieldKwargs = modelFieldClassInfo[1]\n# in theory, I could also have created a new metaclass to achieve multiple inheritance\n# but in practice, these two field types are just too dissimilar for that\n# class _MetadataAtomicFieldMetaClass(MetadataField.Meta,modelFieldClass.Meta):\n# pass\n\n class _MetadataAtomicField(cls,modelFieldClass):\n def __init__(self,*args,**kwargs):\n # set of kwargs passed to constructor\n # should be default set plus any overrides\n for (key,value) in modelFieldKwargs.iteritems():\n if not key in kwargs:\n kwargs[key] = value\n super(_MetadataAtomicField,self).__init__(**kwargs)\n \n return _MetadataAtomicField(**kwargs)\n\n\n\nclass MetadataRelationshipField(MetadataField):\n\n class Meta:\n abstract = True\n\n 
_addMode = FieldAddModes.BOTH # by default, relationships can be to existing models or to new models created inline\n\n _sourceModelName = None\n _sourceAppName = None\n _targetModelName = None\n _targetAppName = None\n\n\n## def __init__(self,*args,**kwargs):\n## # this fn doesn't actually ever get called;\n## # the Method Resolution Order for subclasses of MetadataRelationshipField has the django.db.models class first,\n## # so that class's __init__ fn gets called. However, these classes still have access to all RelationshipField's attributes.\n## super(MetadataRelationshipField,self).__init__(*args,**kwargs)\n\n # do some post-initialization\n # (see above comment for an explanation of why this stuff isn't in __init__)\n def initRelationship(self,*args,**kwargs):\n self.related_name = self.name # related_name has to be unique to distinguish between different relationshipFields from the same model to the same model\n self.null = True # null values have to be allowed in order to initialize subForms w/ potentially brand-new (empty) models\n self.blank = not self.isRequired()\n\n addMode = kwargs.pop(\"addMode\",FieldAddModes.BOTH)\n required = not(kwargs.pop(\"blank\",True))\n targetModel = kwargs.pop(\"targetModel\",None)\n sourceModel = kwargs.pop(\"sourceModel\",None)\n\n if sourceModel:\n sourceAppAndModel = sourceModel.split(\".\")\n self._sourceModelName = sourceAppAndModel[1].lower()\n self._sourceAppName = sourceAppAndModel[0].lower()\n if targetModel:\n targetAppAndModel = targetModel.split(\".\")\n self._targetModelName = targetAppAndModel[1].lower()\n self._targetAppName = targetAppAndModel[0].lower()\n\n self._required = required\n self._addMode = addMode\n\n\n def canAddRemote(self):\n # returns True if this field is one that can link to existing models\n # returns False if the linked model must be created in-line\n return self._addMode != FieldAddModes.INLINE\n\n def getTargetModelClass(self):\n try:\n ModelType = ContentType.objects.get(app_label=self._targetAppName,model=self._targetModelName)\n ModelClass = ModelType.model_class()\n return ModelClass\n except django.contrib.contenttypes.models.ContentType.DoesNotExist:\n # handles the case where model is accessed before target is loaded (during syncdb, for instance)\n return None\n\n def getSourceModelClass(self):\n try:\n ModelType = ContentType.objects.get(app_label=self._sourceAppName,model=self._sourceModelName)\n ModelClass = ModelType.model_class()\n return ModelClass\n except django.contrib.contenttypes.models.ContentType.DoesNotExist:\n # handles the case where model is accessed before target is loaded (during syncdb, for instance)\n return None\n\n\n\nclass MetadataAbstractField():\n pass\n\n\nclass MetadataManyToOneField(models.ForeignKey,MetadataRelationshipField):\n pass\n\n def __init__(self,*args,**kwargs):\n targetModel = kwargs.pop(\"targetModel\",None)\n sourceModel = kwargs.pop(\"sourceModel\",None)\n addMode = kwargs.pop(\"addMode\",FieldAddModes.BOTH)\n on_delete = kwargs.pop(\"on_delete\",models.CASCADE)\n super(MetadataManyToOneField,self).__init__(targetModel,**kwargs)\n self.initRelationship(sourceModel=sourceModel,targetModel=targetModel,addMode=addMode,**kwargs)\n self.help_text = kwargs.pop(\"help_text\",\"\")\n\nclass MetadataManyToManyField(models.ManyToManyField,MetadataRelationshipField):\n pass\n\n def __init__(self,*args,**kwargs):\n targetModel = kwargs.pop(\"targetModel\",None)\n sourceModel = kwargs.pop(\"sourceModel\",None)\n addMode = kwargs.pop(\"addMode\",FieldAddModes.BOTH)\n 
on_delete = kwargs.pop(\"on_delete\",models.CASCADE)\r\n super(MetadataManyToManyField,self).__init__(targetModel,**kwargs)\r\n self.initRelationship(sourceModel=sourceModel,targetModel=targetModel,addMode=addMode,**kwargs)\r\n self.help_text = kwargs.pop(\"help_text\",\"\")\r\n\r\n### def south_field_triple(self):\r\n### from south.modelsinspector import introspector\r\n### field_class = \"django_cim_forms.fields.\" + self.__class__.__name__\r\n### args, kwargs = introspector(self)\r\n### return (field_class, args, kwargs)\r\n\r\n\r\n# TODO: \"BoundField\" has a particular meaning in Django\r\n# I ought to change this class name to something else\r\nclass MetadataBoundField(MetadataField):\r\n\r\n _open = False # can a user override the bound values?\r\n _multi = False # can a user select more than one bound value?\r\n _nullable = False # can a user select no bound values?\r\n _empty = True # is there a default \"empty\" value?\r\n \r\n class Meta:\r\n abstract = True\r\n\r\n# I don't understand why __init__ gets called here, but not for RelationshipFields;\r\n# both have multiple inheritance issues?\r\n def __init__(self,*args,**kwargs):\r\n open = kwargs.pop(\"open\",False)\r\n multi = kwargs.pop(\"multi\",False)\r\n nullable = kwargs.pop(\"nullable\",False)\r\n empty = kwargs.pop(\"empty\",True)\r\n super(MetadataBoundField,self).__init__(**kwargs)\r\n self._open = open\r\n self._multi = multi\r\n self._nullable = nullable\r\n self._empty = empty\r\n\r\n# no longer needed b/c __init__ is called for subclasses\r\n# def initBound(self,*args,**kwargs):\r\n# self._open = kwargs.pop(\"open\",False)\r\n# self._multi = kwargs.pop(\"multi\",False)\r\n# self._nullable = kwargs.pop(\"nullable\",False)\r\n\r\n def isOpen(self):\r\n return self._open\r\n\r\n def isMulti(self):\r\n return self._multi\r\n\r\n def isNullable(self):\r\n return self._nullable\r\n\r\n def isEmpty(self):\r\n return self._empty\r\n\r\n def setInitialValue(self,value):\r\n self._initialValue = value\r\n\r\nclass MetadataBoundWidget(django.forms.widgets.MultiWidget):\r\n\r\n custom_choices = []\r\n _multi = False\r\n\r\n # this gets called by customize_metadata_widget;\r\n # when updating the attributes of a widget,\r\n # I have to treat MultiWidgets separately to ensure that both widgets comprising the MultiWidget are updated\r\n def updateBoundAttrs(self,newAttrs):\r\n for widget in self.widgets:\r\n for (key,value) in newAttrs.iteritems():\r\n try:\r\n currentAttrs = widget.attrs[key]\r\n widget.attrs[key] = \"%s %s\" % (currentAttrs,value)\r\n except KeyError:\r\n widget.attrs[key] = value\r\n \r\n def __init__(self,*args,**kwargs):\r\n\r\n custom_choices = kwargs.pop(\"choices\",None)\r\n multi = kwargs.pop(\"multi\",False)\r\n\r\n length = 4\r\n if custom_choices:\r\n length = max([length,(len(custom_choices)/2)])\r\n if multi:\r\n widgets = (\r\n django.forms.fields.SelectMultiple(choices=custom_choices,attrs={\"class\":\"enumeration-value\",\"size\":length}),\r\n django.forms.fields.TextInput(attrs={\"class\":\"enumeration-other\"}),\r\n )\r\n else:\r\n widgets = (\r\n django.forms.fields.Select(choices=custom_choices,attrs={\"class\":\"enumeration-value\"}),\r\n django.forms.fields.TextInput(attrs={\"class\":\"enumeration-other\"}),\r\n )\r\n super(MetadataBoundWidget,self).__init__(widgets,*args,**kwargs)\r\n self.custom_choices = custom_choices\r\n self._multi = multi\r\n\r\n def decompress(self, value):\r\n\r\n if self._multi:\r\n if value:\r\n val = [val.split(\"|\") for val in value.split(\"||\")]\r\n return [val[0],val[1][0]]\r\n return [[u''],u'']\r\n else:\r\n if value:\r\n return value.split(\"|\")\r\n return [u'',u'']\r\n\r\n
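# note on the serialised format shared by MetadataBoundWidget above and the\r\n# form field below: a single-valued field round-trips as \"value|other_text\",\r\n# a multi-valued field as \"val1|val2||other_text\"; this is why clean() rejects\r\n# the '|' character in user input.\r\n\r\nclass 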
MetadataBoundFormField(django.forms.fields.MultiValueField):\r\n\r\n custom_choices = []\r\n _multi = False\r\n _empty = True\r\n _required = True\r\n _custom = False\r\n _initialValue = None\r\n\r\n def __init__(self,*args,**kwargs):\r\n\r\n custom_choices = kwargs.pop(\"choices\",None)\r\n multi = kwargs.pop(\"multi\",False)\r\n #empty = kwargs.pop(\"empty\",False)\r\n empty = kwargs.pop(\"empty\",True)\r\n required = not(kwargs.pop(\"blank\",True))\r\n custom = kwargs.pop(\"custom\",False)\r\n initial = kwargs.pop(\"initial\",None)\r\n\r\n fields = (\r\n django.forms.fields.CharField(max_length=HUGE_STRING,required=required),\r\n django.forms.fields.CharField(max_length=HUGE_STRING,required=False),\r\n )\r\n widget = MetadataBoundWidget(choices=custom_choices,multi=multi)\r\n super(MetadataBoundFormField,self).__init__(fields,widget,*args,**kwargs)\r\n self.widget = widget # why is this line still needed, even though widget is passed into *args above?\r\n self.custom_choices = custom_choices\r\n self._multi = multi\r\n self._empty = empty\r\n self._required = required\r\n self._custom = custom\r\n self._initialValue = initial\r\n\r\n def setInitialValue(self,value):\r\n self._initialValue = value\r\n\r\n# def getInitialValue(self):\r\n# return self._initialValue\r\n\r\n def isReadOnly(self):\r\n isFirstWidgetReadOnly = False\r\n isSecondWidgetReadOnly = False\r\n # THIS TRY/CATCH IS ONLY HERE TO DEAL W/ THE FACT THAT A READONLY ENUMERATION'S WIDGETS ARE\r\n # REMAPPED AS TEXTBOXES ABOVE; HOPEFULLY AT SOME POINT I CAN GET RID OF THIS\r\n try:\r\n isSecondWidgetReadOnly = self.widget.widgets[1].attrs[\"class\"].find(\"disabled\") != -1\r\n isFirstWidgetReadOnly = self.widget.widgets[0].attrs[\"class\"].find(\"disabled\") != -1\r\n except KeyError:\r\n pass\r\n return isFirstWidgetReadOnly or isSecondWidgetReadOnly\r\n\r\n def compress(self, data_list):\r\n if self._multi:\r\n if data_list:\r\n return \"||\".join([\"|\".join(data_list[0]),data_list[1]])\r\n else:\r\n if data_list:\r\n return \"|\".join(data_list)\r\n\r\n def clean(self,value):\r\n # an empty string \"\" is false\r\n # an explicit none is false\r\n if self._required and (not value[0] or value[0] == [u'']):\r\n msg = \"this field is required\"\r\n raise forms.ValidationError(msg)\r\n\r\n # ordinarily, a disabled select widget will not post a value\r\n # so I _cheat_ here by setting it manually before the cleaning starts\r\n if self.isReadOnly():\r\n # TODO: STILL NEED TO FIGURE OUT HOW TO DO THIS\r\n # IN THE MEANTIME, I CHANGE THE WIDGETS TO TEXTBOXES IN CUSTOMIZE_METADATA_WIDGETS\r\n\r\n #print self._initialValue\r\n #print self.fields[0]\r\n #print self.fields[1]\r\n #print value\r\n #print self.widget.widgets[0]\r\n #print self.widget.widgets[1]\r\n pass\r\n \r\n\r\n if value != [None,None]:\r\n \r\n if value[0]==None:\r\n value[0] = u' '\r\n if value[1]==None:\r\n value[1]= u' '\r\n\r\n if self._multi:\r\n # if this is a multiple bound field\r\n # then the value will be a list of lists\r\n if not OTHER_CHOICE[0][0] in value[0]:\r\n value[1] = u' '\r\n else:\r\n #if value[1].strip() == u'':\r\n if value[1].strip() == u'' and not self._custom:\r\n msg = \"unspecified OTHER value\"\r\n raise forms.ValidationError(msg)\r\n elif \"|\" in value[1]:\r\n msg = \"bad value ('|' character is not allowed)\"\r\n raise forms.ValidationError(msg)\r\n return \"||\".join([\"|\".join(value[0]),value[1]])\r\n\r\n else:\r\n # if this is not a multiple bound field\r\n # then the value will just be a simple list\r\n if value[0] != OTHER_CHOICE[0][0]:\r\n value[1] = u' '\r\n else:\r\n #if value[1].strip() == u'':\r\n if value[1].strip() == u'' and not self._custom:\r\n msg = \"unspecified OTHER value\"\r\n raise forms.ValidationError(msg)\r\n elif \"|\" in 
value[1]:\r\n msg = \"bad value ('|' character is not allowed)\"\r\n raise forms.ValidationError(msg)\r\n\r\n return \"|\".join(value)\r\n\r\nclass MetadataEnumerationField(models.CharField,MetadataBoundField):\r\n\r\n _enumerationModelName = None\r\n _enumerationAppName = None\r\n _initialValue = None\r\n\r\n\r\n def formfield(self,*args,**kwargs):\r\n custom_choices = self.getCustomChoices()\r\n return MetadataBoundFormField(choices=custom_choices,multi=self.isMulti(),empty=self.isEmpty())\r\n\r\n def getEnumerationClass(self):\r\n try:\r\n ModelType = ContentType.objects.get(app_label=self._enumerationAppName,model=self._enumerationModelName)\r\n return ModelType.model_class()\r\n except:\r\n return None\r\n\r\n def getCustomChoices(self):\r\n EnumerationClass = self.getEnumerationClass()\r\n if EnumerationClass:\r\n if not EnumerationClass.isLoaded():\r\n EnumerationClass.loadEnumerations()\r\n\r\n custom_choices = [(enumeration.name,enumeration.name) for enumeration in EnumerationClass.objects.all()]\r\n if self.isOpen() and OTHER_CHOICE[0] not in custom_choices:\r\n custom_choices += OTHER_CHOICE\r\n if self.isNullable() and NONE_CHOICE[0] not in custom_choices:\r\n custom_choices += NONE_CHOICE\r\n if self.isEmpty() and EMPTY_CHOICE[0] not in custom_choices:\r\n #custom_choices += EMPTY_CHOICE\r\n custom_choices.insert(0,EMPTY_CHOICE[0])\r\n\r\n return custom_choices\r\n\r\n return []\r\n\r\n def __init__(self,*args,**kwargs):\r\n enumeration = kwargs.pop('enumeration',None)\r\n kwargs[\"max_length\"] = HUGE_STRING \r\n super(MetadataEnumerationField,self).__init__(*args,**kwargs)\r\n if enumeration:\r\n enumerationAppAndModel = enumeration.split(\".\")\r\n self._enumerationModelName = enumerationAppAndModel[1].lower()\r\n self._enumerationAppName = enumerationAppAndModel[0].lower()\r\n# apparently, this is not needed (super() above calls BoundField.__init__()\r\n# self.initBound(*args,**kwargs)\r\n\r\n def setInitialEnumeratedValue(self,value):\r\n self._initialValue = value\r\n #super(MetadataEnumerationField,self).setInitialValue(value)\r\n\r\n# def getInitialValue(self):\r\n# return self._initialValue\r\n\r\nclass MetadataPropertyField(models.CharField,MetadataBoundField):\r\n pass\r\n\r\n def formfield(self,**kwargs):\r\n # for MetadataProperties, the choices are customized w/in the form not here\r\n return MetadataBoundFormField(choices=self._choices,blank=self.blank)\r\n \r\n def __init__(self,*args,**kwargs):\r\n kwargs[\"max_length\"] = HUGE_STRING\r\n super(MetadataPropertyField,self).__init__(*args,**kwargs)\r\n \r\n# TODO: MULTIPLE FIELD\r\n\r\n##\r\n### migration information for custom fields...\r\n##from south.modelsinspector import add_introspection_rules\r\n##add_introspection_rules([], [\"^django_cim_forms\\.extra\\.fields\\.MetadataField\"])\r\n##add_introspection_rules([], [\"^django_cim_forms\\.extra\\.fields\\.MetadataManyToOneField\"])\r\n##add_introspection_rules([], [\"^django_cim_forms\\.extra\\.fields\\.MetadataManyToManyField\"])\r\n##add_introspection_rules([], [\"^django_cim_forms\\.extra\\.fields\\._MetadataAtomicField\"])\r\n##add_introspection_rules([], [\"^django_cim_forms\\.extra\\.fields\\.MetadataEnumerationField\"])\r\n##add_introspection_rules([], [\"^django_cim_forms\\.extra\\.fields\\.MetadataControlledVocabularyValueField\"])\r\n","sub_path":"CIM_Questionnaire/django_cim_forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":26540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"9048859","text":"\"\"\"\nModule that holds the entry point for the console.\n\"\"\"\n\nimport sys\nimport json\nimport signal\nimport logging\nimport 
cProfile\nimport pstats\n\nfrom rich.traceback import install\nfrom rich.console import Console\n\nfrom spotdl.console.download import download\nfrom spotdl.console.sync import sync\nfrom spotdl.console.save import save\nfrom spotdl.console.meta import meta\nfrom spotdl.console.web import web\nfrom spotdl.download import Downloader\nfrom spotdl.providers.audio.base import AudioProviderError\nfrom spotdl.providers.audio.ytmusic import YouTubeMusic\nfrom spotdl.utils.config import DEFAULT_CONFIG, get_config, get_config_file\nfrom spotdl.utils.arguments import parse_arguments\nfrom spotdl.utils.spotify import SpotifyClient, SpotifyError\nfrom spotdl.utils.console import ACTIONS\nfrom spotdl.download.downloader import DownloaderError\nfrom spotdl.utils.ffmpeg import (\n FFmpegError,\n download_ffmpeg,\n is_ffmpeg_installed,\n)\n\n\nOPERATIONS = {\n \"download\": download,\n \"sync\": sync,\n \"save\": save,\n \"meta\": meta,\n}\n\n\ndef entry_point():\n \"\"\"\n Console entry point for spotdl. This is where the magic happens.\n \"\"\"\n\n # Don't log too much\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.getLogger(\"spotipy\").setLevel(logging.NOTSET)\n logging.getLogger(\"asyncio\").setLevel(logging.WARNING)\n\n # Create a console\n console = Console()\n\n # Install rich traceback handler\n install(show_locals=False, extra_lines=1)\n\n # Create config file if it doesn't exist\n if get_config_file().is_file() is False:\n config_path = get_config_file()\n with open(config_path, \"w\", encoding=\"utf-8\") as config_file:\n json.dump(DEFAULT_CONFIG, config_file, indent=4)\n\n if getattr(sys, \"frozen\", False) and len(sys.argv) == 1:\n # If the application is frozen, we check for ffmpeg\n # if it's not present download it create config file\n if is_ffmpeg_installed() is False:\n download_ffmpeg()\n\n # Check if sys.argv contains an action\n # If it does, we run the action and exit\n for action_name, action in ACTIONS.items():\n if action_name in sys.argv:\n action()\n return None\n\n # Parse the arguments\n arguments = parse_arguments()\n\n # Get the config file\n # It will automatically load if the `load_config` is set to True\n # in the config file\n config = {}\n if arguments.config or (\n get_config_file().exists() and get_config().get(\"load_config\")\n ):\n config = get_config()\n\n # Create settings dict\n # Argument value has always the priority, then the config file\n # value, and if neither are set, use default value\n settings = {}\n for key, default_value in DEFAULT_CONFIG.items():\n argument_val = arguments.__dict__.get(key)\n config_val = config.get(key)\n\n if argument_val is not None:\n settings[key] = argument_val\n elif config_val is not None:\n settings[key] = config_val\n else:\n settings[key] = default_value\n\n # Check if ffmpeg is installed\n if is_ffmpeg_installed(settings[\"ffmpeg\"]) is False:\n raise FFmpegError(\n \"FFmpeg is not installed. Please run `spotdl --download-ffmpeg` to install it, \"\n \"or `spotdl --ffmpeg /path/to/ffmpeg` to specify the path to ffmpeg.\"\n )\n\n if \"youtube-music\" in settings[\"audio_providers\"]:\n # Check if we are getting results from YouTube Music\n ytm = YouTubeMusic(settings)\n test_results = ytm.get_results(\"a\")\n if len(test_results) == 0:\n raise AudioProviderError(\n \"Could not connect to YouTube Music API. 
Use a VPN or other audio provider.\"\n )\n\n # Initialize spotify client\n SpotifyClient.init(\n client_id=settings[\"client_id\"],\n client_secret=settings[\"client_secret\"],\n auth_token=settings[\"auth_token\"],\n user_auth=settings[\"user_auth\"],\n cache_path=settings[\"cache_path\"],\n no_cache=settings[\"no_cache\"],\n open_browser=not settings[\"headless\"],\n )\n\n # If the application is frozen start web ui\n # or if the operation is `web`\n if (\n getattr(sys, \"frozen\", False)\n and len(sys.argv) == 1\n or arguments.operation == \"web\"\n ):\n # Start web ui\n web(settings)\n\n return None\n\n # Check if save file is present and if it's valid\n if isinstance(settings[\"save_file\"], str) and not settings[\"save_file\"].endswith(\n \".spotdl\"\n ):\n raise DownloaderError(\"Save file has to end with .spotdl\")\n\n if arguments.query and \"saved\" in arguments.query and not settings[\"user_auth\"]:\n raise SpotifyError(\n \"You must be logged in to use the saved query. \"\n \"Log in by adding the --user-auth flag\"\n )\n\n # Initialize the downloader\n # for download, load and preload operations\n downloader = Downloader(\n audio_providers=settings[\"audio_providers\"],\n lyrics_providers=settings[\"lyrics_providers\"],\n ffmpeg=settings[\"ffmpeg\"],\n bitrate=settings[\"bitrate\"],\n ffmpeg_args=settings[\"ffmpeg_args\"],\n output_format=settings[\"format\"],\n threads=settings[\"threads\"],\n output=settings[\"output\"],\n save_file=settings[\"save_file\"],\n overwrite=settings[\"overwrite\"],\n cookie_file=settings[\"cookie_file\"],\n filter_results=settings[\"filter_results\"],\n search_query=settings[\"search_query\"],\n log_level=settings[\"log_level\"],\n simple_tui=settings[\"simple_tui\"],\n restrict=settings[\"restrict\"],\n print_errors=settings[\"print_errors\"],\n sponsor_block=settings[\"sponsor_block\"],\n playlist_numbering=settings[\"playlist_numbering\"],\n preserve_original_audio=settings[\"preserve_original_audio\"],\n )\n\n def graceful_exit(_signal, _frame):\n downloader.progress_handler.close()\n sys.exit(0)\n\n signal.signal(signal.SIGINT, graceful_exit)\n signal.signal(signal.SIGTERM, graceful_exit)\n\n try:\n # Pick the operation to perform\n # based on the name and run it!\n\n OPERATIONS[arguments.operation](\n query=arguments.query,\n save_path=settings[\"save_file\"],\n preload=settings[\"preload\"],\n downloader=downloader,\n m3u_file=settings[\"m3u\"],\n archive=settings[\"archive\"],\n )\n\n except Exception:\n downloader.progress_handler.close()\n\n console.print_exception(show_locals=False, extra_lines=1)\n\n sys.exit(1)\n\n downloader.progress_handler.close()\n\n return None\n\n\ndef console_entry_point():\n \"\"\"\n Wrapper around `entry_point` so we can profile the code\n \"\"\"\n\n if \"--profile\" in sys.argv:\n with cProfile.Profile() as profile:\n entry_point()\n\n stats = pstats.Stats(profile)\n stats.sort_stats(pstats.SortKey.TIME)\n\n # Use snakeviz to visualize the profile\n stats.dump_stats(\"spotdl.profile\")\n else:\n entry_point()\n","sub_path":"spotdl/console/entry_point.py","file_name":"entry_point.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"312730968","text":"import math\r\n\r\nnumber2char = {}\r\nchar2number = {}\r\nfor i in range(65, 91):\r\n char2number[chr(i)] = i - 65\r\n number2char[i - 65] = chr(i)\r\n\r\n\r\ndef is_prime(n):\r\n \"\"\"\r\n n: number to check\r\n\r\n returns: -0: number not prime\r\n -1: number 
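The settings merge in the entry point above (CLI argument beats config-file value beats default) is a reusable pattern; a small self-contained version, with names of my choosing:

```python
def resolve_settings(defaults, config, cli_args):
    # CLI value wins, then the config file value, then the default
    settings = {}
    for key, default in defaults.items():
        cli_val = cli_args.get(key)
        cfg_val = config.get(key)
        if cli_val is not None:
            settings[key] = cli_val
        elif cfg_val is not None:
            settings[key] = cfg_val
        else:
            settings[key] = default
    return settings

print(resolve_settings({"threads": 4, "bitrate": "128k"},
                       {"bitrate": "320k"},   # config file overrides one key
                       {"threads": 8}))       # CLI overrides another
# {'threads': 8, 'bitrate': '320k'}
```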
prime\r\n \"\"\"\r\n\r\n # n is the prime number to find out\r\n bruteforce_numbers = []\r\n for i in range(2, int(math.sqrt(n) + 1)):\r\n bruteforce_numbers.append(i)\r\n\r\n for number in bruteforce_numbers:\r\n if n != number and n % number == 0:\r\n return 0\r\n\r\n return 1\r\n\r\n\r\ndef euclide(a, b):\r\n \"\"\"\r\n a,b: number to use\r\n\r\n returns: MCD of a and b\r\n \"\"\"\r\n\r\n if b > a:\r\n a, b = b, a\r\n\r\n while True:\r\n newa = a % b\r\n if newa == 0:\r\n return b\r\n break\r\n else:\r\n a = b\r\n b = newa\r\n\r\n\r\ndef generate_keys():\r\n # INSERTING PRIME NUMBERS\r\n while True:\r\n p, q = input(\"Insert (P,Q)>> \").split(\",\")\r\n p = int(p)\r\n q = int(q)\r\n\r\n if is_prime(p) and is_prime(q) and p > 1 and q > 1:\r\n print(\"numbers ok\")\r\n break\r\n\r\n # CALCULATING n & m\r\n n = p * q\r\n print(f\"n = {n}\")\r\n\r\n m = int(((p - 1) * (q - 1)) / euclide(p - 1, q - 1))\r\n print(f\"m = ({(p - 1) * (q - 1)}) / {euclide(16, 10)} = {m}\")\r\n\r\n # CALCULATING c NUMBERS AND ASK USER TO CHOSE ONE\r\n c_list = []\r\n for i in range(2, m + 1):\r\n if euclide(i, m) == 1:\r\n c_list.append(i)\r\n\r\n while True:\r\n c = int(input(f\"Chose c {c_list}>>\"))\r\n if c in c_list:\r\n break\r\n\r\n # CALCULATING d NUMBERS AND ASK USER TO CHOSE ONE\r\n d_list = []\r\n for i in range(1, 81):\r\n if ((37 * i) % 80) == 1:\r\n d_list.append(i)\r\n\r\n while True:\r\n d = int(input(f\"Chose d {d_list}>> \"))\r\n if d in d_list:\r\n break\r\n print(m)\r\n return n, c, p, q, m, d\r\n\r\n\r\ndef main():\r\n n, c, p, q, m, d = generate_keys()\r\n\r\n print(f\"\"\"\r\n PUBLIC KEYS\r\n -n:\\t{n}\r\n -c:\\t{c}\r\n PRIVATE KEYS\r\n -p:\\t{p}\r\n -q:\\t{q}\r\n -m:\\t{m}\r\n -d:\\t{d}\r\n \"\"\")\r\n\r\n msg = input(\"MSG>> \")\r\n msg = msg.upper()\r\n\r\n cifred_msg = []\r\n number_msg = []\r\n for a in msg:\r\n number_msg.append(char2number[a])\r\n print(char2number[a])\r\n cifred_msg.append(int((char2number[a] ** c) % n))\r\n\r\n print(f\"cifred msg = {cifred_msg}\")\r\n print(f\"number msg = {number_msg}\")\r\n\r\n decifred_msg = \"\"\r\n for b in cifred_msg:\r\n decifred_msg = decifred_msg + str(number2char[int((b ** d) % n)])\r\n print(decifred_msg)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n#[459541, 134033, 243696, 243696, 497836, 121848, 497836, 252297, 243696, 357421]","sub_path":"Python/Cryptazione/Rsa.py","file_name":"Rsa.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205234459","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 13 12:24:53 2019\n input data must contain:\n 'drive_time', 'cycle_time', 'walk_time', 'PT_time', 'walk_time_PT',\n 'drive_cost','cycle_cost', 'walk_cost','PT_cost', 'main_mode'\n Other columns are treated as individual-specific variables\n@author: doorleyr\n\"\"\" \nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn_porter import Porter\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import _tree\nimport pickle\n\n# =============================================================================\n# Functions\n# =============================================================================\ndef forest_to_code(rf, feature_names):\n tab=\" \"\n # takes a fitted decision tree and outputs a python function\n with open(CHOICE_FUNCTION_PATH, 'w') as the_file: \n the_file.write('model choiceModel\\n\\n')\n the_file.write('import 
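The d-search in generate_keys() above hard-codes c = 37 and m = 80; in general, d is the modular inverse of c mod m. A worked check using one (p, q) pair that actually yields m = 80 (requires Python 3.8+ for pow with a negative exponent):

```python
import math

p, q = 11, 17                                  # (p-1)(q-1)/gcd = 10*16/2 = 80
n = p * q
m = (p - 1) * (q - 1) // math.gcd(p - 1, q - 1)
c = 37                                         # public exponent, coprime to m
d = pow(c, -1, m)                              # 13, since 37 * 13 = 481 = 6*80 + 1
assert (c * d) % m == 1
msg = 5
assert pow(pow(msg, c, n), d, n) == msg        # encrypt then decrypt round-trips
```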
\"MoBalance.gaml\"\\n\\n')\n the_file.write('global{\\n\\n')\n the_file.write('action choose_mode_per_people(people p,float walk_time, float drive_time, float PT_time, float cycle_time, float walk_time_PT,float drive_time_PT)'+'{ \\n')\n the_file.write(tab+'list probs<-[0.0,0.0,0.0,0.0];\\n'); \n for i in range(len(rf)):\n the_file.write('// '+ 'Tree #'+str(i)+'\\n')\n tree=rf[i]\n tree_ = tree.tree_\n feature_name = [\n feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature\n ]\n def recurse(node, depth):\n indent = tab * (depth+1)\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n name = feature_name[node]\n threshold = tree_.threshold[node]\n # print (\"{}if {} <= {}:\".format(indent, name, threshold))\n # the_file.write(\"%sif (%s <= %s) { \\n\"%(indent, name, str(threshold)))\n the_file.write(\"%s if (%s <= %.2f) \"%(indent, name, threshold)+\"{ \\n\")\n recurse(tree_.children_left[node], depth + 1)\n the_file.write(indent+'}'+'\\n')\n # the_file.write(\"{}else \\{ # if {} > {} \\n\".format(indent, name, threshold))\n the_file.write(indent+'else {'+ \"// if %s > %.2f \\n\"%(name, threshold)) \n # print (\"{}else: # if {} > {}\".format(indent, name, threshold))\n recurse(tree_.children_right[node], depth + 1)\n the_file.write(indent+'}'+'\\n')\n else:\n n_samples=sum([int(v) for v in tree_.value[node][0]])\n # the_file.write(\"{}p.mode<-['car', 'bike', 'walk', 'PT'][rnd_choice({})];\".format(indent, [round(v/n_samples,2) for v in tree_.value[node][0]])+\"} \\n\")\n the_file.write(\"{}list pred<-{}\".format(indent, [round(v/n_samples,2) for v in tree_.value[node][0]])+\"; \\n\")\n the_file.write(indent+\"loop o from: 0 to:3{probs[o]<-probs[o]+pred[o]; }\")\n # print (\"{}return {}\".format(indent, [int(v) for v in tree_.value[node][0]]))\n recurse(0, 1)\n the_file.write(tab+\"p.mode<-['car', 'bike', 'walk', 'PT'][rnd_choice(probs)];\\n\")\n the_file.write(tab+'}\\n')\n the_file.write('}')\n#********************************************\n# Constants\n#********************************************\n#MODE_TABLE_PATH='../../data/Boston/clean/main_modes.csv'\ncity='Boston'\nMODE_TABLE_PATH='./'+city+'/clean/trip_modes.csv'\nTOUR_TABLE_PATH='./'+city+'/clean/tours.csv'\nCHOICE_FUNCTION_PATH='../ABM/models/choiceModel.gaml'\nPICKLED_MODEL_PATH='./models/trip_mode_rf.p'\n#********************************************\n# Data\n#********************************************\nmode_table=pd.read_csv(MODE_TABLE_PATH)\n#to work with GAMA, rename all personal variables to p.name\nagent_specific_vars=['age', 'hh_income', 'male', 'bachelor_degree', 'pop_per_sqmile_home']\nmode_table=mode_table.rename(columns={v:'p.'+v for v in agent_specific_vars})\nfeatures=[c for c in mode_table.columns if not c=='mode']\n\nX=mode_table[features]\ny=mode_table['mode']\n\nrf = RandomForestClassifier(random_state = 0,n_estimators =5, max_depth=4)\n\nrf.fit(X, y)\n\nimportances = rf.feature_importances_\nstd = np.std([tree.feature_importances_ for tree in rf.estimators_],\n axis=0)\nindices = np.argsort(importances)[::-1]\nprint(\"Feature ranking:\")\n\nfor f in range(len(features)):\n print(\"%d. 
%s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\n# Plot the feature importances of the forest\n#plt.figure(figsize=(16, 9))\n#plt.title(\"Feature importances\")\n#plt.bar(range(len(features)), importances[indices],\n# color=\"r\", yerr=std[indices], align=\"center\")\n#plt.xticks(range(len(features)), [features[i] for i in indices], rotation=90, fontsize=15)\n#plt.xlim([-1, len(features)])\n#plt.show()\n\n#forest_to_code(rf.estimators_, features)\n\npickle.dump( rf, open( PICKLED_MODEL_PATH, \"wb\" ) )\n\n\n \n \n","sub_path":"scripts/archive/trip_mode_rf_GAMA.py","file_name":"trip_mode_rf_GAMA.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580707942","text":"import logging\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nfrom model_bert_crf import MyModel\nfrom bert import modeling as bert_modeling\nfrom utils import DataProcessor_BERT as DataProcessor\nfrom utils import load_vocabulary\nfrom utils import extract_kvpairs_in_bio\nfrom utils import cal_f1_score\n\nbert_vocab_path = \"./bert_model/vocab.txt\"\nbert_config_path = \"./bert_model/bert_config.json\"\nbert_ckpt_path = \"./bert_model/chinese_L-12_H-768_A-12.ckpt\"\n\n# set logging\nlog_file_path = \"./ckpt/run.log\"\nif os.path.exists(log_file_path): os.remove(log_file_path)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(asctime)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\nchlr = logging.StreamHandler()\nchlr.setFormatter(formatter)\nfhlr = logging.FileHandler(log_file_path)\nfhlr.setFormatter(formatter)\nlogger.addHandler(chlr)\nlogger.addHandler(fhlr)\n\nlogger.info(\"loading vocab...\")\n\nw2i_char, i2w_char = load_vocabulary(bert_vocab_path)\nw2i_bio, i2w_bio = load_vocabulary(\"./data/vocab_bioattr.txt\")\n\nlogger.info(\"loading data...\")\n\ndata_processor_train = DataProcessor(\n \"./data/train/input.seq.char\",\n \"./data/train/output.seq.bioattr\",\n w2i_char,\n w2i_bio, \n shuffling=True\n)\n\ndata_processor_valid = DataProcessor(\n \"./data/test/input.seq.char\",\n \"./data/test/output.seq.bioattr\",\n w2i_char,\n w2i_bio, \n shuffling=True\n)\n\nlogger.info(\"building model...\")\n\nbert_config = bert_modeling.BertConfig.from_json_file(bert_config_path)\nlogger.info(bert_config.to_json_string())\n \nmodel = MyModel(bert_config=bert_config, \n vocab_size_bio=len(w2i_bio), \n use_lstm=False,\n use_crf=True)\n\nlogger.info(\"model params:\")\nparams_num_all = 0\nfor variable in tf.trainable_variables():\n params_num = 1\n for dim in variable.shape:\n params_num *= dim\n params_num_all += params_num\n logger.info(\"\\t {} {} {}\".format(variable.name, variable.shape, params_num))\nlogger.info(\"all params num: \" + str(params_num_all))\n \nlogger.info(\"loading bert pretrained parameters...\")\ntvars = tf.trainable_variables()\n(assignment_map, initialized_variable_names) = bert_modeling.get_assignment_map_from_checkpoint(tvars, bert_ckpt_path)\ntf.train.init_from_checkpoint(bert_ckpt_path, assignment_map)\n\nlogger.info(\"start training...\")\n\ntf_config = tf.ConfigProto(allow_soft_placement=True)\ntf_config.gpu_options.allow_growth = True\n\nwith tf.Session(config=tf_config) as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=50)\n \n epoches = 0\n losses = []\n batches = 0\n best_f1 = 0\n batch_size = 32\n\n while epoches < 30:\n (inputs_seq_batch, \n inputs_mask_batch,\n inputs_segment_batch,\n 
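The exporter above walks sklearn's flat tree arrays (feature, threshold, children_left/right, value). A compact, runnable variant of the same traversal that prints one rule per leaf instead of emitting GAML; the toy dataset is only for illustration:

```python
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier, _tree

def tree_to_rules(tree, feature_names):
    t = tree.tree_
    def recurse(node, conds):
        if t.feature[node] != _tree.TREE_UNDEFINED:   # internal split node
            name, thr = feature_names[t.feature[node]], t.threshold[node]
            recurse(t.children_left[node], conds + ["%s <= %.2f" % (name, thr)])
            recurse(t.children_right[node], conds + ["%s > %.2f" % (name, thr)])
        else:                                         # leaf: class frequencies
            counts = t.value[node][0]
            probs = [round(v / counts.sum(), 2) for v in counts]
            print(" and ".join(conds) or "(root)", "->", probs)
    recurse(0, [])

X, y = make_classification(n_samples=100, n_features=3, n_informative=2,
                           n_redundant=0, random_state=0)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
tree_to_rules(clf, ["f0", "f1", "f2"])
```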
outputs_seq_batch) = data_processor_train.get_batch(batch_size)\n \n feed_dict = {\n model.inputs_seq: inputs_seq_batch,\n model.inputs_mask: inputs_mask_batch,\n model.inputs_segment: inputs_segment_batch,\n model.outputs_seq: outputs_seq_batch\n }\n \n if batches == 0: \n logger.info(\"###### shape of a batch #######\")\n logger.info(\"inputs_seq: \" + str(inputs_seq_batch.shape))\n logger.info(\"inputs_mask: \" + str(inputs_mask_batch.shape))\n logger.info(\"inputs_segment: \" + str(inputs_segment_batch.shape))\n logger.info(\"outputs_seq: \" + str(outputs_seq_batch.shape))\n logger.info(\"###### preview a sample #######\")\n logger.info(\"input_seq:\" + \" \".join([i2w_char[i] for i in inputs_seq_batch[0]]))\n logger.info(\"input_mask :\" + \" \".join([str(i) for i in inputs_mask_batch[0]]))\n logger.info(\"input_segment :\" + \" \".join([str(i) for i in inputs_segment_batch[0]]))\n logger.info(\"output_seq: \" + \" \".join([i2w_bio[i] for i in outputs_seq_batch[0]]))\n logger.info(\"###############################\")\n \n loss, _ = sess.run([model.loss, model.train_op], feed_dict)\n losses.append(loss)\n batches += 1\n \n if data_processor_train.end_flag:\n data_processor_train.refresh()\n epoches += 1\n\n def valid(data_processor, max_batches=None, batch_size=1024):\n preds_kvpair = []\n golds_kvpair = []\n batches_sample = 0\n \n while True:\n (inputs_seq_batch, \n inputs_mask_batch,\n inputs_segment_batch,\n outputs_seq_batch) = data_processor.get_batch(batch_size)\n\n feed_dict = {\n model.inputs_seq: inputs_seq_batch,\n model.inputs_mask: inputs_mask_batch,\n model.inputs_segment: inputs_segment_batch\n }\n \n preds_seq_batch = sess.run(model.outputs, feed_dict)\n \n for input_seq, pred_seq, gold_seq, mask in zip(inputs_seq_batch,\n preds_seq_batch,\n outputs_seq_batch,\n inputs_mask_batch):\n l = sum(mask) - 2\n pred_seq = [i2w_bio[i] for i in pred_seq[1:-1][:l]]\n gold_seq = [i2w_bio[i] for i in gold_seq[1:-1][:l]]\n char_seq = [i2w_char[i] for i in input_seq[1:-1][:l]]\n \n pred_kvpair = extract_kvpairs_in_bio(pred_seq, char_seq)\n gold_kvpair = extract_kvpairs_in_bio(gold_seq, char_seq)\n \n preds_kvpair.append(pred_kvpair)\n golds_kvpair.append(gold_kvpair)\n \n if data_processor.end_flag:\n data_processor.refresh()\n break\n \n batches_sample += 1\n if (max_batches is not None) and (batches_sample >= max_batches):\n break\n \n p, r, f1 = cal_f1_score(preds_kvpair, golds_kvpair)\n\n logger.info(\"Valid Samples: {}\".format(len(preds_kvpair)))\n logger.info(\"Valid P/R/F1: {} / {} / {}\".format(round(p*100, 2), round(r*100, 2), round(f1*100, 2)))\n \n return (p, r, f1)\n \n if batches % 100 == 0:\n logger.info(\"\")\n logger.info(\"Epoches: {}\".format(epoches))\n logger.info(\"Batches: {}\".format(batches))\n logger.info(\"Loss: {}\".format(sum(losses) / len(losses)))\n losses = []\n\n ckpt_save_path = \"./ckpt/model.ckpt.batch{}\".format(batches)\n logger.info(\"Path of ckpt: {}\".format(ckpt_save_path))\n saver.save(sess, ckpt_save_path)\n \n p, r, f1 = valid(data_processor_valid, max_batches=10)\n if f1 > best_f1:\n best_f1 = f1\n logger.info(\"############# best performance now here ###############\")\n \n ","sub_path":"nlp_tf1_implement/ner/train_bert_crf.py","file_name":"train_bert_crf.py","file_ext":"py","file_size_in_byte":7133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"594365366","text":"from django.conf.urls import url\n\nfrom workerstation.views import 
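The valid() loop above recovers each real sequence by masking off padding and dropping the [CLS]/[SEP] positions; the same slicing in isolation, with a toy label map of my own:

```python
def trim_special_tokens(seq_ids, mask, id2label):
    # real length = mask sum minus the two special tokens ([CLS], [SEP])
    l = sum(mask) - 2
    return [id2label[i] for i in seq_ids[1:-1][:l]]

id2label = {0: "O", 1: "B-PER", 2: "I-PER"}
# layout: [CLS] tok tok tok [SEP] pad pad
print(trim_special_tokens([0, 1, 2, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0], id2label))
# ['B-PER', 'I-PER', 'O']
```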
(\n\t\t\t\t\tworkerstation_index,\n\t\t\t\t\ttimesheet_success,\n\t\t\t\t\tvacation_pending,\n\t\t\t\t\tsick_leave,\n\t\t\t\t\tmgnt_clocking,\n\t\t\t\t\tholiday_request_action,\n\t\t\t\t\tworkerstation_login,\n\t\t\t\t\tauth_view,\n\t\t\t\t\tworkerstation_logout,\n\t\t\t\t\tloggedin,\n\t\t\t\t\tinvalid_login,\n\t\t\t\t )\n\napp_name = \"workerstation\"\nurlpatterns = [\n\turl(r'^$', workerstation_index, name=\"workerstation-index\"),\n\turl(r'^timesheet_success/(?P\\w+)/$', timesheet_success, name=\"timesheet_success\"),\n\turl(r'^vacation_pending/$', vacation_pending, name=\"vacation_pending\"),\n\turl(r'^sick_leave/$', sick_leave, name=\"sick_leave\"),\n\turl(r'^mgnt_clocking/$', mgnt_clocking, name=\"mgnt_clocking\"),\n\turl(r'^holiday_request/(?P\\w+)/(?P\\w+)/$', holiday_request_action, name=\"holiday_request_action\"),\n\turl(r'^mgnt/$', workerstation_login, name=\"workerstation-login\"),\n\turl(r'^auth_view/$', auth_view, name=\"auth_view\"),\n\turl(r'^logout/$', workerstation_logout, name=\"workerstation-logout\"),\n\turl(r'^loggedin/$', loggedin, name=\"workerstation-loggedin\"),\n\turl(r'^invalid_login/$', invalid_login, name=\"workerstation-invalid-login\"),\n]\n","sub_path":"workerstation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"143987312","text":"import gzip\nimport os\nimport math\nimport time \n\nclass MMSplitter:\n\n ''' Set source file and get file size '''\n def __init__(self):\n self.source_file = 'source.csv.gz'\n self.source_file_size = os.path.getsize(self.source_file)\n self.num_files = 10\n\n\n def split_file(self):\n ''' Calculate sizes of new files (source size / number of splits)'''\n chunk_size = math.ceil(self.source_file_size/10)\n \n ''' Open source file '''\n with gzip.open(self.source_file, 'rb') as source:\n\n for chunk in range(1, (self.num_files + 1)):\n fname = '{}.csv.gz'.format(chunk)\n print('Writing {}'.format(fname))\n\n ''' Open Nth file and write next chunk '''\n with gzip.open(fname,'wb') as newfile:\n newfile.write(source.read(chunk_size))\n \n\nif __name__ == '__main__':\n print(\"Splitting File...\")\n start_time = time.time()\n\n splitter = MMSplitter()\n splitter.split_file()\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"118005453","text":"from testback import Testback\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# S5 假如bid1預測到下跌機率超過BIAS,高掛空單,看n1個I080內有沒有機會下到空單,成交後,n2個I080內停損或平倉。\n\nclass Strategy5(Testback):\n def __init__(self, data, order_bias, order_steps, stop_steps):\n super(Strategy5, self).__init__(data)\n self.order_bias = order_bias\n self.is_long = False\n self.stop_steps = stop_steps\n self.order_steps = order_steps\n\n def can_buy(self):\n return self.lastest_price >= self.order_price\n\n def can_sell(self):\n return (self.possessing_time > self.stop_steps or self.can_make_profit()) and self.possessing > 0\n\n def buy(self):\n self.possessing += 1\n self.possessing_time = 0\n self.cost = self.lastest_price\n\n def sell(self):\n self.possessing -= 1\n unrealized_profit = self.unrealized_profit()\n if unrealized_profit > 0:\n self.earnings.append(unrealized_profit)\n else:\n self.losings.append(unrealized_profit)\n\n def can_order(self):\n return 
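The splitter above derives chunk_size from a hard-coded 10 rather than self.num_files, and sizes chunks from the compressed file while reading decompressed bytes. A sketch of the plain binary variant with the divisor taken from the parameter (output file names are mine):

```python
import math
import os

def split_binary(path, num_files):
    chunk_size = math.ceil(os.path.getsize(path) / num_files)
    with open(path, "rb") as src:
        for i in range(1, num_files + 1):
            data = src.read(chunk_size)
            if not data:          # file exhausted early
                break
            with open("%d.part" % i, "wb") as out:
                out.write(data)   # parts re-join with simple concatenation
```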
self.num_order == 0 and self.row['nextask1p_label_pred_d'] >= self.order_bias\n\n def order(self):\n self.order_price = self.lastest_ask1p\n self.num_order += 1\n\n def can_cancel_order(self):\n return self.order_time > self.order_steps\n\n def cancel_order(self):\n self.num_order = 0\n\n\nif __name__ == \"__main__\":\n\n BIAS = 0.4\n data = pd.read_csv(\"./data/0828_tick5_timestep40_askbid1p_withProb.csv\")\n order_steps = 5\n\n # plt.scatter(np.arange(len(data)), data.loc[:, 'nextask1p_label_pred_i'], label = 'ask1 increase')\n # # plt.scatter(np.arange(len(data)), data.loc[:, 'nextask1p_label_pred_d'], label = 'ask1 decrease')\n # plt.scatter(np.arange(len(data)), data.loc[:, 'nextbid1p_label_pred_i'], label = 'bid1 increase')\n # # plt.scatter(np.arange(len(data)), data.loc[:, 'nextbid1p_label_pred_d'], label = 'bid1 decrease')\n # plt.plot(np.arange(len(data)), [BIAS]*len(data))\n # plt.legend()\n # plt.show()\n # print(\"num of bid1 increase points that prob exceed: %.2f\"%(BIAS), np.sum(data.loc[:, 'nextbid1p_label_pred_i'] > BIAS))\n # print(\"num of ask1 increase points that prob exceed: %.2f\"%(BIAS), np.sum(data.loc[:, 'nextask1p_label_pred_i'] > BIAS))\n for order_steps in [5, 10, 15]:\n for stop_steps in [5, 10]:\n print(\"order_steps: \", order_steps)\n print(\"stop_steps: \", stop_steps)\n S5 = Strategy5(\n data=data,\n order_bias=BIAS,\n order_steps = order_steps,\n stop_steps = stop_steps)\n S5.run()\n print(\"-\"*20)\n","sub_path":"testback/strategy5.py","file_name":"strategy5.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"384077530","text":"\n# purpose \n# plot master geojson tracks and recent individual tracks \n\n# usage\n# python plot_master_geojson.py --dir_geojson=data/geojson\n\n\n# imports\nimport os\nimport numpy as np\nimport folium\nimport geopandas\nimport argparse\nimport glob\nfrom datetime import datetime as dt\nfrom datetime import timedelta as td\nimport geojson\nimport webbrowser\nimport matplotlib.cm as cm\n#from scipy.signal import medfilt\nimport pandas as pd\n\nfrom utils import calc_dist_from_coords\nfrom utils import RDP\nfrom utils import rgb2hex\nfrom utils import calc_dist_between_two_coords\nfrom utils import calc_dist_between_one_point_to_all_points\n\n#manual_debug = True\nmanual_debug = False\nif (manual_debug):\n dir_work = '/home/craigmatthewsmith/gps_tracks'\n os.chdir(dir_work)\n dir_geojson = 'data/geojson'\nelse: # parse command line parameters\n dir_work = os.getcwd()\n parser = argparse.ArgumentParser(description = 'process gpx files to geojson')\n parser.add_argument('--dir_geojson', type=str, default='data/geojson', help = 'data of geojson files')\n args = parser.parse_args() \n dir_geojson = args.dir_geojson \n\nuse_RDP = True\nepsilon = 1.0 # rdp thinning \n#dist_min_aggregate_points = 1.0 \ndist_min_aggregate_points = 3.0 \n#dist_min_aggregate_points = 10.0 \n\n# 1.0, reduced points from 75752 to 65536, 2.5 M master.geojson file size \n# 10.0, reduced points from 75752 to 19330, 2.5 M master.geojson file size\n\ndist_max_between_points_to_make_line = 100.0 # dont plot lines this far away\n\ndt_now = dt.now()\n\ntrailheads_file = 'trailheads.csv'\n# read trailheads file\ntrailheads_csv = pd.read_csv(trailheads_file,index_col=0)\n#trailheads_matrix = stn_read_csv.as_matrix()\ntrailheads_csv.head()\n\n#th_lon = [-121.95168, -121.96682, -121.97457]\n#th_lat = [37.83386, 37.85792, 37.8524]\n\nth_lat = [\n 37.8694,\n 37.84743,\n 
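The sell() bookkeeping above sorts each realized trade into earnings or losings; isolated, that pattern is just:

```python
def book_trade(cost, exit_price, earnings, losings):
    pnl = exit_price - cost
    (earnings if pnl > 0 else losings).append(pnl)
    return pnl

earnings, losings = [], []
for cost, exit_price in [(100, 103), (100, 98), (100, 101)]:
    book_trade(cost, exit_price, earnings, losings)
print("wins:", earnings, "losses:", losings)  # wins: [3, 1] losses: [-2]
```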
37.8628, \n 37.87572,\n 37.87586,\n 37.88652, \n 37.88754, \n 37.87056, \n 37.8501, \n 37.85977, \n 37.85792,\n 37.83386,\n 37.8524]\n\nth_lon = [\n -121.9973,\n -121.98593,\n -121.9791,\n -122.02286,\n -122.01466,\n -122.01799,\n -122.01429,\n -122.01017,\n -121.99069,\n -121.98874,\n -121.96682,\n -121.95168,\n -121.97457]\n\nn_th = len(th_lat)\n\n\n# last updated 2020/05/16\nn_recent_days1 = 90\nn_recent_days2 = 7 \n\n\n# find recent geojson files\ngeojson_file_list_recent1 = []\ngeojson_file_list_recent2 = []\n\ngeojson_file_list_all = glob.glob(os.path.join(dir_geojson, '*.geojson'))\nn_files = len(geojson_file_list_all)\nprint(n_files) \nn = 10\nfor n in range(0, n_files, 1):\n file_temp = geojson_file_list_all[n] \n date_temp = os.path.basename(file_temp).split('.')[0]\n if (date_temp.endswith('p')):\n date_temp = date_temp.split('p')[0]\n #print(date_temp)\n dt_temp = dt.strptime(date_temp,'%Y-%m-%d_%H-%M')\n #print(dt_temp)\n days_delta = (dt_now - dt_temp).days\n #print(days_delta)\n if (days_delta <= n_recent_days2):\n geojson_file_list_recent2.append(file_temp)\n if (days_delta > n_recent_days2 and days_delta <= n_recent_days1):\n geojson_file_list_recent1.append(file_temp)\n\nn_files1 = len(geojson_file_list_recent1)\nn_files2 = len(geojson_file_list_recent2)\nprint('found %s recent files' %(n_files1)) \nprint('found %s recent files' %(n_files2)) \n\nfeatures_tracks_recent1 = []\nf = 0\nfor f in range(0, n_files1, 1):\n geojson_file = geojson_file_list_recent1[f]\n print(' processing f %s of %s ' %(f, n_files))\n # read geojson file\n with open(geojson_file, 'r') as file:\n geojson_data = geojson.load(file) \n #geojson_data\n for feature in geojson_data['features']:\n line = geojson.LineString(feature['geometry']['coordinates'])\n features_tracks_recent1.append(geojson.Feature(geometry=line)) \n\nfeatures_tracks_recent2 = []\nf = 0\nfor f in range(0, n_files2, 1):\n geojson_file = geojson_file_list_recent2[f]\n print(' processing f %s of %s ' %(f, n_files))\n # read geojson file\n with open(geojson_file, 'r') as file:\n geojson_data = geojson.load(file) \n #geojson_data\n for feature in geojson_data['features']:\n line = geojson.LineString(feature['geometry']['coordinates'])\n features_tracks_recent2.append(geojson.Feature(geometry=line)) \n\n \ngeojson_data_track_recent1 = geojson.FeatureCollection(features_tracks_recent1)\ngeojson_data_track_recent2 = geojson.FeatureCollection(features_tracks_recent2)\n \n#geojson_file = os.path.join(dir_work, 'master_thin.geojson')\ngeojson_file = 'master_thin_min_'+str(int(dist_min_aggregate_points))+'_max_'+str(int(dist_max_between_points_to_make_line))+'.geojson'\n\n# print(os.path.isfile(geojson_file))\n\n# read geojson file\nwith open(geojson_file, 'r') as file:\n geojson_data = geojson.load(file)\n\nfeatures_tracks = []\nfeatures_n_times = []\n\nfor feature in geojson_data['features']:\n line = geojson.LineString(feature['geometry']['coordinates'])\n n_times = feature['properties']['n_times']\n features_tracks.append(geojson.Feature(geometry=line))\n features_n_times.append(geojson.Feature(geometry=line, properties={'n_times': n_times}))\n \ngeojson_data_track = geojson.FeatureCollection(features_tracks)\ngeojson_data_n_times = geojson.FeatureCollection(features_n_times)\n \ncmin_n_times = min(feature['properties']['n_times'] for feature in geojson_data['features'])\ncmax_n_times = max(feature['properties']['n_times'] for feature in geojson_data['features'])\ncmax_n_times = 20\n\nprint ('min and max times is %s - %s' %(cmin_n_times, 
cmax_n_times))\n\n# '#FC4C02'\n# create new GeoJson objects to reduce GeoJSON data sent to Folium map as layer\nstyle_track1 = lambda x: {'color': '#a432a8', 'weight': 5} \nstyle_track2 = lambda x: {'color': '#11f52f', 'weight': 5} # #4e32a8 \n#style_track2 = lambda x: {'color': '#11f52f', 'weight': 5} \n# cmap needs normalized data\n#style_n_times = lambda x: {'color': rgb2hex(cmap((x['properties']['n_times']-cmin_n_times)/(cmax_n_times-cmin_n_times))), 'weight': 5} \nstyle_n_times = lambda x: {'color': rgb2hex(cmap((min(cmax_n_times,x['properties']['n_times']) -cmin_n_times)/(cmax_n_times-cmin_n_times))), 'weight': 5} \ntooltip_n_times = folium.features.GeoJsonTooltip(fields=['n_times'], aliases=['n_times'])\n\n\n#fmap = folium.Map(location = [53.545612, -113.490067], zoom_start= 10.5)\n#fmap = folium.Map(location=[37.862606, -121.978372], tiles='Stamen Terrain', zoom_start=11, control_scale=True)\nfmap = folium.Map(location=[37.862606, -121.978372], tiles='Stamen Terrain', zoom_start=13, control_scale=True)\n\n# set up Folium map\n#fmap = folium.Map(tiles = None, prefer_canvas=True, disable_3d=True)\n#fmap = folium.Map(tiles='Stamen Terrain', prefer_canvas=True, disable_3d=True)\n#fmap = folium.Map(tiles='Stamen Terrain', name='Terrain Map', location=[34.862606, -121.978372], zoom_start=10) \n# folium.TileLayer(tiles = 'Stamen Terrain', name='Terrain Map', show=True).add_to(fmap)\nfolium.TileLayer(tiles = 'CartoDB dark_matter', name='CartoDB', show=False).add_to(fmap)\nfolium.TileLayer(tiles = 'OpenStreetMap', name='OpenStreetMap', show=False).add_to(fmap)\ncmap = cm.get_cmap('jet') # matplotlib colormap\nprint('appending features to map ')\n\n# add heatmap\n#folium.GeoJson(geojson_data_track, style_function=style_track, name='track', show=True, smooth_factor=3.0).add_to(fmap)\nfolium.GeoJson(geojson_data_n_times, style_function=style_n_times, tooltip=tooltip_n_times, name='n_times', show=True, smooth_factor=3.0).add_to(fmap)\n# add th\nfor n in range(0, n_th, 1):\n #folium.Marker([th_lat[n], th_lon[n]]).add_to(fmap)\n #folium.Marker([th_lat[n], th_lon[n]], fill_color='#43d9de', radius=8).add_to(fmap)\n folium.Marker([th_lat[n], th_lon[n]], fill_color='#43d9de', radius=4).add_to(fmap)\n # popup=df_counters['Name'][point], icon=folium.Icon(color='darkblue', icon_color='white', icon='male', angle=0, prefix='fa')).add_to(marker_cluster)\n\n# add recent tracks \n# folium.GeoJson(geojson_data_track_recent1, style_function=style_track1, name='7-90d', show=True, smooth_factor=3.0).add_to(fmap)\n# folium.GeoJson(geojson_data_track_recent2, style_function=style_track2, name='last 7 days', show=True, smooth_factor=3.0).add_to(fmap)\n\n# add layer control widget\nfolium.LayerControl(collapsed=False).add_to(fmap)\n\n# save map to html file\n#fmap.fit_bounds(fmap.get_bounds())\n\n#html_file = os.path.join(dir_work, 'heatmap.html')\n#html_file = 'heatmap_'+str(int(dist_min))+'_max_'+str(int(dist_max))+'.html'\nhtml_file = 'heatmap_'+str(int(dist_min_aggregate_points))+'_max_'+str(int(dist_max_between_points_to_make_line))+'.html'\nif os.path.isfile(html_file):\n os.system('rm -f '+html_file)\nfmap.save(html_file)\n# open html file in default browser\nwebbrowser.open(html_file, new=2, autoraise=True)\n","sub_path":"plot_master_geojson.py","file_name":"plot_master_geojson.py","file_ext":"py","file_size_in_byte":8790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"367083274","text":"import random\r\nfrom chessman import *\r\nimport re\r\nfrom 
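The style_n_times lambda above clips n_times into [cmin, cmax] before normalizing for the colormap; the same mapping as a plain function. matplotlib's own rgb2hex stands in for the project's utils.rgb2hex, and cm.get_cmap matches the call above (newer matplotlib, 3.9+, prefers matplotlib.colormaps["jet"]):

```python
import matplotlib.cm as cm
from matplotlib.colors import rgb2hex

def n_times_to_color(n_times, cmin=1, cmax=20, cmap=cm.get_cmap("jet")):
    # clip into [cmin, cmax], normalize to [0, 1], then map to a hex color
    frac = (min(cmax, n_times) - cmin) / (cmax - cmin)
    return rgb2hex(cmap(frac))

for n in (1, 10, 50):
    print(n, n_times_to_color(n))  # 50 saturates at the cmax color
```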
chessboard import *\r\nclass Engine(object):\r\n    def __init__(self,chessboard):\r\n        self.__chessboard = chessboard\r\n\r\n    def computerGo(self,chessman):\r\n        if not isinstance(chessman,ChessMan):\r\n            raise Exception('the first argument must be a ChessMan object')\r\n        '''\r\n        Computer move: write the chosen position into the chessman object\r\n        :param chessman: chessman object whose color is already set\r\n        :return: \r\n        '''\r\n        while True:\r\n\r\n            posX = random.randint(1,15)# [1,15], 1 through 15\r\n            posY = random.randint(1,15)# [1,15], 1 through 15\r\n            # check whether the position is empty\r\n            if self.__chessboard.isEmpty((posX,posY)):\r\n                print('computer move position:',(posX,posY))\r\n                # if it is empty, write posX and posY into the chessman position\r\n                chessman.setPos((posX,posY))\r\n                # leave the while loop\r\n                break\r\n\r\n    def userGo(self,chessman,userInput):\r\n        '''\r\n        User move: parse the user input string and write the move position into the chessman object\r\n        :param chessman: chessman object whose color is already set\r\n        :param userInput: user-entered move coordinates, 1-15,a-o\r\n        :return: False if the move cannot be made, True if it can\r\n        '''\r\n        if not isinstance(chessman,ChessMan):\r\n            raise Exception('the first argument must be a ChessMan object')\r\n\r\n        pattern = '^([1-9]|1[0-5]),([a-o])$'\r\n        ret = re.findall(pattern,userInput)\r\n        if len(ret):\r\n            posX,posY = ret[0]\r\n            posX = int(posX)\r\n            posY = ord(posY) - ord('a') + 1\r\n            print('user move position:', (posX, posY))\r\n            # check whether the position is empty\r\n            if self.__chessboard.isEmpty((posX,posY)):\r\n                # if it is empty, write posX and posY into the chessman position\r\n                chessman.setPos((posX, posY))\r\n                return True\r\n\r\n        # input format invalid or position not empty\r\n        return False\r\n\r\n    def isWon(self,pos,color):\r\n        '''\r\n        Check whether the chessman just placed wins the game\r\n        :param pos: the position of the move\r\n        :param color: the color of the chessman\r\n        :return: True if the game is decided, False if not yet decided\r\n        '''\r\n        if not isinstance(pos,tuple) and not isinstance(pos,list):\r\n            raise Exception('the first argument must be a tuple or a list')\r\n        if pos[0] <= 0 or pos[0] > ChessBoard.BOARD_SIZE:\r\n            raise Exception('index out of range')\r\n        if pos[1] <= 0 or pos[1] > ChessBoard.BOARD_SIZE:\r\n            raise Exception('index out of range')\r\n\r\n        # vertical direction (posX-4,posY)-(posX+4,posY)\r\n        startX = 1 # start of the scan\r\n        if pos[0] - 4 > 1:\r\n            startX = pos[0] - 4\r\n        endX = ChessBoard.BOARD_SIZE# end x position of the scan\r\n        if pos[0] + 4 < ChessBoard.BOARD_SIZE:\r\n            endX = pos[0] + 4\r\n        count = 0 # how many chessmen are connected in a row\r\n\r\n        for posX in range(startX,endX + 1):\r\n            if self.__chessboard.getChess((posX,pos[1])) == color:\r\n                count += 1\r\n                if count >= 5:\r\n                    return True\r\n\r\n            else:# once the run breaks, reset the count to 0\r\n                count = 0\r\n\r\n\r\n        # horizontal direction\r\n        startY = 1 # start of the scan\r\n        if pos[1] - 4 > 1:\r\n            startY = pos[1] - 4\r\n        endY = ChessBoard.BOARD_SIZE # end y position of the scan\r\n        if pos[1] + 4 < ChessBoard.BOARD_SIZE:\r\n            endY = pos[1] + 4\r\n        count = 0 # how many chessmen are connected in a row\r\n\r\n        for posY in range(startY,endY + 1):\r\n            if self.__chessboard.getChess((pos[0], posY)) == color:\r\n                count += 1\r\n                if count >= 5:\r\n                    return True\r\n\r\n            else: # once the run breaks, reset the count to 0\r\n                count = 0\r\n\r\n        # upper-left to lower-right diagonal\r\n        count = 0\r\n        s = pos[0] - pos[1]# x minus y, the offset between the diagonals\r\n        start = startX# starting x coordinate\r\n        end = endY + s# scan bound derived from endY, shifted by s\r\n        if pos[0] > pos[1]:#\r\n            start = startY + s\r\n            end = endX# scan bound taken from endX\r\n        for index in range(start, end + 1):\r\n            if self.__chessboard.getChess((index, index - s)) == color:\r\n                count += 1\r\n                if count >= 5:\r\n                    return True\r\n            else:\r\n                # once the run breaks, reset the count to 0\r\n                count = 0\r\n\r\n        # lower-left to upper-right diagonal\r\n        count = 0\r\n        s = pos[0] + pos[1]#\r\n        if pos[0] + pos[1] <= 16:\r\n            start = startX\r\n            end = s - startY\r\n\r\n        if pos[0] + pos[1] > 16:\r\n            start = s - startY\r\n            end = startX\r\n\r\n        if s >= 6 and s <= 12:\r\n            for index in range(start, end + 1):\r\n                if self.__chessboard.getChess((index, s - index)) == color:\r\n                    count += 1\r\n                    if count >= 5:\r\n                        return True\r\n                else:\r\n                    # once the run breaks, reset the count to 0\r\n                    count = 0\r\n        # no run of 5 same-colored chessmen in any of the four directions, so the game goes on\r\n\r\n        return False\r\n\r\n    def isWonMan(self,chessman):\r\n        '''\r\n        Check whether placing chessman on the board wins the game\r\n        :param chessman: the chessman holding the position and color to place\r\n        :return: True if the game is decided, False if not yet decided\r\n        '''\r\n        if not isinstance(chessman,ChessMan):\r\n            raise Exception('the first argument must be a ChessMan object')\r\n        pos = chessman.getPos()\r\n        color = chessman.getColor()\r\n        return self.isWon(pos,color)\r\n\r\n\r\n\r\n","sub_path":"五子棋网络对战/服务端/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":5769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"81906980","text":"# In an array of integers sorted in ascending order, find a given element\n# (report its index) or report\n# that there is no such element\nfrom random import random\ndlina_massiva = int(input('enter the array length '))\nmassiv = []\nfor i in range(dlina_massiva):\n    massiv.append(int(random()*10))\nmassiv.sort()\nprint(massiv)\n\nnumber = int(input('Enter the number to find '))\n\nlow = 0\nhigh = dlina_massiva-1\nwhile low <= high:\n    mid = (low + high) // 2\n    if number < massiv[mid]:\n        high = mid - 1\n    elif number > massiv[mid]:\n        low = mid + 1\n    else:\n        print(\"ID =\", mid)\n        break\nelse:\n    print(\"Number not found\")","sub_path":"zadacha1.2.py","file_name":"zadacha1.2.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"517610530","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import make_scorer, mean_absolute_error, mean_squared_error\nimport logging\nimport sys\nimport pandas as pd\nfrom future.utils import with_metaclass\nfrom sklearn.linear_model import *\n\nfrom sklearn.calibration import *\nfrom sklearn import metrics\nfrom modeldb.thrift.modeldb.ttypes import ExperimentRun ,Project,Experiment\nfrom modeldb.basic.Structs import (\n    Model, ModelConfig, ModelMetrics, Dataset)\nfrom sklearn.ensemble import *\nfrom sklearn.pipeline import Pipeline\nfrom modeldb.utils.Singleton import Singleton\nfrom modeldb.basic import *\nfrom modeldb.events import *\n\nfrom modeldb.thrift.modeldb import ttypes as modeldb_types\nfrom modeldb.thrift.modeldb import ModelDBService\n# from ..basic import *\n# from ..events import *\n# from ..thrift.modeldb import ModelDBService\n# from ..thrift.modeldb import ttypes as modeldb_types\n\nfrom pymongo import MongoClient\nimport gridfs\nfrom bson.objectid import ObjectId\nimport logging\nimport sklearn.metrics\nfrom sklearn.externals import joblib\nimport sklearn, sklearn_pandas\n\nimport os\nimport re\nfrom glob import glob\nimport pickle\n# from ..events import FitEvent ,TransformEvent,PipelineEvent,GridSearchCVEvent,MetricEvent,RandomSplitEvent,ExperimentEvent\n\nimport numpy as np\nfrom thrift import Thrift\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\n\nfrom keras.layers import Input,Dense, Activation, Embedding, LSTM\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten,TimeDistributed\nfrom keras import backend as K\nfrom keras.models import Sequential,Model\nfrom keras.optimizers import SGD,Adagrad\nimport logging\n\nlogger=logging.getLogger(__name__)\n\ndef fit_fn(self,x_train,y_train,epochs=5,batch_size=32,**params):\n    logger.info(\"fit model for keras\")\n    model=self.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)\n    if not params:\n        params={'epochs':epochs,'batch_size':batch_size}\n    fit_event=FitEvent(model,self,x_train,params)\n    
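The hand-rolled loop in zadacha1.2.py above is the classic binary search; the standard library already ships it, so an equivalent with bisect:

```python
from bisect import bisect_left

def find_index(sorted_arr, target):
    i = bisect_left(sorted_arr, target)   # leftmost insertion point
    if i < len(sorted_arr) and sorted_arr[i] == target:
        return i
    return None

print(find_index([1, 3, 3, 7, 9], 7))  # 3
print(find_index([1, 3, 3, 7, 9], 4))  # None
```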
Syncer.instance.add_to_buffer(event=fit_event)\n return model\n\n\ndef compile_fn(self,loss='categorical_crossentropy',optimizer= SGD(lr=0.01, momentum=0.9, nesterov=True),metrics=['accuracy']):\n logger.info(\"compile the model\")\n from keras.optimizers import SGD\n\n model=self.compile(loss=loss, optimizer=optimizer,metrics=metrics)\n # transform_event=TransformEvent()\n # Syncer.instance.add_to_buffer(event=transform_event)\n return model\n\n\ndef train_batch_fn(self,x_batch,y_batch):\n logger.info(\"batch train the dataset\")\n model=self.train_on_batch(x_batch, y_batch)\n # transform_event=TransformEvent()\n # Syncer.instance.add_to_buffer(event=transform_event)\n return model\n\ndef convert_prediction_to_event(model, predict_array, x):\n predict_df = pd.DataFrame(predict_array)\n # Assign names to the predicted columns.\n # This is to ensure there are no merge conflicts when joining.\n num_pred_cols = predict_df.shape[1]\n pred_col_names = []\n for i in range(0, num_pred_cols):\n pred_col_names.append('pred_' + str(i))\n predict_df.columns = pred_col_names\n if not isinstance(x, pd.DataFrame):\n x_to_df = pd.DataFrame(x)\n new_df = x_to_df.join(predict_df)\n else:\n new_df = x.join(predict_df)\n predict_event = TransformEvent(x, new_df, model)\n Syncer.instance.add_to_buffer(predict_event)\n return predict_array\n\n\ndef predict_fn(self,x_test,batch_size=128):\n logger.info(\"predict use the model\")\n predict_array = self.predict(x_test, batch_size=batch_size)\n return convert_prediction_to_event(self, predict_array, x_test)\n\n\ndef compute_roc_auc_sync(self,test_y,y_pred,df,prediction_col='',label_col='',**params):\n roc_auc=metrics.roc_auc_score(test_y,y_pred)\n print(\"compute is \"+ str(roc_auc))\n metrics_event=MetricEvent(df,self,label_col,prediction_col,metrics.roc_auc_score.__name__,roc_auc)\n Syncer.instance.add_to_buffer(metrics_event)\n\n return roc_auc\n\ndef compute_mean_absolute_error(self,test_y,y_pred,df,prediction_col='',label_col='',**params):\n mae=mean_absolute_error(test_y,y_pred=y_pred)\n print(\"compute is \"+ str(mae))\n metrics_event=MetricEvent(df,self,label_col,prediction_col,mean_absolute_error.__name__,mae)\n Syncer.instance.add_to_buffer(metrics_event)\n return mae\n\ndef compute_mean_squared_error(self,test_y,y_pred,df,prediction_col='',label_col='',**params):\n mse=mean_squared_error(test_y,y_pred=y_pred)\n metrics_event=MetricEvent(df,self,label_col,prediction_col,mean_squared_error.__name__,mse)\n Syncer.instance.add_to_buffer(metrics_event)\n return mse\n\n\ndef compute_accuracy_score_sync(self,test_y,y_pred,df,prediction_col='',label_col='',**params):\n y_pred_binary = (y_pred >= 0.5) * 1\n accuracy_score=metrics.accuracy_score(test_y,y_pred_binary)\n metrics_event=MetricEvent(df,self,label_col,prediction_col,metrics.accuracy_score.__name__,accuracy_score)\n Syncer.instance.add_to_buffer(metrics_event)\n return accuracy_score\n\ndef compute_recall_score_sync(self,test_y,y_pred,df,prediction_col='',lable_col='',**params):\n y_pred_binary = (y_pred >= 0.5) * 1\n recall_score=metrics.recall_score(test_y,y_pred_binary)\n metrics_event=MetricEvent(df,self,lable_col,prediction_col,metrics.recall_score.__name__,recall_score)\n Syncer.instance.add_to_buffer(metrics_event)\n return recall_score\n\ndef compute_precision_score_sync(self,test_y,y_pred,df,prediction_col='',lable_col='',**params):\n y_pred_binary = (y_pred >= 0.5) * 1\n print(\"hello precision\")\n precision_score=metrics.precision_score(test_y,y_pred_binary)\n 
metrics_event=MetricEvent(df,self,lable_col,prediction_col,metrics.precision_score.__name__,precision_score)\n    Syncer.instance.add_to_buffer(metrics_event)\n    return precision_score\n\ndef compute_f1_score_sync(self,test_y,y_pred,df,prediction_col='',lable_col='',**params):\n    y_pred_binary = (y_pred >= 0.5) * 1\n    f1_score=metrics.f1_score(test_y,y_pred_binary)\n    metrics_event=MetricEvent(df,self,lable_col,prediction_col,metrics.f1_score.__name__,f1_score)\n    Syncer.instance.add_to_buffer(metrics_event)\n    return f1_score\n\nswitch={\n    'roc_auc':compute_roc_auc_sync,\n    'f1_score':compute_f1_score_sync,\n    'precision_score':compute_precision_score_sync,\n    'recall_score':compute_recall_score_sync,\n    'accuracy_score':compute_accuracy_score_sync,\n    'mean_squared_error':compute_mean_squared_error,\n    'mean_absolute_error':compute_mean_absolute_error\n}\ndef metrics_fn(self,metric_func, test_y, y_pred, df, prediction_col='', label_col='', **params):\n    logger.info(\"metrics the model \")\n    try:\n        score= switch[metric_func](self,test_y,y_pred,df,prediction_col,label_col)\n        return score\n    except KeyError as e:\n        logger.error(str(e))\n        return None\n\n\nclass Syncer(with_metaclass(Singleton, ModelDbSyncerBase.Syncer)):\n    instance = None\n    def __init__(self, project_config, experiment_config, experiment_run_config,\n                 thrift_config=None):\n        self.enable_keras_fn()\n        self.local_id_to_path = {}\n        Syncer.instance = self\n\n        super(Syncer, self).__init__(project_config, experiment_config,experiment_run_config, thrift_config)\n\n    def __str__(self):\n        return \"keras_syncer\"\n\n    def enable_keras_fn(self):\n        from keras.models import Model,Sequential\n        for cls in [Model,Sequential]:\n            setattr(cls,\"fit_sync\",fit_fn)\n            setattr(cls,'compile_sync',compile_fn)\n            setattr(cls,'predict_sync',predict_fn)\n            setattr(cls,'metrics_sync',metrics_fn)","sub_path":"modeldb/Keras_Native/KerasModelDBSynceer.py","file_name":"KerasModelDBSynceer.py","file_ext":"py","file_size_in_byte":7902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
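enable_keras_fn() above relies on the fact that a plain function assigned onto a class becomes a bound method on attribute access; a minimal demonstration with throwaway names:

```python
class Greeter:
    pass

def hello_fn(self, name):
    return "hello, %s" % name

for cls in [Greeter]:                    # same setattr loop shape as above
    setattr(cls, "hello_sync", hello_fn)

print(Greeter().hello_sync("world"))     # hello, world
```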
{"seq_id":"278828216","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/19 11:33\n# @Author : yangjuan\n# @Email : 269573175@qq.com\n# @File : 元素操作-等待.py\n# @Software: PyCharm Community Edition\n\n# Waits -- three approaches\n# 1. Forced wait: sleep()\n#\n# 2. Implicit wait: implicitly_wait(seconds) -- a smart wait\n# Set a maximum wait time; if loading finishes within it, the next step runs\n# Set it once per driver session and it applies globally.\n# -- A single kind of wait; it cannot cover more specific scenarios:\n# (waiting for an iframe to appear, waiting for a new window before switching, waiting for an element to be enabled or visible before acting, waiting for the page URL to change) -- implicit waits cannot do these\n#\n# 3. Explicit wait: act only after the element is visible, which makes scripts more stable\n# Wait explicitly until a condition holds, then run the next step.\n# Conditions include: element visible, element enabled, iframe present\n# The program polls every xx seconds; if the condition holds it proceeds, otherwise it keeps waiting until the maximum time is exceeded, then raises TimeoutException\n# WebDriverWait class: the explicit-wait class. --- (polls every xx seconds)\n# WebDriverWait(driver, timeout, poll_frequency).until()/until_not()\n# Usage:\n# 1. Decide the element's locator expression: web_locator=\"xxx\"\n# 2. Call the WebDriverWait class with the total timeout and the poll frequency, then call until/until_not.\n# WebDriverWait(driver, timeout, poll_frequency).until(condition)\n# 3. Build the condition with the matching expected_conditions helper.\n# EC.ClassName((locate_by, locator_expression))\n#\n# When the HTML page or its elements change, wait until the element is visible before acting\n#\n# expected_conditions module: provides a set of conditions expected to occur --- (proceed once the condition holds)\n# presence_of_element_located: the element exists\n# visibility_of_element_located: the element is visible\n# element_to_be_clickable: the element is clickable\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By # By defines the 8 locator types\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver import ActionChains\nimport time\n\n# Open a browser and start a browser session\ndriver = webdriver.Chrome(service_log_path='D:\\pycharmworkspace\\\\testpython\\web_0111\\chromedriver.log')\n\n# Global: set once, applies for the whole driver session\ndriver.implicitly_wait(10)\n\ndriver.get(\"http://101.37.24.52:8008/\")\n\n# Click the login button on the page\n# When locating the element, if no explicit wait is used, the implicit wait (if set) silently waits up to 10 seconds\ndriver.find_element_by_xpath('//h2[@id=\"_top\"]/following-sibling::a').click()\n# time.sleep(1)\n\nele_loc = (By.XPATH,'//h2[@id=\"_top\"]/following-sibling::a')\n# EC.visibility_of_element_located(ele_loc) #tuple\n# wait up to 10 seconds for the element located by ele_loc to become visible\nWebDriverWait(driver,10).until(EC.visibility_of_element_located(ele_loc)) # default poll interval is 0.5 seconds\ntime.sleep(1) # extra buffer wait\n\n\n\n\n\n# Enter the username and password on the page\ndriver.find_element_by_id('username').send_keys('njscg')\ndriver.find_element_by_id('password').send_keys('666666')\n\n# Slider login -- drag the slider\nbutton = driver.find_element_by_xpath('//div[@class=\"handler handler_bg\"]') # find the slider\naction = ActionChains(driver) # create an ActionChains instance\naction.click_and_hold(button).perform() # perform() executes the actions stored in the ActionChains\naction.reset_actions()\naction.move_by_offset(400,0).perform() # move the slider\ntime.sleep(2)\n\ndriver.quit()\n","sub_path":"python_web/python_web_0111/元素操作-等待(1).py","file_name":"元素操作-等待(1).py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"455013228","text":"# Copyright 2015 NEC Corporation. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
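The explicit-wait pattern the script above demonstrates, packaged as a helper; a sketch only, since it assumes a live driver session and a real locator:

```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_and_click(driver, xpath, timeout=10, poll=0.5):
    # poll every `poll` seconds until clickable; TimeoutException after `timeout`
    element = WebDriverWait(driver, timeout, poll).until(
        EC.element_to_be_clickable((By.XPATH, xpath)))
    element.click()
```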
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport httplib2\n\nfrom oslo_serialization import jsonutils as json\nfrom oslotest import mockpatch\n\nfrom tempest.services.compute.json import certificates_client\nfrom tempest.tests import base\nfrom tempest.tests import fake_auth_provider\n\n\nclass TestCertificatesClient(base.TestCase):\n\n FAKE_CERTIFICATE = {\n \"certificate\": {\n \"data\": \"-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\\n\",\n \"private_key\": None\n }\n }\n\n def setUp(self):\n super(TestCertificatesClient, self).setUp()\n fake_auth = fake_auth_provider.FakeAuthProvider()\n self.client = certificates_client.CertificatesClient(\n fake_auth, 'compute', 'regionOne')\n\n def _test_show_certificate(self, bytes_body=False):\n serialized_body = json.dumps(self.FAKE_CERTIFICATE)\n if bytes_body:\n serialized_body = serialized_body.encode('utf-8')\n\n mocked_resp = (httplib2.Response({'status': 200}), serialized_body)\n self.useFixture(mockpatch.Patch(\n 'tempest.common.service_client.ServiceClient.get',\n return_value=mocked_resp))\n resp = self.client.show_certificate(\"fake-id\")\n self.assertEqual(self.FAKE_CERTIFICATE, resp)\n\n def test_show_certificate_with_str_body(self):\n self._test_show_certificate()\n\n def test_show_certificate_with_bytes_body(self):\n self._test_show_certificate(bytes_body=True)\n\n def _test_create_certificate(self, bytes_body=False):\n cert = copy.deepcopy(self.FAKE_CERTIFICATE)\n cert['certificate']['private_key'] = \"my_private_key\"\n serialized_body = json.dumps(cert)\n if bytes_body:\n serialized_body = serialized_body.encode('utf-8')\n\n mocked_resp = (httplib2.Response({'status': 200}), serialized_body)\n self.useFixture(mockpatch.Patch(\n 'tempest.common.service_client.ServiceClient.post',\n return_value=mocked_resp))\n resp = self.client.create_certificate()\n self.assertEqual(cert, resp)\n\n def test_create_certificate_with_str_body(self):\n self._test_create_certificate()\n\n def test_create_certificate_with_bytes_body(self):\n self._test_create_certificate(bytes_body=True)\n","sub_path":"tempest/tests/services/compute/test_certificates_client.py","file_name":"test_certificates_client.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"616091742","text":"import urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nfrom lxml import etree\nfrom time import sleep\nfrom selenium import webdriver\nfrom concurrent.futures import ProcessPoolExecutor\n\nurls = ['http://91porn.com/v.php?next=watch&page={}'.format(n) for n in range(1,10)]\n\n\ndef getInfo(url):\n browser = webdriver.Firefox()\n browser.get(url)\n response = browser.find_element_by_xpath('//html').get_attribute(\"innerHTML\")\n html = etree.HTML(response)\n for items in html.xpath('//a[@title]'):\n # print(items.attrib['title'],items.attrib['href'])\n download(items.attrib['title'],items.attrib['href'])\n\ndef download(title,sub_link):\n browser2 = webdriver.Firefox()\n browser2.get(sub_link)\n response2 = browser2.find_element_by_xpath('//html').get_attribute(\"innerHTML\")\n html = etree.HTML(response2)\n for items2 in html.xpath('//source[@type=\"video/mp4\"]'):\n print(title+\"#\"+items2.attrib['src'])\n sleep(3)\n browser2.close()\n\nif __name__ == \"__main__\":\n with ProcessPoolExecutor(max_workers=5) as pool:\n 
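The str/bytes body variants the tempest tests above exercise come from one small builder; extracted, with names of my choosing:

```python
import json

def fake_response(payload, bytes_body=False):
    body = json.dumps(payload)
    if bytes_body:
        body = body.encode("utf-8")   # the bytes-body test variant
    return ({"status": 200}, body)

print(fake_response({"ok": True}))
print(fake_response({"ok": True}, bytes_body=True))
```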
pool.map(getInfo,urls)","sub_path":"Python_Practise/pa_91_selenium.py","file_name":"pa_91_selenium.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"564401440","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 11 18:05:58 2020\n\n@author: dawid\n\"\"\"\n\nimport requests\n\ninput_text='21 Jan: phoned John S about signing them up for phrase 2 of Project Alpha.'\nresults=requests.get('http://127.0.0.1:5000/api/v1/text?text='+input_text).json()\nresults\n\ninput_text='21 Mar: emailed Lucy about project Beta.'\nresults=requests.get('http://127.0.0.1:5000/api/v1/text?text='+input_text).json()\nresults\n\n","sub_path":"projects/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"325287964","text":"from __init__ import *\nfrom data_analysis import Analyze\nfrom mc2pdf import MCprocessing\nfrom datamanage import DataIO\nfrom montecarlo import MonteCarlo\nfrom analytical_solutions import AnalyticalSolution, gaussian\nfrom mc2pdf import MCprocessing\nfrom pdfsolver import PdfGrid\nfrom visualization import Visualize\nfrom Learning import PDElearn\nimport numpy as np\nimport pdb\nimport time\n\n\nsave = True\ncheckExistence = True\n# plotpdf = True\nprintlearning = True\nsavenameMC = 'advection_reaction_randadv_analytical_712'+'.npy'\ncase = 'advection_reaction_randadv_analytical'\n\n# Read MC simulations\nD = DataIO(case=case, directory=MCDIR)\nfu, gridvars, ICparams = D.loadSolution(savenameMC)\nnum_realizations = ICparams['num_realizations']\n\n####################### KDE\n\nnu = 200\nu_margin = -1e-10\nbandwidth = 'scott'\ndistribution = 'PDF'\n\n####################### Learning\n\n# Adjust Size\npt = 1\npx = 1\npu = 1\nmu = [0.2, 1]\nmx = [0, 1]\nmt = [0, 1]\ncomments \t\t\t= ''\nfeature_opt = '1storder_close'\ntrainratio\t\t\t= 0.9\nnzthresh = 1e-10\ncoeforder = 2\nvariableCoef \t\t= True\nvariableCoefBasis \t= 'simple_polynomial'\nprint_rfeiter\t\t= True\nshuffle\t\t\t\t= False\nnormalize\t\t\t= True\nmaxiter\t\t\t\t= 10000\n\nuse_rfe\t\t\t\t= True\nrfe_alpha \t= 0.001\nRegCoef\t\t\t\t= 0.000005\nLassoType\t\t\t= 'LassoCV'\ncv\t\t\t\t\t= 5\ncriterion\t\t\t= 'aic'\n\n###############################\n\ncoeforder_vec = [2, 1, 0, 3]\nfeature_opt_vec = ['1storder_close', '1storder']\nLassoType_vec \t= ['LassoCV', 'LarsCV', 'LassoLarsCV', 'LassoLarsIC']\nrfe_alpha_vec\t= [0.000001, 0.000005, 0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005, 0.01]#, 0.05, 0.08]\n\n###############################\n\nfor coeforder in coeforder_vec:\n\tfor LassoType in LassoType_vec:\n\t\tfor feature_opt in feature_opt_vec:\n\n\t\t\toutput_vec = []\n\t\t\tmetadata_vec = []\n\t\t\tfilename_vec = []\n\n\t\t\ttry:\n\t\t\t\tfor rfe_alpha in rfe_alpha_vec:\n\t\t\t\t\tprint('---------------------')\n\t\t\t\t\tprint('\\trfe_alpha = ', rfe_alpha)\n\t\t\t\t\tprint('---------------------')\n\n\t\t\t\t\t# BUILD PDF\n\t\t\t\t\tMCprocess = MCprocessing(savenameMC, case=case)\n\t\t\t\t\tsavenamepdf = MCprocess.buildKDE(nu, distribution=distribution, MCcount=num_realizations, save=save, u_margin=u_margin, bandwidth=bandwidth)\n\n\t\t\t\t\t# LEARN\n\t\t\t\t\tdataman = DataIO(case, directory=PDFDIR) \n\t\t\t\t\tfu, gridvars, ICparams = dataman.loadSolution(savenamepdf, array_opt='marginal')\n\n\t\t\t\t\tadjustgrid = {'mu':mu, 'mx':mx, 'mt':mt, 'pu':pu, 'px':px, 'pt':pt}\n\t\t\t\t\tgrid = 
PdfGrid(gridvars)\n\t\t\t\t\tfu = grid.adjust(fu, adjustgrid)\n\n\n\t\t\t\t\tdifflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=trainratio, verbose=True)\n\t\t\t\t\tfilename = difflearn.fit_sparse(feature_opt=feature_opt, variableCoef=variableCoef, variableCoefBasis=variableCoefBasis, \\\n\t\t\t\t\t variableCoefOrder=coeforder, use_rfe=use_rfe, rfe_alpha=rfe_alpha, nzthresh=nzthresh, maxiter=maxiter, \\\n\t\t\t\t\t LassoType=LassoType, RegCoef=RegCoef, cv=cv, criterion=criterion, print_rfeiter=print_rfeiter, shuffle=shuffle, \\\n\t\t\t\t\t basefile=savenamepdf, adjustgrid=adjustgrid, save=save, normalize=normalize, comments=comments)\n\n\t\t\t\t\t# READ Learning\n\t\t\t\t\tD = DataIO(case, directory=LEARNDIR)\n\t\t\t\t\toutput, metadata = D.readLearningResults(filename)\n\n\t\t\t\t\toutput_vec.append(output)\t\n\t\t\t\t\tmetadata_vec.append(metadata)\n\t\t\t\t\tfilename_vec.append(filename)\n\n\n\n\t\t\t\tprint('files = [')\n\t\t\t\tfor f in filename_vec:\n\t\t\t\t\tprint(\"\\'\"+f+\"\\',\")\n\t\t\t\tprint(']')\n\n\t\t\t\t## PLOT\n\t\t\t\tA = Analyze()\n\t\t\t\tsavename = 'advectreact_rfe' + \"_\" + feature_opt + \"_\" + LassoType + \"_\" + str(coeforder)\n\t\t\t\tA.plotRMSEandCoefs(output_vec, rfe_alpha_vec, 'RFE Threshold', threshold=0.001, use_logx=True, set_grid=True, invert_sign=True, savename=savename, show=True)\n\n\t\t\texcept Exception:\n\t\t\t\tprint(\"\\n\\n\\n************************\\n\\n\\n\")\n\t\t\t\tprint(\"Exception Happened for \", feature_opt, \" \", LassoType, \" \", rfe_alpha)\n\t\t\t\tprint(\"\\n\\n\\n************************\\n\\n\\n\")\n\n","sub_path":"code/testcases/advectreact_randadv_rfe.py","file_name":"advectreact_randadv_rfe.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640882884","text":"#!/usr/bin/env python2\n# Import ROS modules\nfrom trabajo.srv import multiplicador, multiplicadorResponse\nimport rospy\n\n# Function that answers client requests\ndef handle_serviceAddEge(req):\n rospy.loginfo(\"Se realizo una consulta con exito!\")\n return multiplicadorResponse(req.entrada * 2)\n \n# Start the service node\nrospy.init_node('matematico_nodo')\ns = rospy.Service('servicio_multiplicador', multiplicador, handle_serviceAddEge)\nrospy.loginfo(\"Servicio multiplicador listo!\")\n# Wait for incoming requests so the server does not shut down\nrospy.spin()\n","sub_path":"build/trabajo/catkin_generated/installspace/matematico_nodo.py","file_name":"matematico_nodo.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"136032577","text":"import os\nimport shutil\nimport tempfile\nfrom contextlib import contextmanager\n\nimport pytest\nfrom google.cloud import storage\n\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\n\n\n@pytest.fixture(scope=\"session\")\ndef chdir():\n @contextmanager\n def _chdir(path):\n orig = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(orig)\n\n return _chdir\n\n\n@pytest.fixture(scope=\"session\")\ndef project_dir(chdir):\n projects_dir = os.path.join(os.path.dirname(__file__), \"..\", \"data\", \"projects\")\n\n @contextmanager\n def _project_dir(name):\n tmp_dir = tempfile.mkdtemp()\n try:\n src_dir = os.path.join(projects_dir, name)\n dst_dir = os.path.join(tmp_dir, name)\n shutil.copytree(src_dir, dst_dir)\n with chdir(dst_dir):\n yield 
dst_dir\n finally:\n shutil.rmtree(tmp_dir)\n\n return _project_dir\n\n\n@pytest.fixture\ndef gcs_credentials():\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"test.json\"\n\n\n@pytest.fixture\ndef gcs_bucket(gcs_credentials):\n storage_client = mock.create_autospec(storage.Client)\n mock_bucket = mock.create_autospec(\n spec=storage.Bucket(storage_client, \"gcs_pypi-test\"), spec_set=True,\n )\n mock_blob = mock.create_autospec(storage.Blob)\n\n storage_client.get_bucket.return_value = mock_bucket\n\n # def handle_get_data(path):\n # print(path)\n\n mock_bucket.get_blob.side_effect = mock_blob\n\n yield mock_bucket\n","sub_path":"tests/integration/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101479493","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n\"\"\"Transformation routines\n\nAuthor(s): Sean Henely\nLanguage: Python 2.x\nModified: 28 July 2013\n\nProvides routines for state transformation.\n\nClasses:\nIdentityTransform -- Identity transform\nInertialToKeplerianTransform -- Inertial to Keplerian\nKeplerianToInertialTransform -- Keplerian to inertial\nInertialToGeographicTransform -- Inertial to geographic\nGeographicToHorizontalTransform -- Geographic to horizontal\n\n\"\"\"\n\n\"\"\"Change log:\n \nDate Author Version Description\n---------- ------------ -------- -----------------------------\n2013-07-08 shenely 1.0 Initial revision\n2013-07-28 shenely 1.1 Added time to geographic\n\n\"\"\"\n\n\n##################\n# Import section #\n#\n#Built-in libraries\nfrom math import *\nfrom datetime import datetime\nimport functools\nimport logging\nimport types\n\n#External libraries\nfrom numpy import matrix,dot,cross,roots\nfrom scipy.linalg import norm\nfrom bson.tz_util import utc\n\n#Internal libraries\nfrom core.routine import ActionRoutine\nfrom epoch import EpochState\nfrom .. 
import *\n#\n##################\n\n\n##################\n# Export section #\n#\n__all__ = [\"IdentityTransform\",\n \"InertialToKeplerianTransform\",\n \"KeplerianToInertialTransform\",\n \"InertialToGeographicTransform\",\n \"GeographicToHorizontalTransform\"]\n#\n##################\n\n\n####################\n# Constant section #\n#\n__version__ = \"1.1\"#current version [major.minor]\n\nDEG_TO_RAD = pi / 180#Degrees to radians\nRAD_TO_DEG = 180 / pi#Radians to degrees\n\n#Earth parameters\nEARTH_RADIUS = 6378.1\nEARTH_GRAVITATION = 398600.4\n\nJULIAN_DAY = 86400#Length of Julian day (in seconds)\n\nJ2000 = datetime(2000,1,1,12,tzinfo=utc)#Julian epoch (2000-01-01T12:00:00Z)\n\n#Unit vectors\nUNIT_VECTOR_X = matrix([1,0,0]).T\nUNIT_VECTOR_Y = matrix([0,1,0]).T\nUNIT_VECTOR_Z = matrix([0,0,1]).T\n\n#Rotation matrices\nROTATION_X_AXIS = lambda theta:matrix([[1,0,0],\n [0,cos(theta),-sin(theta)],\n [0,sin(theta),cos(theta)]])\nROTATION_Y_AXIS = lambda theta:matrix([[cos(theta),0,sin(theta)],\n [0,1,0],\n [-sin(theta),0,cos(theta)]])\nROTATION_Z_AXIS = lambda theta:matrix([[cos(theta),-sin(theta),0],\n [sin(theta),cos(theta),0],\n [0,0,1]])\n\n#Wrapper functions for angles in degrees\n@functools.wraps(cos)\ndef cosd(x):return cos(DEG_TO_RAD * x)\n\n@functools.wraps(sin)\ndef sind(x):return sin(DEG_TO_RAD * x)\n\n@functools.wraps(tan)\ndef tand(x):return tan(DEG_TO_RAD * x)\n\n@functools.wraps(acos)\ndef acosd(x):return RAD_TO_DEG * acos(x)\n\n@functools.wraps(asin)\ndef asind(x):return RAD_TO_DEG * asin(x)\n\n@functools.wraps(atan2)\ndef atand2(y,x):return RAD_TO_DEG * atan2(y,x)\n#\n####################\n\n\nclass TransformAction(ActionRoutine):pass\n\nclass IdentityTransform(TransformAction):\n \"\"\"Story: Identity transform\n \n IN ORDER TO have the input equal output\n AS A generic segment\n I WANT TO perform an identity transformation\n \n \"\"\"\n \n \"\"\"Specification: Identity transform\n \n GIVEN a downstream pipeline (default null)\n \n Scenario 1: Upstream state received\n WHEN a state is received from upstream\n THEN the state SHALL be sent downstream\n \n \"\"\"\n \n name = \"Transform.Identity\"\n \n def _execute(self,message):\n logging.info(\"{0}: Transforming from self\".\\\n format(self.name))\n \n #\n \n logging.info(\"{0}: Transformed to self\".\\\n format(self.name))\n \n return message\n\nclass InertialToKeplerianTransform(TransformAction):\n \"\"\"Story: Inertial to Keplerian\n \n IN ORDER TO determine the orbital elements of a spacecraft\n AS A generic segment\n I WANT TO convert inertial coordinates into Keplerian elements\n \n \"\"\"\n \n u\"\"\"Specification: Inertial to Keplerian\n \n GIVEN a downstream pipeline (default null)\n \n Scenario 1: Upstream state received\n WHEN an inertial state is received from upstream\n THEN the state SHALL be converted to orbital elements:\n a=-μ/ε/2\n e=|e|\n cos(i)=h[z]/|h|\n cos(θ)=(e/|e|)∙(r/|r|)\n cos(ω)=(e/|e|)∙N\n cos(Ω)=N[z]\n AND the state SHALL be sent downstream\n \n \"\"\"\n \n name = \"Transform.InertialToKeplerian\"\n \n def _execute(self,message):\n assert isinstance(message,InertialState)\n\n logging.info(\"{0}: Transforming from inertial\".\\\n format(self.name))\n \n t = message.epoch\n\n #orbital elements\n epsilon = message.epsilon\n a = - EARTH_GRAVITATION / epsilon / 2\n \n _h_ = message.h\n h = norm(_h_)\n \n _e_ = message.e\n e = norm(_e_)\n\n _N_ = cross(UNIT_VECTOR_Z,_h_)\n N = norm(_N_)\n\n i = acos(_h_[:,2] / h)\n theta = acos(dot(_e_,message.position) / e / message.R)\n omega = acos(dot(_N_,_e_) / N / 
e)\n OMEGA = acos(_N_[:,0] / N)\n\n #quadrant correction\n if dot(message.position,message.velocity) < 0: theta = 2 * pi - theta\n if _e_[:,2] < 0:omega = 2 * pi - omega\n if _N_[:,1] < 0:OMEGA = 2 * pi - OMEGA\n\n message = KeplerianState(t,a,theta,e,omega,i,OMEGA)\n \n logging.info(\"{0}: Transformed to Keplerian\".\\\n format(self.name))\n \n return message\n\nclass KeplerianToInertialTransform(TransformAction):\n \"\"\"Story: Keplerian to Inertial\n \n IN ORDER TO determine the orbital elements of a spacecraft\n AS A generic segment\n I WANT TO convert inertial coordinates into Keplerian elements\n \n \"\"\"\n \n u\"\"\"Specification: Keplerian to Inertial\n \n GIVEN a downstream pipeline (default null)\n \n Scenario 1: Upstream state received\n WHEN a Keplerian state is received from upstream\n THEN the state SHALL be converted to inertial coordinates:\n [cos(θ)]\n r=r*[sin(θ)]\n [0 ]\n μ [-sin(θ) ]\n v=---*[e+cos(θ)]\n |h| [0 ]\n r=R[z](Ω)*R[x](i)*R[z](ω)*r\n v=R[z](Ω)*R[x](i)*R[z](ω)*v\n AND the state SHALL be sent downstream\n \n \"\"\"\n \n name = \"Transform.KeplerianToInertial\"\n \n def _execute(self,message):\n assert isinstance(message,KeplerianState)\n \n logging.info(\"{0}: Transforming from Keplerian\".\\\n format(self.name))\n\n #rotation matrices\n R_OMEGA = ROTATION_Z_AXIS(message.OMEGA)\n R_i = ROTATION_X_AXIS(message.i)\n R_omega = ROTATION_Z_AXIS(message.omega)\n\n Q = R_OMEGA * R_i * R_omega\n\n #state vectors\n r = message.r * matrix([cos(message.theta),\n sin(message.theta),0]).T\n v = EARTH_GRAVITATION / message.h *\\\n matrix([- sin(message.theta),\n message.e + cos(message.theta),0]).T\n\n #apply rotations\n t = message.epoch\n r = Q * r\n v = Q * v\n\n message = InertialState(t,r,v)\n \n logging.info(\"{0}: Transformed to inertial\".\\\n format(self.name))\n \n return message\n\nclass InertialToGeographicTransform(TransformAction):\n \"\"\"Story: Inertial to geographic\n \n IN ORDER TO determine the latitude and longitude of a spacecraft\n AS A generic segment\n I WANT TO convert inertial coordinates into geographic coordinates\n \n \"\"\"\n \n u\"\"\"Specification: Inertial to geographic\n \n GIVEN a downstream pipeline (default null)\n \n Scenario 1: Upstream state received\n WHEN a inertial state is received from upstream\n THEN the state SHALL be converted to geographic coordinates:\n cos(σ)=R[e]/R\n λ=α+(t-J2000.0)/86400\n φ=δ\n AND all coordinates SHALL be converted to degrees\n AND the state SHALL be sent downstream\n \n \"\"\"\n \n name = \"Transform.InertialToGeographic\"\n \n def _execute(self,message):\n assert isinstance(message,InertialState)\n\n logging.info(\"{0}: Transforming from inertial\".\\\n format(self.name))\n \n t = message.epoch\n arc = acosd(EARTH_RADIUS / message.R)\n long = (RAD_TO_DEG * message.alpha % 360 +\\\n 360 * (t - J2000).total_seconds() / JULIAN_DAY) % 360\n lat = RAD_TO_DEG * message.delta\n\n message = GeographicState(t,arc,long,lat)\n \n logging.info(\"{0}: Transformed to geographic\".\\\n format(self.name))\n \n return message\n\nclass GeographicToHorizontalTransform(TransformAction):\n \"\"\"Story: Geographic to horizontal\n \n IN ORDER TO determine the azimuth and elevation of a spacecraft\n AS A generic segment\n I WANT TO convert geographic coordinates into horizontal coordinates\n \n \"\"\"\n \n u\"\"\"Specification: Geographic to horizontal\n \n GIVEN a downstream pipeline (default null)\n \n Scenario 1: Upstream state received\n WHEN a inertial state is received from upstream\n THEN all coordinates SHALL be 
converted to radians\n AND the state SHALL be converted to geographic coordinates:\n tan(A)=sin(λ1-λ0)/(cos(φ0)tan(φ1)-sin(φ0)cos(λ1-λ0))\n sin(a)=cos(φ0)cos(φ1)cos(λ1-λ0)-sin(φ0)sin(φ1)\n r²+2*sin(a)*R[e]*r+R[e]²*(1/cos²(σ0)-1/cos²(σ1))=0\n AND all coordinates SHALL be converted to degrees\n AND the state SHALL be sent downstream\n \n \"\"\"\n \n name = \"Transform.GeographicToHorizontal\"\n \n def __init__(self,state):\n assert isinstance(state,GeographicState)\n \n TransformAction.__init__(self)\n \n self.state = state\n\n def _execute(self,message):\n assert isinstance(message,GeographicState)\n\n logging.info(\"{0}: Transforming from geographic\".\\\n format(self.name))\n \n t = message.epoch\n az = atan2(sind(message.long - self.state.long),\n cosd(self.state.lat) * tand(message.lat) -\\\n sind(self.state.lat) * cosd(message.long -\\\n self.state.long))\n el = asin(cosd(self.state.lat) * cosd(message.lat) *\\\n cosd(message.long - self.state.long) -\\\n sind(self.state.lat) * sind(message.lat))\n r = max(roots([1 / EARTH_RADIUS,\n 2 * sin(el),\n EARTH_RADIUS * (1 / cos(self.state.arc) ** 2 -\\\n 1 / cos(message.arc) ** 2)]))\n\n message = HorizontalState(t,r,az,el)\n \n logging.info(\"{0}: Transformed to horizontal\".\\\n format(self.name))\n\n return message","sub_path":"src/state/routine/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":11232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"430986791","text":"from data import paginatedLeaderboard\nimport datetime\nfrom decorators import help\nimport discord\nfrom discord.ext import commands, menus\nfrom enums.help_categories import Category\nfrom functions import checks, timeFormatters\nimport requests\n\n\nclass Train(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n # Don't allow any commands to work when locked\n def cog_check(self, ctx):\n return not self.client.locked\n\n @commands.command(name=\"Train\", aliases=[\"Trein\"], usage=\"[Vertrek]* [Bestemming]\")\n @help.Category(category=Category.School)\n async def train(self, ctx, *args):\n if not args or len(args) > 2:\n await ctx.send(\"Controleer je argumenten.\")\n return\n destination = args[-1]\n departure = args[0] if len(args) > 1 else \"Gent Sint-Pieters\"\n\n req = requests.get(\n \"http://api.irail.be/connections/?from={}&to={}&alerts=true&lang=nl&format=json\".format(departure,\n destination)).json()\n if \"error\" in req:\n embed = discord.Embed(colour=discord.Colour.red())\n embed.set_author(name=\"Treinen van {} naar {}\".format(\n self.formatCity(departure), self.formatCity(destination)))\n embed.add_field(name=\"Error\", value=\"Er ging iets fout, probeer het later opnieuw.\", inline=False)\n await self.sendEmbed(ctx, embed)\n return\n\n pages = paginatedLeaderboard.Pages(source=TrainPagination(self.formatConnections(req[\"connection\"]),\n self.formatCity(departure),\n self.formatCity(destination)),\n clear_reactions_after=True)\n await pages.start(ctx)\n\n def formatConnections(self, connections):\n response = []\n for connection in sorted(connections, key=lambda con: con[\"departure\"][\"time\"]):\n conn = {}\n if connection[\"departure\"][\"canceled\"] != \"0\" or connection[\"arrival\"][\"canceled\"] != \"0\":\n conn = {\"Canceled\": \"Afgeschaft\"}\n dep = connection[\"departure\"]\n arr = connection[\"arrival\"]\n conn[\"depStation\"] = self.formatCity(dep[\"station\"])\n conn[\"depTime\"] = self.formatTime(dep[\"time\"])\n conn[\"delay\"] = 
self.formatDelay(dep[\"delay\"])\n conn[\"track\"] = dep[\"platform\"]\n conn[\"arrStation\"] = self.formatCity(arr[\"station\"])\n conn[\"direction\"] = self.formatCity(dep[\"direction\"][\"name\"])\n conn[\"arrTime\"] = self.formatTime(arr[\"time\"])\n conn[\"duration\"] = self.formatTime(connection[\"duration\"])\n response.append(conn)\n return response\n\n def formatTime(self, timestamp):\n if int(timestamp) <= 86400:\n minutes = int(timestamp) // 60\n if minutes < 60:\n return str(minutes) + \"m\"\n return \"{}h{:02}m\".format(minutes // 60, minutes % 60)\n else:\n return timeFormatters.epochToDate(int(timestamp), \"%H:%M\")[\"date\"]\n\n def formatDelay(self, seconds):\n seconds = int(seconds)\n return self.sign(seconds) + self.formatTime(abs(seconds)) if seconds != 0 else \"\"\n\n def sign(self, number):\n return \"-\" if int(number) < 0 else \"+\"\n\n def formatCity(self, city):\n city = city[0].upper() + city[1:]\n arr = []\n for i, letter in enumerate(city):\n if (i > 0 and (city[i - 1] == \" \" or city[i - 1] == \"-\")) or i == 0:\n arr.append(letter.upper())\n else:\n arr.append(letter.lower())\n return \"\".join(arr)\n\n async def sendEmbed(self, ctx, embed):\n if checks.allowedChannels(ctx):\n await ctx.send(embed=embed)\n else:\n await ctx.author.send(embed=embed)\n\n\nclass TrainPagination(menus.ListPageSource):\n def __init__(self, data, departure, destination):\n super().__init__(data, per_page=3)\n self.departure = departure\n self.destination = destination\n\n async def format_page(self, menu: menus.MenuPages, entries):\n offset = menu.current_page * self.per_page\n embed = discord.Embed(colour=discord.Colour.blue())\n embed.set_author(name=\"Treinen van {} naar {}\".format(self.departure, self.destination))\n embed.set_footer(text=\"{}/{}\".format(menu.current_page + 1, self.get_max_pages()))\n\n for i, connection in enumerate(entries, start=offset):\n afgeschaft = \"Canceled\" in connection\n embed.add_field(name=\"Van\", value=str(connection[\"depStation\"]), inline=True)\n embed.add_field(name=\"Om\", value=str(connection[\"depTime\"]), inline=True)\n embed.add_field(name=\"Spoor\", value=str(connection[\"track\"]), inline=True)\n embed.add_field(name=\"Richting\", value=str(connection[\"direction\"]), inline=True)\n embed.add_field(name=\"Aankomst\", value=(str(connection[\"arrTime\"])\n if not afgeschaft else \"**AFGESCHAFT**\"), inline=True)\n embed.add_field(name=\"Vertraging\", value=str(connection[\"delay\"]) if connection[\"delay\"] != \"\" else \"0\",\n inline=True)\n\n # White space\n if i - offset < 2:\n embed.add_field(name=\"\\u200b\", value=\"\\u200b\", inline=False)\n return embed\n\n\ndef setup(client):\n client.add_cog(Train(client))\n","sub_path":"cogs/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435535684","text":"\n\nfrom ..utils import Object\n\n\nclass SearchSecretMessages(Object):\n \"\"\"\n Searches for messages in secret chats. Returns the results in reverse chronological order. 
For optimal performance the number of returned messages is chosen by the library\n\n Attributes:\n ID (:obj:`str`): ``SearchSecretMessages``\n\n Args:\n chat_id (:obj:`int`):\n Identifier of the chat in which to search. Specify 0 to search in all secret chats \n query (:obj:`str`):\n Query to search for. If empty, searchChatMessages should be used instead\n from_search_id (:obj:`int`):\n The identifier from the result of a previous request, use 0 to get results from the last message\n limit (:obj:`int`):\n The maximum number of messages to be returned; up to 100. Fewer messages may be returned than specified by the limit, even if the end of the message history has not been reached\n filter (:class:`telegram.api.types.SearchMessagesFilter`):\n A filter for the content of messages in the search results\n\n Returns:\n FoundMessages\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = \"searchSecretMessages\"\n\n def __init__(self, chat_id, query, from_search_id, limit, filter, extra=None, **kwargs):\n self.extra = extra\n self.chat_id = chat_id # int\n self.query = query # str\n self.from_search_id = from_search_id # int\n self.limit = limit # int\n self.filter = filter # SearchMessagesFilter\n\n @staticmethod\n def read(q: dict, *args) -> \"SearchSecretMessages\":\n chat_id = q.get('chat_id')\n query = q.get('query')\n from_search_id = q.get('from_search_id')\n limit = q.get('limit')\n filter = Object.read(q.get('filter'))\n return SearchSecretMessages(chat_id, query, from_search_id, limit, filter)\n","sub_path":"pytglib/api/functions/search_secret_messages.py","file_name":"search_secret_messages.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"261164961","text":"from __future__ import absolute_import\n\nfrom cStringIO import StringIO\n\nfrom twisted.spread import banana\n\nfrom feat.common.serialization import sexp\nfrom feat.interface.serialization import *\n\n\nclass BananaCodec(object):\n\n def __init__(self):\n self._banana = banana.Banana()\n self._banana.connectionMade()\n self._banana._selectDialect(\"pb\") # More compact\n\n def encode(self, lst):\n io = StringIO()\n self._banana.transport = io\n self._banana.sendEncoded(lst)\n return io.getvalue()\n\n def decode(self, data):\n heap = []\n self._banana.expressionReceived = heap.append\n try:\n self._banana.dataReceived(data)\n finally:\n self._banana.buffer = ''\n del self._banana.expressionReceived\n return heap[0]\n\n\nclass Serializer(sexp.Serializer, BananaCodec):\n\n def __init__(self, externalizer=None, source_ver=None, target_ver=None):\n sexp.Serializer.__init__(self, externalizer=externalizer,\n source_ver=source_ver, target_ver=target_ver)\n BananaCodec.__init__(self)\n\n ### Overridden Methods ###\n\n def post_convertion(self, data):\n return self.encode(data)\n\n\nclass Unserializer(sexp.Unserializer, BananaCodec):\n\n def __init__(self, registry=None, externalizer=None,\n source_ver=None, target_ver=None):\n sexp.Unserializer.__init__(self, registry=registry,\n externalizer=externalizer,\n source_ver=source_ver,\n target_ver=target_ver)\n BananaCodec.__init__(self)\n\n ### Overridden Methods ###\n\n def pre_convertion(self, data):\n return self.decode(data)\n\n\ndef serialize(value):\n global _serializer\n return _serializer.convert(value)\n\n\ndef freeze(value):\n global _serializer\n return _serializer.freeze(value)\n\n\ndef unserialize(data):\n global _unserializer\n return _unserializer.convert(data)\n\n\n### Private 
Stuff ###\n\n_serializer = Serializer()\n_unserializer = Unserializer()\n","sub_path":"src/feat/common/serialization/banana.py","file_name":"banana.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"111008486","text":"import pandas as pd\n\nfrom src import constants as C\n\n\n# upct: unit percentage\n# rbin: risk bin\n\ndef spu2spatial_unit_attr(y_true, y_pred, spu):\n spatial_unit_attr = spu.copy()\n spatial_unit_attr[C.COL.num_events] = y_true\n spatial_unit_attr[C.COL.risk] = y_pred\n return spatial_unit_attr\n\n\ndef hit_rate_upct_wrap(y_true, y_pred, spu):\n spatial_unit_attr = spu2spatial_unit_attr(y_true, y_pred, spu)\n return hit_rate_upct(spatial_unit_attr)\n\n\ndef search_efficient_rate_upct_wrap(y_true, y_pred, spu):\n spatial_unit_attr = spu2spatial_unit_attr(y_true, y_pred, spu)\n return search_efficient_rate_upct(spatial_unit_attr)\n\n\ndef prediction_accuracy_index_upct_wrap(y_true, y_pred, spu):\n spatial_unit_attr = spu2spatial_unit_attr(y_true, y_pred, spu)\n return prediction_accuracy_index_upct(spatial_unit_attr)\n\n\ndef area_to_perimeter_ratio_upct_wrap(y_true, y_pred, spu):\n spatial_unit_attr = spu2spatial_unit_attr(y_true, y_pred, spu)\n return area_to_perimeter_ratio_upct(spatial_unit_attr)\n\n\n# TODO: metrics that compute on \"hotspots\" instead of top N% most risky grids\n\n\ndef upct_idx(len_obj, num_obs):\n \"\"\"\n helper for creating upct-like index\n :param len_obj: number of objects\n :param num_obs: number of observation points for plotting\n :return:\n \"\"\"\n # print(len_obj, num_obs)\n idx_for_upct = [int(len_obj * (i + 1) / num_obs) for i in range(num_obs - 1)] + [len_obj - 1]\n readable_index = ['%.0f%%' % ((i + 1) / num_obs * 100) for i in range(num_obs)]\n return idx_for_upct, readable_index\n\n\ndef rbin_idx(risk, num):\n mi, ma = risk.min(), risk.max()\n iloc_idx, readable_idx = [], []\n for i in range(num):\n thres = mi + (ma - mi) * (1 - (i + 1) / num)\n iloc_idx.append((risk >= thres).sum() - 1)\n readable_idx.append('rbin>=%d' % (i + 1))\n # print(iloc_idx, readable_idx)\n return iloc_idx, readable_idx\n\n\ndef get_idx(spatial_unit_attr, num, upct_or_rbin='upct'):\n if upct_or_rbin == 'upct':\n len_obj = len(spatial_unit_attr)\n return upct_idx(len_obj, num)\n elif upct_or_rbin == 'rbin':\n risk = spatial_unit_attr[C.COL.risk]\n return rbin_idx(risk, num)\n else:\n raise ValueError('upct_or_rbin should be one of: upct, rbin')\n\n\ndef hit_to_pai(spatial_unit_attr, event_normalized=False,\n area_normalized=False, event_by_area=False,\n upct_or_rbin='upct'):\n \"\"\"Aggregate hit rate, search efficient rate and PAI\n into a parameter-controlled process\n\n :param spatial_unit_attr: pd.DataFrame\n - index=unit_index\n - columns: at least risk, num_events and area\n\n :param event_normalized: if yes: hit rate; no, number of events\n :param area_normalized: if no: km^2 (search rate); if yes, area percentage(PAI)\n :param event_by_area: if no: hit rate; yes, search rate or PAI\n \"\"\"\n\n # upct index\n # num_grids = len(spatial_unit_attr)\n # idx_for_upct = [int(num_grids * (i + 1) / 10) for i in range(9)] + [num_grids - 1]\n # iloc_idx, readable_idx = upct_idx(num_grids, 10)\n num = 5 if upct_or_rbin == 'rbin' else 10\n iloc_idx, readable_idx = get_idx(spatial_unit_attr, num, upct_or_rbin)\n\n tmp = spatial_unit_attr.sort_values(C.COL.risk, ascending=False)\n tmp[C.COL.area] /= 1e6\n event_factor = tmp[C.COL.num_events].sum() if event_normalized 
else 1\n tmp['hit'] = tmp[C.COL.num_events].cumsum() / event_factor\n if not event_by_area:\n res = tmp['hit']\n else:\n area_factor = tmp[C.COL.area].sum() if area_normalized else 1\n tmp['cum_area'] = tmp[C.COL.area].cumsum() / area_factor\n res = tmp['hit'] / tmp['cum_area']\n\n res = res.iloc[iloc_idx]\n res.index = readable_idx\n return res\n\n\ndef hit_rate_upct(spatial_unit_attr):\n return hit_to_pai(spatial_unit_attr, event_normalized=True, event_by_area=False, upct_or_rbin='upct')\n\n\ndef search_efficient_rate_upct(spatial_unit_attr):\n \"\"\"Proposed by Bower et al 2004 (Bowers2004-gn):\n the number of crimes successfully predicted per kilometre-squared.\n Using a standardized index allows different procedures\n and different hot spots to be meaningfully compared.\n\n Critics by Chainey2008-ys:\n does not easily allow for comparisons between study areas of different sizes\n\n :param spatial_unit_attr: pd.DataFrame,\n index=unit_index,\n columns: at least risk, num_events, area\n :return:\n \"\"\"\n return hit_to_pai(spatial_unit_attr, event_normalized=False, area_normalized=False, event_by_area=True,\n upct_or_rbin='upct')\n\n\ndef prediction_accuracy_index_upct(spatial_unit_attr):\n \"\"\"PAI, Proposed by \\cite{Chainey2008-ys}.\n\n the greater the number of future crime events in\n a hotspot area that is smaller in areal size to the whole study area,\n the higher the PAI value.\n\n We also believe it is a measure that is applicable to\n any study area, any crime point data,\n and to any analysis technique that aims to predict spatial patterns of crime\n \"\"\"\n return hit_to_pai(spatial_unit_attr, event_normalized=True, area_normalized=True, event_by_area=True,\n upct_or_rbin='upct')\n\n\ndef area_to_perimeter_ratio_upct(spatial_unit_attr, upct_or_rbin='upct'):\n \"\"\"Proposed by bower-2004\"\"\"\n from shapely.ops import cascaded_union\n # upct index\n # num_grids = len(spatial_unit_attr)\n # idx_for_upct = [int(num_grids * (i + 1) / 10) for i in range(9)] + [num_grids - 1]\n # idx_for_upct, readable_idx = upct_idx(num_grids, 10)\n num = 5 if upct_or_rbin == 'rbin' else 10\n iloc_idx, readable_idx = get_idx(spatial_unit_attr, num, upct_or_rbin)\n\n tmp = spatial_unit_attr.sort_values(C.COL.risk, ascending=False)\n tmp['cum_area'] = tmp[C.COL.area].cumsum()\n\n res = []\n for idx, name in zip(iloc_idx, readable_idx):\n sub_units = tmp.iloc[:idx + 1]['geometry']\n perimeter = cascaded_union(sub_units).length\n cum_area = tmp.iloc[idx]['cum_area']\n res.append(cum_area / perimeter)\n # print(idx, cum_area, perimeter)\n # print(cascaded_union(sub_units).wkt)\n\n return pd.Series(res, index=readable_idx)\n\n\ndef hit_rate_rbin(spatial_unit_attr):\n return hit_to_pai(spatial_unit_attr, event_normalized=True, event_by_area=False, upct_or_rbin='rbin')\n\n\ndef search_efficient_rate_rbin(spatial_unit_attr):\n return hit_to_pai(spatial_unit_attr, event_normalized=False, area_normalized=False, event_by_area=True,\n upct_or_rbin='rbin')\n\n\ndef prediction_accuracy_index_rbin(spatial_unit_attr):\n return hit_to_pai(spatial_unit_attr, event_normalized=True, area_normalized=True, event_by_area=True,\n upct_or_rbin='rbin')\n\n\ndef area_to_perimeter_ratio_rbin(spatial_unit_attr):\n return area_to_perimeter_ratio_upct(spatial_unit_attr, 'rbin')\n\n\ndef main():\n import pandas as pd\n from shapely.geometry import box\n d = [\n (1, 2, 40000, box(0, 0, 200, 200)),\n (2, 3, 40000, box(200, 200, 400, 400)),\n (3, 1, 40000, box(0, 200, 400, 400)),\n (4, 4, 40000, box(600, 0, 800, 200)),\n (5, 
2, 40000, box(800, 200, 1000, 400)),\n (6, 7, 40000, box(800, 0, 1000, 200))\n ]\n df = pd.DataFrame(d)\n df.columns = [C.COL.risk, C.COL.num_events, C.COL.area, 'geometry']\n # print(hit_rate(df))\n print(search_efficient_rate_upct(df), search_efficient_rate_upct.__name__)\n print(prediction_accuracy_index_upct(df), prediction_accuracy_index_upct.__name__)\n print(area_to_perimeter_ratio_upct(df), area_to_perimeter_ratio_upct.__name__)\n print(hit_rate_upct(df), hit_rate_upct.__name__)\n print(hit_rate_rbin(df), hit_rate_rbin.__name__)\n return hit_rate_upct(df)\n\n\nif __name__ == '__main__':\n roc = main()\n","sub_path":"src/utils/metric_roc_like.py","file_name":"metric_roc_like.py","file_ext":"py","file_size_in_byte":7780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"508111294","text":"import collections\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pygal\nimport json\nimport os\nimport collections as col\n\n# This script counts each user's mentions of other accounts (skipping retweets and self-mentions) and plots the top 10 as a pygal bar chart.\ndef collect_tags(read_dir):\n filelist = os.listdir(read_dir)\n count = col.Counter()\n for file in filelist:\n with open('%s%s' % (read_dir,file),\"r\",encoding='utf8') as raw:\n data = json.load(raw)\n for tweet in data['tweets']:\n if len(tweet[\"entities\"]['user_mentions']) > 0 and \"retweeted_status\" not in tweet :\n # then we count the screen names.\n for mentions in tweet['entities']['user_mentions']:\n if data['handle'].replace('@',\"\") != mentions['screen_name']:\n count.update({mentions['screen_name']})\n pygal_bar(count,data['handle'])\n count = col.Counter()\n\n\ndef pygal_bar(count,handle):\n '''makes a pygal chart from input'''\n most_common = count.most_common()\n cutlist = most_common[0:10]\n chart_title = \"Top 10 Mentions for %s\" % (handle)\n barchart = pygal.Bar(title=chart_title)\n for items in cutlist:\n barchart.add(*items)\n print(\"Plotted\",handle)\n \n barchart.render_to_file('analysis_result/mentions_pygal/%s.svg' % (handle))\n\n\n\ncollect_tags(\"result_raw/\")","sub_path":"plot/plot_all_users_mentions.py","file_name":"plot_all_users_mentions.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537694466","text":"\"\"\"Parameterized curves on any given manifold.\"\"\"\n\nimport math\n\n\nimport geomstats.backend as gs\nfrom geomstats.geometry.euclidean import Euclidean\nfrom geomstats.geometry.euclidean import EuclideanMetric\nfrom geomstats.geometry.landmarks import L2Metric\nfrom geomstats.geometry.manifold import Manifold\nfrom geomstats.geometry.riemannian_metric import RiemannianMetric\n\nR2 = Euclidean(dim=2)\nR3 = Euclidean(dim=3)\n\n\nclass DiscreteCurves(Manifold):\n r\"\"\"Space of discrete curves sampled at points in ambient_manifold.\n\n Each individual curve is represented by a 2d-array of shape `[\n n_sampling_points, ambient_dim]`. 
A Batch of curves can be passed to\n all methods either as a 3d-array if all curves have the same number of\n sampled points, or as a list of 2d-arrays, each representing a curve.\n\n Parameters\n ----------\n ambient_manifold : Manifold\n Manifold in which curves take values.\n\n Attributes\n ----------\n ambient_manifold : Manifold\n Manifold in which curves take values.\n l2_metric : callable\n Function that takes as argument an integer number of sampled points\n and returns the corresponding L2 metric (product) metric,\n a RiemannianMetric object\n square_root_velocity_metric : RiemannianMetric\n Square root velocity metric.\n \"\"\"\n\n def __init__(self, ambient_manifold):\n super(DiscreteCurves, self).__init__(dim=math.inf)\n self.ambient_manifold = ambient_manifold\n self.l2_metric = lambda n: L2Metric(\n self.ambient_manifold, n_landmarks=n)\n self.square_root_velocity_metric = SRVMetric(self.ambient_manifold)\n\n def belongs(self, point, atol=gs.atol):\n \"\"\"Test whether a point belongs to the manifold.\n\n Test that all points of the curve belong to the ambient manifold.\n\n Parameters\n ----------\n point : array-like, shape=[..., n_sampling_points, ambient_dim]\n Point representing a discrete curve.\n atol : float\n Absolute tolerance.\n Optional, default: backend atol.\n\n Returns\n -------\n belongs : bool\n Boolean evaluating if point belongs to the space of discrete\n curves.\n \"\"\"\n def each_belongs(pt):\n return gs.all(self.ambient_manifold.belongs(pt))\n\n if isinstance(point, list) or point.ndim > 2:\n return gs.stack([each_belongs(pt) for pt in point])\n\n return each_belongs(point)\n\n def is_tangent(self, vector, base_point, atol=gs.atol):\n \"\"\"Check whether the vector is tangent at a curve.\n\n A vector is tangent at a curve if it is a vector field along that\n curve.\n\n Parameters\n ----------\n vector : array-like, shape=[..., n_sampling_points, ambient_dim]\n Vector.\n base_point : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n atol : float\n Absolute tolerance.\n Optional, default: backend atol.\n\n Returns\n -------\n is_tangent : bool\n Boolean denoting if vector is a tangent vector at the base point.\n \"\"\"\n ambient_manifold = self.ambient_manifold\n shape = vector.shape\n stacked_vec = gs.reshape(vector, (-1, shape[-1]))\n stacked_point = gs.reshape(base_point, (-1, shape[-1]))\n is_tangent = ambient_manifold.is_tangent(\n stacked_vec, stacked_point, atol)\n is_tangent = gs.reshape(is_tangent, shape[:-1])\n return gs.all(is_tangent, axis=-1)\n\n def to_tangent(self, vector, base_point):\n \"\"\"Project a vector to a tangent space of the manifold.\n\n As tangent vectors are vector fields along a curve, each component of\n the vector is projected to the tangent space of the corresponding\n point of the discrete curve. 
The number of sampling points should\n match in the vector and the base_point.\n\n Parameters\n ----------\n vector : array-like, shape=[..., n_sampling_points, ambient_dim]\n Vector.\n base_point : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n\n Returns\n -------\n tangent_vec : array-like, shape=[..., n_sampling_points, ambient_dim]\n Tangent vector at base point.\n \"\"\"\n ambient_manifold = self.ambient_manifold\n shape = vector.shape\n stacked_vec = gs.reshape(vector, (-1, shape[-1]))\n stacked_point = gs.reshape(base_point, (-1, shape[-1]))\n tangent_vec = ambient_manifold.to_tangent(stacked_vec, stacked_point)\n tangent_vec = gs.reshape(tangent_vec, vector.shape)\n return tangent_vec\n\n def random_point(self, n_samples=1, bound=1., n_sampling_points=10):\n \"\"\"Sample random curves.\n\n If the ambient manifold is compact, a uniform distribution is used.\n\n Parameters\n ----------\n n_samples : int\n Number of samples.\n Optional, default: 1.\n bound : float\n Bound of the interval in which to sample for non compact\n ambient manifolds.\n Optional, default: 1.\n n_sampling_points : int\n Number of sampling points for the discrete curves.\n Optional, default : 10.\n\n Returns\n -------\n samples : array-like, shape=[..., n_sampling_points, {dim, [n, n]}]\n Points sampled on the hypersphere.\n \"\"\"\n sample = self.ambient_manifold.random_point(\n n_samples * n_sampling_points)\n sample = gs.reshape(sample, (n_samples, n_sampling_points, -1))\n return sample[0] if n_samples == 1 else sample\n\n\nclass SRVMetric(RiemannianMetric):\n \"\"\"Elastic metric defined using the Square Root Velocity Function.\n\n See [Sea2011]_ for details.\n\n Parameters\n ----------\n ambient_manifold : Manifold\n Manifold in which curves take values.\n metric : RiemannianMetric\n Metric to use on the ambient manifold. If None is passed, ambient\n manifold should have a metric attribute, which will be used.\n Optional, default : None.\n\n References\n ----------\n .. [Sea2011] A. Srivastava, E. Klassen, S. H. Joshi and I. H. Jermyn,\n \"Shape Analysis of Elastic Curves in Euclidean Spaces,\"\n in IEEE Transactions on Pattern Analysis and Machine Intelligence,\n vol. 33, no. 7, pp. 
1415-1428, July 2011.\n \"\"\"\n\n def __init__(self, ambient_manifold, metric=None):\n super(SRVMetric, self).__init__(dim=math.inf,\n signature=(math.inf, 0, 0))\n if metric is None:\n if hasattr(ambient_manifold, 'metric'):\n self.ambient_metric = ambient_manifold.metric\n else:\n raise ValueError('Instantiating an object of class '\n 'DiscreteCurves requires either a metric'\n ' or an ambient manifold'\n ' equipped with a metric.')\n else:\n self.ambient_metric = metric\n self.l2_metric = lambda n: L2Metric(ambient_manifold, n_landmarks=n)\n\n def pointwise_inner_product(self, tangent_vec_a, tangent_vec_b,\n base_curve):\n \"\"\"Compute the pointwise inner product of pair of tangent vectors.\n\n Compute the point-wise inner-product between two tangent vectors\n at a base curve.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., n_sampling_points, ambient_dim]\n Tangent vector to discrete curve.\n tangent_vec_b : array-like, shape=[..., n_sampling_points, ambient_dim]\n Tangent vector to discrete curve.\n base_curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Point representing a discrete curve.\n\n Returns\n -------\n inner_prod : array-like, shape=[..., n_sampling_points]\n Point-wise inner-product.\n \"\"\"\n def inner_prod_aux(vec_a, vec_b, curve):\n inner_prod = self.ambient_metric.inner_product(vec_a, vec_b, curve)\n return gs.squeeze(inner_prod)\n\n inner_prod = gs.vectorize(\n (tangent_vec_a, tangent_vec_b, base_curve),\n inner_prod_aux,\n dtype=gs.float32,\n multiple_args=True,\n signature='(i,j),(i,j),(i,j)->(i)')\n\n return inner_prod\n\n def pointwise_norm(self, tangent_vec, base_curve):\n \"\"\"Compute the point-wise norm of a tangent vector at a base curve.\n\n Parameters\n ----------\n tangent_vec : array-like, shape=[..., n_sampling_points, ambient_dim]\n Tangent vector to discrete curve.\n base_curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Point representing a discrete curve.\n\n Returns\n -------\n norm : array-like, shape=[..., n_sampling_points]\n Point-wise norms.\n \"\"\"\n sq_norm = self.pointwise_inner_product(\n tangent_vec_a=tangent_vec, tangent_vec_b=tangent_vec,\n base_curve=base_curve)\n return gs.sqrt(sq_norm)\n\n def square_root_velocity(self, curve):\n \"\"\"Compute the square root velocity representation of a curve.\n\n The velocity is computed using the log map. In the case of several\n curves, an index selection procedure allows to get rid of the log\n between the end point of curve[k, :, :] and the starting point of\n curve[k + 1, :, :].\n\n Parameters\n ----------\n curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n\n Returns\n -------\n srv : array-like, shape=[..., n_sampling_points - 1, ambient_dim]\n Square-root velocity representation of a discrete curve.\n \"\"\"\n curve = gs.to_ndarray(curve, to_ndim=3)\n n_curves, n_sampling_points, n_coords = curve.shape\n srv_shape = (n_curves, n_sampling_points - 1, n_coords)\n\n curve = gs.reshape(curve, (n_curves * n_sampling_points, n_coords))\n coef = gs.cast(gs.array(n_sampling_points - 1), gs.float32)\n velocity = coef * self.ambient_metric.log(point=curve[1:, :],\n base_point=curve[:-1, :])\n velocity_norm = self.ambient_metric.norm(velocity, curve[:-1, :])\n srv = gs.einsum(\n '...i,...->...i', velocity, 1. 
/ gs.sqrt(velocity_norm))\n\n index = gs.arange(n_curves * n_sampling_points - 1)\n mask = ~((index + 1) % n_sampling_points == 0)\n srv = gs.reshape(srv[mask], srv_shape)\n\n return srv\n\n def square_root_velocity_inverse(self, srv, starting_point):\n \"\"\"Retrieve a curve from sqrt velocity rep and starting point.\n\n Parameters\n ----------\n srv : array-like, shape=[..., n_sampling_points - 1, ambient_dim]\n Square-root velocity representation of a discrete curve.\n starting_point : array-like, shape=[..., ambient_dim]\n Point of the ambient manifold to use as start of the retrieved\n curve.\n\n Returns\n -------\n curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Curve retrieved from its square-root velocity.\n \"\"\"\n if not isinstance(self.ambient_metric, EuclideanMetric):\n raise AssertionError('The square root velocity inverse is only '\n 'implemented for discrete curves embedded '\n 'in a Euclidean space.')\n if gs.ndim(srv) != gs.ndim(starting_point):\n starting_point = gs.to_ndarray(\n starting_point, to_ndim=srv.ndim, axis=1)\n srv_shape = srv.shape\n srv = gs.to_ndarray(srv, to_ndim=3)\n n_curves, n_sampling_points_minus_one, n_coords = srv.shape\n\n srv = gs.reshape(srv,\n (n_curves * n_sampling_points_minus_one, n_coords))\n srv_norm = self.ambient_metric.norm(srv)\n delta_points = gs.einsum(\n '...,...i->...i', 1 / n_sampling_points_minus_one * srv_norm, srv)\n delta_points = gs.reshape(delta_points, srv_shape)\n curve = gs.concatenate((starting_point, delta_points), -2)\n curve = gs.cumsum(curve, -2)\n\n return curve\n\n def exp(self, tangent_vec, base_point):\n \"\"\"Compute Riemannian exponential of tangent vector wrt to base curve.\n\n Parameters\n ----------\n tangent_vec : array-like, shape=[..., n_sampling_points, ambient_dim]\n Tangent vector to discrete curve.\n base_point : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n\n Return\n ------\n end_curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve, result of the Riemannian exponential.\n \"\"\"\n if not isinstance(self.ambient_metric, EuclideanMetric):\n raise AssertionError('The exponential map is only implemented '\n 'for discrete curves embedded in a '\n 'Euclidean space.')\n base_point = gs.to_ndarray(base_point, to_ndim=3)\n tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=3)\n n_sampling_points = base_point.shape[1]\n\n base_curve_srv = self.square_root_velocity(base_point)\n\n tangent_vec_derivative = (n_sampling_points - 1) * (\n tangent_vec[:, 1:, :] - tangent_vec[:, :-1, :])\n base_curve_velocity = (n_sampling_points - 1) * (\n base_point[:, 1:, :] - base_point[:, :-1, :])\n base_curve_velocity_norm = self.pointwise_norm(\n base_curve_velocity, base_point[:, :-1, :])\n\n inner_prod = self.pointwise_inner_product(\n tangent_vec_derivative, base_curve_velocity, base_point[:, :-1, :])\n coef_1 = 1 / gs.sqrt(base_curve_velocity_norm)\n coef_2 = -1 / (2 * base_curve_velocity_norm**(5 / 2)) * inner_prod\n\n term_1 = gs.einsum('ij,ijk->ijk', coef_1, tangent_vec_derivative)\n term_2 = gs.einsum('ij,ijk->ijk', coef_2, base_curve_velocity)\n srv_initial_derivative = term_1 + term_2\n\n end_curve_srv = self.l2_metric(n_sampling_points - 1).exp(\n tangent_vec=srv_initial_derivative, base_point=base_curve_srv)\n end_curve_starting_point = self.ambient_metric.exp(\n tangent_vec=tangent_vec[:, 0, :], base_point=base_point[:, 0, :])\n end_curve = self.square_root_velocity_inverse(\n end_curve_srv, end_curve_starting_point)\n\n return 
end_curve\n\n def log(self, point, base_point):\n \"\"\"Compute Riemannian logarithm of a curve wrt a base curve.\n\n Parameters\n ----------\n point : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n base_point : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve to use as base point.\n\n Returns\n -------\n log : array-like, shape=[..., n_sampling_points, ambient_dim]\n Tangent vector to a discrete curve.\n \"\"\"\n if not isinstance(self.ambient_metric, EuclideanMetric):\n raise AssertionError('The logarithm map is only implemented '\n 'for discrete curves embedded in a '\n 'Euclidean space.')\n point = gs.to_ndarray(point, to_ndim=3)\n base_point = gs.to_ndarray(base_point, to_ndim=3)\n n_curves, n_sampling_points, n_coords = point.shape\n\n curve_srv = self.square_root_velocity(point)\n base_curve_srv = self.square_root_velocity(base_point)\n\n base_curve_velocity = (n_sampling_points - 1) * (base_point[:, 1:, :] -\n base_point[:, :-1, :])\n base_curve_velocity_norm = self.pointwise_norm(base_curve_velocity,\n base_point[:, :-1, :])\n\n inner_prod = self.pointwise_inner_product(curve_srv - base_curve_srv,\n base_curve_velocity,\n base_point[:, :-1, :])\n coef_1 = gs.sqrt(base_curve_velocity_norm)\n coef_2 = 1 / base_curve_velocity_norm**(3 / 2) * inner_prod\n\n term_1 = gs.einsum('ij,ijk->ijk', coef_1, curve_srv - base_curve_srv)\n term_2 = gs.einsum('ij,ijk->ijk', coef_2, base_curve_velocity)\n log_derivative = term_1 + term_2\n\n log_starting_points = self.ambient_metric.log(\n point=point[:, 0, :], base_point=base_point[:, 0, :])\n log_starting_points = gs.to_ndarray(\n log_starting_points, to_ndim=3, axis=1)\n\n log_cumsum = gs.hstack(\n [gs.zeros((n_curves, 1, n_coords)),\n gs.cumsum(log_derivative, -2)])\n log = log_starting_points + 1 / (n_sampling_points - 1) * log_cumsum\n\n return log\n\n def geodesic(self,\n initial_curve,\n end_curve=None,\n initial_tangent_vec=None):\n \"\"\"Compute geodesic from initial curve to end curve.\n\n Geodesic specified either by an initial curve and an end curve,\n or by an initial curve and an initial tangent vector.\n\n Parameters\n ----------\n initial_curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n end_curve : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve. 
If None, an initial tangent vector must be given.\n Optional, default : None\n initial_tangent_vec : array-like,\n shape=[..., n_sampling_points, ambient_dim]\n Tangent vector at base curve, the initial speed of the geodesics.\n If None, an end curve must be given and a logarithm is computed.\n Optional, default : None\n\n Returns\n -------\n curve_on_geodesic : callable\n The time parameterized geodesic curve.\n \"\"\"\n if not isinstance(self.ambient_metric, EuclideanMetric):\n raise AssertionError('The geodesics are only implemented for '\n 'discrete curves embedded in a '\n 'Euclidean space.')\n curve_ndim = 2\n initial_curve = gs.to_ndarray(initial_curve, to_ndim=curve_ndim + 1)\n\n if end_curve is None and initial_tangent_vec is None:\n raise ValueError('Specify an end curve or an initial tangent '\n 'vector to define the geodesic.')\n if end_curve is not None:\n end_curve = gs.to_ndarray(end_curve, to_ndim=curve_ndim + 1)\n shooting_tangent_vec = self.log(point=end_curve,\n base_point=initial_curve)\n if initial_tangent_vec is not None:\n if not gs.allclose(shooting_tangent_vec, initial_tangent_vec):\n raise RuntimeError(\n 'The shooting tangent vector is too'\n ' far from the initial tangent vector.')\n initial_tangent_vec = shooting_tangent_vec\n initial_tangent_vec = gs.array(initial_tangent_vec)\n initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,\n to_ndim=curve_ndim + 1)\n\n def curve_on_geodesic(t):\n t = gs.cast(t, gs.float32)\n t = gs.to_ndarray(t, to_ndim=1)\n t = gs.to_ndarray(t, to_ndim=2, axis=1)\n new_initial_curve = gs.to_ndarray(initial_curve,\n to_ndim=curve_ndim + 1)\n new_initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,\n to_ndim=curve_ndim + 1)\n\n tangent_vecs = gs.einsum('il,nkm->ikm', t, new_initial_tangent_vec)\n\n curve_at_time_t = []\n for tan_vec in tangent_vecs:\n curve_at_time_t.append(\n self.exp(tan_vec, new_initial_curve))\n return gs.stack(curve_at_time_t)\n\n return curve_on_geodesic\n\n def dist(self, point_a, point_b, **kwargs):\n \"\"\"Geodesic distance between two curves.\n\n Parameters\n ----------\n point_a : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n point_b : array-like, shape=[..., n_sampling_points, ambient_dim]\n Discrete curve.\n\n Returns\n -------\n dist : array-like, shape=[...,]\n \"\"\"\n if not isinstance(self.ambient_metric, EuclideanMetric):\n raise AssertionError('The distance is only implemented for '\n 'discrete curves embedded in a '\n 'Euclidean space.')\n if point_a.shape != point_b.shape:\n raise ValueError('The curves need to have the same shapes.')\n\n srv_a = self.square_root_velocity(point_a)\n srv_b = self.square_root_velocity(point_b)\n n_sampling_points = srv_a.shape[-2]\n dist_starting_points = self.ambient_metric.dist(\n point_a[0, :], point_b[0, :])\n dist_srvs = self.l2_metric(n_sampling_points).dist(srv_a, srv_b)\n dist = gs.sqrt(dist_starting_points**2 + dist_srvs**2)\n\n return dist\n","sub_path":"geomstats/geometry/discrete_curves.py","file_name":"discrete_curves.py","file_ext":"py","file_size_in_byte":21497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638891881","text":"def count_letters(string):\n counts = dict()\n for words in string:\n for i in words:\n if i != \" \":\n if i in counts:\n counts[i] = counts[i] + 1 \n else:\n counts[i] = 1\n print(f\"Given List : {string}\")\n for k in counts:\n print(f\"{k} - {counts[k]}\")\n\nstring = input(\"Enter words separated by spaces: 
\")\ncount_letters(string.split())\n","sub_path":"submissions/sm_108_krishna-kant/week_14/day_3/evaluation/count_letters.py","file_name":"count_letters.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601994615","text":"from admins import ModelView, db\nfrom .models import Gatekeeper\n\n\nclass GatekeeperView(ModelView):\n column_searchable_list = (Gatekeeper.name, )\n column_labels = {\n \"name\": u\"权限\",\n \"description\": u\"描述\",\n \"staff_required\": u\"管理员校验\",\n \"percent\": u\"百分比\",\n \"whitelist\": u\"白名单\"\n }\n\n def __init__(self, **kwargs):\n super(GatekeeperView, self).__init__(Gatekeeper, db.session, **kwargs)","sub_path":"gatekeeper/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"347058485","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 26 21:14:26 2018\n\n@author: Huawei\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Colum names\n\n# 0 : time column\n\n# 2 : GRF Left x\n# 3 : GRF Left y\n# 4 : GRF Left moment \n\n# 5 : GRF Right x\n# 6 : GRF Right y\n# 7 : GRF Right moment \n\n# 8 : Pelvis x\n# 9 : Pelvis y \n\n# 10 : Hip x\n# 11 : Hip y\n\n# 12 : Left Ankle x\n# 13 : Left Ankle y \n\n# 14 : Right Ankle x\n# 15 : Right Ankle y\n\n# 16 : Left Heel x\n# 17 : Left Heel y\n\n# 18 : Right Heel x\n# 19 : Right Heel y\n\n# 16 : Left Knee x\n# 17 : Left Knee y\n\n# 18 : Right Knee x\n# 19 : Right Knee y\n\nSt = 0\nEd = 73\n\nLen = 100\n\nSwingAnkx = np.zeros((2*(Ed-St), Len))\nSwingAnky = np.zeros((2*(Ed-St), Len))\n\nSwingHeelx = np.zeros((2*(Ed-St), Len))\nSwingHeely = np.zeros((2*(Ed-St), Len))\n\nSwingKneex = np.zeros((2*(Ed-St), Len))\nSwingKneey = np.zeros((2*(Ed-St), Len))\n\nStanceKneex = np.zeros((2*(Ed-St), Len))\nStanceKneey = np.zeros((2*(Ed-St), Len))\n\nStanceHipx = np.zeros((2*(Ed-St), Len))\nStanceHipy = np.zeros((2*(Ed-St), Len))\n\nStanceHeelx = np.zeros((2*(Ed-St), Len))\nStanceHeely = np.zeros((2*(Ed-St), Len))\n\nSwingt = np.zeros((2*(Ed-St), Len))\nStancet = np.zeros((2*(Ed-St), Len))\n\nfor k in range(St, Ed):\n #stance = np.loadtxt('SepData/StanceMotion0.txt')\n Lswing = np.loadtxt('SepData_Force/SwingMotion'+str(k)+'L.txt')\n Lstance = np.loadtxt('SepData_Force/StanceMotion'+str(k)+'L.txt')\n \n Rswing = np.loadtxt('SepData_Force/SwingMotion'+str(k)+'R.txt')\n Rstance = np.loadtxt('SepData_Force/StanceMotion'+str(k)+'R.txt')\n \n LtimeSwO = np.linspace(0, len(Lswing[:, 0])*0.01, len(Lswing[:, 0]))\n LtimeStO = np.linspace(0, len(Lstance[:, 0])*0.01, len(Lstance[:, 0]))\n \n RtimeSwO = np.linspace(0, len(Rswing[:, 0])*0.01, len(Rswing[:, 0]))\n RtimeStO = np.linspace(0, len(Rstance[:, 0])*0.01, len(Rstance[:, 0]))\n \n LtimeSw = np.linspace(0, len(Lswing[:, 0])*0.01, Len)\n LtimeSt = np.linspace(0, len(Lstance[:, 0])*0.01, Len)\n \n RtimeSw = np.linspace(0, len(Rswing[:, 0])*0.01, Len)\n RtimeSt = np.linspace(0, len(Rstance[:, 0])*0.01, Len)\n \n SwingLAnkx = (Lswing[:, 12] - Lswing[:, 10])\n SwingRAnkx = (Rswing[:, 14] - Rswing[:, 10])\n \n SwingLAnky = (Lswing[:, 13] - Lswing[:, 11])\n SwingRAnky = (Rswing[:, 15] - Rswing[:, 11])\n \n SwingLKneex = (Lswing[:, 12] - Lswing[:, 20])\n SwingRKneex = (Rswing[:, 14] - Rswing[:, 22])\n \n SwingLKneey = (Lswing[:, 13] - Lswing[:, 21])\n SwingRKneey = (Rswing[:, 15] - Rswing[:, 23])\n \n SwingLHeelx = -(Lswing[:, 12] - Lswing[:, 16])\n SwingRHeelx = 
-(Rswing[:, 14] - Rswing[:, 18])\n \n SwingLHeely = -(Lswing[:, 13] - Lswing[:, 17])\n SwingRHeely = -(Rswing[:, 15] - Rswing[:, 19])\n \n StanceLHipx = -(Lstance[:, 12] - Lstance[:, 10])\n StanceRHipx = -(Rstance[:, 14] - Rstance[:, 10])\n \n StanceLHipy = -(Lstance[:, 13] - Lstance[:, 11])\n StanceRHipy = -(Rstance[:, 15] - Rstance[:, 11])\n \n StanceLHeelx = (Lstance[:, 12] - Lstance[:, 16])\n StanceRHeelx = (Rstance[:, 14] - Rstance[:, 18])\n \n StanceLHeely = (Lstance[:, 13] - Lstance[:, 17])\n StanceRHeely = (Rstance[:, 15] - Rstance[:, 19])\n \n StanceLKneex = -(Lstance[:, 12] - Lstance[:, 20])\n StanceRKneex = -(Rstance[:, 14] - Rstance[:, 22])\n \n StanceLKneey = -(Lstance[:, 13] - Lstance[:, 21])\n StanceRKneey = -(Rstance[:, 15] - Rstance[:, 23])\n \n SwingAnkx[k, :] = np.interp(LtimeSw, LtimeSwO, SwingLAnkx)\n SwingAnkx[k+(Ed-St), :] = np.interp(RtimeSw, RtimeSwO, SwingRAnkx)\n \n SwingAnky[k, :] = np.interp(LtimeSw, LtimeSwO, SwingLAnky)\n SwingAnky[k+(Ed-St), :] = np.interp(RtimeSw, RtimeSwO, SwingRAnky)\n \n SwingHeelx[k, :] = np.interp(LtimeSw, LtimeSwO, SwingLHeelx)\n SwingHeelx[k+(Ed-St), :] = np.interp(RtimeSw, RtimeSwO, SwingRHeelx)\n \n SwingHeely[k, :] = np.interp(LtimeSw, LtimeSwO, SwingLHeely)\n SwingHeely[k+(Ed-St), :] = np.interp(RtimeSw, RtimeSwO, SwingRHeely)\n \n SwingKneex[k, :] = np.interp(LtimeSw, LtimeSwO, SwingLKneex)\n SwingKneex[k+(Ed-St), :] = np.interp(RtimeSw, RtimeSwO, SwingRKneex)\n \n SwingKneey[k, :] = np.interp(LtimeSw, LtimeSwO, SwingLKneey)\n SwingKneey[k+(Ed-St), :] = np.interp(RtimeSw, RtimeSwO, SwingRKneey)\n \n Swingt[k, :] = LtimeSw\n Swingt[k+(Ed-St), :] = RtimeSw\n \n StanceHipx[k, :] = np.interp(LtimeSt, LtimeStO, StanceLHipx)\n StanceHipx[k+(Ed-St), :] = np.interp(RtimeSt, RtimeStO, StanceRHipx)\n \n StanceHipy[k, :] = np.interp(LtimeSt, LtimeStO, StanceLHipy)\n StanceHipy[k+(Ed-St), :] = np.interp(RtimeSt, RtimeStO, StanceRHipy)\n \n StanceHeelx[k, :] = np.interp(LtimeSt, LtimeStO, StanceLHeelx)\n StanceHeelx[k+(Ed-St), :] = np.interp(RtimeSt, RtimeStO, StanceRHeelx)\n \n StanceHeely[k, :] = np.interp(LtimeSt, LtimeStO, StanceLHeely)\n StanceHeely[k+(Ed-St), :] = np.interp(RtimeSt, RtimeStO, StanceRHeely)\n \n StanceKneex[k, :] = np.interp(LtimeSt, LtimeStO, StanceLKneex)\n StanceKneex[k+(Ed-St), :] = np.interp(RtimeSt, RtimeStO, StanceRKneex)\n \n StanceKneey[k, :] = np.interp(LtimeSt, LtimeStO, StanceLKneey)\n StanceKneey[k+(Ed-St), :] = np.interp(RtimeSt, RtimeStO, StanceRKneey)\n \n Stancet[k, :] = LtimeSt\n Stancet[k+(Ed-St), :] = RtimeSt\n \nfig2 = plt.figure(figsize=(18,18))\nax1 = fig2.add_subplot(4,2,1)\nax2 = fig2.add_subplot(4,2,2)\nax3 = fig2.add_subplot(4,2,3)\nax4 = fig2.add_subplot(4,2,4)\n\nax5 = fig2.add_subplot(4,2,5)\nax6 = fig2.add_subplot(4,2,6)\nax7 = fig2.add_subplot(4,2,7)\nax8 = fig2.add_subplot(4,2,8)\n\nfor m in range(2*(Ed-St)):\n ax1.plot(Swingt[m, :], SwingAnkx[m, :], '-')\n \n ax2.plot(Swingt[m, :], SwingAnky[m, :], '-')\n\n ax3.plot(Stancet[m, :], StanceHipx[m, :], '-')\n \n ax4.plot(Stancet[m, :], StanceHipy[m, :], '-')\n \n ax5.plot(Swingt[m, :], SwingHeelx[m, :], '-')\n\n ax6.plot(Swingt[m, :], SwingHeely[m, :], '-')\n \n ax7.plot(Stancet[m, :], StanceHeelx[m, :], '-')\n\n ax8.plot(Stancet[m, :], StanceHeely[m, :], '-')\n \nfig3 = plt.figure(figsize=(18,18))\nax9 = fig3.add_subplot(4,2,1)\nax10 = fig3.add_subplot(4,2,2)\nax11 = fig3.add_subplot(4,2,3)\nax12 = fig3.add_subplot(4,2,4)\n\nax13 = fig3.add_subplot(4,2,5)\nax14 = fig3.add_subplot(4,2,6)\nax15 = fig3.add_subplot(4,2,7)\nax16 = 
for m in range(2*(Ed-St)):\n ax9.plot( SwingAnkx[m, :], '-')\n \n ax10.plot( SwingAnky[m, :], '-')\n\n ax11.plot( StanceHipx[m, :], '-')\n \n ax12.plot( StanceHipy[m, :], '-')\n \n ax13.plot( SwingHeelx[m, :], '-')\n\n ax14.plot( SwingHeely[m, :], '-')\n \n ax15.plot( StanceHeelx[m, :], '-')\n\n ax16.plot( StanceHeely[m, :], '-')\n\n# Serialize each matrix as whitespace-separated values, one gait cycle per row.\nout_arrays = {\n 'SwingAnkxF.txt': SwingAnkx, 'SwingAnkyF.txt': SwingAnky,\n 'StanceHipxF.txt': StanceHipx, 'StanceHipyF.txt': StanceHipy,\n 'SwingKneexF.txt': SwingKneex, 'SwingKneeyF.txt': SwingKneey,\n 'StanceKneexF.txt': StanceKneex, 'StanceKneeyF.txt': StanceKneey,\n 'SwingHeelxF.txt': SwingHeelx, 'SwingHeelyF.txt': SwingHeely,\n 'StanceHeelxF.txt': StanceHeelx, 'StanceHeelyF.txt': StanceHeely,\n 'Swingt.txt': Swingt, 'Stancet.txt': Stancet}\n\nfor fname, arr in out_arrays.items():\n with open('SepData_Force_For/' + fname, 'w') as outfile:\n StringP = \"\"\n for i in range(len(arr[:, 0])):\n for j in range(len(arr[0, :])):\n StringP += str(arr[i, j])\n StringP += \" \"\n StringP += \"\\n\"\n outfile.write(StringP)
","sub_path":"Chapter6/walking_data/DataFormating.py","file_name":"DataFormating.py","file_ext":"py","file_size_in_byte":10878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"370890262","text":"\"\"\"\n智图 -- Wanfang Video\nDescription: the code is fairly robust and can run as a scheduled task.\nKnown issues: none\nURL: http://video.wanfangdata.com.cn\n\"\"\"\nimport math\nimport os\nimport re\nimport time\nfrom collections import defaultdict\n\nfrom bs4 import BeautifulSoup\n\nimport utils\n\n\nclass DownloadWFVedio(utils.Download):\n\n def down_html(self):\n # Retry up to four times\n super().down_html()\n for _ in range(4):\n resp = utils.get_html(\"http://video.wanfangdata.com.cn/s/search/\", feature=\"检索结果\")\n if not resp:\n continue\n with open(self.html_path + \"/html.html\", mode='w', encoding=\"utf8\") as f:\n f.write(resp.content.decode())\n utils.printf(\"Start page downloaded!\")\n break\n\n def down_detail(self):\n super().down_detail()\n utils.printf(\"Starting detail page download...\")\n conn = utils.init_db('mysql', 'wanfang_video')\n cur = conn.cursor()\n cur.execute('SELECT DISTINCT(wanID) FROM `article` where stat=0')\n rows = cur.fetchall()\n uri = 'http://video.wanfangdata.com.cn/v/play/{}.html'\n prefix_1 = '总共播放'\n prefix_2 = '课程介绍'\n for row in rows:\n url = uri.format(row[0])\n filename = self.detail_path + '/{}.html'.format(row[0])\n if os.path.exists(filename):\n continue\n resp = utils.get_html(url)\n\n if not resp:\n continue\n html = resp.content.decode('utf8')\n if (prefix_1 in html) or (prefix_2 in html):\n with open(filename, mode='w', encoding='utf8') as f:\n f.write(html)\n utils.printf(\"Download\", filename, \"success!\")\n cur.execute(\"update article set stat=1 where wanID='{}'\".format(row[0]))\n conn.commit()\n else:\n utils.printf(prefix_1, ' or ', prefix_2, 'not in ', url)\n conn.close()\n\n def down_list(self):\n super().down_list()\n utils.printf(\"Starting list page download...\")\n conn = utils.init_db('mysql', 'wanfang_video')\n cur = conn.cursor()\n cur.execute('select uri,num,name from category')\n rows = cur.fetchall()\n cur.close()\n conn.close()\n print(self.list_path)\n prefix = '清晰度:'\n for uri, num, name in rows:\n for page in range(1, math.ceil(num / 16) + 1):\n filename = self.list_path + '/' + name + '_' + repr(page) + '.html'\n if os.path.exists(filename):\n continue\n\n url = 'http://video.wanfangdata.com.cn{}page={}'.format(uri.strip(), page)\n resp = utils.get_html(url, feature=prefix)\n if not resp:\n continue\n with open(filename, mode='w', encoding='utf8') as f:\n f.write(resp.content.decode())\n utils.printf(\"Download\", filename, \"success!\")\n\n def down_cover(self):\n super().down_cover()\n utils.printf(\"Starting cover download...\")\n conn = utils.init_db('mysql', 'wanfang_video')\n cur = conn.cursor()\n cur.execute('SELECT DISTINCT(wanID) FROM `article` where jpg_d=0')\n rows = cur.fetchall()\n uri = 'http://video.wanfangdata.com.cn/wfresourse/pic/video/{}.jpg'\n # prefix = '总共播放'\n for row in rows:\n url = uri.format(row[0])\n 
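# Covers are bucketed into subdirectories keyed by the first four characters of the Wanfang ID.\n 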
path = self.cover_path + '/' + row[0][:4]\n if not os.path.exists(path):\n os.makedirs(path)\n filename = path + '/{}.jpg'.format(row[0])\n if os.path.exists(filename):\n continue\n\n resp = utils.get_html(url)\n if not resp:\n continue\n with open(filename, mode='wb') as f:\n f.write(resp.content)\n utils.printf(\"Download\", filename, \"success!\")\n cur.execute(\"update article set jpg_d=1 where wanID='{}'\".format(row[0]))\n conn.commit()\n conn.close()\n\n\nsubject_regex = re.compile(r'/s/search/\\?key=.+')\n\ncategory = {\n \"学科\": [\n \"哲学与宗教\", \"历史与考古\", \"文学\", \"艺术\", \"经济\", \"管理\", \"法律\", \"政治\", \"社会\", \"教育\", \"天文\", \"地球科学\", \"生物\", \"人文地理\", \"数理科学\", \"工程技术\",\n \"中国医学\", \"医药卫生\", \"农学与农业\", \"国防军事\", \"图情档案\", \"计算机\"],\n \"频道\": [\n \"时尚科技\", \"境内高端学术会议\", \"中国名师讲坛系列\", \"环球高清精选\", \"国外优秀视频系列\", \"高校精品课程系列\", \"资格考试辅导系列\", \"就业创业指导系列\", \"凤凰卫视系列\", \"央视科教系列\",\n \"中国科学院系列\", \"北大光华管理学院\", \"赢家大讲堂系列\", \"第一财经系列\", \"中华医学会系列\", \"中医药管理局系列\", \"中国气象局系列\", \"CCTV-7系列\", \"中国科技信息研究所\",\n \"微视频大赛\"]}\n\n\ndef get_wanid_xk():\n \"\"\"Fetch each video ID and its subject/channel categories from the article table.\"\"\"\n conn = utils.init_db('mysql', 'wanfang_video')\n cur = conn.cursor()\n cur.execute('SELECT wanid,xk,pd FROM `article`')\n rows = cur.fetchall()\n cur.close()\n conn.close()\n classes = defaultdict(list)\n for wanid, xk, pd in rows:\n if xk:\n classes[wanid].append(xk)\n if pd:\n classes[wanid].append(pd)\n return classes\n\n\ndef _get_detail(file):\n \"\"\"Extract the detail fields contained in a downloaded page.\"\"\"\n with open(file, mode='r', encoding=\"utf8\") as f:\n text = f.read()\n soup = BeautifulSoup(text, 'lxml')\n info = soup.select_one('.part_l .txtinfor')\n title = ''.join(info.find('h3').stripped_strings)\n subject = ';'.join([x.string.strip() for x in info.find_all('a', href=subject_regex)])\n mixed = [''.join(x.stripped_strings) for x in info.find_all('span')]\n creator = ''\n creator_institution = ''\n durations = ''\n date = ''\n for i in mixed:\n if '主讲人:' in i:\n creator = i[len('主讲人:'):]\n if '机构:' in i:\n creator_institution = i[len('机构:'):]\n if '时长:' in i:\n durations = i[len('时长:'):]\n if '年代:' in i:\n date = i[len('年代:'):][:4]\n description = info.find('h3').next_sibling.next_sibling.string.strip()\n return title, description, subject, creator, creator_institution, durations, date\n\n\nclass ParseWfVedio(utils.Parse):\n\n def parse_html(self):\n super().parse_html()\n with open(self.html_path + \"/html.html\", encoding=\"utf8\") as f:\n text = f.read()\n soup = BeautifulSoup(text, 'lxml')\n num_reg = re.compile(r'\\((\\d{1,}).*\\)')\n dts = soup.select('.float_l .part_l2 .item')\n conn = utils.init_db('mysql', 'wanfang_video')\n cur = conn.cursor()\n sql = \"update category set num={},uri='{}' where name='{}'\"\n for dt in dts:\n try:\n atag = dt.select_one('h3 a')\n num, uri, name = num_reg.findall(atag.string)[0], atag['href'], dt['title']\n cur.execute(sql.format(num, uri, name))\n except IndexError:\n continue\n conn.commit()\n conn.close()\n utils.printf(\"Start page parsed!\")\n\n def parse_detail(self):\n super().parse_detail()\n # _parse_detail(self.detail_path)\n classes = get_wanid_xk()\n sql = \"\"\"insert or ignore into modify_title_info_zt(lngid,rawid,title,creator,folio_size,type,batch,provider,provider_url,provider_id,language,country,date,date_created,subject,description,medium,provider_subject,cover,creator_institution)values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\"\n language = 'ZH'\n country = 'CN'\n provider = 'wanfangvideo'\n type_ = 10\n medium = 2\n batch = time.strftime('%Y%m%d') + '00'\n 
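# 'batch' tags this harvest run with today's date plus a fixed two-digit '00' suffix.\n 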
conn = utils.init_db(\"sqlite3\", self.template_file)\n cur = conn.cursor()\n for _, file in utils.file_list(self.detail_path):\n title, description, subject, creator, creator_institution, folio_size, date = _get_detail(file)\n rawid = (os.path.basename(file)).partition('.')[0]\n provider_subject = ';'.join(classes[rawid])\n provider_url = provider + '@http://video.wanfangdata.com.cn/v/play/' + rawid + '.html'\n provider_id = provider + '@' + rawid\n date_created = date + '0000'\n\n lngid = 'WANFANGDATA_DMT_' + rawid\n cover = '/smartlib/wanfangvideo/{}/{}.jpg'.format(rawid[:4], rawid)\n data = [\n lngid, rawid, title, creator, folio_size, type_, batch, provider, provider_url, provider_id, language,\n country, date, date_created, subject, description, medium, provider_subject, cover,\n creator_institution]\n cur.execute(sql, data)\n utils.printf(\"Parse \", file, \"success\")\n conn.commit()\n cur.close()\n conn.close()\n\n def parse_list(self):\n super().parse_list()\n conn = utils.init_db('mysql', 'wanfang_video')\n cur = conn.cursor()\n sql1 = 'insert ignore into article(wanID,xk)values(%s,%s)'\n sql2 = 'insert ignore into article(wanID,pd)values(%s,%s)'\n for _, file in utils.file_list(self.list_path):\n with open(file, encoding=\"utf8\") as f:\n text = f.read()\n #text = utils.read_txt(file)\n cate = file.split('\\\\')[-1].split('_')[0]\n soup = BeautifulSoup(text, 'lxml')\n tags = soup.select('.movListBox .movList dd a')\n data = [(tag['href'].split('/')[-1].partition('.')[0], cate) for tag in tags]\n if cate in category['学科']:\n cur.executemany(sql1, data)\n if cate in category['频道']:\n cur.executemany(sql2, data)\n utils.printf(\"Parse \", file, \"Successfull!\")\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef main():\n while True:\n down_task = DownloadWFVedio()\n parse_task = ParseWfVedio()\n down_task.down_html()\n parse_task.parse_html()\n down_task.down_list()\n parse_task.parse_list()\n down_task.down_detail()\n down_task.down_cover()\n parse_task.parse_detail()\n # utils.upload_file_to_hdfs('/RawData/CQU/_db3_latest', parse.template_file)\n utils.printf('此次任务完成,程序将进入睡眠,15天后重启...')\n time.sleep(3600 * 24 * 7)\n\n\nif __name__ == '__main__':\n main()","sub_path":"src/wanfang_video.py","file_name":"wanfang_video.py","file_ext":"py","file_size_in_byte":10677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"266507397","text":"import numpy as np\nimport pyaudio\nimport mido\nimport xml.etree.ElementTree as ET\n\nfrom Chromagram import Chromagram\nfrom ChordDetector import ChordDetector\n\nNOTE_ON = [0, 47, 110, 111, 116, 101, 95, 111, 110]\nNOTE_OFF = [0, 47, 110, 111, 116, 101, 95, 111, 102, 102, 32, 37, 105]\nALL_NOTES_OFF = [0, 47, 97, 108, 108, 95, 110, 111, 116, 101, 115, 95, 111, 102, 102]\nBLINK = [0, 47, 98, 108, 105, 110, 107]\nPORT = mido.open_output('Dr Squiggles:Dr Squiggles MIDI 1 20:0')\n\ntree = ET.parse('../.squiggles_notes/squiggles_notes.xml')\nroot = tree.getroot()\nNOTES = [int(root[i].text.strip()) for i in range(1,9)]\nPREVIOUS_NOTE = 64\nprint(NOTES)\n\nTIME_TILL_SILENCE = 8\nsilence_counter = 0\n\ndef play_solinoid(note):\n stop_all()\n msg = mido.Message('sysex', data=NOTE_ON)\n msg.data += [32, note+48]\n PORT.send(msg)\n\ndef stop_play(note):\n msg = mido.Message('sysex', data=NOTE_OFF)\n msg.data += [32, note]\n PORT.send(msg)\n\ndef stop_all():\n msg = mido.Message('sysex', data=ALL_NOTES_OFF)\n PORT.send(msg)\n\ndef play_chord(chord):\n global PREVIOUS_NOTE\n notes_in_chord = [i for i, x in 
PORT = mido.open_output('Dr Squiggles:Dr Squiggles MIDI 1 20:0')\n\ntree = ET.parse('../.squiggles_notes/squiggles_notes.xml')\nroot = tree.getroot()\nNOTES = [int(root[i].text.strip()) for i in range(1,9)]\nPREVIOUS_NOTE = 64\nprint(NOTES)\n\nTIME_TILL_SILENCE = 8\nsilence_counter = 0\n\ndef play_solinoid(note):\n stop_all()\n msg = mido.Message('sysex', data=NOTE_ON)\n msg.data += [32, note+48]\n PORT.send(msg)\n\ndef stop_play(note):\n msg = mido.Message('sysex', data=NOTE_OFF)\n msg.data += [32, note]\n PORT.send(msg)\n\ndef stop_all():\n msg = mido.Message('sysex', data=ALL_NOTES_OFF)\n PORT.send(msg)\n\ndef play_chord(chord):\n # Fire the first chord tone that maps onto one of the eight solenoids,\n # using the same chroma-bin-to-MIDI mapping as voice_leading below.\n notes_in_chord = [i for i, x in enumerate(chord) if x == 1]\n for note in notes_in_chord:\n midi_note = (note-4)%12+52\n if midi_note in NOTES:\n play_solinoid(NOTES.index(midi_note))\n break\n\ndef voice_leading(chord):\n global PREVIOUS_NOTE\n\n notes_in_chord = [i for i, x in enumerate(chord) if x == 1]\n possible_notes = []\n for note in notes_in_chord:\n midi_note = (note-4)%12+52\n for i in range(2):\n possible_notes.append(midi_note+12*i)\n if len(possible_notes) == 0:\n return\n\n # Pick the candidate pitch closest to the previously played note.\n min_distance = 48\n best_note = None\n for note in possible_notes:\n dist = np.abs(PREVIOUS_NOTE - note)\n if dist < min_distance:\n min_distance = dist\n best_note = note\n\n print(PREVIOUS_NOTE, best_note, NOTES)\n if best_note is not None:\n PREVIOUS_NOTE = best_note\n if best_note in NOTES:\n note_to_play = NOTES.index(best_note)\n play_solinoid(note_to_play)\n\n\n\nCHUNK = 2**12\nRATE = 44100\n\nindex_to_note = [\"C\",\"C#\",\"D\",\"Eb\",\"E\",\"F\",\"F#\",\"G\",\"Ab\",\"A\",\"Bb\",\"B\"]\ntype_of_chord = [\"Major\", \"Minor\", \"Diminished\", \"Augmented\", \"Sus2\", \"Sus4\", \"Major 7th\", \"Minor 7th\", \"Dominant 7th\"]\n\nchroma = Chromagram(CHUNK, RATE, buffer_size=2**12)#, reference_freq=65.41, num_octaves=3)\nchord = ChordDetector()\np=pyaudio.PyAudio()\nstream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,\n frames_per_buffer=CHUNK)\n\nwhile True:\n data = np.frombuffer(stream.read(CHUNK, exception_on_overflow = False), dtype=np.int16)\n \"\"\"print(data.shape)\n peak=np.average(np.abs(data))*2\n bars=\"#\"*int(50*peak/2**16)\n print(\"%04d %05d %s\"%(i,peak,bars))\"\"\"\n chroma.process_audio_frame(data)\n spectrum_dif = chroma.magnitude_spectrum - chroma.previous_spectrum\n pos_values = spectrum_dif[spectrum_dif>0]\n new_energy = np.sum(pos_values)\n print(new_energy)\n\n if new_energy > 50000:\n silence_counter = TIME_TILL_SILENCE\n pred = chord.classify_chromagram(chroma.chromagram)\n root = index_to_note[pred%12]\n chord_type = type_of_chord[int(pred//12)]\n print(root, chord_type)\n voice_leading(chord.chord_profiles[pred])\n elif silence_counter == 0:\n stop_all()\n else:\n silence_counter -= 1\n print(silence_counter)\n\nstream.stop_stream()\nstream.close()\np.terminate()\n","sub_path":"melody.py","file_name":"melody.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}