diff --git "a/3145.jsonl" "b/3145.jsonl" new file mode 100644--- /dev/null +++ "b/3145.jsonl" @@ -0,0 +1,619 @@ +{"seq_id":"215055975","text":"# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: sherlockliao01@gmail.com\n\"\"\"\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\n\nfrom fastreid.utils.one_hot import one_hot\n\n\nclass Arcface(nn.Module):\n def __init__(self, cfg, in_feat, num_classes):\n super().__init__()\n self.in_feat = in_feat\n self._num_classes = num_classes\n self._s = cfg.MODEL.HEADS.SCALE\n self._m = cfg.MODEL.HEADS.MARGIN\n\n self.weight = Parameter(torch.Tensor(self._num_classes, in_feat))\n\n def forward(self, features, targets):\n # get cos(theta)\n cosine = F.linear(F.normalize(features), F.normalize(self.weight))\n\n # add margin\n theta = torch.acos(torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7))\n\n phi = torch.cos(theta + self._m)\n\n # --------------------------- convert label to one-hot ---------------------------\n targets = one_hot(targets, self._num_classes)\n pred_class_logits = targets * phi + (1.0 - targets) * cosine\n\n # logits re-scale\n pred_class_logits *= self._s\n\n return pred_class_logits\n\n def extra_repr(self):\n return 'in_features={}, num_classes={}, scale={}, margin={}'.format(\n self.in_feat, self._num_classes, self._s, self._m\n )\n","sub_path":"fastreid/layers/arcface.py","file_name":"arcface.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"478679594","text":"from numpy import place\nimport pandas as pd\nimport os\nimport sys\n\nsys.path.append('..')\n\nfrom src import gcs_ex\n\n\nclass DailyBettingEval:\n def __init__(\n self,\n race_date\n ):\n self.bucket = gcs_ex.GCSBucket('boat_race_ai', 'boat_race_ai')\n\n self.race_date = race_date\n\n def main_process(self):\n # 任意の日付の投票結果のファイル名の一覧を取得\n result_filenames = [\n blob.name for blob in self.bucket.list_objects(\n os.path.join(\n 'betting_results/', self.race_date\n )\n )]\n\n print(result_filenames)\n\n # 任意の日付の投票結果の Df を全て結合。\n bettings = pd.concat(\n [\n self.bucket.read_csv(filename)\n for filename in result_filenames\n ]\n )\n\n print(bettings)\n\n print(bettings['return'].sum())\n print(bettings['amount'].sum()*100)\n\n # payoff * amount = reutrn\n sum_buy = bettings['amount'].sum()*100 # 購入金額合計\n sum_return = bettings['return'].sum() # 配当金合計\n benefit = sum_return - sum_buy # 純利益\n\n \"\"\"\n 1.place_id,\n 2.bet_type\n それぞれに対する購入金額・配当・純利益の算出\n \"\"\"\n results_place_id = pd.DataFrame(None)\n results_bet_type = pd.DataFrame(None)\n list_place_id = bettings['place_id'].unique().tolist()\n list_bet_type = bettings['bet_type'].unique().tolist()\n list_bet_type = sorted(list_bet_type)\n sumbuylist_place_id = []\n sumbuylist_bet_type = []\n returnlist_place_id = []\n returnlist_bet_type = []\n benefitlist_place_id = []\n benefitlist_bet_type = []\n hitcountlist_place_id = []\n hitcountlist_bet_type = []\n wincountlist_place_id = [] # レースごとの、収支がプラスのカウント。Plusはだせえ。いい名前募集。\n wincountlist_bet_type = [] # レースごとの、収支がプラスのカウント。\n racecountlist_place_id = []\n racecountlist_bet_type = []\n hit_race_list = []\n win_race_list = []\n\n # 1.'place_id'(場所)毎の成績\n for place_id in list_place_id:\n sum_buy = bettings[bettings['place_id'] == place_id]['amount'].sum()*100\n sum_return = bettings[bettings['place_id'] == place_id]['return'].sum()\n benefit = sum_return - sum_buy\n # race_no ごとにグループ分け -> 合計算出\n grouped = 
bettings[bettings['place_id'] == place_id].groupby('race_no').sum()\n \n\n sumbuylist_place_id.append(sum_buy)\n returnlist_place_id.append(sum_return)\n benefitlist_place_id.append(benefit)\n hitcountlist_place_id.append(len(grouped[grouped['return'] > 0]))\n wincountlist_place_id.append(len(grouped[(grouped['return'] - grouped['amount']*100) > 0]))\n racecountlist_place_id.append(len(grouped))\n \n results_place_id['place_id'] = list_place_id\n results_place_id['sum_buy'] = sumbuylist_place_id\n results_place_id['return'] = returnlist_place_id\n results_place_id['benefit'] = benefitlist_place_id\n results_place_id['benefit_rate'] = None\n results_place_id['hit_count'] = hitcountlist_place_id\n results_place_id['win_count'] = wincountlist_place_id\n results_place_id['race_count'] = racecountlist_place_id\n results_place_id = results_place_id.append(results_place_id.sum(), ignore_index=True)\n results_place_id.iloc[len(results_place_id)-1, 0] = 'SUM'\n results_place_id['hit_rate'] = round(results_place_id['hit_count'] / results_place_id['race_count'], 3)\n results_place_id['win_rate'] = round(results_place_id['win_count'] / results_place_id['race_count'], 3)\n results_place_id['benefit_rate'] = results_place_id['return'] / results_place_id['sum_buy']\n\n print(results_place_id)\n\n # 2. Results per 'bet_type' (bet style)\n for bet_type in list_bet_type:\n sum_buy = bettings[bettings['bet_type'] == bet_type]['amount'].sum()*100\n sum_return = bettings[bettings['bet_type'] == bet_type]['return'].sum()\n benefit = sum_return - sum_buy\n\n # Group by bet_type -> compute totals\n grouped = bettings[bettings['bet_type'] == bet_type].groupby(['place_id', 'race_no']).sum().reset_index()\n\n sumbuylist_bet_type.append(sum_buy)\n returnlist_bet_type.append(sum_return)\n benefitlist_bet_type.append(benefit)\n hitcountlist_bet_type.append(len(grouped[grouped['return'] > 0]))\n # hit_count and win_count are taken from the place_id results (handles races where more than one bet has return > 0)\n sum_hit_count = results_place_id[results_place_id['place_id'] == 'SUM']['hit_count'].iloc[-1]\n sum_win_count = results_place_id[results_place_id['place_id'] == 'SUM']['win_count'].iloc[-1]\n sum_race_count = results_place_id[results_place_id['place_id'] == 'SUM']['race_count'].iloc[-1]\n \n\n wincountlist_bet_type.append(len(grouped[(grouped['return'] - grouped['amount']*100) > 0]))\n racecountlist_bet_type.append(len(grouped))\n \n results_bet_type['bet_type'] = list_bet_type\n results_bet_type['sum_buy'] = sumbuylist_bet_type\n results_bet_type['return'] = returnlist_bet_type\n results_bet_type['benefit'] = benefitlist_bet_type\n results_bet_type['benefit_rate'] = None\n results_bet_type['hit_count'] = hitcountlist_bet_type\n results_bet_type['win_count'] = wincountlist_bet_type\n results_bet_type['race_count'] = racecountlist_bet_type\n results_bet_type = results_bet_type.append(results_bet_type.sum(), ignore_index=True)\n results_bet_type.iloc[len(results_bet_type)-1, 0] = 'SUM'\n results_bet_type.loc[results_bet_type['bet_type'] == 'SUM', 'hit_count'] = sum_hit_count\n results_bet_type.loc[results_bet_type['bet_type'] == 'SUM', 'win_count'] = sum_win_count\n results_bet_type.loc[results_bet_type['bet_type'] == 'SUM', 'race_count'] = sum_race_count\n results_bet_type['hit_rate'] = round(results_bet_type['hit_count'] / results_bet_type['race_count'], 3)\n results_bet_type['win_rate'] = round(results_bet_type['win_count'] / results_bet_type['race_count'], 3)\n results_bet_type['benefit_rate'] = results_bet_type['return'] / results_bet_type['sum_buy']\n \n print(results_bet_type)\n \n 
# Upload the results\n path_bettings = 'daily_betting_results/{}/bettings.csv'.format(self.race_date)\n path_place_id = 'daily_betting_results/{}/results_about_place_id.csv'.format(self.race_date)\n path_bet_type = 'daily_betting_results/{}/results_about_bet_type.csv'.format(self.race_date)\n\n self.bucket.upload_csv_from_string(\n bettings.to_csv(encoding=\"utf-8\", index=False),\n path_bettings\n )\n\n self.bucket.upload_csv_from_string(\n results_place_id.to_csv(encoding=\"utf-8\", index=False),\n path_place_id\n )\n\n self.bucket.upload_csv_from_string(\n results_bet_type.to_csv(encoding=\"utf-8\", index=False),\n path_bet_type\n )\n \n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--race_date', type = str, required = True)\n\n args = parser.parse_args()\n\n dbe = DailyBettingEval(args.race_date)\n dbe.main_process()","sub_path":"src/daily_betting_eval.py","file_name":"daily_betting_eval.py","file_ext":"py","file_size_in_byte":7746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"517247391","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass ProductSticker(models.Model):\n _name = 'product.sticker'\n _description = 'Product sticker used on product image'\n\n POSITIONS = [('top-left','Top Left'),('top-right','Top Right'),('bottom-left','Bottom Left'),\n ('bottom-right','Bottom Right')]\n\n name = fields.Char(string='Name', translate=True, required=True)\n sticker_text = fields.Char(string='Sticker Text', translate=True)\n bg_color = fields.Char(string='Background Color',\n help='Here you can specify the HTML color for the background of product label')\n text_color = fields.Char(string='Text Color',\n help='Here you can specify the HTML color for the font color of product label')\n font_size = fields.Integer(string='Font Size', help='For ex. 10')\n top = fields.Integer(string='Top Margin(px)', help='For ex. 10')\n bottom = fields.Integer(string='Bottom Margin(px)', help='For ex. 10')\n left = fields.Integer(string='Left Margin(px)', help='For ex. 10')\n right = fields.Integer(string='Right Margin(px)', help='For ex. 10')\n height = fields.Integer(string='Height(px)', help='For ex. 100')\n width = fields.Integer(string='Width(px)', help='For ex. 100')\n rotate = fields.Integer(string='Rotate', help='For ex. 
-25, 45')\n image = fields.Binary(string='Image')\n cut_corner = fields.Boolean(string='Cut the corner')\n sticker_type = fields.Selection([('image','Image'),('html','HTML')], default='image', required=True)\n shape = fields.Selection([('square','Square'),('circle','Circle'),('rectangle','Rectangle')], default='circle')\n position = fields.Selection(POSITIONS, string='Position', required=True, default='top-left')\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n product_sticker_ids = fields.Many2many('product.sticker', string='product stickers')\n product_rating = fields.Float(string='Product Rating', compute='_compute_product_rating', store=True)\n product_banner = fields.Binary(string='Product Banner')\n product_tags_ids = fields.Many2many('product.tags', string='Product Tags')\n\n @api.depends('message_ids')\n def _compute_product_rating(self):\n for i in self:\n prodRating = round(i.sudo().rating_get_stats().get('avg') / 1 * 100) / 100\n i.product_rating = prodRating\n","sub_path":"atharva_theme_base/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"466728461","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Currency(models.Model):\n currency_id = models.CharField(max_length=100)\n name = models.CharField(max_length=100)\n symbol = models.CharField(max_length=100)\n users = models.ManyToManyField(User)\n\n# mapper:\n def populate_from_dict(self, dict):\n self.currency_id = dict['id']\n self.name = dict['name']\n self.symbol = dict['symbol']\n\n\nclass CurrencyHistory(models.Model):\n available_supply = models.FloatField()\n market_cap_eur = models.FloatField()\n market_cap_usd = models.FloatField()\n max_supply = models.FloatField()\n percent_change_1h = models.FloatField()\n percent_change_7d = models.FloatField()\n percent_change_24h = models.FloatField()\n price_btc = models.FloatField()\n price_eur = models.FloatField()\n price_usd = models.FloatField()\n rank = models.IntegerField()\n total_supply = models.FloatField()\n date = models.IntegerField(default=0)\n currency = models.ForeignKey(\"Currency\", on_delete=models.CASCADE)\n\n def populate_from_dict(self, dict):\n self.available_supply = dict['available_supply'] if dict['available_supply'] is not None and dict['available_supply'] != \"\" else 0\n self.date = dict['last_updated'] if dict['last_updated'] is not None and dict['last_updated'] != \"\" else 0\n self.market_cap_eur = dict['market_cap_eur'] if dict['market_cap_eur'] is not None and dict['market_cap_eur'] != \"\" else 0\n self.market_cap_usd = dict['market_cap_usd'] if dict['market_cap_usd'] is not None and dict['market_cap_usd'] != \"\" else 0\n self.max_supply = dict['max_supply'] if dict['max_supply'] is not None and dict['max_supply'] != \"\" else 0\n self.percent_change_1h = dict['percent_change_1h'] if dict['percent_change_1h'] is not None and dict['percent_change_1h'] != \"\" else 0\n self.percent_change_7d = dict['percent_change_7d'] if dict['percent_change_7d'] is not None and dict['percent_change_7d'] != \"\" else 0\n self.percent_change_24h = dict['percent_change_24h'] if dict['percent_change_24h'] is not None and dict['percent_change_24h'] != \"\" else 0\n self.price_btc = dict['price_btc'] if dict['price_btc'] is not None and dict['price_btc'] != \"\" else 0\n self.price_eur = dict['price_eur'] if dict['price_eur'] is not None and dict['price_eur'] != 
\"\" else 0\n self.price_usd = dict['price_usd'] if dict['price_usd'] is not None and dict['price_usd'] != \"\" else 0\n self.rank = dict['rank']\n self.total_supply = dict['total_supply'] if dict['total_supply'] is not None and dict['total_supply'] != \"\" else 0\n\nclass MarketCap(models.Model):\n total_market_cap_usd = models.BigIntegerField()\n total_24h_volume_usd = models.BigIntegerField()\n bitcoin_percentage_of_market_cap = models.FloatField()\n active_currencies = models.IntegerField()\n active_assets = models.IntegerField()\n active_markets = models.IntegerField()\n last_updated = models.IntegerField()\n total_market_cap_eur = models.BigIntegerField()\n total_24h_volume_eur = models.BigIntegerField()\n\n def populate_from_dict(self, dict):\n self.total_market_cap_usd = dict['total_market_cap_usd']\n self.total_24h_volume_usd = dict['total_24h_volume_usd']\n self.bitcoin_percentage_of_market_cap = dict['bitcoin_percentage_of_market_cap']\n self.active_currencies = dict['active_currencies']\n self.active_assets = dict['active_assets']\n self.active_markets = dict['active_markets']\n self.last_updated = dict['last_updated']\n self.total_market_cap_eur = dict['total_market_cap_eur']\n self.total_24h_volume_eur = dict['total_24h_volume_eur']","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"512170689","text":"import numpy as np\n\n\nclass FuncAnalytics:\n\n def __init__(self, a, b, func, dfunc) -> None:\n super().__init__()\n self.a = a\n self.b = b\n self.func = func\n self.dfunc = dfunc\n\n def set_interval(self, a, b):\n self.a = a\n self.b = b\n\n def get_func_value(self, dx):\n ax = self.a\n bx = self.b\n\n _x = []\n _y = []\n x = ax\n while x < bx:\n _y.append(self.func(x))\n _x.append(x)\n x += dx\n return _x, _y\n\n def passive_algorithm(self, N):\n\n x, y, k = [], [], 0\n delta = (self.b - self.a) / N\n a, b = self.a, self.b\n\n if N % 2 == 0:\n k = N / 2\n else:\n k = (N - 1) / 2\n\n def _get_func_values(ax, bx):\n _x = []\n _y = []\n for i in range(N):\n if N % 2 == 0:\n _x.append(ax + (bx - ax) / (N + 1) * i)\n else:\n tmp = ax + (bx - ax) / (k + 1) * i\n _x.append(tmp - delta)\n _x.append(tmp)\n for i in _x:\n _y.append(self.func(i))\n return _x, _y\n\n LN = 0\n if N % 2 == 0:\n LN = 2 * (self.b - self.a) / (N + 1)\n else:\n LN = 2 * (self.b - self.a) / (k + 1) + delta\n eps = LN / 2\n\n x, y = _get_func_values(self.a, self.b)\n\n min_y = np.min(y)\n j = y.index(min_y)\n L = x[j + 1] - x[j - 1]\n\n exact_min_y = 0\n exact_min_x = 0\n tmp_x, tmp_y = x, y\n while np.fabs(exact_min_y - min_y) > eps:\n tmp_x, tmp_y = _get_func_values(tmp_x[j - 1], tmp_y[j + 1])\n exact_min_y = np.min(tmp_y)\n j = tmp_y.index(exact_min_y)\n exact_min_x = tmp_x[j]\n\n return exact_min_x, exact_min_y\n\n def bisection_algorithm(self, eps):\n\n def _find_min_value(a, b):\n get_middle = lambda left, right: \\\n ((left + right) / 2,\n self.func((left + right) / 2))\n x2, y2 = get_middle(a, b)\n x1, y1 = get_middle(a, x2)\n x3, y3 = get_middle(x2, b)\n x = [a, x1, x2, x3, b]\n y = [self.func(a),\n y1, y2, y3,\n self.func(b)\n ]\n min_y = np.min(y)\n j = y.index(min_y)\n\n return x[j], min_y, x[j - 1], x[j + 1]\n\n min_x, min_y, a, b = _find_min_value(self.a, self.b)\n\n while b - a > 2 * eps:\n min_x, min_y, a, b = _find_min_value(a, b)\n\n return min_x, min_y\n\n def dichotomy_method(self, eps):\n delta = eps / (1 / eps ** 2)\n\n def _find_min_len(a, b):\n 
x = (a + b) / 2\n y1 = self.func(x - delta)\n y2 = self.func(x + delta)\n if y1 <= y2:\n return a, x + delta\n else:\n return x - delta, b\n\n a, b = _find_min_len(self.a, self.b)\n\n while b - a > 2 * eps:\n a, b = _find_min_len(a, b)\n\n return (a + b) / 2, self.func((a + b) / 2)\n\n def fibonacci_method(self, N):\n\n def _get_number_fibonacci(n):\n F = [1, 1]\n for i in range(2, n):\n F.append(F[i - 1] + F[i - 2])\n return F\n\n F = _get_number_fibonacci(N)\n\n x1 = self.a + (self.b - self.a) * (F[N - 3] / F[N - 1])\n x2 = self.a + (self.b - self.a) * (F[N - 2] / F[N - 1])\n\n y1, y2 = self.func(x1), self.func(x2)\n\n a, b = self.a, self.b\n\n for i in range(N - 2):\n if y1 <= y2:\n b, x2, y2 = x2, x1, y1\n x1 = a + b - x2\n y1 = self.func(x1)\n else:\n a, x1, y1 = x1, x2, y2\n x2 = a + b - x1\n y2 = self.func(x2)\n\n if y1 < y2:\n return x1, y1\n else:\n return x2, y2\n\n def tangent_method(self, eps):\n a, b = self.a, self.b\n y1, y2 = self.func(a), self.func(b)\n z1, z2 = self.dfunc(a), self.dfunc(b)\n\n while (b - a) > 2 * eps:\n c = ((b * z2 - a * z1) - (y2 - y1)) / (z2 - z1)\n y, z = self.func(c), self.dfunc(c)\n if z == 0:\n return c, y\n elif z < 0:\n a, y1, z1 = c, y, z\n else:\n b, y2, z2 = c, y, z\n return c, y\n","sub_path":"lab2/unconditional_optimization.py","file_name":"unconditional_optimization.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"7295580","text":"def solver(n):\n for a in range(1,1000):\n for b in range(a,1000):\n c = 1000 - a - b\n if a < b and b < c and a**2 + b**2 == c**2:\n return a*b*c\n\nif __name__ == '__main__':\n hasil = solver(1000)\n print(\"Jawaban : {hasil}\".format(hasil=hasil))","sub_path":"archives/9-special-pythagorean-triplet/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"181084965","text":"import os\n\n#.\n\n# Les tuples sont des séquences, assez semblables aux listes, sauf\n# qu'on ne peut modifier un tuple après qu'il ait été créé. 
Cela signifie\n# qu'on définit le contenu d'un tuple (les objets qu'il doit contenir) lors\n# de sa création, mais qu'on ne peut en ajouter ou en retirer par la suite.\n\n# DEMONSTRATION :\n\nUnoListo = [[1,'a'],[2,'B'],[3,'c'],[4,'D'],[5,'e']]\n\n\nfor nb, malettre in UnoListo:\n print(\"La lettre {} est la {}e lettre de l'alphabet.\".format(malettre, nb))\n\n\n# - Les tuples sont remarquées par la présences, ici de 2 valeurs entre crochets ici.\n# - Mais généralement les tuples sont entre parenthèses et non entre crochets.\n# - On voit ci-dessus qu'il y a 5 tuples.\n# - On a défini dans cet exercice, via la fonction'FORMAT' que la première valeur d'une tuple serait le nombre.\n# et on a défini que la seconde valeur serait les lettres, via la fonction 'FORMAT', donc on peut inverser.\n# - MAIS on ne peut rien modifier dans une tuple une fois remplie !\n\n\n\n# UNE TUPLE SE DEFINIT PAR DES VALEURS ENTRE PARENTHESES,\n# par exemples :\n# tuple_avec_plusieurs_valeurs = (1,2,3)\n\n\nos.system(\"pause\")\n","sub_path":"Objets&Listes/TUPLES-explications.py","file_name":"TUPLES-explications.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"591509588","text":"from unittest.mock import patch\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal, assert_series_equal\n\nfrom evalml.pipelines import TimeSeriesBaselineRegressionPipeline\nfrom evalml.pipelines.time_series_baselines import (\n TimeSeriesBaselineBinaryPipeline,\n TimeSeriesBaselineMulticlassPipeline\n)\n\n\n@pytest.mark.parametrize('X_none', [True, False])\n@pytest.mark.parametrize('gap', [0, 1])\n@pytest.mark.parametrize('pipeline_class', [TimeSeriesBaselineRegressionPipeline,\n TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline])\n@patch(\"evalml.pipelines.TimeSeriesClassificationPipeline._encode_targets\", side_effect=lambda y: y)\n@patch(\"evalml.pipelines.TimeSeriesClassificationPipeline._decode_targets\", side_effect=lambda y: y)\ndef test_time_series_baseline(mock_decode, mock_encode, pipeline_class, gap, X_none, ts_data):\n X, y = ts_data\n\n clf = pipeline_class(parameters={\"pipeline\": {\"gap\": gap, \"max_delay\": 1},\n \"Time Series Baseline Estimator\": {'gap': gap, 'max_delay': 1}})\n expected_y = y.shift(1) if gap == 0 else y\n expected_y = expected_y.reset_index(drop=True)\n if not expected_y.isnull().values.any():\n expected_y = expected_y.astype(\"Int64\")\n if X_none:\n X = None\n clf.fit(X, y)\n assert_series_equal(expected_y, clf.predict(X, y).to_series())\n\n\n@pytest.mark.parametrize('X_none', [True, False])\n@pytest.mark.parametrize('gap', [0, 1])\n@pytest.mark.parametrize('pipeline_class', [TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline])\ndef test_time_series_baseline_predict_proba(pipeline_class, gap, X_none):\n X = pd.DataFrame({\"a\": [4, 5, 6, 7, 8]})\n y = pd.Series([0, 1, 1, 0, 1])\n expected_proba = pd.DataFrame({0: pd.Series([1, 0, 0, 1, 0], dtype=\"float64\"),\n 1: pd.Series([0, 1, 1, 0, 1], dtype=\"float64\")})\n if pipeline_class == TimeSeriesBaselineMulticlassPipeline:\n y = pd.Series([0, 1, 2, 2, 1])\n expected_proba = pd.DataFrame({0: pd.Series([1, 0, 0, 0, 0], dtype=\"float64\"),\n 1: pd.Series([0, 1, 0, 0, 1], dtype=\"float64\"),\n 2: pd.Series([0, 0, 1, 1, 0], dtype=\"float64\")})\n if gap == 0:\n # Shift to pad the first row with Nans\n expected_proba = expected_proba.shift(1)\n\n clf = 
pipeline_class(parameters={\"pipeline\": {\"gap\": gap, \"max_delay\": 1},\n \"Time Series Baseline Estimator\": {'gap': gap, 'max_delay': 1}})\n if X_none:\n X = None\n clf.fit(X, y)\n assert_frame_equal(expected_proba, clf.predict_proba(X, y).to_dataframe())\n\n\n@pytest.mark.parametrize('pipeline_class', [TimeSeriesBaselineRegressionPipeline,\n TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline])\n@pytest.mark.parametrize(\"only_use_y\", [True, False])\n@pytest.mark.parametrize(\"gap,max_delay\", [(0, 0), (1, 0), (0, 2), (1, 1), (1, 2), (2, 2), (7, 3), (2, 4)])\n@patch(\"evalml.pipelines.RegressionPipeline._score_all_objectives\")\n@patch(\"evalml.pipelines.TimeSeriesClassificationPipeline._score_all_objectives\")\n@patch(\"evalml.pipelines.TimeSeriesBinaryClassificationPipeline._score_all_objectives\")\n@patch(\"evalml.pipelines.ClassificationPipeline._encode_targets\", side_effect=lambda y: y)\ndef test_time_series_baseline_score_offset(mock_encode, mock_binary_classification_score, mock_multiclass_classification_score,\n mock_regression_score, gap, max_delay,\n only_use_y, pipeline_class, ts_data):\n X, y = ts_data\n\n expected_target = pd.Series(np.arange(1 + gap, 32), index=pd.date_range(f\"2020-10-01\", f\"2020-10-{31-gap}\"))\n if gap == 0:\n expected_target = expected_target[1:]\n clf = pipeline_class(parameters={\"pipeline\": {\"gap\": gap, \"max_delay\": max_delay},\n \"Time Series Baseline Estimator\": {\"gap\": gap, \"max_delay\": max_delay}})\n mock_score = None\n if pipeline_class == TimeSeriesBaselineRegressionPipeline:\n mock_score = mock_regression_score\n elif pipeline_class == TimeSeriesBaselineBinaryPipeline:\n mock_score = mock_binary_classification_score\n else:\n mock_score = mock_multiclass_classification_score\n if only_use_y:\n clf.fit(None, y)\n clf.score(X=None, y=y, objectives=['MCC Binary'])\n else:\n clf.fit(X, y)\n clf.score(X, y, objectives=['MCC Binary'])\n\n # Verify that NaNs are dropped before passed to objectives\n _, target, preds = mock_score.call_args[0]\n assert not target.isna().any()\n assert not preds.isna().any()\n\n # Target used for scoring matches expected dates\n pd.testing.assert_index_equal(target.index, expected_target.index)\n np.testing.assert_equal(target.values, expected_target.values)\n","sub_path":"evalml/tests/pipeline_tests/test_time_series_baseline_pipeline.py","file_name":"test_time_series_baseline_pipeline.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"89604615","text":"#! 
/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef gauss(x, nu):\n return np.exp(-(x - nu) ** 2)\n\nxs = np.linspace(-4, 12, 500)\nplt.plot(xs, 1 - (gauss(xs, 0) + 0.7 * gauss(xs, 8)))\nplt.xlabel(r'$B_\\mathrm{m}$')\nplt.ylabel(r'Transparenz')\nplt.ylim(0, 1.1)\nplt.xticks([0])\nplt.yticks([1])\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['bottom'].set_color('none')\nax.spines['top'].set_position('zero')\nax.spines['left'].set_position('zero')\nax.xaxis.labelpad = 0.05\nax.yaxis.labelpad = 0.05\nplt.tight_layout()\nplt.savefig('build/plots/transmission.pdf')\nplt.clf()\n","sub_path":"V21_Optisches_pumpen/scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"212840922","text":"\"\"\"\nQuestion 17\nLevel 2\n\nQuestion:\nWrite a program that computes the net amount of a bank account\nbased on a transaction log from console input.\nThe transaction log format is shown as follows:\nD 100\nW 200\n...\nD means deposit while W means withdrawal.\nSuppose the following input is supplied to the program:\nD 300\nD 300\nW 200\nD 100\nThen, the output should be:\n500\n\"\"\"\n\ndef transaction(lst, d):\n\t\n\ttotal = [int(trans[2:]) for trans in lst if trans.startswith(d)]\n\treturn sum(total)\n\nrunning = True\n\ntransactions = []\nwhile running:\n\ttran = input(\"Enter a transaction: \")\n\n\tif tran:\n\t\ttransactions.append(tran)\n\telse:\n\t\tdeposits = transaction(transactions, \"D\")\n\t\twithdraws = transaction(transactions, \"W\")\n\t\tbalance = deposits - withdraws\n\t\tprint(balance)\n\t\tbreak","sub_path":"level17.py","file_name":"level17.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"411320150","text":"#Creates a UVJ diagram split by good, low, and bad measurements for specified lines\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii\nimport sys, os, string\nimport pandas as pd\nfrom astropy.io import fits\nimport collections\nfrom astropy.cosmology import WMAP9 as cosmo\nfrom astropy.stats import biweight_midvariance\nfrom scipy.optimize import curve_fit\n#import lnr\n\n#Folder to save the figures\nfigout = '/Users/galaxies-air/COSMOS/Images/'\n\n#The location with the file for all of our data\nfluxdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/lineflux_red.txt'\n\n#Location of the equivalent width data\n#ewdata = '/Users/galaxies-air/COSMOS/COSMOSData/lineew.txt'\n#Read in the ew of the lines \n#ew_df = ascii.read(ewdata).to_pandas()\n\n#The location to store the scale and its stddev of each line\nqualdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/dataqual.txt'\n#Read in the scale of the lines \ndataqual = ascii.read(qualdatapath).to_pandas()\nd = {'True': True, 'False': False}\n\n#File with the error array\nerrdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/errs.txt'\n#Read in the scale of the galaxies-air \nerr_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()\n\n\n#File with the error array\nerrreddatapath = '/Users/galaxies-air/COSMOS/COSMOSData/errs_red.txt'\n#Read in the scale of the lines \nerr_dfred = ascii.read(errreddatapath,data_start=1,header_start=0,format='csv').to_pandas()\n\n#Read the datafile:\nfluxdata = ascii.read(fluxdatapath).to_pandas()\n\n#File with the structural properties\nspropdatapath = 
'/Users/galaxies-air/COSMOS/COSMOSData/struct_prop.txt'\n#Read in the scale of the lines \nsprop_df = ascii.read(spropdatapath).to_pandas()\nsprop_df = sprop_df.rename(columns={'id':'OBJID'})\nfluxdata = pd.merge(fluxdata,sprop_df)\n\n#The location with the file for the filter data\nfiltdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/all_c_hasinger.txt'\n#Read in the data\nfiltdata = ascii.read(filtdatapath).to_pandas()\ncordata = filtdata[['id','Ks','eKs','Ks_tot','eKs_tot']]\ncordata = cordata.rename(columns={'id':'OBJID'})\n\nfluxdata = pd.merge(fluxdata,cordata,on='OBJID',how='inner')\nfluxdata = fluxdata.drop_duplicates()\nfluxdata = fluxdata.reset_index()\n\n#Read in the sfr file\nsfdata = '/Users/galaxies-air/COSMOS/COSMOSData/sfrs.txt'\nsfr_df = ascii.read(sfdata).to_pandas()\nfluxdata = pd.merge(fluxdata,sfr_df,on='fluxfile')\n\n#Fontsizes for plotting\naxisfont = 24\nticksize = 18\nticks = 8\ntitlefont = 24\nlegendfont = 16\ntextfont = 16\n\n#Division function\ndef divz(X,Y):\n return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)\n\n\nHbHg = 0\nHaHg = 0\n\nif HbHg: lines = ['4861']\nelif HaHg: lines = ['6563_fix']\nelse: lines=['6563_fix']\n\n'''\n#Set low objects to an upper limit\nfor line in lines:\n for i in range(0,len(fluxdata)):\n if (fluxdata.iloc[i][line+'_flux'] == 0) and (dataqual[line+'_low']=='True'):\n print 'Fixing'\n fluxdata.at[line+'_flux',i] = err_df.iloc[i][line+'_err']\n'''\n#Filter the data\ngoodlines = [dataqual[line+'_good'].map(d) for line in lines]\n#Needs to be good in all lines to be good\nallgood = np.logical_and.reduce(goodlines)\n#Needs to be bad in any line to be bad\nbadlines = [dataqual[line+'_bad'].map(d) for line in lines]\nbaddata = np.logical_or.reduce(badlines)\nlowlines = [dataqual[line+'_low'].map(d) for line in lines]\n#Needs to be low in any line to be low, and also not bad in a line\nsomelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))\n\n\n\ndupids = [item for item, count in collections.Counter(fluxdata[allgood]['OBJID']).items() if count > 1]\ndupobjsarr = []\nweight_df = pd.DataFrame()\nweight_df['fluxfile'] = fluxdata.fluxfile\nfor i in range(len(fluxdata)):\n weight_df.at[i,'Weight'] = 1.0\nfor obj in dupids:\n dupobjsarr.append(fluxdata[fluxdata.OBJID == obj].OBJID)\nfor i in range(0,len(dupids)):\n ndup = len(dupobjsarr[i])\n for j in range(0,ndup):\n weight_df.at[dupobjsarr[i].index[j],'Weight'] = 1.0/ndup\nfluxdata = pd.merge(fluxdata,weight_df,on='fluxfile')\n\n\n\ncombinemass = 1\n\npaper = 0\nshowkel = 0\nshows = 0\ncutofflow = 0\nshowmeds = 0\n\n\nfiltSFR = fluxdata['SFR']<10000000\n\nms=12\nlwbw=2\n\nnotbad = np.logical_not(baddata)\n\nssfr = 1\n\nif not paper:\n fig = plt.figure(figsize = (19.5,7))\n ax = fig.add_axes((0.15,0.15,0.315,0.8))\nelse: fig,ax = plt.subplots(figsize = (8,7))\nc=0\nmsbw = 12\nlwbw = 3\ncolormed2 = 'black'\n \n\n \n\nkey = ''\nif HbHg: key = '_HbHg'\nelif HaHg: key = '_HaHg'\n \nfor w in range(0,2):\n #llim1 = (np.log10(fluxdata['sSFR'+key]) > -10.7)\n #if cutofflow: ax.plot((0,20),(-10.7,-10.7),ls='--',color='black',label='sSFR cutoff for completeness')\n if cutofflow: ax.axhspan(-10.7,-15, color='indianred', alpha=0.1,label='Incomplete, discarding for analysis')\n if c in [0,3]:\n col = 'good'\n filt = allgood\n color='cornflowerblue'\n mark2 = 'o'\n label2 = 'Significant H$\\\\alpha$ detection'\n elif c in [1,4]:\n col = 'low'\n filt = somelow\n color='cornflowerblue'\n mark2 = 'v'\n label2 = '5$\\\\sigma$ Upper limit on SSFR'\n else:\n col = 'bad'\n filt = baddata\n 
color='red'\n filt = np.logical_and(filt,filtSFR)\n \n xdata = fluxdata[filt]['re_kpc']\n ydata = np.log10(fluxdata[filt]['SFR'+key])\n mdata = fluxdata[filt]['LMASS']\n ax.set_xlabel('log(Stellar Mass) (M$_\\odot)$',fontsize = axisfont)\n ax.set_ylabel('log(SFR) (M$_{sun})$/yr)',fontsize = axisfont)\n if ssfr:\n ydata = np.log10(fluxdata[filt]['sSFR'+key])\n #Upper error\n yerru = np.log10(fluxdata[filt]['sSFR']+fluxdata[filt]['ssfr_err_u'])-np.log10(fluxdata[filt]['sSFR'])\n #If lower error is 0 or negative, set it to be very large\n ldiff = fluxdata[filt]['sSFR']-fluxdata[filt]['ssfr_err_d']\n ldiff.loc[ldiff<=0] = 10000\n yerrd = np.abs(np.log10(fluxdata[filt]['sSFR'])-np.log10(ldiff))\n ax.set_ylabel('log(sSFR) (yr$^{-1}$)',fontsize = axisfont)\n kelmodelcolor = 'orange'\n kelmodelw = 4\n kelz = 100\n if (c==0 and showkel): pkel = ax.plot((-100,100),(-9.46,-9.46),color=kelmodelcolor,ls='-',label='Model (Kelson 2014)',zorder=kelz,lw=kelmodelw)\n if (c==0 and showkel): ax.plot((-100,100),(-9.86,-9.86),color=kelmodelcolor,ls='--',label=None,zorder=kelz,lw=kelmodelw)\n if (c==0 and showkel): ax.plot((-100,100),(-9.06,-9.06),color=kelmodelcolor,ls='--',label=None,zorder=kelz,lw=kelmodelw)\n smodelcolor = 'orange'\n smodelw = 4\n sz = 100\n x2 = np.arange(9,9.4,0.01)\n x2b = np.arange(9.4,10,0.01)\n y2 = -0.17*(x2-10)-9.65\n y2b = -0.53*(x2b-10)-9.87\n if (c==0 and shows): psal = ax.plot(x2,y2,color=smodelcolor,ls='-',label='Fit to SDSS z<0.1 Galaxies (Salim+ 2007)',zorder=sz,lw=smodelw)\n if (c==0 and shows): ax.plot(x2b,y2b,color=smodelcolor,ls='-',label=None,zorder=sz,lw=smodelw)\n \n fluxdata['lsSFR'] = np.log10(fluxdata['sSFR'+key])\n mr1 = (fluxdata[notbad]['LMASS']<9.25)\n mr2 = np.logical_and(fluxdata[notbad]['LMASS']>=9.25,fluxdata[notbad]['LMASS']<9.5)\n mr3 = np.logical_and(fluxdata[notbad]['LMASS']>=9.5,fluxdata[notbad]['LMASS']<9.75)\n mr4 = (fluxdata[notbad]['LMASS']>=9.75)\n mrs = [mr1,mr2,mr3,mr4]\n if cutofflow:\n llim = (np.log10(fluxdata[notbad]['sSFR']) > -10.7)\n mrs = [np.logical_and(i,llim) for i in mrs]\n\n def getWmed(fluxdata, mr):\n sflux = fluxdata[notbad][mr].sort_values('lsSFR')\n cumsum = sflux.Weight.cumsum()\n cutoff = sflux.Weight.sum()/2.0\n median = sflux.lsSFR[cumsum>=cutoff].iloc[0]\n return median\n\n def geteWmed(fluxdata, mr):\n sflux = fluxdata.sort_values('sSFR')\n cumsum = sflux.Weight.cumsum()\n cutoff = sflux.Weight.sum()/2.0\n median = sflux.sSFR[cumsum>=cutoff].iloc[0]\n fluxdata['absSFR']=np.abs(fluxdata['sSFR']-median)\n sflux = fluxdata[notbad][mr].sort_values('absSFR')\n cumsum = sflux.Weight.cumsum()\n cutoff = sflux.Weight.sum()/2.0\n median = sflux.absSFR[cumsum>=cutoff].iloc[0]\n return median\n \n \n \n meds = np.array([getWmed(fluxdata,i) for i in mrs])\n emeds = 1.49*np.array([geteWmed(fluxdata,i) for i in mrs])\n emeds = (emeds/np.median(10**fluxdata['lsSFR'][notbad]))/2.303\n bins = np.array([9.125,9.375,9.625,9.875])\n #bins=(np.arange(1,17,2)/16.0)+9\n msbw = 12\n lwbw = 3\n colormed2 = 'black'\n if c==0:\n if showmeds: pmed = ax.errorbar(bins,meds,yerr=emeds,marker='o',ms=msbw,lw=lwbw,ls='None',zorder=1000,markerfacecolor='None', markeredgecolor=colormed2,mew=3,ecolor=colormed2,label='Median in bin, log(sSFR)>-10.7')\n def linefit(x,m,b):\n y=m*x+b\n return y\n coeff,pcov = curve_fit(linefit,bins,meds,sigma=np.array(emeds)/100)\n perr = np.sqrt(np.diag(pcov))\n sbins = np.array([8.5,10.5])\n if showmeds: ax.plot(sbins,linefit(sbins,coeff[0],coeff[1]),color='red',lw=4,ls='-',label='Fit to median',zorder=4)\n\n\n\n\n \n\n if 
c==0:\n #pcir = ax.errorbar(ydata,xdata,xerr=np.array([yerrd,yerru]),color=color,marker=mark2,ms=4,lw=0.5,ls='None',zorder=10,label='Significant H$\\\\alpha$ detection')\n errfilt = yerru<0.1\n pcirdark = ax.scatter(mdata[errfilt],xdata[errfilt],c=ydata[errfilt],label='Significant H$\\\\alpha$ detection (error <0.1 dex)')\n else:\n #ptri = ax.plot(ydata,xdata,color=color,marker=mark2,mfc='None',ms=6,lw=0.5,ls='None',zorder=10,label=label2)\n #if HbHg: a = ax.plot((0,0),(0,0),color=color,marker='o',ms=4,lw=0.5,ls='None',zorder=1,label='Significant H$\\\\beta$ detection')\n #else: a = ax.plot((0,0),(0,0),color=color,marker='o',ms=4,lw=0.5,ls='None',zorder=1,label='Significant H$\\\\alpha$ detection')\n #b = ax.plot((0,0),(0,0),color=color,marker=mark2,mfc='None',ms=6,lw=0.5,ls='None',zorder=2,label=label2)\n #if showmeds: c1 = ax.errorbar(0,0,yerr=0.4,marker='o',ms=msbw,lw=lwbw,ls='None',zorder=3,markerfacecolor='None', markeredgecolor=colormed2,mew=3,ecolor=colormed2,label='Median in bin, log(sSFR)>-10.7')\n #if showkel: d = ax.plot((-100,0),(-9.46,-9.46),color=kelmodelcolor,ls='-',label='Model (Kelson 2014)',zorder=4,lw=kelmodelw)\n #if shows: e = ax.plot((0,0),(1,1),color=smodelcolor,ls='-',label='Empirical Fit (Salim 2007)',zorder=5,lw=smodelw)\n #if showmeds: f = ax.plot((0,0),(1,1),color='red',lw=4,ls='-',label='Fit to median',zorder=6)\n handles, labels = ax.get_legend_handles_labels()\n if not paper:\n '''\n hand = [a[0],b[0]]\n if showmeds: hand.append(c1[0])\n if showmeds: hand.append(f[0])\n if showkel: hand.append(d[0])\n if shows: hand.append(e[0])\n '''\n if (showmeds) and (showkel or shows): hand = [handles[-1],handles[-2],handles[2],handles[3],handles[5],handles[1],handles[0]]\n elif (showmeds) and (cutofflow): hand = [handles[-1],handles[-2],handles[1],handles[3],handles[4],handles[0]]\n elif (cutofflow): hand = [handles[-1],handles[-2],handles[0],handles[1]]\n #else: hand = [handles[-1],handles[-2],handles[0]]\n #ax.legend(handles=hand,fontsize=axisfont-2,bbox_to_anchor=(1.01, 0.5))\n else:\n pass\n #hand = [handles[-1],handles[-2],handles[2],handles[3],handles[5],handles[1],handles[0]]\n #ax.legend(handles=hand,fontsize=legendfont-6,loc=1,frameon=False)\n ax.tick_params(labelsize = ticksize, size=ticks)\n ax.set_xlim(8.95,10.05)\n ax.set_ylim(0,12)\n \n if ssfr:\n if not paper:\n if HbHg: ax.set_ylim(-13,-6)\n else:\n ax.set_xlim(8.95,10.05)\n ax.set_ylim(0,12)\n\n else:\n ax.set_xlim(8.95,10.05)\n ax.set_ylim(0,12)\n c=c+1\n\nfig.tight_layout()\nif ssfr:\n if HbHg: fig.savefig(figout + 'sSFR_Mass_HbHg.pdf')\n elif HaHg: fig.savefig(figout + 'sSFR_Mass_HaHg.pdf')\n else: fig.savefig(figout + 're_lmass_V2.pdf')\n \nelse: fig.savefig(figout + 'SFR_Mass.pdf')\nplt.close(fig)\n","sub_path":"PlotCodes/Plot_Re_lmass_V2.py","file_name":"Plot_Re_lmass_V2.py","file_ext":"py","file_size_in_byte":12389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"418757449","text":"\"\"\"\n.. module:: commands.check_tickets\n.. synopsis: check rt tickets and notifies owners that their reaction is needed\n.. 
author:: Nikita Borisenko ,\n Ivan Lisitsyn \n\"\"\"\nimport sys\nimport time\nimport getpass\nimport argparse\nimport operator\nimport requests\n\nimport schedule\n\nfrom bs4 import BeautifulSoup\nfrom collections import namedtuple\nfrom jinja2 import Template\nfrom slacker import Slacker\n\nfrom control import helpers\n\n__name__ = 'review'\n__description__ = (\n \"This script is designed to notify team members about pending \"\n \"reviews in Slack. It fetches all active reviews from Crucible \"\n \"along with corresponding reviewers and depending on the command \"\n \"line options either sends direct messages in Slack or notifies \"\n \"the entire channel with a status message listing all outstanding \"\n \"reviews. IMPORTANT: To authenticate in Crucible we need to \"\n \"use LDAP creadentials of some real IPONWEB user. To make it \"\n \"safe we don't store them anywhere in the code or in the \"\n \"environment variables but require a user to manually provide \"\n \"then upon the script start thus making it tough to run this \"\n \"script from cron.\"\n)\n\nSLACK_BOT_USER = 'Code Review'\nDOMAIN = 'https://crucible.iponweb.net/rest/api/1.0/rest-service/reviews-v1'\nREVIEWS_URL = '%s/filter?project={0}' % DOMAIN\nREVIEWERS_URL = '%s/{0}/reviewers/' % DOMAIN\nCOMMENTS_URL = '%s/{0}/comments/' % DOMAIN\n\nINACTIVE_STATES = [\n 'Abandoned',\n 'Closed',\n 'Dead',\n 'Draft',\n]\n\nUSER_NOTIFICATION_TEMPLATE = \"\"\"Dear {{ user.fullname }},\n\nSome of our teammates are blocked by outstanding code reviews.\nPlease complete the following reviews as soon as you can:\n{% if reviews['reviews'] %}\n\n Reviews:{% for review in reviews['reviews'] %}\n {{ review.url }}{% endfor %}{% endif %}{% if reviews['corrections'] %}\n\n Corrections:{% for review in reviews['corrections'] %}\n {{ review.url }}{% endfor %}{% endif %}\n\"\"\"\n\nSTATUS_TEMPLATE = \"\"\"Below is the list of outstanding reviews:\n{% if user_reviews %}\n{% for user in user_reviews|sort %}\n {{ user.fullname }}:{% if user_reviews[user]['reviews'] %}\n Reviews:{% for review in user_reviews[user]['reviews'] %}\n {{ review.url }}{% endfor %}{% endif %}\n {% if user_reviews[user]['corrections'] %}\n Corrections:{% for review in user_reviews[user]['corrections'] %}\n {{ review.url }}{% endfor %}{% endif %}\n{% endfor %}{% else %} No pending reviews. 
Good job!{% endif %}\n\"\"\"\nUser = namedtuple('User', 'username, fullname')\n\n\nclass Review(object):\n def __init__(self, id, state, author):\n self.id = id\n self.state = state\n self.author = author\n self.url = 'https://crucible.iponweb.net/cru/' + id\n\n\nclass Script(object):\n def __init__(self, args):\n self._args = args\n self._crucible_project = args.crucible_project\n if args.ldap_username:\n self._crucible_username = args.ldap_username\n else:\n self._crucible_username = input('LDAP Username: ')\n if args.ldap_password:\n self._crucible_password = args.ldap_password\n else:\n self._crucible_password = getpass.getpass('LDAP Password: ')\n self._slack = Slacker(args.slack_bot_token)\n\n def get_reviewers(self, review_id):\n self._log('Fetching reviewers for {0}...'.format(review_id))\n soup = self._get(REVIEWERS_URL.format(review_id))\n result = []\n for r in soup.reviewers:\n if r.completed.text == 'true':\n continue\n user = User(r.username.text, r.displayname.text)\n result.append(user)\n return result\n\n def get_unanswered_comments(self, review):\n self._log('Fetching unanswered comments for {0}...'.format(review.id))\n soup = self._get(COMMENTS_URL.format(review.id))\n result = []\n for c in soup.comments:\n if c.user.username.text != review.author.username:\n if not c.replies.text:\n result.append(c)\n return result\n\n def get_active_reviews(self):\n self._log('Fetching active reviews...')\n soup = self._get(REVIEWS_URL.format(self._args.crucible_project))\n result = []\n for e in soup.reviews:\n if e.state.text in INACTIVE_STATES:\n continue\n author = User(e.creator.username.text, e.creator.displayname.text)\n review = Review(e.id.text, e.state.text, author)\n result.append(review)\n sorted_result = sorted(result, key=operator.attrgetter('url'))\n return sorted_result\n\n def get_user_reviews(self, reviews):\n self._log('Loading reviewers info...')\n result = {}\n\n def _touch_user(result, user):\n if user not in result:\n result[user] = {\n 'reviews': [],\n 'corrections': [],\n }\n\n for review in reviews:\n unanswered_comments = self.get_unanswered_comments(review)\n if unanswered_comments:\n _touch_user(result, review.author)\n result[review.author]['corrections'].append(review)\n\n reviewers = self.get_reviewers(review.id)\n for user in reviewers:\n _touch_user(result, user)\n result[user]['reviews'].append(review)\n\n return result\n\n def run(self):\n if self._args.schedule:\n time_points = self._args.schedule.split(',')\n\n for t in time_points:\n schedule.every().monday.at(t).do(self._real_run)\n schedule.every().tuesday.at(t).do(self._real_run)\n schedule.every().wednesday.at(t).do(self._real_run)\n schedule.every().thursday.at(t).do(self._real_run)\n schedule.every().friday.at(t).do(self._real_run)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n else:\n self._real_run()\n\n def _real_run(self):\n reviews = self.get_active_reviews()\n user_reviews = self.get_user_reviews(reviews)\n\n user_template = Template(USER_NOTIFICATION_TEMPLATE)\n\n # Notify all users via direct messages:\n if self._args.notify_all:\n for user in user_reviews:\n user_message = user_template.render(\n user=user, reviews=user_reviews[user]\n )\n self._send('@' + user.username, user_message)\n\n # Notify specific user:\n if self._args.user is not None:\n for user in user_reviews:\n if user.username != self._args.user:\n continue\n user_message = user_template.render(\n user=user, reviews=user_reviews[user]\n )\n self._send('@' + user.username, user_message)\n\n # Send review 
status to channel:\n if self._args.notify_channel:\n status_template = Template(STATUS_TEMPLATE)\n status_message = status_template.render(user_reviews=user_reviews)\n channel_name = self._args.notify_channel\n if not channel_name.startswith('#'):\n channel_name = '#' + channel_name\n self._send(channel_name, status_message)\n\n def _log(self, message):\n if self._args.quiet:\n return\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n print('[{0}] {1}'.format(timestamp, message))\n\n def _debug(self, message):\n if self._args.debug:\n self._log(message)\n\n def _get(self, url):\n self._debug('GET {0}'.format(url))\n r = requests.get(\n url,\n verify=False,\n auth=(self._crucible_username, self._crucible_password)\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n return soup\n\n def _send(self, destination, message):\n self._log('Sending message to {0}...'.format(destination))\n if self._args.dry_run:\n print(\"-\" * 43)\n print(message)\n print(\"-\" * 43)\n else:\n self._slack.chat.post_message(destination, message, SLACK_BOT_USER)\n\n\ndef register(parser=None):\n parser = parser or argparse.ArgumentParser(description=__description__,\n prog=__name__)\n\n parser.add_argument(\n '-a', '--notify-all', action='store_true',\n default=False,\n help='Notify all users individually via direct messages in Slack.'\n )\n parser.add_argument(\n '-c', '--notify-channel',\n help='Post pending reviews to specific Slack channel.')\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='Print debug information to console.')\n parser.add_argument(\n '-n', '--dry-run', action='store_true',\n default=False,\n help=\"Don't send messages in Slack. Print everything \"\n \"to console instead.\"\n )\n parser.add_argument('-p', '--crucible-project', default='U-WORKFLOW',\n help='Crucible project name, e.g. U-SERVER.')\n parser.add_argument('-q', '--quiet', action='store_true', default=False,\n help=\"Don't print any run time info to console.\")\n parser.add_argument(\n '-s', '--schedule',\n help='Notify on weekdays at specific time, e.g. 
-s 12:00,18:00.'\n )\n parser.add_argument('-t', '--slack-bot-token',\n help='Token to be used to authenticate in Slack.',\n required=True)\n parser.add_argument('-u', '--user', help='Notify specific user in Slack.')\n parser.add_argument('-U', '--ldap-user', dest='ldap_username',\n help='ldap username', required=False, default='')\n parser.add_argument('-P', '--ldap-password', dest='ldap_password',\n help='ldap password', required=False, default='')\n\n return parser\n\n\ndef invoke(opts):\n helpers.disable_requests_insecure_warnings()\n script = Script(opts)\n try:\n script.run()\n except KeyboardInterrupt:\n print(\"\\n\\nCaught Ctrl+C, exiting ...\\n\")\n\n\ndef execute():\n parser = register()\n sys.exit(invoke(parser.parse_args()))\n","sub_path":"control/control/commands/plugins/slack/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":10312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"306042354","text":"from typing import Literal\nfrom argparse import BooleanOptionalAction\n\nfrom clavier import log as logging, CFG, sh\n\n\nLOG = logging.getLogger(__name__)\nDEFAULT_ARCH = \"arm32v7\"\n\nTArch = Literal[\"arm32v7\", \"amd64\", \"arm64v8\"]\n\ndef add_to(subparsers):\n parser = subparsers.add_parser(\n \"build\",\n target=run,\n help=\"Build the client\",\n )\n\n parser.add_argument(\n \"-a\",\n \"--arch\",\n choices=TArch.__args__,\n default=DEFAULT_ARCH,\n help=\"Architecture to build for\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--static\",\n action=BooleanOptionalAction,\n default=False,\n help=\"Do a static build (may not work?)\",\n )\n\n parser.add_argument(\n \"-x\",\n \"--exe-only\",\n action=BooleanOptionalAction,\n default=False,\n help=\"Only copy the resulting binary over from the build container\",\n )\n\n parser.add_argument(\n \"-p\",\n \"--plain\",\n action=BooleanOptionalAction,\n default=False,\n help=\"Pass `--progress plain` to `docker build` (real Docker only!)\",\n )\n\ndef run(\n arch: str = DEFAULT_ARCH,\n static: bool = False,\n exe_only: bool = False,\n plain: bool = False\n):\n tag = f\"genie-builder:{arch}\"\n\n opts = {\n \"build-arg\": [\n f\"ARCH={arch}/\", # TODO Why is this `/` added here?\n f\"STATIC={int(static)}\"\n ],\n \"tag\": tag,\n \"file\": CFG.genie_client_cpp.paths.scripts.dockerfile,\n }\n\n if plain:\n opts[\"progress\"] = \"plain\"\n\n sh.run(\n \"docker\",\n \"build\",\n opts,\n \".\",\n chdir=CFG.genie_client_cpp.paths.repo,\n log=LOG,\n rel_paths=True,\n opts_style=\" \",\n )\n\n if exe_only:\n script = \"/src/scripts/binonly.sh\"\n else:\n script = \"/src/scripts/blob.sh\"\n\n sh.run(\n \"docker\",\n \"run\",\n {\n \"rm\": True,\n \"volume\": f\"{CFG.genie_client_cpp.paths.out.root}:/out\",\n \"security-opt\": \"label=disable\",\n \"env\": f\"ARCH={arch}\",\n },\n tag,\n script,\n chdir=CFG.genie_client_cpp.paths.repo,\n log=LOG,\n rel_paths=True,\n opts_style=\" \",\n )\n","sub_path":"cli/genie_client_cpp/cmd/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"135847756","text":"# -*- coding: utf-8 -*-\nimport pprint\nfrom django.contrib.auth.decorators import user_passes_test, login_required\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import PermissionDenied\nimport json\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse\nfrom 
django.shortcuts import render_to_response, get_object_or_404\nfrom django.template.context import RequestContext\nfrom issues.forms import FileForm\nfrom issues.models import Issue, Event\nfrom issues.model_issue_field import IssueField\n\n\ndef user_is_teacher_or_staff(user, issue):\n if user.is_staff:\n return True\n if issue.task.course.user_is_teacher(user):\n return True\n return False\n\ndef user_can_read(user, issue):\n if user.is_staff:\n return True\n if user == issue.student:\n return True\n if issue.task.course.user_is_teacher(user):\n return True\n\n return False\n\n\ndef prepare_info_fields(info_fields, request, issue):\n user = request.user\n for field in info_fields:\n field.editable = field.can_edit(user, issue)\n if field.is_visible():\n field.repr = issue.get_field_repr(field)\n\n field.value = issue.get_field_value_for_form(field)\n\n data = { field.name : field.value }\n field.form = field.get_form(request, issue, data)\n\n@login_required\ndef issue_page(request, issue_id):\n issue = get_object_or_404(Issue, id=issue_id)\n if not user_can_read(request.user, issue):\n raise PermissionDenied\n\n issue_fields = issue.task.course.issue_fields.all()\n\n if request.method == 'POST':\n form_name = request.POST['form_name']\n\n for field in issue_fields:\n if form_name == u'{0}_form'.format(field.name):\n form = field.get_form(request, issue)\n\n if form.is_valid():\n value = form.cleaned_data[field.name]\n\n if field.name in ['mark','status', 'responsible_name', 'followers_names']:\n if not user_is_teacher_or_staff(request.user, issue):\n raise PermissionDenied\n \n if 'Me' in request.POST:\n if field.name == 'responsible_name':\n value = request.user\n else:\n if request.user not in value:\n value.append(request.user)\n if 'Accepted' in request.POST:\n issue.set_byname('status', 'accepted')\n\n if field.name == 'comment':\n value = {\n 'comment': value,\n 'files': request.FILES.getlist('files')\n }\n if 'need_info' in request.POST:\n issue.set_byname('status', 'need_info')\n\n issue.set_field(field, value, request.user)\n return HttpResponseRedirect('')\n\n prepare_info_fields(issue_fields, request, issue)\n\n context = {\n 'issue': issue,\n 'issue_fields': issue_fields,\n 'course': issue.task.course,\n 'events_to_show': 7,\n 'teacher_or_staff': user_is_teacher_or_staff(request.user, issue),\n }\n\n return render_to_response('issues/issue.html', context, context_instance=RequestContext(request))\n\n\n@login_required\ndef get_or_create(request, task_id, student_id):\n #if not request.is_ajax():\n # return HttpResponseForbidden()\n\n issue, created = Issue.objects.get_or_create(task_id=task_id, student_id=student_id)\n\n data = {\n 'issue_url': issue.get_absolute_url(),\n }\n\n return HttpResponseRedirect(\"/issue/\"+str(issue.id))#(json.dumps(data), content_type='application/json')\n","sub_path":"anytask/issues/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"333326905","text":"import pandas as pd\nimport us\n\nfrom can_tools.scrapers.base import CMU\nfrom can_tools.scrapers.official.base import FederalDashboard\n\n\ndef _find_fips(abbr):\n return int(us.states.lookup(abbr).fips)\n\n\nclass CovidTrackingProjectDemographics(FederalDashboard):\n provider: str = \"ctp\"\n location_type: str = \"state\"\n has_location: bool = True\n source: str = \"https://covidtracking.com/race/dashboard\"\n\n def fetch(self):\n url = (\n 
\"https://docs.google.com/spreadsheets/d/e/\"\n \"2PACX-1vS8SzaERcKJOD_EzrtCDK1dX1zkoMochlA9iHoHg_RSw3V8bkpfk1mpw4pfL5RdtSOyx_oScsUtyXyk\"\n \"/pub?gid=43720681&single=true&output=csv\"\n )\n return pd.read_csv(url)\n\n def normalize(self, data: pd.DataFrame) -> pd.DataFrame:\n def _cases(race, ethnicity=\"all\"):\n return CMU(\n category=\"cases\",\n measurement=\"cumulative\",\n unit=\"people\",\n race=race,\n ethnicity=ethnicity,\n )\n\n def _deaths(race, ethnicity=\"all\"):\n return CMU(\n category=\"deaths\",\n measurement=\"cumulative\",\n unit=\"people\",\n race=race,\n ethnicity=ethnicity,\n )\n\n def _hosp(race, ethnicity=\"all\"):\n return CMU(\n category=\"hospital_beds_in_use_covid\",\n measurement=\"current\",\n unit=\"beds\",\n race=race,\n ethnicity=ethnicity,\n )\n\n column_map = {\n \"Cases_Total\": _cases(\"all\"),\n \"Cases_White\": _cases(\"white\"),\n \"Cases_Black\": _cases(\"black\"),\n \"Cases_LatinX\": _cases(\"latinx\"),\n \"Cases_Asian\": _cases(\"asian\"),\n \"Cases_AIAN\": _cases(\"ai_an\"),\n \"Cases_NHPI\": _cases(\"pacific_islander\"),\n \"Cases_Multiracial\": _cases(\"multiple_other\"),\n \"Cases_Other\": _cases(\"other\"),\n \"Cases_Unknown\": _cases(\"unknown\"),\n \"Cases_Ethnicity_Hispanic\": _cases(\"all\", \"hispanic\"),\n \"Cases_Ethnicity_NonHispanic\": _cases(\"all\", \"non-hispanic\"),\n \"Cases_Ethnicity_Unknown\": _cases(\"all\", \"unknown\"),\n \"Deaths_Total\": _deaths(\"all\"),\n \"Deaths_White\": _deaths(\"white\"),\n \"Deaths_Black\": _deaths(\"black\"),\n \"Deaths_LatinX\": _deaths(\"latinx\"),\n \"Deaths_Asian\": _deaths(\"asian\"),\n \"Deaths_AIAN\": _deaths(\"ai_an\"),\n \"Deaths_NHPI\": _deaths(\"pacific_islander\"),\n \"Deaths_Multiracial\": _deaths(\"multiple_other\"),\n \"Deaths_Other\": _deaths(\"other\"),\n \"Deaths_Unknown\": _deaths(\"unknown\"),\n \"Deaths_Ethnicity_Hispanic\": _deaths(\"all\", \"hispanic\"),\n \"Deaths_Ethnicity_NonHispanic\": _deaths(\"all\", \"non-hispanic\"),\n \"Deaths_Ethnicity_Unknown\": _deaths(\"all\", \"unknown\"),\n \"Hosp_Total\": _hosp(\"all\"),\n \"Hosp_White\": _hosp(\"white\"),\n \"Hosp_Black\": _hosp(\"black\"),\n \"Hosp_LatinX\": _hosp(\"latinx\"),\n \"Hosp_Asian\": _hosp(\"asian\"),\n \"Hosp_AIAN\": _hosp(\"ai_an\"),\n \"Hosp_NHPI\": _hosp(\"pacific_islander\"),\n \"Hosp_Multiracial\": _hosp(\"multiple_other\"),\n \"Hosp_Other\": _hosp(\"other\"),\n \"Hosp_Unknown\": _hosp(\"unknown\"),\n \"Hosp_Ethnicity_Hispanic\": _hosp(\"all\", \"hispanic\"),\n \"Hosp_Ethnicity_NonHispanic\": _hosp(\"all\", \"non-hispanic\"),\n \"Hosp_Ethnicity_Unknown\": _hosp(\"all\", \"unknown\"),\n # \"Tests_Total\",\n # \"Tests_White\",\n # \"Tests_Black\",\n # \"Tests_LatinX\",\n # \"Tests_Asian\",\n # \"Tests_AIAN\",\n # \"Tests_NHPI\",\n # \"Tests_Multiracial\",\n # \"Tests_Other\",\n # \"Tests_Unknown\",\n # \"Tests_Ethnicity_Hispanic\",\n # \"Tests_Ethnicity_NonHispanic\",\n # \"Tests_Ethnicity_Unknown\",\n }\n\n return (\n data.assign(\n location=lambda x: x[\"State\"].map(_find_fips),\n dt=lambda x: pd.to_datetime(x[\"Date\"].astype(str)),\n )\n .melt(\n id_vars=[\"dt\", \"location\"],\n value_vars=column_map.keys(),\n )\n .dropna()\n .pipe(self.extract_CMU, column_map)\n .assign(\n location_type=\"state\",\n vintage=self._retrieve_vintage(),\n value=lambda x: pd.to_numeric(\n x[\"value\"].astype(str).str.replace(\",\", \"\")\n ),\n )\n .drop([\"variable\"], axis=1)\n )\n\n\nclass CovidTrackingProject(FederalDashboard):\n provider: str = \"ctp\"\n location_type: str = \"state\"\n has_location: bool = True\n 
source: str = \"https://api.covidtracking.com/v1/states/daily.csv\"\n\n def fetch(self):\n return pd.read_csv(self.source, parse_dates=[\"date\"])\n\n def normalize(self, data: pd.DataFrame) -> pd.DataFrame:\n column_map = dict(\n death=CMU(\n category=\"deaths\",\n measurement=\"cumulative\",\n unit=\"people\",\n ),\n hospitalizedCurrently=CMU(\n category=\"hospital_beds_in_use_covid\",\n measurement=\"current\",\n unit=\"beds\",\n ),\n inIcuCurrently=CMU(\n category=\"icu_beds_in_use_covid\",\n measurement=\"current\",\n unit=\"beds\",\n ),\n negative=CMU(\n category=\"pcr_tests_negative\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n negativeTestsAntibody=CMU(\n category=\"antibody_tests_negative\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n negativeTestsPeopleAntibody=CMU(\n category=\"antibody_tests_negative\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n negativeTestsViral=CMU(\n category=\"pcr_tests_negative\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n positive=CMU(\n category=\"cases\",\n measurement=\"cumulative\",\n unit=\"people\",\n ),\n positiveCasesViral=CMU(\n category=\"pcr_tests_positive\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n positiveTestsAntibody=CMU(\n category=\"antibody_tests_positive\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n positiveTestsAntigen=CMU(\n category=\"antigen_tests_positive\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n positiveTestsPeopleAntibody=CMU(\n category=\"antibody_tests_positive\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n positiveTestsPeopleAntigen=CMU(\n category=\"antigen_tests_positive\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n positiveTestsViral=CMU(\n category=\"pcr_tests_positive\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n totalTestsAntigen=CMU(\n category=\"antigen_tests_total\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n totalTestsAntibody=CMU(\n category=\"antibody_tests_total\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n totalTestsPeopleAntibody=CMU(\n category=\"antibody_tests_total\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n totalTestsPeopleAntigen=CMU(\n category=\"antigen_tests_total\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n totalTestsPeopleViral=CMU(\n category=\"pcr_tests_total\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n totalTestsViral=CMU(\n category=\"pcr_tests_total\",\n measurement=\"cumulative\",\n unit=\"specimens\",\n ),\n )\n\n df = (\n data.rename(columns=dict(fips=\"location\", date=\"dt\"))\n .melt(\n id_vars=[\"dt\", \"location\"],\n value_vars=column_map.keys(),\n )\n .dropna()\n .pipe(self.extract_CMU, column_map)\n .assign(location_type=\"state\", vintage=self._retrieve_vintage())\n )\n\n return df\n","sub_path":"can_tools/scrapers/ctp.py","file_name":"ctp.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"302773419","text":"################################################################################\n# Copyright (c) 2015-2019 Skymind, Inc.\n#\n# This program and the accompanying materials are made available under the\n# terms of the Apache License, Version 2.0 which is available at\n# https://www.apache.org/licenses/LICENSE-2.0.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under 
the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n################################################################################\n\nimport pytest\r\nimport jnius_config\r\nimport os\r\nimport warnings\r\nimport pydl4j\r\n\r\n\r\ndef test_spark():\r\n # skip test in travis\r\n if \"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\":\r\n return\r\n\r\n pydl4j.validate_datavec_jars()\r\n\r\n from jnius import autoclass\r\n\r\n SparkConf = autoclass('org.apache.spark.SparkConf')\r\n SparkContext = autoclass('org.apache.spark.api.java.JavaSparkContext')\r\n JavaRDD = autoclass('org.apache.spark.api.java.JavaRDD')\r\n SparkTransformExecutor = autoclass(\r\n 'org.datavec.spark.transform.SparkTransformExecutor')\r\n StringToWritablesFunction = autoclass(\r\n 'org.datavec.spark.transform.misc.StringToWritablesFunction')\r\n WritablesToStringFunction = autoclass(\r\n 'org.datavec.spark.transform.misc.WritablesToStringFunction')\r\n\r\n spark_conf = SparkConf()\r\n spark_conf.setMaster('local[*]')\r\n spark_conf.setAppName('test')\r\n\r\n spark_context = SparkContext(spark_conf)\r\n source = 'basic_example.csv'\r\n assert os.path.isfile(source)\r\n string_data = spark_context.textFile(source)\r\n\r\n\r\nif __name__ == '__main__':\r\n pytest.main([__file__])\r\n","sub_path":"pydl4j/tests/spark_test.py","file_name":"spark_test.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"535232940","text":"def longestIncreasingSubsequence(X):\n N = len(X)\n Prev = [0] * N\n M = [0] * (N+1)\n Length = 0\n for i in range(N):\n if (not isinstance(X[i], int )):\n raise Exception('There is real number in the array.')\n lower = 1\n upper = Length\n while lower <= upper:\n mid = (lower + upper) // 2\n if (X[M[mid]] < X[i]):\n lower = mid + 1\n else:\n upper = mid - 1\n newL = lower\n Prev[i] = M[newL - 1]\n M[newL] = i\n\n if (newL > Length):\n Length = newL\n SubS = []\n k = M[Length]\n for i in range(Length - 1, -1, -1):\n SubS.append(X[k])\n k = Prev[k]\n print(SubS)\n return Length\n\n\nA=[0, 1,2,3,4,5,0,1,2,3]\nprint( longestIncreasingSubsequence(A))\n\n","sub_path":"task2_2.py","file_name":"task2_2.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"513226609","text":"'''\ntk Treeview 範例\n'''\n\nimport tkinter as tk\nfrom tkinter import ttk\n\nwindow = tk.Tk()\nwindow.geometry('800x400')\nwindow.title('Treeview Example')\n\ncolumn_name = '序號', '英文名', '中文名', '體重' #tuple\nprint(type(column_name))\nprint(column_name)\ntreeview = ttk.Treeview(window, column = column_name, show = \"headings\")\n#treeview = ttk.Treeview(window, columns = ('序號', '英文名', '中文名', '體重'), show = 'headings')\n\n'''\nfor col_name in column_name:\n treeview.heading(col_name, text = col_name)\n'''\ntreeview.heading('序號', text = '序號')\ntreeview.heading('英文名', text = '英文名')\ntreeview.heading('中文名', text = '中文名')\ntreeview.heading('體重', text = '體重')\n\ntreeview.pack(fill = 'both', expand = True)\n#treeview.pack(expand = True, fill = 'y') another\n\nanimal1 = 1, 'mouse', '老鼠', 1\nanimal2 = 2, 'panda', '貓熊', 123\nanimal3 = 3, 'penguin', '企鵝', 29\nanimal4 = 4, 'lion', '獅子', 270\ntreeview.insert('', tk.END, values = animal1) 
#插入在最後\ntreeview.insert('', tk.END, values = animal2) #插入在最後\ntreeview.insert('', tk.END, values = animal3) #插入在最後\ntreeview.insert('', tk.END, values = animal4) #插入在最後\ntreeview.insert(parent = '', index = 0, values = animal1) #插入在index = 0\ntreeview.insert(parent = '', index = 0, values = animal2) #插入在index = 0\ntreeview.insert(parent = '', index = 0, values = animal3) #插入在index = 0\ntreeview.insert(parent = '', index = 0, values = animal4) #插入在index = 0\n\ntreeview.insert(parent = '', index = 0, values = (3, 'aaa', 'bbb', 'ccc')) #插入在index = 0\ntreeview.insert(parent = '', index = tk.END, values = (5, 'XXXXX', 'YYYYY', 'ZZZZZ')) #插入在最後\n\n# events\ndef item_select(_):\n print('你點選了', treeview.selection())\n for i in treeview.selection():\n print(treeview.item(i)['values'])\n # treeview.item(treeview.selection())\n\ndef delete_items(_):\n print('你刪除了', treeview.selection())\n for i in treeview.selection():\n treeview.delete(i)\n\ntreeview.bind('<<TreeviewSelect>>', item_select) # virtual event fired when the selection changes\ntreeview.bind('<Delete>', delete_items) # Delete key removes the selected rows\n\nwindow.mainloop()\n","sub_path":"_4.python/tkinter/tk_Treeview.py","file_name":"tk_Treeview.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"328355281","text":"def solution(plans):\n \n plans = list(map(lambda x : [x[0], x[1], int(x[2])], plans))\n plans.sort(key = lambda x : x[1])\n plans.append(['x', '23:59', 1440])\n \n stack = []\n answer = []\n current_plan = plans[0]\n for next_plan in plans[1:]:\n remain_time = current_plan[2] - get_diff_time(current_plan[1], next_plan[1])\n if remain_time > 0:\n current_plan[2] = remain_time\n stack.append(current_plan)\n elif remain_time == 0:\n answer.append(current_plan[0])\n else:\n answer.append(current_plan[0])\n while stack != [] and remain_time < 0:\n last_plan = stack.pop()\n remain_time += last_plan[2]\n if remain_time <= 0:\n answer.append(last_plan[0])\n else:\n last_plan[2] = remain_time\n stack.append(last_plan)\n \n current_plan = next_plan\n \n while stack != []:\n answer.append(stack.pop()[0])\n \n return answer\n\n\ndef get_finish_time(current_plan):\n current_h, current_m = map(int, current_plan[1].split(\":\"))\n current_m += current_plan[2]\n current_h += current_m // 60\n current_m %= 60\n \n return str(current_h) + \":\" + str(current_m)\n\n\ndef get_diff_time(current_time, next_time):\n next_h, next_m = map(int, next_time.split(\":\"))\n current_h, current_m = map(int, current_time.split(\":\"))\n \n return (next_h - current_h) * 60 + next_m - current_m\n","sub_path":"programmers/176962.py","file_name":"176962.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"514082303","text":"import utime\nimport math\nfrom color import colors\nfrom neopixel import NeoPixel\n\nclass LedStrip:\n\n def __init__(self, pin, number):\n self._pin = pin\n self._colors = []\n self._number = number\n self._strip = NeoPixel(pin, number)\n self.clear()\n\n def clear(self):\n self.all((0,0,0))\n\n def add(self, color):\n self.flash(3, colors['GREEN'])\n self._colors.append(color)\n self.clear()\n\n i = 0\n leds_per_color = math.ceil(self._number / len(self._colors))\n\n for color in self._colors:\n for num in range(0, leds_per_color):\n if(i < self._number):\n self.set_color(i, color)\n i = i + 1\n\n self.update()\n\n def all(self, color):\n for i in range(self._number):\n self.set_color(i, color)\n self.update()\n\n\n def blink(self, n, color):\n on=False\n 
for i in range(n * 2):\n on=not(on)\n if on:\n self.all(color)\n else:\n self.clear()\n utime.sleep(0.25)\n\n def flash(self, n, color):\n for i in range(0, n):\n self.clear()\n for led in range(0, self._number):\n self.set_color(led, color, 0.5)\n\n\n def set_color(self, i, rgb, brightness = 0.1):\n self._strip[i] = tuple(\n [int(brightness * clr) for clr in rgb]\n )\n self.update()\n\n def update(self):\n self._strip.write()\n","sub_path":"ledstrip.py","file_name":"ledstrip.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"253564767","text":"# Implementation of classic arcade game Pong\n\nimport simplegui\nimport random\n\n# initialize globals - pos and vel encode vertical info for paddles\nWIDTH = 600\nHEIGHT = 400 \nBALL_RADIUS = 20\nPAD_WIDTH = 8\nPAD_HEIGHT = 80\nHALF_PAD_WIDTH = PAD_WIDTH / 2\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\nLEFT = False\nRIGHT = True\nball_pos = [300,150]\nball_vel = [0,0]\npaddle1_vel = 0\npaddle2_vel = 0\nscore1 = 0\nscore2 = 0\ndirection = LEFT\npaddle1_pos = [0,0]\npaddle2_pos = [600-PAD_WIDTH,0]\n# initialize ball_pos and ball_vel for new ball in middle of table\n# if direction is RIGHT, the ball's velocity is upper right, else upper left\ndef spawn_ball(direction):\n global ball_pos, ball_vel # these are vectors stored as lists\n ball_pos = [300,150]\n a = (random.random()+10)/11\n if direction:\n ball_vel = [-2*a,-a]\n else:\n ball_vel = [2*a,-a]\n\n# define event handlers\ndef new_game():\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\n\n global score1, score2 # these are ints\n global direction\n direction = not direction\n spawn_ball(direction)\n\ndef draw(c):\n global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel\n \n \n # draw mid line and gutters\n c.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\n c.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\n c.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\n \n # update ball\n if(ball_pos[1]<=BALL_RADIUS):\n ball_vel[1] = -ball_vel[1]\n elif(ball_pos[1]>=HEIGHT- BALL_RADIUS):\n ball_vel[1] = -ball_vel[1]\n \n # gutter/paddle collisions (standard Pong rules assumed: bounce off a paddle, otherwise the opponent scores and the ball respawns)\n if(ball_pos[0]-BALL_RADIUS<=PAD_WIDTH):\n if(paddle1_pos[1]<ball_pos[1]<paddle1_pos[1]+PAD_HEIGHT):\n ball_vel[0] = -ball_vel[0]\n else:\n score2 += 1\n spawn_ball(RIGHT)\n elif(ball_pos[0]+BALL_RADIUS>=WIDTH-PAD_WIDTH):\n if(paddle2_pos[1]<ball_pos[1]<paddle2_pos[1]+PAD_HEIGHT):\n ball_vel[0] = -ball_vel[0]\n else:\n score1 += 1\n spawn_ball(LEFT)\n\n ball_pos[0] += ball_vel[0]\n ball_pos[1] += ball_vel[1]\n\n # draw ball\n c.draw_circle(ball_pos, BALL_RADIUS, 2, 'White', 'White')\n\n # keep paddles on the screen\n if(paddle1_pos[1]==HEIGHT-PAD_HEIGHT and paddle1_vel>=1):\n paddle1_vel = 0\n if(paddle1_pos[1]==0 and paddle1_vel<=-1):\n paddle1_vel = 0\n if(paddle2_pos[1]==HEIGHT-PAD_HEIGHT and paddle2_vel>=1):\n paddle2_vel = 0\n if(paddle2_pos[1]==0 and paddle2_vel<=-1):\n paddle2_vel = 0\n\n paddle1_pos[1] += paddle1_vel \n paddle2_pos[1] += paddle2_vel\n\n # draw paddles\n c.draw_polygon([paddle1_pos, [paddle1_pos[0]+PAD_WIDTH,paddle1_pos[1]],\n [paddle1_pos[0]+PAD_WIDTH, paddle1_pos[1]+PAD_HEIGHT],\n [paddle1_pos[0], paddle1_pos[1]+PAD_HEIGHT],], 1, 'White','White')\n c.draw_polygon([paddle2_pos, [paddle2_pos[0]+PAD_WIDTH,paddle2_pos[1]],\n [paddle2_pos[0]+PAD_WIDTH, paddle2_pos[1]+PAD_HEIGHT],\n [paddle2_pos[0], paddle2_pos[1]+PAD_HEIGHT],], 1, 'White','White')\n\n \n # draw scores\n c.draw_text(str(score1), (175, 70), 50, 'White')\n c.draw_text(str(score2), (400, 70), 50, 'White') \n \ndef keydown(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP['s']:\n paddle1_vel += 2\n elif key == simplegui.KEY_MAP['w']:\n paddle1_vel -= 2\n elif key == simplegui.KEY_MAP['down']:\n paddle2_vel += 2\n elif key == simplegui.KEY_MAP['up']:\n paddle2_vel -= 2 \n \ndef keyup(key):\n global paddle1_vel, paddle2_vel\n paddle1_vel = 0\n paddle2_vel = 0\ndef reset():\n global score2\n score2 =0\n global score1\n score1 =0 \n new_game()\n\n\n# create frame\nframe = 
simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nframe.add_button('Reset', reset)\n\n# start frame\nnew_game()\nframe.start()\n","sub_path":"Mini-project # 4 - \"Pong\".py","file_name":"Mini-project # 4 - \"Pong\".py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"309895576","text":"# coding=utf-8\n\n' a test module '\n\n__author__ = 'pythme'\n\n# 虽然Python提供了Unicode表示的str和bytes两种数据类型,并且可以通过encode()和decode()方法转换\n# 在不知道编码的情况下,对bytes做decode()不好做。\n\"\"\"\n chardet这个第三方库,用它来检测编码,简单易用。\n $ pip install chardet\n\n 拿到一个bytes时,就可以对其检测编码。用chardet检测编码,只需要一行代码:\n >>> chardet.detect(b'Hello, world!')\n {'encoding': 'ascii', 'confidence': 1.0, 'language': ''}\n 检测出的编码是ascii,注意到还有个confidence字段,表示检测的概率是1.0(即100%)。\n \n 我们来试试检测GBK编码的中文:\n >>> data = '离离原上草,一岁一枯荣'.encode('gbk')\n >>> chardet.detect(data)\n {'encoding': 'GB2312', 'confidence': 0.7407407407407407, 'language': 'Chinese'}\n 检测的编码是GB2312,注意到GBK是GB2312的超集,两者是同一种编码,检测正确的概率是74%,language字段指出的语言是'Chinese'。\n \n 对UTF-8编码进行检测:\n >>> data = '离离原上草,一岁一枯荣'.encode('utf-8')\n >>> chardet.detect(data)\n {'encoding': 'utf-8', 'confidence': 0.99, 'language': ''}\n \n 我们再试试对日文进行���测:\n >>> data = '最新の主要ニュース'.encode('euc-jp')\n >>> chardet.detect(data)\n {'encoding': 'EUC-JP', 'confidence': 0.99, 'language': 'Japanese'}\n \n 可见,用chardet检测编码,使用简单。获取到编码后,再转换为str,就可以方便后续处理。\n chardet支持检测的编码列表请参考官方文档Supported encodings。\n\n\"\"\"\n","sub_path":"14_常用第三方模块/3_chardet.py","file_name":"3_chardet.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"222737248","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin\nimport string\nimport re\nimport json\ndef seperate_prereqs(extra):\n string = extra.getText().replace(u'\\xa0', u' ')\n if len(re.findall(\"[A-Z]{2,}[0-9]{1,}\", string.replace(\"Prerequisite:\", \"\").replace(\" \", \"\"))):\n string = string.replace(\"Prerequisite:\", \"\").replace(\" \", \"\")\n return re.findall(\"[A-Z]{2,}[0-9]{1,}\", string)\n else:\n return list(set(map(lambda x: ''.join(x.split(' ')).upper(), re.findall(\"[A-Za-z]{2,} [0-9]{1,}\", string))))\n \ndef seperate_attrs(extra):\n lines = extra.find_all('p')\n attrs = []\n for line in lines:\n attrs.append(white_out(line.getText()))\n return attrs\n\ndef white_out(s):\n return re.sub('[\\s]{2,}', ' ', s).strip().replace(u'\\xa0', u' ')\n\ndef extract_credits(s):\n return re.search(\"[0-9]{1,}-[0-9]{1,}|([0-9]{1,})\", s).group()\n\njason = {}\nmain_url = 'http://undergraduate.bulletins.psu.edu/university-course-descriptions/undergraduate/'\nsoupski = BeautifulSoup(urlopen(main_url), \"html.parser\")\nnav_list = soupski.find(id=\"sidebar\").find_all('a')\nfor subject in nav_list:\n print(subject[\"href\"])\n subject_url = urljoin(main_url, subject[\"href\"])\n subject_soupski = BeautifulSoup(urlopen(subject_url), \"html.parser\")\n bclasses = subject_soupski.find_all('div', {'class': 'courseblock'})\n for bclass in bclasses:\n class_sep = bclass.find('div', {'class': 'course_codetitle'}).getText().split(':')[0].split(' ')\n title = ''.join(class_sep)\n # print(class_sep)\n creds = extract_credits(bclass.find('div', {'class': 'course_credits'}).getText())\n # can be multiple extra blocks\n extras = bclass.find_all('div', {'class': 'courseblockextra'})\n 
prereqs = []\n attr = []\n url = \"http://undergraduate.bulletins.psu.edu/search/?scontext=courses&search=\" + '+'.join(class_sep)\n for extra in extras:\n if extra.find('strong'):\n prereqs = seperate_prereqs(extra)\n else:\n attr = seperate_attrs(extra)\n jason[title] = {'url': url, 'prerequisites': prereqs, 'attributes': attr, 'credits': creds}\n\n \nwith open('new_bulletin.js', 'w') as outfile:\n json.dump(jason, outfile)\n\n","sub_path":"new_soupski.py","file_name":"new_soupski.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"481036232","text":"import tkinter\nimport wust\nfrom tkinter import ttk\n\nwust.GetRandCode()\nrandcode = input()\nwust.Login('username', 'password', randcode)\n\nCousList = wust.GetCoursesList()\n\nwin = tkinter.Tk(className='武科大教务抢课')\n\ntree = ttk.Treeview()\ntree[\"columns\"] = (\"上课教师\", \"上课时间\", \"上课周次\", \"学分\") \ntree.column(\"上课教师\",width=100) #表示列,不显示 \ntree.column(\"上课时间\",width=100) \ntree.column(\"上课周次\",width=100)\ntree.column(\"学分\",width=100)\n\ntree.heading(\"上课教师\",text=\"上课教师\") #显示表头 \ntree.heading(\"上课时间\",text=\"上课时间\") \ntree.heading(\"上课周次\",text=\"上课周次\")\ntree.heading(\"学分\",text=\"学分\")\n\nindex = 0\nfor i in CousList:\n tree.insert(\"\", index, text = i['kcmc'], values = (i['skjs'], i['sksj'], i['skzc'], i['xf']))\n index = index + 1\n\ntree.pack() \nwin.mainloop()\n","sub_path":"wustgui.py","file_name":"wustgui.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"100058175","text":"# Submitter: sameelm(Malik, Sameel)\r\n\r\nfrom goody import type_as_str\r\nimport inspect\r\n\r\nclass Check_All_OK:\r\n \"\"\"\r\n Check_All_OK class implements __check_annotation__ by checking whether each\r\n annotation passed to its constructor is OK; the first one that\r\n fails (raises AssertionError) prints its problem, with a list of all\r\n annotations being tried at the end of the check_history.\r\n \"\"\"\r\n \r\n def __init__(self,*args):\r\n self._annotations = args\r\n \r\n def __repr__(self):\r\n return 'Check_All_OK('+','.join([str(i) for i in self._annotations])+')'\r\n\r\n def __check_annotation__(self, check, param, value,check_history):\r\n for annot in self._annotations:\r\n check(param, annot, value, check_history+'Check_All_OK check: '+str(annot)+' while trying: '+str(self)+'\\n')\r\n\r\n\r\nclass Check_Any_OK:\r\n \"\"\"\r\n Check_Any_OK implements __check_annotation__ by checking whether at least\r\n one of the annotations passed to its constructor is OK; if all fail \r\n (raise AssertionError) this classes raises AssertionError and prints its\r\n failure, along with a list of all annotations tried followed by the check_history.\r\n \"\"\"\r\n \r\n def __init__(self,*args):\r\n self._annotations = args\r\n \r\n def __repr__(self):\r\n return 'Check_Any_OK('+','.join([str(i) for i in self._annotations])+')'\r\n\r\n def __check_annotation__(self, check, param, value, check_history):\r\n failed = 0\r\n for annot in self._annotations: \r\n try:\r\n check(param, annot, value, check_history)\r\n except AssertionError:\r\n failed += 1\r\n if failed == len(self._annotations):\r\n assert False, repr(param)+' failed annotation check(Check_Any_OK): value = '+repr(value)+\\\r\n '\\n tried '+str(self)+'\\n'+check_history \r\n\r\n\r\n\r\nclass Check_Annotation():\r\n # set name to True for checking to occur\r\n checking_on = True\r\n \r\n # 
self._checking_on must also be true for checking to occur\r\n def __init__(self,f):\r\n self._f = f\r\n self.checking_on = True\r\n \r\n # Check whether param's annot is correct for value, adding to check_history\r\n # if recurs; defines many local function which use it parameters. \r\n def check(self,param,annot,value,check_history=''): \r\n tre = lambda x: x== 1\r\n def swag(param, value, fk):\r\n if type(param) != fk:\r\n return False\r\n elif len(value) == 1:\r\n for item in param:\r\n if type(item) != value[0]:\r\n return False\r\n return True\r\n else:\r\n if len(param) != len(value):\r\n return False\r\n for y in zip(param, value):\r\n if type(y[0]) != y[1]:\r\n return False\r\n return True\r\n def seti (param, value, h):\r\n if type(param) != h:\r\n return False\r\n elif len(value) == 1:\r\n for v in value:\r\n d = v\r\n for item in param:\r\n if type(item) != d:\r\n return False\r\n elif len(value) != len(param):\r\n return False\r\n else:\r\n g = [f for f in value]\r\n param = list(param)\r\n for y in zip(param, g):\r\n if type(y[0]) != y[1]:\r\n return False \r\n return True\r\n if value == None:\r\n return True\r\n elif type(param) == value:\r\n return True\r\n elif type(value) == tuple:\r\n try:\r\n if type(value) == tuple:\r\n for item in range(len(param)):\r\n if type(value[0]) == list:\r\n if not swag(param[item], value[0], list):\r\n return False\r\n elif type(value[0]) == tuple:\r\n if not swag(param[item], value[0], tuple):\r\n return False\r\n elif len(param) < len(value):\r\n return False\r\n else:\r\n return swag(param, value, tuple)\r\n return True\r\n except:\r\n return False\r\n elif type(param) == dict and type(value) == dict:\r\n p = [i for i in value.items()]\r\n if len(p) == 1:\r\n for item in param.items():\r\n for y in zip(item, p[0]):\r\n if type(y[0]) != y[1]:\r\n return False\r\n elif len(p) != len(param.items()):\r\n return False\r\n else:\r\n for item in range(len(param)):\r\n for y in zip(p[item], param.items()[item]):\r\n if type(y[0]) != y[1]:\r\n return False \r\n return True\r\n elif type(value) == set:\r\n return seti(param, value, set)\r\n elif type(value) == frozenset:\r\n return seti(param, value, frozenset)\r\n elif type(value) == list:\r\n y = lambda x: x== 1\r\n for item in range(len(param)):\r\n if type(value[0]) == list:\r\n if not swag(param[item], value[0], list):\r\n return False\r\n elif type(value[0]) == tuple:\r\n if not swag(param[item], value[0], tuple):\r\n return False\r\n elif type(value[0]) == type(y):\r\n for i in param:\r\n try:\r\n if not self.an['x'][0](i):\r\n return False\r\n except:\r\n return False\r\n return True\r\n else:\r\n return swag(param, value, list)\r\n return True\r\n elif type(value) == type(tre):\r\n if type(param) == list:\r\n for i in param:\r\n try:\r\n if not self.an['x'][0](i):\r\n return False\r\n except:\r\n return False\r\n return True\r\n try:\r\n return self.an['x'](param)\r\n except:\r\n return False\r\n else:\r\n try:\r\n param.__check_annotation__()\r\n except:\r\n return False\r\n # Define local functions for checking, list/tuple, dict, set/frozenset, and\r\n # lambda/functions\r\n # Many of these local functions called by check, call check on their\r\n # elements (thus are indirectly recursive)\r\n \r\n # Decode annotation and check it \r\n \r\n \r\n # Return result of calling decorated function call, checking present\r\n # parameter/return annotations if required\r\n def __call__(self, *args, **kargs):\r\n # Return a dictionary of the parameter/argument bindings (actually an\r\n # 
ordereddict, in the order parameters occur in the function's header)\r\n def param_arg_bindings():\r\n f_signature = inspect.signature(self._f)\r\n bound_f_signature = f_signature.bind(*args,**kargs)\r\n for param in f_signature.parameters.values():\r\n if param.name not in bound_f_signature.arguments:\r\n bound_f_signature.arguments[param.name] = param.default\r\n return bound_f_signature.arguments\r\n \r\n # If annotation checking is turned off at the class or function level\r\n # just return the result of calling the decorated function\r\n # Otherwise do all the annotation checking\r\n if not self.checking_on:\r\n return self._f(*args)\r\n try:\r\n # Check the annotation for every parameter (if there is one)\r\n self.an = self._f.__annotations__\r\n #print(self.an)\r\n self.param = param_arg_bindings()\r\n for x in self.param.items():\r\n if x[0] in self.an:\r\n types = self.an[x[0]]\r\n if not self.check(x[1], self.an, types):\r\n raise AssertionError\r\n result = self._f(*args, **kargs) \r\n if 'return' in self.an:\r\n if type(result) == self.an['return']:\r\n self.param['return'] = result\r\n else:\r\n raise AssertionError\r\n return result\r\n \r\n \r\n # Compute/remember the value of the decorated function\r\n \r\n # If 'return' is in the annotation, check it\r\n \r\n # Return the decorated answer\r\n \r\n # On first AssertionError, print the source lines of the function and reraise \r\n except AssertionError:\r\n #print(80*'-')\r\n #for l in inspect.getsourcelines(self._f)[0]: # ignore starting line #\r\n # print(l.rstrip())\r\n #print(80*'-')\r\n raise\r\n\r\n\r\n\r\n\r\n \r\nif __name__ == '__main__': \r\n # an example of testing a simple annotation \r\n #def f(x:int): pass\r\n #f = Check_Annotation(f)\r\n #f(3)\r\n #f('a')\r\n import driver\r\n driver.driver()\r\n\r\n","sub_path":"Annotation Checker/checkannotation.py","file_name":"checkannotation.py","file_ext":"py","file_size_in_byte":9547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"120957368","text":"# python standard library modules\nimport os\nimport sys\nimport textwrap\nimport pygbutton\n\n# pygame\nimport pygame\nfrom pygame.locals import *\n\n# our modules\nimport globals as GL\nfrom globals import *\nfrom pygbutton import *\nfrom classes import *\n\nselection_box_width = 4\n\nclass StartMenu:\n def __init__(self):\n self.bg_image = pygame.image.load('data/background2.png')\n self.start_button = PygButton((325, 395, 140, 40), 'Start')\n self.help_button = PygButton((485, 395, 110, 40), 'Help')\n self.options_button = PygButton((615, 395, 175, 40), 'Options')\n self.exit_button = PygButton((810, 395, 105, 40), 'Exit')\n AUDIO.turn_on_music()\n title_font = pygame.font.Font('data/Kremlin.ttf', 50)\n self.title_font1 = title_font.render('Famished', True, DKRED)\n self.title_font2 = title_font.render('Tournament', True, DKRED)\n self.selection_box_properties = [(325, 395, 140, 40), (485, 395, 110, 40), (615, 395, 175, 40), (810, 395, 105, 40)]\n self.selection_box_i = 0\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.draw()\n self.input()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n GL.SCREEN.blit(self.bg_image, (0, 0))\n self.start_button.draw(GL.SCREEN)\n self.help_button.draw(GL.SCREEN)\n self.options_button.draw(GL.SCREEN)\n self.exit_button.draw(GL.SCREEN)\n GL.SCREEN.blit(self.title_font1, (495, 120))\n GL.SCREEN.blit(self.title_font2, (450, 175))\n self.selection_box = 
Rect2(self.selection_box_properties[self.selection_box_i], color=BLUE)\n pygame.draw.rect(GL.SCREEN, self.selection_box.color, self.selection_box, selection_box_width)\n pygame.display.update()\n\n def input(self):\n GL.INPUT1.refresh()\n\n if GL.INPUT1.kb_input['K_F12']:\n self.return_now = True\n GL.NEXT_PAGE = 'GameLoop()'\n\n if GL.INPUT1.SELECT_PRESS_EVENT:\n GL.INPUT1.SELECT_PRESS_EVENT = False\n\n if GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n\n if GL.INPUT1.START_PRESS_EVENT or GL.INPUT1.A_PRESS_EVENT:\n\n if GL.INPUT1.START_PRESS_EVENT:\n GL.INPUT1.START_PRESS_EVENT = False\n\n if GL.INPUT1.A_PRESS_EVENT:\n GL.INPUT1.A_PRESS_EVENT = False\n\n if self.selection_box_i == 0:\n self.return_now = True\n GL.NEXT_PAGE = 'PlayerSelectPage()'\n\n elif self.selection_box_i == 1:\n self.return_now = True\n GL.NEXT_PAGE = 'help'\n\n elif self.selection_box_i == 2:\n self.return_now = True\n GL.NEXT_PAGE = 'options'\n\n elif self.selection_box_i == 3:\n self.return_now = True\n EXIT_GAME()\n\n if GL.INPUT1.RIGHT_PRESS_EVENT:\n GL.INPUT1.RIGHT_PRESS_EVENT = False\n self.selection_box_i += 1\n if self.selection_box_i > 3:\n self.selection_box_i = 0\n\n if GL.INPUT1.LEFT_PRESS_EVENT:\n GL.INPUT1.LEFT_PRESS_EVENT = False\n self.selection_box_i -= 1\n if self.selection_box_i < 0:\n self.selection_box_i = 3\n\n def events(self):\n for event in pygame.event.get():\n if 'click' in self.start_button.handleEvent(event):\n self.selection_box_i = 0\n self.return_now = True\n GL.NEXT_PAGE = 'PlayerSelectPage()'\n\n if 'click' in self.help_button.handleEvent(event):\n self.selection_box_i = 1\n self.return_now = True\n GL.NEXT_PAGE = 'help'\n\n if 'click' in self.options_button.handleEvent(event):\n self.selection_box_i = 2\n self.return_now = True\n GL.NEXT_PAGE = 'options'\n\n if 'click' in self.exit_button.handleEvent(event):\n self.selection_box_i = 3\n EXIT_GAME()\n\n if event.type == pygame.QUIT:\n EXIT_GAME()\n\n# ----------------------------------------------------------------------------\nclass HelpPage:\n def __init__(self):\n self.return_button = pygbutton.PygButton((0, 550, 300, 50), 'Main Menu')\n self.section_font = pygame.font.Font('data/Kremlin.ttf', 40)\n self.font = pygame.font.Font('data/arial_narrow_7.ttf', 20)\n self.bg_image = pygame.image.load('data/help.png')\n self.bg_title = self.section_font.render('Background', True, WHITE)\n self.bg_text = textwrap.wrap('Under the tyranny of the dark overlord, the world' +\n 'is in chaos and all the resources are nearly depleted. ' +\n 'Entire populations have been subjugated to life in labor ' +\n 'camps, brutally policed by the overlord\\'s military forces. ' +\n 'As your people\\'s champion, you must fight to the death in the ' +\n 'battle arena to win much needed resources.', width=50)\n self.goals_title = self.section_font.render('Goals', True, WHITE)\n self.goals_text = textwrap.wrap('Ultimately, you want to slay your opponent. ' +\n 'To become a better fighter, kill the monsters, gain ' +\n 'experience, and pick up skills. The player to land ' +\n 'the last hit on the monster will receives the experience ' +\n 'points. An ultimate boss will spawn every few ' +\n 'minutes. 
These bosses drop ultimate skills which ' +\n 'will help you humiliate and destroy your opponent.', width=50)\n self.selection_box_properties = [(0, 550, 300, 50)]\n self.selection_box_i = 0\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.draw()\n self.input()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n GL.SCREEN.fill(BLACK)\n GL.SCREEN.blit(self.bg_image, (0, 0))\n\n GL.SCREEN.blit(self.bg_title, (800, 40))\n for num, text in enumerate(self.bg_text):\n line = self.font.render(text, True, DKRED)\n GL.SCREEN.blit(line, (800, 90 + (num * 20)))\n\n GL.SCREEN.blit(self.goals_title, (800, 250))\n for num, text in enumerate(self.goals_text):\n line = self.font.render(text, True, DKRED)\n GL.SCREEN.blit(line, (800, 300 + (num * 20)))\n\n self.return_button.draw(GL.SCREEN)\n self.selection_box = Rect2(self.selection_box_properties[self.selection_box_i], color=BLUE)\n pygame.draw.rect(GL.SCREEN, self.selection_box.color, self.selection_box, selection_box_width)\n pygame.display.update()\n\n def input(self):\n GL.INPUT1.refresh()\n\n if GL.INPUT1.START_PRESS_EVENT:\n GL.INPUT1.START_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.SELECT_PRESS_EVENT:\n GL.INPUT1.SELECT_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.A_PRESS_EVENT:\n GL.INPUT1.A_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n EXIT_GAME()\n if 'click' in self.return_button.handleEvent(event):\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n# ----------------------------------------------------------------------------\nclass PlayerSelectPage:\n\n def __init__(self):\n def _setup_display():\n self.return_button = pygbutton.PygButton((0, 550, 300, 50), 'Main Menu')\n self.player1_spritesheet = None\n self.player2_spritesheet = None\n\n def _load_images():\n self.bg_image = pygame.image.load('data/player_select_bkg.png')\n self.humanPortrait = pygame.image.load('data/portrait_human.png')\n self.elfPortrait = pygame.image.load('data/portrait_elf.png')\n\n self.portraits = [self.humanPortrait, self.elfPortrait]\n self.portraits2 = [self.humanPortrait, self.elfPortrait]\n\n # show human portrait by default\n self.index = 0\n self.index2 = 0\n\n def _setup_fonts():\n self.start_font = pygame.font.Font('data/Kremlin.ttf', 50)\n self.start_font_xy = font_position_center(GL.SCREEN.get_rect(), self.start_font, '---------------Press Start when ready---------------')\n self.start_font_rendered = self.start_font.render('---------------Press Start when ready---------------', True, YELLOW)\n\n def _setup_flags():\n self.ready1 = False\n self.ready2 = False\n self.start = False\n\n # if there is a second gamepad, there is a second player\n # set ready to false if second player exists\n # if no second player, set ready to true\n if not GL.INPUT2.get_gamepad():\n self.ready2 = True\n\n _setup_display()\n _setup_fonts()\n _setup_flags()\n _load_images()\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.draw()\n self.input()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n GL.SCREEN.blit(self.bg_image, (0, 0))\n self.return_button.draw(GL.SCREEN)\n GL.SCREEN.blit(self.portraits[self.index], (167, 106))\n GL.SCREEN.blit(self.portraits2[self.index2], 
(810, 106))\n if self.ready1 and self.ready2:\n GL.SCREEN.blit(self.start_font_rendered, self.start_font_xy)\n pygame.display.update()\n\n def input(self):\n\n def refresh_inputs():\n GL.INPUT1.refresh()\n GL.INPUT2.refresh()\n\n def player_select_inputs():\n\n def check_other_player(player):\n if player == 'player1':\n if self.index == self.index2 and self.ready2: # player 2 is using character, skip index\n self.index += 1\n if self.index >= len(self.portraits):\n self.index = 0\n else:\n if self.index == self.index2 and self.ready1: # player 2 is using character, skip index\n self.index2 += 1\n if self.index2 >= len(self.portraits2):\n self.index2 = 0\n\n def check_left_right(player):\n if player == 'player1':\n if GL.INPUT1.LEFT_PRESS_EVENT:\n GL.INPUT1.LEFT_PRESS_EVENT = False\n self.index -= 1\n if self.index < 0:\n self.index = len(self.portraits) - 1\n\n check_other_player('player1')\n\n elif GL.INPUT1.RIGHT_PRESS_EVENT:\n GL.INPUT1.RIGHT_PRESS_EVENT = False\n self.index += 1\n if self.index >= len(self.portraits):\n self.index = 0\n\n check_other_player('player1')\n\n elif player == 'player2':\n if GL.INPUT2.LEFT_PRESS_EVENT:\n GL.INPUT2.LEFT_PRESS_EVENT = False\n self.index2 -= 1\n if self.index2 < 0:\n self.index2 = len(self.portraits2) - 1\n\n check_other_player('player2')\n\n elif GL.INPUT2.RIGHT_PRESS_EVENT:\n GL.INPUT2.RIGHT_PRESS_EVENT = False\n self.index2 += 1\n if self.index2 >= len(self.portraits2):\n self.index2 = 0\n\n check_other_player('player2')\n\n # if player 1/2 is not ready, let them select character\n if not self.ready1:\n check_left_right('player1')\n if not self.ready2:\n check_left_right('player2')\n\n def player_done_selecting():\n # if player presses A\n # they selected sprite\n # set sprite to player\n # if they pressed select\n # they want to select a different sprite or return to start screen\n if GL.INPUT1.A_PRESS_EVENT or GL.INPUT1.kb_input['K_SPACE']:\n GL.INPUT1.A_PRESS_EVENT = False\n GL.INPUT1.kb_input['K_SPACE'] = False # press space on keyboard to select\n if self.ready2 and self.index2 == self.index:\n print('Player 2 is using this character. Select a different one.')\n else:\n print('player 1 ready')\n self.ready1 = True\n\n if GL.INPUT2.A_PRESS_EVENT:\n GL.INPUT2.A_PRESS_EVENT = False\n if self.ready1 and self.index2 == self.index:\n print('Player 1 is using this character. 
Select a different one.')\n else:\n print('player 2 ready')\n self.ready2 = True\n\n # if player presses back when previously stated they were ready\n # allow them to reselect player\n # keyboard equivalent of select is 's' key\n if self.ready1 and GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n print('player 1 not ready anymore')\n self.ready1 = False\n\n elif not self.ready1 and GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n GL.NEXT_PAGE = 'start'\n self.return_now = True\n print('player 1 requested to go back to start')\n\n if self.ready2 and GL.INPUT2.B_PRESS_EVENT:\n GL.INPUT2.B_PRESS_EVENT = False\n print('player 2 not ready anymore')\n self.ready2 = False\n\n elif not self.ready2 and GL.INPUT2.B_PRESS_EVENT:\n GL.INPUT2.B_PRESS_EVENT = False\n GL.NEXT_PAGE = 'start'\n self.return_now = True\n print('player 2 requested to go back to start')\n\n\n # elif GL.INPUT1.SELECT_PRESS_EVENT or GL.INPUT1.kb_input['K_s']:\n # GL.INPUT1.SELECT_PRESS_EVENT = False\n # GL.INPUT1.kb_input['K_s'] = False\n # print('player 1 not ready anymore')\n # self.ready1 = False\n #\n # elif GL.INPUT2.SELECT_PRESS_EVENT:\n # GL.INPUT2.SELECT_PRESS_EVENT = False\n # print('player 2 not ready anymore')\n # self.ready2 = False\n\n # if player presses back when they were not ready\n # go back to start screen\n # if GL.INPUT1.SELECT_PRESS_EVENT and not self.ready1:\n # GL.INPUT1.SELECT_PRESS_EVENT = False\n # GL.NEXT_PAGE = 'start'\n # self.return_now = True\n\n # elif (GL.INPUT1.SELECT_PRESS_EVENT and not self.ready1 or GL.INPUT1.kb_input['K_s']):\n # GL.INPUT1.SELECT_PRESS_EVENT = False\n # GL.INPUT1.kb_input['K_s'] = False\n # GL.NEXT_PAGE = 'start'\n #\n # if GL.INPUT2.SELECT_PRESS_EVENT and not self.ready2:\n # GL.INPUT2.SELECT_PRESS_EVENT = False\n # GL.NEXT_PAGE = 'start'\n # self.return_now = True\n\n # elif (GL.INPUT2.SELECT_PRESS_EVENT and not self.ready2):\n # GL.INPUT2.SELECT_PRESS_EVENT = False\n # GL.NEXT_PAGE = 'start'\n\n def ready_for_start():\n if self.ready1 and self.ready2:\n\n # if player 1 or player 2 presses start when both players are ready\n # enter game loop\n # if using a keyboard - only one player\n # if keyboard user presses 'A' when he is ready\n # enter game loop\n if (GL.INPUT1.START_PRESS_EVENT or GL.INPUT2.START_PRESS_EVENT) or (GL.INPUT1.kb_input['K_a']):\n if GL.INPUT1.START_PRESS_EVENT:\n GL.INPUT1.START_PRESS_EVENT = False\n if GL.INPUT2.START_PRESS_EVENT:\n GL.INPUT2.START_PRESS_EVENT = False\n if GL.INPUT1.kb_input['K_a']:\n GL.INPUT1.kb_input['K_a'] = False\n\n self.start = True\n print('setting sprites')\n set_sprites()\n print('set sprites')\n print('going to level select screen')\n GL.NEXT_PAGE = 'LevelSelectPage()'\n self.return_now = True\n\n def set_sprites():\n # set spritesheet for player1\n if self.index == 0: # human\n self.player1_spritesheet = 'data/p1_human.png'\n # elif self.index == 1: #elf\n else:\n self.player1_spritesheet = 'data/p1_elf.png'\n\n if self.index2 == 0: # human\n self.player2_spritesheet = 'data/p2_human.png'\n elif self.index2 == 1: # elf\n self.player2_spritesheet = 'data/p2_elf.png' # Elf spritesheet 2 if available\n\n GL.set_player1_spritesheet(self.player1_spritesheet)\n GL.set_player2_spritesheet(self.player2_spritesheet)\n\n refresh_inputs()\n player_select_inputs()\n player_done_selecting()\n ready_for_start()\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n EXIT_GAME()\n if 'click' in self.return_button.handleEvent(event):\n self.return_now = True\n GL.NEXT_PAGE = 
'start'\n\n# ----------------------------------------------------------------------------\nclass LevelSelectPage:\n\n def __init__(self):\n def _setup_display():\n self.return_button = pygbutton.PygButton((0, 550, 300, 50), 'Main Menu')\n self.ready = False\n\n def _load_images():\n self.bg_image = pygame.image.load('data/level_select_bkg.png')\n self.bg_image2 = pygame.image.load('data/level_select_bkg2.png')\n self.humanLevel = pygame.image.load('data/humanLevel.png')\n self.elfLevel = pygame.image.load('data/vinesLevel.png')\n self.androidLevel = pygame.image.load('data/androidLevel.png')\n self.levels = [self.humanLevel, self.elfLevel, self.androidLevel]\n self.outerX = [19, 444, 874]\n self.innerX = [24, 450, 878]\n self.index = 0\n\n _setup_display()\n _load_images()\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.input()\n self.draw()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n GL.SCREEN.blit(self.bg_image, (0, 0))\n outer_highlight = Rect2(topleft=(self.outerX[self.index], 184), size = (389, 173), color=(20, 118, 128))\n inner_highlight = Rect2(topleft=(self.innerX[self.index], 190), size=(379, 162), color=(80, 191, 201))\n pygame.draw.rect(GL.SCREEN, outer_highlight.color, outer_highlight)\n pygame.draw.rect(GL.SCREEN, inner_highlight.color, inner_highlight)\n GL.SCREEN.blit(self.bg_image2, (0, 0))\n self.return_button.draw(GL.SCREEN)\n pygame.display.update()\n\n # only player 1 can select level\n def input(self):\n GL.INPUT1.refresh()\n\n if GL.INPUT1.LEFT_PRESS_EVENT:\n GL.INPUT1.LEFT_PRESS_EVENT = False\n self.index -= 1\n if self.index < 0:\n self.index = len(self.levels) - 1\n\n if GL.INPUT1.RIGHT_PRESS_EVENT:\n GL.INPUT1.RIGHT_PRESS_EVENT = False\n self.index += 1\n if self.index >= len(self.levels):\n self.index = 0\n\n if GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n GL.NEXT_PAGE = 'PlayerSelectPage()'\n self.return_now = True\n\n\n def ready_check():\n if GL.INPUT1.START_PRESS_EVENT or GL.INPUT1.kb_input['K_a']:\n GL.INPUT1.START_PRESS_EVENT = False\n GL.INPUT1.kb_input['K_a'] = False\n print('ready to load')\n self.ready = True\n set_level()\n GL.NEXT_PAGE = 'GameLoop()'\n self.return_now = True\n\n def set_level():\n print('setting level')\n if self.index == 0:\n arena = arena4\n elif self.index == 1:\n arena = arena3\n elif self.index == 2:\n arena = arena5\n\n GL.set_level(arena)\n print('set level')\n\n ready_check()\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n EXIT_GAME()\n if 'click' in self.return_button.handleEvent(event):\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n# ----------------------------------------------------------------------------\nclass OptionsPage:\n def __init__(self):\n self.bg_image = pygame.image.load('data/background2.png')\n self.active_colors = BLACK, DKRED\n self.inactive_colors = DKRED, BLACK\n self.music_on_button = pygbutton.PygButton((650, 200, 60, 50), 'ON')\n self.music_off_button = pygbutton.PygButton((730, 200, 80, 50), 'OFF')\n self.effects_on_button = pygbutton.PygButton((650, 260, 60, 50), 'ON')\n self.effects_off_button = pygbutton.PygButton((730, 260, 80, 50), 'OFF')\n self.return_button = pygbutton.PygButton((0, 550, 300, 50), 'Main Menu')\n font = pygame.font.Font('data/Kremlin.ttf', 40)\n self.bg_font = font.render('Music:', True, DKRED)\n self.se_font = font.render('Sound:', True, DKRED)\n\n main_menu = (0, 550, 300, 50)\n sound_on = (650, 260, 60, 50)\n music_on = (650, 200, 60, 50)\n music_off = 
(730, 200, 80, 50)\n sound_off = (730, 260, 80, 50)\n\n self.selection_box_row_properties = [[main_menu, sound_on, music_on], [main_menu, sound_off, music_off]]\n\n row1_initial = 0 if AUDIO.sound_on else 1\n row2_initial = 0 if AUDIO.music_on else 1\n\n self.selection_box_col_indices = [0, row1_initial, row2_initial]\n self.selection_box_row = 0\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.draw()\n self.input()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n if AUDIO.music_on:\n self.music_on_button.fgcolor, self.music_on_button.bgcolor = self.active_colors\n self.music_off_button.fgcolor, self.music_off_button.bgcolor = self.inactive_colors\n else:\n self.music_on_button.fgcolor, self.music_on_button.bgcolor = self.inactive_colors\n self.music_off_button.fgcolor, self.music_off_button.bgcolor = self.active_colors\n\n if AUDIO.sound_on:\n self.effects_on_button.fgcolor, self.effects_on_button.bgcolor = self.active_colors\n self.effects_off_button.fgcolor, self.effects_off_button.bgcolor = self.inactive_colors\n else:\n self.effects_on_button.fgcolor, self.effects_on_button.bgcolor = self.inactive_colors\n self.effects_off_button.fgcolor, self.effects_off_button.bgcolor = self.active_colors\n\n GL.SCREEN.blit(self.bg_image, (0, 0))\n GL.SCREEN.blit(self.bg_font, (450, 200))\n GL.SCREEN.blit(self.se_font, (450, 260))\n self.music_on_button.draw(GL.SCREEN)\n self.music_off_button.draw(GL.SCREEN)\n self.effects_on_button.draw(GL.SCREEN)\n self.effects_off_button.draw(GL.SCREEN)\n\n self.return_button.draw(GL.SCREEN)\n row = self.selection_box_row\n col = self.selection_box_col_indices[row]\n self.selection_box = Rect2(self.selection_box_row_properties[col][row], color=BLUE)\n pygame.draw.rect(GL.SCREEN, self.selection_box.color, self.selection_box, selection_box_width)\n pygame.display.update()\n\n def input(self):\n GL.INPUT1.refresh()\n if GL.INPUT1.START_PRESS_EVENT:\n GL.INPUT1.START_PRESS_EVENT = False\n if self.selection_box_row == 0:\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.B_PRESS_EVENT:\n GL.INPUT1.B_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.A_PRESS_EVENT:\n GL.INPUT1.A_PRESS_EVENT = False\n if self.selection_box_row == 0:\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.UP_PRESS_EVENT:\n GL.INPUT1.UP_PRESS_EVENT = False\n self.selection_box_row += 1\n if self.selection_box_row > 2:\n self.selection_box_row = 0\n\n if GL.INPUT1.DOWN_PRESS_EVENT:\n GL.INPUT1.DOWN_PRESS_EVENT = False\n self.selection_box_row -= 1\n if self.selection_box_row < 0:\n self.selection_box_row = 2\n\n if GL.INPUT1.LEFT_PRESS_EVENT or GL.INPUT1.RIGHT_PRESS_EVENT:\n\n if GL.INPUT1.LEFT_PRESS_EVENT:\n GL.INPUT1.LEFT_PRESS_EVENT = False\n\n if GL.INPUT1.RIGHT_PRESS_EVENT:\n GL.INPUT1.RIGHT_PRESS_EVENT = False\n\n curr_col = self.selection_box_col_indices[self.selection_box_row]\n new_col = 1 if curr_col == 0 else 0\n self.selection_box_col_indices[self.selection_box_row] = new_col\n\n if self.selection_box_row == 1:\n if new_col == 0:\n AUDIO.turn_on_effects()\n elif new_col == 1:\n AUDIO.turn_off_effects()\n\n elif self.selection_box_row == 2:\n if new_col == 0:\n AUDIO.turn_on_music()\n elif new_col == 1:\n AUDIO.turn_off_music()\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n EXIT_GAME()\n\n if 'click' in 
self.music_on_button.handleEvent(event):\n self.selection_box_row = 2\n self.selection_box_col_indices[self.selection_box_row] = 0\n AUDIO.turn_on_music()\n\n if 'click' in self.music_off_button.handleEvent(event):\n self.selection_box_row = 2\n self.selection_box_col_indices[self.selection_box_row] = 1\n AUDIO.turn_off_music()\n\n if 'click' in self.effects_on_button.handleEvent(event):\n self.selection_box_row = 1\n self.selection_box_col_indices[self.selection_box_row] = 0\n AUDIO.turn_on_effects()\n\n if 'click' in self.effects_off_button.handleEvent(event):\n self.selection_box_row = 1\n self.selection_box_col_indices[self.selection_box_row] = 1\n AUDIO.turn_off_effects()\n\n if 'click' in self.return_button.handleEvent(event):\n self.selection_box_row = 0\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n# ----------------------------------------------------------------------------\nclass PauseMenu:\n def __init__(self):\n self.menu_box = Rect2(topleft=(320, 120), size=(640, 240), color=BLACK)\n main_font = 'data/Kremlin.ttf'\n pause_font = pygame.font.Font(main_font, 100)\n self.pause_font_xy = font_position_center(self.menu_box, pause_font, '-PAUSE-')\n self.pause_font_rendered = pause_font.render('-PAUSE-', True, RED)\n\n self.continue_button_properties = (395, 270, 200, 50)\n self.quit_button_properties = (730, 270, 100, 50)\n\n self.continue_button = pygbutton.PygButton(self.continue_button_properties, 'Continue')\n self.quit_button = pygbutton.PygButton(self.quit_button_properties, 'Quit')\n self.selection_box_properties = [self.continue_button_properties, self.quit_button_properties]\n self.selection_box_i = 0\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.draw()\n self.input()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n pygame.draw.rect(GL.SCREEN, DGREY, self.menu_box)\n pygame.draw.rect(GL.SCREEN, self.menu_box.color, self.menu_box, 4)\n GL.SCREEN.blit(self.pause_font_rendered, (self.pause_font_xy[0], self.menu_box.top))\n self.continue_button.draw(GL.SCREEN)\n self.quit_button.draw(GL.SCREEN)\n self.selection_box = Rect2(self.selection_box_properties[self.selection_box_i], color=BLUE)\n pygame.draw.rect(GL.SCREEN, self.selection_box.color, self.selection_box, selection_box_width)\n pygame.display.update()\n\n def input(self):\n GL.INPUT1.refresh_during_pause()\n if GL.INPUT1.START_PRESS_EVENT or GL.INPUT1.A_PRESS_EVENT:\n\n if GL.INPUT1.START_PRESS_EVENT:\n GL.INPUT1.START_PRESS_EVENT = False\n\n if GL.INPUT1.A_PRESS_EVENT:\n GL.INPUT1.A_PRESS_EVENT = False\n\n if self.selection_box_i == 0:\n self.return_now = True\n GL.NEXT_PAGE = 'GL.CURR_GAME'\n\n if self.selection_box_i == 1:\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.RIGHT_PRESS_EVENT:\n GL.INPUT1.RIGHT_PRESS_EVENT = False\n self.selection_box_i += 1\n if self.selection_box_i > 1:\n self.selection_box_i = 0\n\n if GL.INPUT1.LEFT_PRESS_EVENT:\n GL.INPUT1.LEFT_PRESS_EVENT = False\n self.selection_box_i -= 1\n if self.selection_box_i < 0:\n self.selection_box_i = 1\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n EXIT_GAME()\n\n if 'click' in self.continue_button.handleEvent(event):\n self.return_now = True\n GL.NEXT_PAGE = 'GL.CURR_GAME'\n\n if 'click' in self.quit_button.handleEvent(event):\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\nclass GameOverMenu:\n def __init__(self):\n self.menu_box = Rect2(topleft=(290, 120), size=(670, 240), color=BLACK)\n main_font = 'data/Kremlin.ttf'\n 
game_over_font = pygame.font.Font(main_font, 100)\n self.game_over_xy = font_position_center(self.menu_box, game_over_font, '-Game Over-')\n self.game_over_rendered = game_over_font.render('-Game Over-', True, RED)\n\n self.main_menu_properties = (395, 270, 200, 50)\n self.exit_button_properties = (730, 270, 100, 50)\n\n self.main_menu_button = pygbutton.PygButton(self.main_menu_properties, 'Main Menu')\n self.exit_button = pygbutton.PygButton(self.exit_button_properties, 'Exit')\n self.selection_box_properties = [self.main_menu_properties, self.exit_button_properties]\n self.selection_box_i = 0\n\n def __call__(self):\n self.return_now = False\n while not self.return_now:\n self.draw()\n self.input()\n self.events()\n GL.CLOCK.tick(GL.FPS)\n\n def draw(self):\n pygame.draw.rect(GL.SCREEN, DGREY, self.menu_box)\n pygame.draw.rect(GL.SCREEN, self.menu_box.color, self.menu_box, 4)\n GL.SCREEN.blit(self.game_over_rendered, (self.game_over_xy[0], self.menu_box.top))\n GL.SCREEN.blit(self.game_over_rendered, (self.game_over_xy[0], self.menu_box.top))\n self.main_menu_button.draw(GL.SCREEN)\n self.exit_button.draw(GL.SCREEN)\n self.selection_box = Rect2(self.selection_box_properties[self.selection_box_i], color=BLUE)\n pygame.draw.rect(GL.SCREEN, self.selection_box.color, self.selection_box, selection_box_width)\n pygame.display.update()\n\n def input(self):\n GL.INPUT1.refresh_during_pause()\n # if GL.INPUT1.START_PRESS_EVENT:\n # GL.INPUT1.START_PRESS_EVENT = False\n # self.return_now = True\n # GL.NEXT_PAGE = 'GL.CURR_GAME'\n\n if GL.INPUT1.SELECT_PRESS_EVENT:\n GL.INPUT1.SELECT_PRESS_EVENT = False\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if GL.INPUT1.START_PRESS_EVENT:\n GL.INPUT1.START_PRESS_EVENT = False\n\n if self.selection_box_i == 0:\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if self.selection_box_i == 1:\n self.return_now = True\n EXIT_GAME()\n\n if GL.INPUT1.RIGHT_PRESS_EVENT:\n GL.INPUT1.RIGHT_PRESS_EVENT = False\n self.selection_box_i += 1\n if self.selection_box_i > 1:\n self.selection_box_i = 0\n\n if GL.INPUT1.LEFT_PRESS_EVENT:\n GL.INPUT1.LEFT_PRESS_EVENT = False\n self.selection_box_i -= 1\n if self.selection_box_i < 0:\n self.selection_box_i = 1\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n EXIT_GAME()\n\n if 'click' in self.main_menu_button.handleEvent(event):\n self.return_now = True\n GL.NEXT_PAGE = 'start'\n\n if 'click' in self.exit_button.handleEvent(event):\n EXIT_GAME()\n","sub_path":"pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":34294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"391429167","text":"import math as m\n\nimport librosa\nimport numpy\nfrom numpy.fft import rfft\n\npathFirstMicro = \"\"\npathSecondMicro = \"\"\n\n\ndef baseAlg():\n maxRangeCor = 5500 # maximum correlation search range, in samples\n u = 330 # speed of sound, m/s\n frequency = 22000 # sampling frequency, Hz\n dist = 100 # distance between the microphones\n\n angles = list()\n\n d = difDist(maxRangeCor) # compute the sample delay once; each call reloads and transforms both signals\n theta = m.degrees(m.acos(d * u / (frequency * dist))) # angle from cos(theta) = delta_d * u / (f * dist)\n\n if d > 0:\n angles.append(theta)\n angles.append(360 - theta)\n\n angles.append(180 - theta)\n angles.append(180 + theta)\n\n return angles\n\n\ndef difDist(maxRange):\n x, sr = librosa.load(pathFirstMicro)\n leftSignalFurie = rfft(x)\n x, sr = librosa.load(pathSecondMicro)\n rightSignalFurie = rfft(x)\n best = 0\n dist = 0\n\n for i in range(1, maxRange):\n corrCoeff = abs(numpy.corrcoef(\n leftSignalFurie[i : maxRange + i], rightSignalFurie[0:maxRange]\n )[0, 1]) # magnitude, since the rfft spectra are complex\n if corrCoeff > best:\n best = corrCoeff\n dist = i\n\n for i in range(1, maxRange):\n corrCoeff = abs(numpy.corrcoef(\n leftSignalFurie[0:maxRange], rightSignalFurie[i : maxRange + i]\n )[0, 1])\n if corrCoeff > best:\n best = corrCoeff\n dist = -1 * i\n\n return dist\n\n\nangles = baseAlg()\n","sub_path":"code/BaseAlg.py","file_name":"BaseAlg.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"569636113","text":"import sys\nimport numpy as np\n\nimport MapReduce\n\nmr = MapReduce.MapReduce()\n\n\ndef mapper(record):\n # record: (matrix, row, col, value)\n value = record[3]\n for i in range(5):\n if record[0] == 'a':\n key = (record[1], i)\n mr.emit_intermediate(key, (record[0], record[2], value))\n elif record[0] == 'b':\n key = (i, record[2])\n mr.emit_intermediate(key, (record[0], record[1], value))\n\n\ndef reducer(key, list_of_values):\n # key: label\n sparse_arrays = {\n 'a': np.zeros(5),\n 'b': np.zeros(5),\n }\n\n for v in list_of_values:\n sparse_arrays[v[0]][v[1]] = v[2]\n\n mr.emit((key[0], key[1], int(np.dot(sparse_arrays['a'], sparse_arrays['b']))))\n\n\nif __name__ == '__main__':\n with open(sys.argv[1], 'r') as input_data:\n mr.execute(input_data, mapper, reducer)\n","sub_path":"Coursera - Data Manipulation at Scale - Systems and Algorithms/week 3/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"400961535","text":"\"\"\"\nGiven the root of a binary tree, return the level order traversal of its\nnodes' values. 
(i.e., from left to right, level by level).\n\"\"\"\n\n\n# Definition for a binary tree node.\nfrom collections import deque\nfrom typing import List\n\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def levelOrder(self, root: TreeNode) -> List[List[int]]:\n        que = deque()\n        heights = {}\n        height = 0\n        if not root:\n            return []\n\n        que.append((root, height + 1))\n        while que:\n            node, height = que.popleft()\n            # print(node.val, height, heights)\n            heights.setdefault(height, []).append(node.val)\n            if node.left:\n                que.append((node.left, height + 1))\n            if node.right:\n                que.append((node.right, height + 1))\n        # dict preserves insertion order, so the values are already level by level;\n        # convert dict_values to the promised List[List[int]]\n        return list(heights.values())\n\n","sub_path":"Binary Tree Level Order Traversal.py","file_name":"Binary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"188174687","text":"# Given a financial dataset of various customers, predict the likelihood that a customer will sign up for an RRSP account\n# Features are normalized in this script\n\nimport pandas as pd\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nfrom mlxtend.plotting import plot_learning_curves\n\n# read csv data\ndf = pd.read_csv('vcRSP2017.csv')\n\ntotalRows,totalFeatures = df.shape\n\n\n# 0.6 Training, 0.2 X-val, 0.2 Testing data\ntrain_df,xval_df,test_df = np.split(df,[int(0.6*len(df)),int(0.8*len(df))])\n\ncombine = [train_df,xval_df,test_df]\n\nfor dataset in combine:\n    dataset.loc[(dataset['APURCH'] == 'Y'), 'APURCH'] = 1\n    dataset.loc[(dataset['APURCH'] == 'N'),'APURCH'] = 0\n\n\n\n# grid = sns.FacetGrid(train_df, col='APURCH')\n# grid.map (plt.hist, 'age',bins=20)\n# plt.show()\n\n# Set NA Values to median values\nfreq_age = train_df.age.dropna().median()\n\nfor dataset in combine:\n    dataset.loc[dataset['age']==0,'age'] = freq_age\n\n#Normalize and feature engineer data\nfor dataset in combine:\n    dataset.loc[(dataset['age'] <= 28.2),'age'] = 0\n    dataset.loc[(dataset['age'] > 28.2) & (dataset['age'] <=38.4), 'age'] = .25\n    dataset.loc[(dataset['age'] > 38.4) & (dataset['age'] <=48.6), 'age'] = .5\n    dataset.loc[(dataset['age'] > 48.6) & (dataset['age'] <=58.8), 'age'] = .75\n    dataset.loc[(dataset['age'] > 58.8), 'age'] = 1\n\n    dataset['gender'] = 0\n    dataset.loc[(dataset['gendm']==1),'gender'] = 1\n    dataset.loc[(dataset['gendm']==0)&(dataset['gendf']==0),'gender'] = 0\n\n    dataset['BALCHQ'] = dataset['BALCHQ'].fillna(0)\n    dataset['BALSAV'] = dataset['BALSAV'].fillna(0)\n    dataset['savings'] = dataset['BALSAV'] + dataset['BALCHQ']\n\nfor dataset in combine:\n    dataset['transactions'] = dataset['TXBRAN'] + dataset['TXATM'] + dataset['TXATM'] +dataset['TXTEL'] + dataset['TXPOS'] + dataset['TXWEB']+dataset['TXCHQ']\n# print(pd.qcut(train_df['transactions'], 5))\n\nfor dataset in combine:\n    dataset.loc[(dataset['transactions'] <=1.25),'transactions'] = 0\n    dataset.loc[(dataset['transactions'] > 1.25) & (dataset['transactions'] <=6.833), 'transactions'] = 0.25\n 
dataset.loc[(dataset['transactions'] > 6.833) & (dataset['transactions'] <= 18.5), 'transactions'] = 0.5\n dataset.loc[(dataset['transactions'] > 18.5) & (dataset['transactions'] <= 36.0), 'transactions'] = 0.75\n dataset.loc[(dataset['transactions'] > 36.0), 'transactions'] = 1\n\n# for dataset in combine:\n# dataset['avgincome'] = dataset['avginc_1'] + dataset['avginv_1']\n# print(pd.qcut(train_df['avgincome'], 5))\n#\n# for dataset in combine:\n# dataset.loc[(dataset['avgincome'] <= 27450.635), 'transactions'] = 0\n# print(pd.qcut(train_df['transactions'], 5))\n\ntrain_df = train_df.drop(['gendm','gendf','BALSAV','BALCHQ'],axis = 1)\nxval_df = xval_df.drop(['gendm','gendf','BALSAV','BALCHQ'],axis = 1)\ntest_df = test_df.drop(['gendm','gendf','BALSAV','BALCHQ'],axis = 1)\n\n# print(pd.qcut(train_df['savings'],5))\n\ncombine = [train_df,xval_df,test_df]\n\nfor dataset in combine:\n dataset.loc[(dataset['savings'] <= 99.396),'savings'] = 0\n dataset.loc[(dataset['savings'] > 99.396) & (dataset['savings'] <= 645.431),'savings'] = 0.25\n dataset.loc[(dataset['savings'] > 645.431) & (dataset['savings'] <= 1830.809), 'savings'] = .5\n dataset.loc[(dataset['savings'] > 1830.809) & (dataset['savings'] <= 4866.124), 'savings'] = .75\n dataset.loc[(dataset['savings'] > 4866.124), 'savings'] = 1\n# print(train_df[['savings','APURCH']].groupby(['savings'],as_index=False).mean().sort_values(by='APURCH',ascending=False))\n# print(pd.qcut(train_df['TOTDEP'],5))\nfreq_segment = train_df.valsegm.dropna().mode()[0]\n# print(freq_segment)\n\n# Normalize data\nfor dataset in combine:\n dataset.loc[(dataset['TOTDEP'] <= 325.126),'TOTDEP'] = 0\n dataset.loc[(dataset['TOTDEP'] > 325.126) & (dataset['TOTDEP'] <= 1118.013), 'TOTDEP'] = 1\n dataset.loc[(dataset['TOTDEP'] > 1118.013) & (dataset['TOTDEP'] <= 3225.706), 'TOTDEP'] = 2\n dataset.loc[(dataset['TOTDEP'] > 3225.706) & (dataset['TOTDEP'] <= 10410.799), 'TOTDEP'] = 3\n dataset.loc[(dataset['TOTDEP'] > 10410.799),'TOTDEP'] = 4\n\n dataset['valsegm'] = dataset['valsegm'].fillna(freq_segment)\n\nsegmentMapping = {\"A\":0,\"B\":0.25,\"C\":0.5,\"D\":.75,\"E\":1}\n\nfor dataset in combine:\n dataset['valsegm'] = dataset['valsegm'].map(segmentMapping)\n\ntrain_df = train_df.drop(['unique', 'pcode', 'BALLOAN', 'BALLOC','BALMRGG','TXBRAN','TXATM','TXPOS','TXCHQ',\n 'TXWEB','TXTEL','TOTSERV','CH_NM_SERV','CH_NM_PRD','N_IND_INC_','numrr_1','numcon_1',\n 'avginc_1','avginv_1'], axis=1)\ntest_df = test_df.drop(['unique', 'pcode', 'BALLOAN', 'BALLOC','BALMRGG','TXBRAN','TXATM','TXPOS','TXCHQ',\n 'TXWEB','TXTEL','TOTSERV','CH_NM_SERV','CH_NM_PRD','N_IND_INC_','numrr_1','numcon_1',\n 'avginc_1','avginv_1'], axis=1)\nxval_df = xval_df.drop(['unique', 'pcode', 'BALLOAN', 'BALLOC','BALMRGG','TXBRAN','TXATM','TXPOS','TXCHQ',\n 'TXWEB','TXTEL','TOTSERV','CH_NM_SERV','CH_NM_PRD','N_IND_INC_','numrr_1','numcon_1',\n 'avginc_1','avginv_1'], axis=1)\n\n# train_df = train_df.drop(['age','valsegm','gender','savings'],axis=1)\n# xval_df = xval_df.drop(['age','valsegm','gender','savings'],axis=1)\n# test_df = test_df.drop(['age','valsegm','gender','savings'],axis=1)\n\nX_train = train_df.drop('APURCH',axis = 1).copy()\nY_train = train_df['APURCH']\nX_val = xval_df.drop('APURCH', axis = 1).copy()\nY_val = xval_df['APURCH']\nX_test = test_df.drop('APURCH',axis = 1).copy()\nY_test = test_df['APURCH']\n\nprint(train_df.head())\nprint(xval_df[['NEWMRGG','APURCH']].groupby(['NEWMRGG'],as_index=False).mean().sort_values(by='APURCH',ascending=False))\n\n\n# Logistic Regression\nlogreg = 
LogisticRegression()\nlogreg.fit(X_train,Y_train)\n# Y_test = logreg.predict(X_val)\nacc_log = round(logreg.score(X_train, Y_train)*100 ,2)\n\ncoeff_df = pd.DataFrame(train_df.columns.delete(0))\ncoeff_df. columns = ['Feature']\ncoeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\nprint(coeff_df.sort_values(by='Correlation',ascending=False))\nclf = MLPClassifier(max_iter=2000)\n# print(X_train.shape,Y_train.shape,X_val.shape,Y_val.shape)\n# clf = SVC()\nplot_learning_curves(X_train,Y_train,X_val,Y_val, clf)\nplt.show()\n# print(xval_df['APURCH'].size)\n# print(Y_val.shape)\n\n# plt.plot(train_sizes,train_scores,'r')\n# plt.plot(train_scores,valid_scores,'b')\n# plt.show()\n\n\nsvc = SVC()\nsvc.fit(X_train,Y_train)\nY_val = svc.predict(X_val)\nacc_svc = round(svc.score(X_train,Y_train)*100,2)\nprint(\"acc svc = \", acc_svc)\n\nrandom_forest = RandomForestClassifier(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\nY_val = random_forest.predict(X_val)\nrandom_forest.score(X_train, Y_train)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nprint(\"acc randon forest = \",acc_random_forest)\n\n# count = 0\n# correct = 0\n# for i in range(xval_df['APURCH'].size):\n# if Y_val[i] == xval_df.iloc[i]['APURCH']:\n# correct = correct + 1\n# count = count + 1\n\n# print(\"score: \", correct,\". count:\", count , \"acc:\", round(correct/count,2))\n#\n# print(\"acc log = \", acc_log)\n# print(train_df.head())\n # dataset['BALLOAN'] = dataset['BALLOAN'].fillna(0).astype(int)\n # dataset['BALLOC'] = dataset['BALLOC'].fillna(0).astype(int)\n # dataset['BALMRGG'] = dataset['BALMRGG'].fillna(0).astype(int)\n # dataset['loans'] = dataset['BALLOAN'] +dataset['BALLOC'] + dataset['BALMRGG']\n # dataset['hasLoan'] = 1\n # dataset.loc[dataset['loans'] < 1, 'hasLoan'] = 0\n#\n# grid = sns.FacetGrid(train_df, col='APURCH')\n# grid.map (plt.hist, 'age',bins=5)\n# plt.show()\n# print(pd.qcut(train_df['loans'],4))\n\n\n\n\n\n# print(train_df[['TOTDEP', 'APURCH']].groupby(['TOTDEP'], as_index=False).mean().sort_values(by='APURCH',\n# ascending=False))\n # dataset = dataset.drop(['gen'])\n# print(train_df[['gender','APURCH']].groupby(['gender'],as_index=False).mean().sort_values(by='APURCH',ascending=False))\n # dataset['gender'] = dataset.loc[]\n# print(train_df[['age','APURCH']].groupby(['age'],as_index=False).mean().sort_values(by='APURCH',ascending=False))\n\n# print(train_df[['gendm','APURCH']].groupby(['gendm'],as_index=False).mean().sort_values(by='APURCH',ascending=False))\n\n# print(xval_df.head(20))\n# print(train_df.iloc[[766]])\n\n","sub_path":"VancityModel_normalized.py","file_name":"VancityModel_normalized.py","file_ext":"py","file_size_in_byte":8840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"122783775","text":"#!/usr/bin/env python3\n\nimport sys\nimport pathlib\nimport re\nimport os\nimport subprocess\nimport tarfile\nfrom github3 import login\n\ndef release(package, commit, version):\n\n \"\"\" Create or update a github release for this version / commit, uploading package. 
\"\"\"\n\n print(\"Releasing {} from commit {} as version {}\".format(package, commit, version))\n\n token = os.environ['GITHUB_API_TOKEN']\n if not token:\n raise EnvironmentError(\"No Github API Token available\")\n gh = login(token=token)\n r = gh.repository(\"curvelogic\", \"eucalypt-hs\")\n\n try:\n release = r.release_from_tag(version)\n print(\"Found existing release for {}\".format(version))\n except:\n print(\"Creating release for {}\".format(version))\n release = r.create_release(tag_name = version,\n name = version,\n body = \"Prototype eu binary.\",\n target_commitish = commit,\n draft = True,\n prerelease = True)\n\n if release:\n print(\"Uploading binary {}\".format(package))\n release.upload_asset(content_type = \"application/binary\",\n name = package,\n asset = open(package, 'rb'))\n\ndef main(args):\n\n \"\"\" Determine version details to release and then create. \"\"\"\n\n exe_path = pathlib.Path(args[1])\n\n # Determine architecture from path\n arch_tags = [item for item in exe_path.parts if re.match('.*64.*', item)]\n if arch_tags:\n arch = arch_tags[0]\n else:\n arch = \"x86_64-osx\"\n\n # Query eu itself for version details\n version = subprocess.check_output([exe_path, \"-e\", \"eu.build.version\"]).strip().decode('utf8').strip(\"'\")\n # Until this whole circular thing is operational\n # commit = subprocess.check_output([exe_path, \"-e\", \"eu.build.commit\"]).strip().decode('utf8').strip(\"'\")\n commit = os.environ.get(\"CIRCLE_SHA1\")\n if not commit:\n commit = subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).strip().decode('utf8').strip(\"'\")\n\n # TGZ the exe\n package = \"eucalypt-hs-\" + arch + \".tgz\"\n with tarfile.open(package, \"w:gz\") as tar:\n tar.add(exe_path)\n\n release(package, commit, version)\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"ci/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"211292595","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import BaseCrossValidator\n\nfrom xam.util import datetime_range\n\n\nclass DatetimeCV(BaseCrossValidator):\n\n \"\"\"Cross-validation procedure that is aware of datetimes\n\n This goes a step further than sklearn's TimeSeriesSplit and takes into\n account the datetimes contained in the index of provided\n pandas.DataFrame. TimeSeriesSplit is more of a \"running\" cross-validation\n prodedure whereas DatetimeCV returns splits that correspond to datetimes.\n Moreover TimeSeriesSplit only produce one test index at a time. 
DatetimeCV\n will produce test indexes that match a given date.\n\n Attributes:\n timedelta (datetime.timedelta): the step to increase folds by.\n\n \"\"\"\n\n def __init__(self, timedelta):\n super().__init__()\n self.timedelta = timedelta\n\n def split(self, X, y=None, groups=None):\n \"\"\"\n\n Args:\n X (pd.DataFrame): a dataframe with a DatetimeIndex.\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n raise ValueError('X is not a pandas.DataFrame')\n if not isinstance(X.index, pd.DatetimeIndex):\n raise ValueError(\"X's index is not a DatetimeIndex\")\n\n min_dt = X.index.min()\n max_dt = X.index.max()\n\n indices = np.arange(len(X))\n\n for dt in datetime_range(min_dt+self.timedelta, max_dt, step=self.timedelta):\n train_idxs = indices[X.index < dt]\n test_idxs = indices[X.index == dt]\n\n if train_idxs.size == 0:\n raise ValueError('No data found before {}'.format(dt))\n if test_idxs.size == 0:\n raise ValueError('No data found for {}'.format(dt))\n\n yield train_idxs, test_idxs\n\n def get_n_splits(self, X=None, y=None, groups=None):\n \"\"\"Returns the number of splitting iterations in the cross-validator\"\"\"\n if not isinstance(X, pd.DataFrame):\n raise ValueError('X is not a pandas.DataFrame')\n if not isinstance(X.index, pd.DatetimeIndex):\n raise ValueError(\"X's index is not a DatetimeIndex\")\n\n first_dt = X.index.min()\n last_dt = X.index.max()\n\n return int((last_dt - first_dt) / self.timedelta)\n","sub_path":"xam/model_selection/datetime_cross_validation.py","file_name":"datetime_cross_validation.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"270383641","text":"######################################\n############# Parameters #############\n######################################\nis_cuda = True # Turn to False if CPU used\nimg_root = \"dataset/500-samples/train/images\" # \"dataset/5pairs/train/images\"\ntext_root = \"dataset/500-samples/train/qa\"\nis_QApairs = False # If false, we use caption, if true, we use Q/A pairs\nmax_words_length = 9 # 13 for QA pairs, and 9 for captions\nuse_vgg = True # Do not change this, we use vgg here for image encoding!\n\n### test ### /home/aeroone/Research/Model_v1/\nimg_test_root = \"dataset/500-samples/test/images\" # \"dataset/5pairs/train/images\"\ntext_test_root = \"dataset/500-samples/test/qa\"\ntest_G_model_root = \"dataset/500-samples/test/model\" # change this for the corresponding model!\ntest_num_workers = 4\nbatchsize_test = 10\ntest_result_root = \"dataset/500-samples/test/test result\"\n############\n######################################\n\n#######################################################\n### learning parameters for combined model training ###\n#######################################################\nGenerator_model_filename = 'model/Generator_training_model.pth' # save the generator trained model\nbatch_size = 30 #64\nnum_workers = 4 # threads number for data loader\nnum_epochs = 1000\nis_residual_module = True # use residual module in the generator\nlearning_rate = 0.0001\nmomentum = 0.1\nlr_decay = 0.2\n# --- attention model ---\nis_attention = False\n######################################\n\n######################################\n### visual-semantic text embedding ###\n######################################\nVSE_model_filename = 'model/VSE_training_model.pth'\npretrained_word_model = 'glove.6B.300d.txt'\nVSE_batch_size = 64\nVSE_num_workers = 4 # threads number for data loader\nVSE_num_epochs = 
300\nVSE_embedding_dim = 300\nVSE_learning_rate = 0.0002\nVSE_margin = 0.2\n############################################################\n############################################################","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"525336236","text":"#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n#Currently this file is included by two python files here. They are:-\n#1. compile_vPath_dissector.py 2. archive_generator.py\n#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n#importing system files....\nimport os\nimport errno\nimport subprocess\n\n#----------------------------------------------------------------------------\n#This function makes directory at the path specified if it doesn't\n#exist. Otherwise, if its present, then it simply moves forward to the\n#next line of code. On successful creation of the directory it returns\n#a status code of '0' and if there is an error in creating directory,\n#then it returns an error code of '-1000'\n\ndef update_dir(path):\n retcode=subprocess.Popen([\"git\",\"pull\"],shell=False)\n retcode.wait()\n return retcode\n\ndef mkdir_p(path): #This function makes directory at the path\n #specified. If directory is already there,\n #then it handles the error.\n try:\n os.mkdir(path) #making the directory by making system\n #calls......\n except OSError as err:\n if err.errno==errno.EEXIST:\n pass #If directory is there, then go\n #ahead..... Don't stop by displaying\n #message.......\n else:\n return -1000\n return 0 #returns a status code of 0 which indicates that the\n #function executed successfully...\n\n#--------------------------------------------------------------------------\n#This function takes the path of directory whose subdirectories you\n#want to list. For example, you want to get the list of all the\n#directories present in /home directory, then pass the path as /home\n#and it will list all the subdirectories present in the /home\n#folder. *****It will not look for subdirectories inside\n#subdirectories.******* Returntype: It returns a list containing all\n#the subdirectories..... 
On error, it returns the\n#error code '-1001'\n\ndef listdir_check(path): #This function checks whether the path\n                         #specified is correct and then lists the\n                         #directories in the path......\n    try:\n        list=os.listdir(path) #This system call lists all the\n                              #directories in the path specified.....\n        return list\n    except OSError as err: #If error occurs, then perform the\n                           #following tasks.....\n        return -1001 #So here it returns the errorcode\n                     #\"-1001\" to the program.....\n    \n#--------------------------------------------------------------------------\n","sub_path":"vpath-dissector-compiler/dir_fxns.py","file_name":"dir_fxns.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"225564298","text":"import numpy as np\nfrom ReadFile import Read\nfrom astropy.table import Table\nfrom astropy.io import ascii\n\n#from ParticleProperties import ParticleInfo\n\ndef CompnentMass(filename,particle_type):\n    #Calculating total mass of specified type of particle\n\n    #Inputs:\n    #filename = path to the data file to analyze\n    #particle_type = particle number corresponding to particle of interest (1 for Halo, 2 for Disk, 3 for Bulge)\n\n    #Returns:\n    #total mass of desired particle population\n\n    time, num_part, data = Read(filename) #reading in data\n\n    particle = data[data['type'] == particle_type] #Finding particles based on input particle_type\n    tot_mass = np.sum(particle['m'])/100 #summing up all mass elements of a certain particle type. Converting to units of 10^12 M_sun\n    return(np.around(tot_mass,3)) #returning mass\n\n\nfilename_MW = '/Users/Ryan/Desktop/School/ASTR400B/400B_webster/MW_000.txt' #defining file name\ncomp_mass_MW = [] #Creating empty list to fill with halo, disk and bulge mass\nfor i in range(1,4): #This loop is for computing the total mass of each particle type \n    comp_mass = CompnentMass(filename_MW,i) #calculating total mass for each particle type\n    comp_mass_MW.append(comp_mass) #appending each component to list\n\n#Same process as above, except for M31\nfilename_M31 = '/Users/Ryan/Desktop/School/ASTR400B/400B_webster/M31_000.txt'\ncomp_mass_M31 = []\nfor i in range(1,4):\n    comp_mass = CompnentMass(filename_M31,i)\n    comp_mass_M31.append(comp_mass)\n\n#Same process as above, except for M33\nfilename_M33 = '/Users/Ryan/Desktop/School/ASTR400B/400B_webster/M33_000.txt'\ncomp_mass_M33 = []\nfor i in range(1,4):\n    comp_mass = CompnentMass(filename_M33,i)\n    comp_mass_M33.append(comp_mass)\n\n#Creating astropy table with appropriate columns\nt = Table(names=('Galaxy Name', 'Halo Mass (10^12 Msun)', 'Disk Mass (10^12 Msun)', 'Bulge Mass (10^12 Msun)', 'Total (10^12 Msun)','fbar'), dtype=('S', 'f4', 'f4', 'f4', 'f4', 'f4'))\n\n#Calculating fbar ratio for MW by simply adding up disk and bulge mass, and dividing by total system mass\nmw_fbar = np.around((comp_mass_MW[1]+comp_mass_MW[2])/np.sum(comp_mass_MW),3)\n\n#Same as above, but for M31\nm31_fbar = np.around((comp_mass_M31[1]+comp_mass_M31[2])/np.sum(comp_mass_M31),3)\n\n#Same as above, but for M33\nm33_fbar = np.around((comp_mass_M33[1]+comp_mass_M33[2])/np.sum(comp_mass_M33),3)\n\n#Calculating total group halo mass by simply adding up halo masses of MW, M31 and M33\ngroup_halo_mass = np.around(comp_mass_MW[0] + comp_mass_M31[0] + comp_mass_M33[0],3)\n\n#Same as above, but for total group disk mass\ngroup_disk_mass = np.around(comp_mass_MW[1] + comp_mass_M31[1] + comp_mass_M33[1],3)\n\n#Same as above, but for total group bulge 
mass\ngroup_bulge_mass = np.around(comp_mass_MW[2] + comp_mass_M31[2] + comp_mass_M33[2],3)\n\n#Adding up all group masses to find total mass\ntotal_group_mass = group_bulge_mass+group_disk_mass+group_halo_mass\n\n#Calculating f_bar for local group\nf_bar = np.around((group_disk_mass+group_bulge_mass)/total_group_mass,3)\n\n#Adding data to astropy table\nt.add_row(('MW',comp_mass_MW[0],comp_mass_MW[1],comp_mass_MW[2],np.sum(comp_mass_MW),mw_fbar))\nt.add_row(('M31',comp_mass_M31[0],comp_mass_M31[1],comp_mass_M31[2],np.sum(comp_mass_M31),m31_fbar))\nt.add_row(('M33',comp_mass_M33[0],comp_mass_M33[1],comp_mass_M33[2],np.sum(comp_mass_M33),m33_fbar))\nt.add_row(('Group',group_halo_mass,group_disk_mass,group_bulge_mass,total_group_mass,f_bar))\n\n#Saving table\nascii.write(t,'comp_masses', format='latex')","sub_path":"Homeworks/Homework7/GalaxyMass.py","file_name":"GalaxyMass.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"579017530","text":"\"\"\"SMS URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom UserProfile.views import register, user_active, index\nfrom changelog.views import GetChangeLog\n\"\"\"\nfrom Platform.views import test_ue\n\"\"\"\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^about/', GetChangeLog, name='about'),\n url(r'^$', index, name='index'),\n\n # User\n url(r'^accounts/', include('django.contrib.auth.urls')),\n url(r'^register/$', register, name='register'),\n url(r'^active/(?P\\w+)$', user_active, name='user_active'),\n\n # Platform\n url(r'^platform/', include('Platform.urls')),\n\n # Product\n url(r'^product/', include('Product.urls')),\n\n # Vunls\n url(r'^vuln/', include('Vuln.urls')),\n\n # ue upload\n url(r'^ueditor/', include('DjangoUeditor.urls')),\n]","sub_path":"SMS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"134400928","text":"#Se utilizan las funciones de la vez pasada y\n#se modifican para que tiren solamente resultados\ndef recolectar_datos(x):\n #Se crea el contador\n i=0\n for i in range(0,12): \n #Retomamos lo del primer ejercicio \n a=i+1\n b=int(input(\"Por favor ingrese la temperatura {}:\".format(a)))\n c=x.append(b)\n#Creamos la segunda función\ndef promedio(x):\n sumatoria=sum(x)\n promedio=sumatoria/12\n print(promedio)\n#Creamos la tercer función\ndef mayor(x):\n a=max(x)\n print(a)\n#Creamos la cuarta función\ndef mes(x):\n a=max(x)\n b=x.index(a)\n if b == 0:\n c=\"Enero\"\n else:\n if b == 1:\n c=\"Febrero\"\n else:\n if b == 2:\n c=\"Marzo\"\n else:\n if b == 3:\n c=\"Abril\"\n else:\n if b == 4:\n c=\"Mayo\"\n else:\n if b == 5:\n c=\"Junio\"\n else:\n if b == 6:\n c=\"Julio\"\n else:\n if b == 7:\n c=\"Agosto\"\n else:\n if b == 8:\n 
c=\"Septiembre\"\n else:\n if b == 9:\n c=\"Octubre\"\n else:\n if b == 10:\n c=\"Noviembre\"\n else:\n if b == 11:\n c=\"Diciembre\"\n #Se imprime los resultados\n print(c)\n#Creamos las listas necesarias\nsantander=[]\nguajira=[]\nnarino=[]\n#Mensaje de Bienvenida\nprint(\"Bienveido al OMN\")\nprint(\"Este programa está diseñado para hacer un análizis de datos de las temperaturas de diferentes departamentos\")\n#Creamos un integro que reciba la opción del usuario\na=int(input(\"Escoge las siguientes opciones: \\n 1:Departamento de Santander \\n 2:Departamento de La Guajira \\n 3:Departamento de Nariño \\n 4:Todo el país \\n Escoge la opción: \"))\n#Creamos un if y then para la opcion que el usuario vaya a escoger\nif a==1:\n #Simplemente se recogen las funciones anteriores y se utilizan en el proyecto\n print(\"Departamento de Santander\")\n recolectar_datos(santander)\n print(\"El promedio de las temperaturas en Santander fue de:\")\n promedio(santander)\n print(\"La temperatura mayor en Santander fue de: \")\n mayor(santander)\n print(\"La temperatura donde fue mayor es en: \")\n mes(santander)\n print(\"Departamento de Santander\")\nelse:\n if a==2:\n print(\"Departamento de La Guajira\")\n recolectar_datos(guajira)\n print(\"El promedio de las temperaturas en La Guajira fue de: \")\n promedio(guajira)\n print(\"La temperatura mayor en La Guajira fue de: \")\n mayor(guajira)\n print(\"El mes donde fue mayor es: \")\n mes(guajira)\n print(\"Departamento de La Guajira\")\n else:\n if a == 3:\n print(\"Departamento de Nariño\")\n recolectar_datos(narino)\n print(\"El promedio de las temperaturas en Nariño fue de: \")\n promedio(narino)\n print(\"La temperatura mayor en Nariño fue de: \")\n mayor(narino)\n print(\"El mes donde fue mayor es: \")\n mes(narino)\n print(\"Departamento de Nariño\")\n else:\n if a == 4:\n print(\"Departamento de Santander\")\n recolectar_datos(santander)\n print(\"El promedio de las temperaturas en Santander fue de:\")\n promedio(santander)\n print(\"La temperatura mayor en Santander fue de: \")\n mayor(santander)\n print(\"La temperatura donde fue mayor es en: \")\n mes(santander)\n print(\"Departamento de Santander\")\n print(\"Departamento de La Guajira\")\n recolectar_datos(guajira)\n print(\"El promedio de las temperaturas en La Guajira fue de:\")\n promedio(guajira)\n print(\"La temperatura mayor en La Guajira fue de: \")\n mayor(guajira)\n print(\"La temperatura donde fue mayor es en: \")\n mes(guajira)\n print(\"Departamento de La Guajira\")\n print(\"Departamento de Nariño\")\n recolectar_datos(narino)\n print(\"El promedio de las temperaturas en Nariño fue de:\")\n promedio(narino)\n print(\"La temperatura mayor en Nariño fue de: \")\n mayor(narino)\n print(\"La temperatura donde fue mayor es en: \")\n mes(narino)\n print(\"Departamento de Narino\")\n\n#Desarrollado por Pedro Gómez / ID:000396221 / CACE Producciones \n","sub_path":"Proyecto/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"567772279","text":"# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS 
IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nimport attr\n\nfrom synapse.api.constants import RelationTypes\nfrom synapse.storage._base import SQLBaseStore\nfrom synapse.storage.data_stores.main.stream import generate_pagination_where_clause\nfrom synapse.storage.relations import (\n AggregationPaginationToken,\n PaginationChunk,\n RelationPaginationToken,\n)\nfrom synapse.util.caches.descriptors import cached, cachedInlineCallbacks\n\nlogger = logging.getLogger(__name__)\n\n\nclass RelationsWorkerStore(SQLBaseStore):\n @cached(tree=True)\n def get_relations_for_event(\n self,\n event_id,\n relation_type=None,\n event_type=None,\n aggregation_key=None,\n limit=5,\n direction=\"b\",\n from_token=None,\n to_token=None,\n ):\n \"\"\"Get a list of relations for an event, ordered by topological ordering.\n\n Args:\n event_id (str): Fetch events that relate to this event ID.\n relation_type (str|None): Only fetch events with this relation\n type, if given.\n event_type (str|None): Only fetch events with this event type, if\n given.\n aggregation_key (str|None): Only fetch events with this aggregation\n key, if given.\n limit (int): Only fetch the most recent `limit` events.\n direction (str): Whether to fetch the most recent first (`\"b\"`) or\n the oldest first (`\"f\"`).\n from_token (RelationPaginationToken|None): Fetch rows from the given\n token, or from the start if None.\n to_token (RelationPaginationToken|None): Fetch rows up to the given\n token, or up to the end if None.\n\n Returns:\n Deferred[PaginationChunk]: List of event IDs that match relations\n requested. The rows are of the form `{\"event_id\": \"...\"}`.\n \"\"\"\n\n where_clause = [\"relates_to_id = ?\"]\n where_args = [event_id]\n\n if relation_type is not None:\n where_clause.append(\"relation_type = ?\")\n where_args.append(relation_type)\n\n if event_type is not None:\n where_clause.append(\"type = ?\")\n where_args.append(event_type)\n\n if aggregation_key:\n where_clause.append(\"aggregation_key = ?\")\n where_args.append(aggregation_key)\n\n pagination_clause = generate_pagination_where_clause(\n direction=direction,\n column_names=(\"topological_ordering\", \"stream_ordering\"),\n from_token=attr.astuple(from_token) if from_token else None,\n to_token=attr.astuple(to_token) if to_token else None,\n engine=self.database_engine,\n )\n\n if pagination_clause:\n where_clause.append(pagination_clause)\n\n if direction == \"b\":\n order = \"DESC\"\n else:\n order = \"ASC\"\n\n sql = \"\"\"\n SELECT event_id, topological_ordering, stream_ordering\n FROM event_relations\n INNER JOIN events USING (event_id)\n WHERE %s\n ORDER BY topological_ordering %s, stream_ordering %s\n LIMIT ?\n \"\"\" % (\n \" AND \".join(where_clause),\n order,\n order,\n )\n\n def _get_recent_references_for_event_txn(txn):\n txn.execute(sql, where_args + [limit + 1])\n\n last_topo_id = None\n last_stream_id = None\n events = []\n for row in txn:\n events.append({\"event_id\": row[0]})\n last_topo_id = row[1]\n last_stream_id = row[2]\n\n next_batch = None\n if len(events) > limit and last_topo_id and last_stream_id:\n next_batch = RelationPaginationToken(last_topo_id, last_stream_id)\n\n return PaginationChunk(\n chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token\n )\n\n return self.db.runInteraction(\n \"get_recent_references_for_event\", 
_get_recent_references_for_event_txn\n )\n\n @cached(tree=True)\n def get_aggregation_groups_for_event(\n self,\n event_id,\n event_type=None,\n limit=5,\n direction=\"b\",\n from_token=None,\n to_token=None,\n ):\n \"\"\"Get a list of annotations on the event, grouped by event type and\n aggregation key, sorted by count.\n\n This is used e.g. to get the what and how many reactions have happend\n on an event.\n\n Args:\n event_id (str): Fetch events that relate to this event ID.\n event_type (str|None): Only fetch events with this event type, if\n given.\n limit (int): Only fetch the `limit` groups.\n direction (str): Whether to fetch the highest count first (`\"b\"`) or\n the lowest count first (`\"f\"`).\n from_token (AggregationPaginationToken|None): Fetch rows from the\n given token, or from the start if None.\n to_token (AggregationPaginationToken|None): Fetch rows up to the\n given token, or up to the end if None.\n\n\n Returns:\n Deferred[PaginationChunk]: List of groups of annotations that\n match. Each row is a dict with `type`, `key` and `count` fields.\n \"\"\"\n\n where_clause = [\"relates_to_id = ?\", \"relation_type = ?\"]\n where_args = [event_id, RelationTypes.ANNOTATION]\n\n if event_type:\n where_clause.append(\"type = ?\")\n where_args.append(event_type)\n\n having_clause = generate_pagination_where_clause(\n direction=direction,\n column_names=(\"COUNT(*)\", \"MAX(stream_ordering)\"),\n from_token=attr.astuple(from_token) if from_token else None,\n to_token=attr.astuple(to_token) if to_token else None,\n engine=self.database_engine,\n )\n\n if direction == \"b\":\n order = \"DESC\"\n else:\n order = \"ASC\"\n\n if having_clause:\n having_clause = \"HAVING \" + having_clause\n else:\n having_clause = \"\"\n\n sql = \"\"\"\n SELECT type, aggregation_key, COUNT(DISTINCT sender), MAX(stream_ordering)\n FROM event_relations\n INNER JOIN events USING (event_id)\n WHERE {where_clause}\n GROUP BY relation_type, type, aggregation_key\n {having_clause}\n ORDER BY COUNT(*) {order}, MAX(stream_ordering) {order}\n LIMIT ?\n \"\"\".format(\n where_clause=\" AND \".join(where_clause),\n order=order,\n having_clause=having_clause,\n )\n\n def _get_aggregation_groups_for_event_txn(txn):\n txn.execute(sql, where_args + [limit + 1])\n\n next_batch = None\n events = []\n for row in txn:\n events.append({\"type\": row[0], \"key\": row[1], \"count\": row[2]})\n next_batch = AggregationPaginationToken(row[2], row[3])\n\n if len(events) <= limit:\n next_batch = None\n\n return PaginationChunk(\n chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token\n )\n\n return self.db.runInteraction(\n \"get_aggregation_groups_for_event\", _get_aggregation_groups_for_event_txn\n )\n\n @cachedInlineCallbacks()\n def get_applicable_edit(self, event_id):\n \"\"\"Get the most recent edit (if any) that has happened for the given\n event.\n\n Correctly handles checking whether edits were allowed to happen.\n\n Args:\n event_id (str): The original event ID\n\n Returns:\n Deferred[EventBase|None]: Returns the most recent edit, if any.\n \"\"\"\n\n # We only allow edits for `m.room.message` events that have the same sender\n # and event type. 
We can't assert these things during regular event auth so\n # we have to do the checks post hoc.\n\n # Fetches latest edit that has the same type and sender as the\n # original, and is an `m.room.message`.\n sql = \"\"\"\n SELECT edit.event_id FROM events AS edit\n INNER JOIN event_relations USING (event_id)\n INNER JOIN events AS original ON\n original.event_id = relates_to_id\n AND edit.type = original.type\n AND edit.sender = original.sender\n WHERE\n relates_to_id = ?\n AND relation_type = ?\n AND edit.type = 'm.room.message'\n ORDER by edit.origin_server_ts DESC, edit.event_id DESC\n LIMIT 1\n \"\"\"\n\n def _get_applicable_edit_txn(txn):\n txn.execute(sql, (event_id, RelationTypes.REPLACE))\n row = txn.fetchone()\n if row:\n return row[0]\n\n edit_id = yield self.db.runInteraction(\n \"get_applicable_edit\", _get_applicable_edit_txn\n )\n\n if not edit_id:\n return\n\n edit_event = yield self.get_event(edit_id, allow_none=True)\n return edit_event\n\n def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender):\n \"\"\"Check if a user has already annotated an event with the same key\n (e.g. already liked an event).\n\n Args:\n parent_id (str): The event being annotated\n event_type (str): The event type of the annotation\n aggregation_key (str): The aggregation key of the annotation\n sender (str): The sender of the annotation\n\n Returns:\n Deferred[bool]\n \"\"\"\n\n sql = \"\"\"\n SELECT 1 FROM event_relations\n INNER JOIN events USING (event_id)\n WHERE\n relates_to_id = ?\n AND relation_type = ?\n AND type = ?\n AND sender = ?\n AND aggregation_key = ?\n LIMIT 1;\n \"\"\"\n\n def _get_if_user_has_annotated_event(txn):\n txn.execute(\n sql,\n (\n parent_id,\n RelationTypes.ANNOTATION,\n event_type,\n sender,\n aggregation_key,\n ),\n )\n\n return bool(txn.fetchone())\n\n return self.db.runInteraction(\n \"get_if_user_has_annotated_event\", _get_if_user_has_annotated_event\n )\n\n\nclass RelationsStore(RelationsWorkerStore):\n pass\n","sub_path":"synapse/storage/data_stores/main/relations.py","file_name":"relations.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"148529560","text":"###############################################################################\n# Way to use this:\n# cmsRun dumpHGCalDDD_cfg.py geometry=D88\n#\n# Options for geometry D77, D83, D88, D92, D93\n#\n###############################################################################\nimport FWCore.ParameterSet.Config as cms\nimport os, sys, imp, re\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\n####################################################################\n### SETUP OPTIONS\noptions = VarParsing.VarParsing('standard')\noptions.register('geometry',\n \"D88\",\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"geometry of operations: D77, D83, D88, D92, D93\")\n\n### get and parse the command line arguments\noptions.parseArguments()\n\nprint(options)\n\n####################################################################\n# Use the options\n\nif (options.geometry == \"D83\"):\n from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9\n process = cms.Process('DUMP',Phase2C11M9)\n process.load('Configuration.Geometry.GeometryExtended2026D83Reco_cff')\n fileName = 'hgcalV15DDD.root'\nelif (options.geometry == \"D77\"):\n from Configuration.Eras.Era_Phase2C11_cff import Phase2C11\n process = cms.Process('DUMP',Phase2C11)\n 
process.load('Configuration.Geometry.GeometryExtended2026D77Reco_cff')\n fileName = 'hgcalV14DDD.root'\nelif (options.geometry == \"D92\"):\n from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9\n process = cms.Process('DUMP',Phase2C11M9)\n process.load('Configuration.Geometry.GeometryExtended2026D92Reco_cff')\n fileName = 'hgcalV17DDD.root'\nelif (options.geometry == \"D93\"):\n from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9\n process = cms.Process('DUMP',Phase2C11M9)\n process.load('Configuration.Geometry.GeometryExtended2026D93Reco_cff')\n fileName = 'hgcalV17NDDD.root'\nelse:\n from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9\n process = cms.Process('DUMP',Phase2C11M9)\n process.load('Configuration.Geometry.GeometryExtended2026D88Reco_cff')\n fileName = 'hgcalV16DDD.root'\n\nprint(\"Output file Name: \", fileName)\n\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n\nif 'MessageLogger' in process.__dict__:\n process.MessageLogger.G4cerr=dict()\n process.MessageLogger.G4cout=dict()\n process.MessageLogger.HGCalGeom=dict()\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1)\n)\n\nprocess.add_(cms.ESProducer(\"TGeoMgrFromDdd\",\n verbose = cms.untracked.bool(False),\n level = cms.untracked.int32(14)\n))\n\nprocess.dump = cms.EDAnalyzer(\"DumpSimGeometry\",\n outputFileName = cms.untracked.string(fileName))\n\nprocess.p = cms.Path(process.dump)\n","sub_path":"Geometry/HGCalCommonData/test/python/dumpHGCalDDD_cfg.py","file_name":"dumpHGCalDDD_cfg.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"306299355","text":"\n\nfrom xai.brain.wordbase.nouns._superintendent import _SUPERINTENDENT\n\n#calss header\nclass _SUPERINTENDENTS(_SUPERINTENDENT, ):\n\tdef __init__(self,): \n\t\t_SUPERINTENDENT.__init__(self)\n\t\tself.name = \"SUPERINTENDENTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"superintendent\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_superintendents.py","file_name":"_superintendents.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"397553428","text":"from matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\n\n# read raw data\ndata_1 = pd.read_csv(\"C:\\\\Users\\\\ses516\\\\Desktop\\\\Data Incubator\\\\Problem 2\\\\MT_cleaned.csv\")\ndata_2 = pd.read_csv(\"C:\\\\Users\\\\ses516\\\\Desktop\\\\Data Incubator\\\\Problem 2\\\\VT_cleaned.csv\")\n\n# find counts of each gender\n[ln,temp] = data_1.shape\ngender_count = pd.DataFrame({'count': data_1.groupby(\"driver_gender\").size()}).reset_index()\n\n# arrested + out of state or not\narrested = data_1[(data_1['is_arrested']==1)]\n[arrested_ln,temp] = arrested.shape\narrested_OS = pd.DataFrame({'count': arrested.groupby(\"out_of_state\").size()}).reset_index()\n\n# speeding\nviolation_type = pd.DataFrame({'count': data_1.groupby(\"violation\").size()}).reset_index()\nviolation_speed = pd.DataFrame(violation_type[violation_type['violation'].str.contains('Speeding')]).reset_index()\nspeed_total = violation_speed['count'].sum()\n\n# DUI in Montana\nviolation_DUI_mon = pd.DataFrame(violation_type[violation_type['violation'].str.contains('DUI')]).reset_index()\nDUI_total_mon = violation_DUI_mon['count'].sum()\nR1 = DUI_total_mon/(ln-DUI_total_mon)\n\n# DUI in 
Vermont\n[ln_ver,temp] = data_2.shape\nviolation_type_vt = pd.DataFrame({'count': data_2.groupby(\"violation\").size()}).reset_index()\nviolation_DUI_ver = pd.DataFrame(violation_type_vt[violation_type_vt['violation'].str.contains('DUI')]).reset_index()\nDUI_total_ver = violation_DUI_ver['count'].sum()\nR2 = DUI_total_ver/(ln_ver-DUI_total_ver)\n\n# ratio between DUI rate in Montana vs. Vermont\nratio = R1/R2\n\n# Year vs. model linear regression\ndata_1[\"time\"] = pd.to_datetime(data_1[\"stop_date\"])\ndata_1[\"year\"] = data_1[\"time\"].dt.year\ndata_1_nomissing = data_1[data_1['year'].notnull()]\ndata_1_nomissing['vehicle_year'] = data_1_nomissing['vehicle_year'].apply(pd.to_numeric, errors='coerce')\nreg_data = pd.DataFrame(data_1_nomissing.groupby('year', as_index=False)['vehicle_year'].mean()).reset_index()\n\nimport sklearn\nimport sklearn.feature_selection  # submodule must be imported explicitly for f_regression below\nfrom sklearn import linear_model as linmod\nlm = linmod.LinearRegression()\nx = reg_data.year.values.reshape((8,1))\ny = reg_data.vehicle_year.values.reshape((8,1))\nresult = lm.fit(x,y)\n\n# prediction for 2020 (predict expects a 2D array of samples)\ny_2020 = result.predict(np.array([[2020]]))\n\n# p_value\ntemp, p_val = sklearn.feature_selection.f_regression(x, y, center=True)\n\n# two states combined, grouped by hours\nbigdata = data_1.append(data_2, ignore_index=True)\nbigdata[\"time\"] = pd.to_datetime(bigdata[\"stop_time\"])\nbigdata[\"hr\"] = bigdata[\"time\"].dt.hour\nhour_size = pd.DataFrame({'count': bigdata.groupby(\"hr\").size()}).reset_index()\n# .count is the DataFrame method, so index the 'count' column explicitly\ndiff = hour_size['count'].max()-hour_size['count'].min()\n\n# county area\ndata_1['county_id'] = pd.Categorical(data_1['county_name']).codes\nlon_std = pd.DataFrame({'std': data_1.groupby(\"county_id\")['lon'].std()}).reset_index()\nlat_std = pd.DataFrame({'std': data_1.groupby(\"county_id\")['lat'].std()}).reset_index()\nlon_std['Area'] = lon_std['std']*lat_std['std']*math.pi*111*111*np.cos(lat_std['std'])\nlon_std['Area'].max()\n\n# chi-squared test\narrested_mon = pd.DataFrame({'count': data_1.groupby(\"is_arrested\").size()}).reset_index()\narrested_ver = pd.DataFrame({'count': data_2.groupby(\"is_arrested\").size()}).reset_index()\n\ntable_df = pd.DataFrame({'montana':[807923,17195], 'vermont':[279954,3331]}, index = ['not_arrested','arrested'])\nfrom scipy.stats import chi2_contingency\nchi2, p, dof, expected = chi2_contingency(table_df, correction=False)\n\n\n\n\n","sub_path":"Problem 2/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"514558306","text":"'''In Python, an anonymous function is a function that is defined without a name.\n\nWhile normal functions are defined using the def keyword, in Python anonymous functions are defined using the lambda keyword.\n\nLambda Functions\n\nSyntax of Lambda Function\n\nlambda arguments: expression\n\nLambda functions can have any number of arguments but only one expression. The expression is evaluated and returned. Lambda functions can be used wherever function objects are required.\n\n\n\n\n'''\n\n# Program to show the use of lambda functions\n\ndouble = lambda x: x * 2\n\n# Output: 10\nprint(double(5))\n\n'''In the above program, lambda x: x * 2 is the lambda function. Here x is the argument and x * 2 is the expression that gets evaluated and returned.\n\nThis function has no name. It returns a function object which is assigned to the identifier double. We can now call it as a normal function. 
The statement'''\n\ndouble = lambda x: x * 2\n\n#is nearly the same as\n\ndef double(x):\n    return x * 2\n\n'''Use of Lambda Function\n\nWe use lambda functions when we require a nameless function for a short period of time.\n\nIn Python, we generally use it as an argument to a higher-order function (a function that takes in other functions as arguments).\n Lambda functions are used along with built-in functions like filter(), map() etc.'''\n\n# Program to filter out only the even items from a list\n\n#filter\nmy_list = [1, 5, 4, 6, 8, 11, 3, 12]\n\nnew_list = list(filter(lambda x: (x%2 == 0) , my_list))\n\n# Output: [4, 6, 8, 12]\nprint(new_list)\n\n\n\n#map\n\n\nmy_list1=[2,4,6,7,8,9,10,20]\n\n\nnew_list1 = list(map(lambda x:x*2, my_list1))\n\nprint(new_list1)\n\n'''\ndef mul(val):\n\n    return val*2\n\nprint(\"\\n\\n\")\nfor i in my_list1:\n    print(mul(i))'''\n\n# factorial of n using reduce\nfrom functools import reduce  # reduce is not a builtin in Python 3\nn = 5\n\nprint(\"reduce\", reduce(lambda x,y:x*y,range(1,n+1)))\n\n\n\n\n\n\n","sub_path":"lambda/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"502688741","text":"#GET SCORE\n#PLACE IN TEXT FILE\n#AUTO_FOLDER_SEARCH\n\nfrom tkinter import *\nfrom tkinter.ttk import *\nimport time\n\ndef main():\n    \n    root=Tk()\n    winWidth = 640\n    winHeight = 480\n    root.geometry('{}x{}'.format(winWidth, winHeight))\n    root.resizable(width=False, height=False)\n\n    bg_label = Label(root,text='Verification Status').pack()\n    progress=Progressbar(root,orient=HORIZONTAL,length=300,mode='determinate')\n    progress['value']=5\n    time.sleep(2)\n    progress['value']=50\n    time.sleep(2)\n    progress['value']=20\n    progress.pack()\n    root.mainloop()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Code/print_result.py","file_name":"print_result.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"143955093","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 31 09:06:37 2020\n\n@author: Matthew\n\"\"\"\n\nimport matplotlib as mp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math, cv2, scipy.misc\nfrom matplotlib.backends.backend_agg import FigureCanvas\n\n#params\nsze = 1024\nr = round(sze*.45)\nn_start = 3\nfinal_n = 25\nsteps = 900\nnam = 'gist_heat'\ncmap=mp.pyplot.get_cmap(nam)\nrand=True\n\n\n#initialize shape\ncenter=[sze/2,sze/2]\n\n#setup vid\nfourcc = cv2.VideoWriter_fourcc(*'MP42')\nfps = float(30)\nvideo_filename = './'+'ngon_rot.avi'\nout = cv2.VideoWriter(video_filename, fourcc, fps, (1200, 1200))\nmaxInd = (final_n-n_start)*steps\n#define converter\ndef fig2data ( fig ):\n    \"\"\"\n    @brief Convert a Matplotlib figure to a 3D numpy array with RGB channels and return it\n    @param fig a matplotlib figure\n    @return a numpy 3D array of RGB values\n    \"\"\"\n    # draw the renderer\n    fig.canvas.draw ( )\n    \n    # Get the RGB buffer from the figure\n    w,h = fig.canvas.get_width_height()\n    buf = np.fromstring ( fig.canvas.tostring_rgb(), dtype=np.uint8 )\n    buf.shape = ( w, h,3 )\n    \n    # canvas.tostring_argb gives pixmap in ARGB mode. 
Roll the ALPHA channel to have it in RGBA mode\n buf = np.roll ( buf, 3, axis = 2 )\n return buf\n\n#define plotter\ndef plotTheseCoords(coords,ind):\n #coords is (rowCoords,colCoords)\n pts = len(coords[0])\n \n fig=mp.pyplot.figure(facecolor=(0,0,0),figsize=[4,4])\n mp.pyplot.xlim([0,sze])\n mp.pyplot.ylim([0,sze])\n mp.pyplot.axis('off')\n mp.pyplot.xlim([0,sze])\n mp.pyplot.ylim([0,sze])\n mp.pyplot.gca().axis('off')\n ax = plt.axes([0,0,1,1], frameon=False)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n mp.pyplot.autoscale(tight=False)\n for i in range(len(coords[0])):\n for j in np.arange(1,len(coords[0]),1):\n rowStart = coords[0][i]\n colStart = coords[1][i]\n rowEnd = coords[0][j]\n colEnd = coords[1][j]\n thck=1.5*ind/maxInd\n mp.pyplot.plot([rowStart,rowEnd],[colStart,colEnd],color= 'k',linewidth=1.5,figure=fig)\n\n \n #mp.pyplot.savefig(\"gonvid prog/step\" + '_' + str(ind) ,facecolor = 'k', dpi = 200)\n fig.savefig(\"gonvid prog/step\" + '_' + str(ind) ,facecolor = 'w', dpi = 300)\n #arr = fig2data(fig)\n# canvas = FigureCanvas(fig)\n# canvas.draw()\n# # grab the pixel buffer and dump it into a numpy array\n# X = np.array(canvas.renderer.buffer_rgba())\n #a=notavar\n \n\n\n\n\n\n#mp.pyplot.rcParams['axes.facecolor'] = 'black'\nind=0\n#n is old number of sides\nn=20\n\nrads = np.array((2*math.pi)*np.arange(n)/(n))\nrads=np.append(rads,rads[0])\nmult= np.tile([1,-1], 10)\nmult=np.append(mult, 1)\ndRad=(math.pi/steps)*mult\n\n#display moving\nfor i in range(300):\n \n current_rads = rads+i*dRad\n rowCoords = np.round(center[0] + r * np.sin(current_rads)).astype(int)\n colCoords = np.round(center[1] + r * np.cos(current_rads)).astype(int)\n coords = (rowCoords,colCoords)\n \n plotTheseCoords(coords,ind)\n im = np.asarray(scipy.misc.imread(\"gonvid prog/step\" + '_' + str(i)+\".png\")[:,:,:3]).astype('uint8')\n out.write(im[:,:,[2,1,0]])\n ind+=1\n plt.close('all')\nout.release() \n\n \n \n \n \n \n ","sub_path":"video-gen/nGon_star/rot_gon_vidmaker.py","file_name":"rot_gon_vidmaker.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"221555653","text":"from flask import Flask, request\nimport json\n\nfrom conf.config import Config\n\n\napp = Flask(__name__)\nconfig = Config()\n\n\nCRUD = ['POST', 'GET', 'PUT', 'DELETE']\n\n\n@app.route(\"/question\", methods=CRUD)\ndef question():\n \"\"\"\n Serves question content from S3\n \"\"\"\n s3_key_base = 'questions'\n\n if request.method == 'POST':\n request_body = request.json\n # look up the token ID from the request body\n token_id = request_body.get('token_id', None)\n if not token_id:\n return 'Cannot create content without Token ID', 400\n\n # create the metadata file\n s3_metadata_key = '{}/{}/METADATA.json'.format(\n s3_key_base, token_id)\n metadata_content = json.dumps({'version': '0'})\n _create_s3_object(s3_metadata_key, metadata_content)\n\n s3_content_key = '{}/{}/content_v0.json'.format(\n s3_key_base, token_id)\n token_name = request_body.get('token_name', 'ERROR NO NAME GIVEN')\n token_content = json.dumps({'token': token_name})\n _create_s3_object(s3_content_key, token_content)\n return json.dumps(\n {\n 'key': [s3_metadata_key, s3_content_key],\n 'token': token_name\n }\n ), 200\n\n elif request.method == 'GET':\n\n # Parses out the token id from the request querystring\n query_string = request.query_string\n varargs = _parse_query_string(query_string)\n\n token_id = varargs.get('token_id', 
None)\n if not token_id:\n return 'Token ID is missing', 400\n \n # Fetch metadata from S3\n s3_metadata_key = '{}/{}/METADATA.json'.format(\n s3_key_base, token_id)\n # Parse the latest version from the metadata file\n metadata_json = _fetch_s3_object_and_parse_to_json(s3_metadata_key)\n latest_version = metadata_json.get('latest_version', -1)\n\n # fetch the latest version of the token\n s3_content_key = '{}/{}/content-v{}.json'.format(\n s3_key_base, token_id, latest_version)\n content = _fetch_s3_object_and_parse_to_json(s3_content_key)\n\n # return the token content as a dict\n return json.dumps(content), 200\n elif request.method == 'PUT':\n # bump the version on the metadata file\n pass\n elif request.method == 'DELETE':\n # delete the subcontents\n pass\n\n\ndef _parse_query_string(query_string):\n # splits a query string by '&', then splits each split\n # by '=', building a dict of keys and values from each\n # side of the split by '='\n query_values = query_string.split('&')\n\n data = dict()\n for query_value in query_values:\n keys_and_values = query_value.split('=')\n if len(keys_and_values) == 2:\n key, val = keys_and_values\n data[key] = val\n return data\n\n\ndef _fetch_s3_object_and_parse_to_json(s3_key):\n # uses the Config s3 client to read an object from s3\n client = config.client('s3')\n s3_response = client.get_object(Bucket=config.bucket, Key=s3_key)\n return json.loads(s3_response['Body'].read())\n\n\ndef _create_s3_object(s3_key, content):\n client = config.client('s3')\n s3_response = client.put_object(\n Bucket=config.bucket, Key=s3_key, Body=content)\n print(s3_response)\n\n\ndef _validate_varargs(*varargs):\n \"\"\"\n \"\"\"\n pass\n","sub_path":"cms/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"351838899","text":"import os\nimport sys\n\nimport cv2\n\nASCII = list(map(chr, range(127)))\n# Seq: A-Z, a-z, 0-9\nRANGES = [(65, 91), (97, 123), (48, 58)]\nSPECIAL_CHARACTERS = [\n \".\",\n \",\",\n \";\",\n \":\",\n \"!\",\n \"?\",\n '\"',\n \"'\",\n \"-\",\n \"+\",\n \"=\",\n \"/\",\n \"%\",\n \"&\",\n \"(\",\n \")\",\n \"[\",\n \"]\",\n]\n\n\nclass SheetToPNG:\n LETTER_NAMES = [\n item for start, end in RANGES for item in ASCII[start:end]\n ] + SPECIAL_CHARACTERS\n\n def __init__(self, sheet, letters_dir, cols=8, rows=10):\n self.cols = cols\n self.rows = rows\n\n # TODO If directory given instead of image file, read all images and wrtie the images\n # (example) 0.png, 1.png, 2.png inside every character folder in letters/\n\n # sheet_images = []\n # for s in os.listdir(sheet_dir):\n # sheet_images.append(cv2.imread(sheet_dir + \"/\" + s))\n\n letters = self.detectLetters(sheet)\n self.createLetterDirectory(letters, letters_dir)\n\n def detectLetters(self, sheet_image):\n image = cv2.imread(sheet_image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1],])\n # filtered = cv2.filter2D(blurred, -1, kernel)\n\n ret, thresh = cv2.threshold(gray, 200, 255, 1)\n close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, close_kernel, iterations=2)\n\n contours, h = cv2.findContours(\n close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n contours = sorted(\n filter(\n lambda cnt: len(\n cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)\n )\n == 4,\n contours,\n ),\n key=cv2.contourArea,\n reverse=True,\n 
)\n\n x, y, w, h = cv2.boundingRect(contours[0])\n space_h, space_w = 7 * h // 16, 7 * w // 16\n\n letters = []\n j = 0\n for i in range(self.rows * self.cols):\n x, y, w, h = cv2.boundingRect(contours[i])\n cx, cy = x + w // 2, y + h // 2\n\n roi = image[cy - space_h : cy + space_h, cx - space_w : cx + space_w]\n letters.append([roi, cx, cy])\n j += 1\n\n letters.sort(key=lambda x: x[2])\n sorted_letters = []\n for k in range(self.rows):\n sorted_letters.extend(\n sorted(letters[self.cols * k : self.cols * (k + 1)], key=lambda x: x[1])\n )\n\n return sorted_letters\n\n def createLetterDirectory(self, letters, letters_dir):\n if not os.path.exists(letters_dir):\n os.mkdir(letters_dir)\n\n for k, images in enumerate(letters):\n letter = os.path.join(letters_dir, str(ord(self.LETTER_NAMES[k])))\n if not os.path.exists(letter):\n os.mkdir(letter)\n cv2.imwrite(\n os.path.join(letter, str(ord(self.LETTER_NAMES[k])) + \".png\"),\n images[0],\n )\n\n\ndef main():\n if len(sys.argv) > 1:\n a = SheetToPNG(sheet=sys.argv[1], letters_dir=sys.argv[2], cols=8, rows=10,)\n else:\n print(\"Usage: sheettopng [SHEET_PATH] [LETTER_DIRECTORY_PATH]\")\n","sub_path":"handwrite/sheettopng.py","file_name":"sheettopng.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"137516917","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass AzureStorageCredentialsInfo(Model):\n \"\"\"Credentials to access Azure File Share.\n\n :param account_key: Storage account key. One of accountKey or\n accountKeySecretReference must be specified.\n :type account_key: str\n :param account_key_secret_reference: Specifies the location of the storage\n account key, which is a Key Vault Secret. Users can store their secrets in\n Azure KeyVault and pass it to the Batch AI Service to integrate with\n KeyVault. 
One of accountKey or accountKeySecretReference must be\n specified.\n :type account_key_secret_reference:\n ~azure.mgmt.batchai.models.KeyVaultSecretReference\n \"\"\"\n\n _attribute_map = {\n 'account_key': {'key': 'accountKey', 'type': 'str'},\n 'account_key_secret_reference': {'key': 'accountKeySecretReference', 'type': 'KeyVaultSecretReference'},\n }\n\n def __init__(self, *, account_key: str=None, account_key_secret_reference=None, **kwargs) -> None:\n super(AzureStorageCredentialsInfo, self).__init__(**kwargs)\n self.account_key = account_key\n self.account_key_secret_reference = account_key_secret_reference\n","sub_path":"azure-mgmt-batchai/azure/mgmt/batchai/models/azure_storage_credentials_info_py3.py","file_name":"azure_storage_credentials_info_py3.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"273819908","text":"from .recipes import recipes\n\n\n\nclass SymbolicModel:\n\n def __init__(self, model_name, model_spec, symbols, symbolic_equations, symbolic_calibration,\n discrete_transition=None, distribution=None, options=None, definitions=None):\n\n self.name = model_name\n self.model_spec = model_spec\n\n # reorder symbols\n from collections import OrderedDict\n canonical_order = ['markov_states', 'states', 'controls', 'auxiliaries', 'values', 'shocks', 'parameters']\n osyms = OrderedDict()\n for vg in canonical_order:\n if vg in symbols:\n osyms[vg] = symbols[vg]\n for vg in symbols:\n if vg not in canonical_order:\n osyms[vg] = symbols[vg]\n\n self.symbols = osyms\n self.equations = symbolic_equations\n self.calibration_dict = symbolic_calibration\n self.distribution = distribution\n self.discrete_transition = discrete_transition\n self.options = options\n self.definitions = definitions\n\n self.check()\n\n def check(self):\n\n if self.model_spec == 'fg':\n\n n_eq_transition = len(self.equations['transition'])\n n_eq_arbitrage = len(self.equations['arbitrage'])\n\n assert( len(self.symbols['states']) == n_eq_transition)\n assert( len(self.symbols['controls']) == n_eq_arbitrage)\n\n if 'auxiliary' in self.equations:\n n_eq_auxiliary = len(self.equations['auxiliary'])\n assert( len(self.symbols['auxiliaries']) == n_eq_auxiliary)\n\n else:\n pass\n # raise Exception( \"No rule to check model type {}\".format(self.model_spec))\n","sub_path":"dolo/compiler/model_symbolic.py","file_name":"model_symbolic.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"330932692","text":"\nimport fnmatch,os\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.sparse import csr_matrix\n\ndef csr_append(a, b):\n return sp.hstack((a, b), format='csr')\n\n \ndef buildCSRLengths(path):\n corpSentLen = []\n corpWordLen = []\n max_sent_len = 0\n max_word_len= 0\n \n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames,'*.txt'):\n lengths = {}\n fileStream = open(os.path.join(root, filename), 'r')\n content = str(fileStream.readlines())\n a=root.split(\"\\\\\")\n if (len(a)>1):\n splitted = content.split(\".\")\n for sen in splitted:\n words = sen.split()\n for word in words:\n if len(word) > max_word_len:\n max_word_len = len(word)\n if len(words)>max_sent_len:\n max_sent_len = len(words)\n \n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, '*.txt'):\n sentLengths = [0.0] * max_sent_len\n wordLengths = [0.0]*max_word_len\n 
fileStream = open(os.path.join(root, filename), 'r')\n content = str(fileStream.readlines())\n a = root.split(\"\\\\\")\n if len(a) > 1:\n splitted = content.split(\".\")\n for sen in splitted:\n words = sen.split()\n for word in words:\n wordLengths[len(word)-1] += 1.0\n sentLengths[len(words)-1] += 1.0\n corpSentLen.append(sentLengths)\n corpWordLen.append(wordLengths)\n lensCsr = csr_matrix(corpSentLen)\n \n wordsCsr = csr_matrix(corpWordLen)\n \n return [csr_append(lensCsr, wordsCsr), [max_sent_len, max_word_len]]\n \n#buildCSRLengths(\"C:\\\\Users\\\\xeniak\\\\Desktop\\\\Data\")\n","sub_path":"WpfApplication1/bin/release/Scripts/backups/sentences.py","file_name":"sentences.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"179281592","text":"import sys, os\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Activation, Dropout, Flatten, BatchNormalization\nfrom keras.initializers import RandomNormal\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n\nbatchSize = 50\nimgHeight = 48\nimgWidth = 48\nemotionCodes = [\"AN\", \"DI\", \"FE\", \"HA\", \"SA\", \"SU\", \"NE\"]\n\ndef main():\n try:\n dataSetPath = sys.argv[1]\n outputModelDirectory = sys.argv[2]\n except Exception as e:\n print(\"Invalid parameters.\")\n sys.exit(1)\n\n try:\n with open(dataSetPath) as f:\n csvData = f.read().splitlines()\n except Exception as e:\n print(f\"Could not read {dataSetPath} - {e}\")\n sys.exit(1)\n\n print(f\"Creating dataset from {dataSetPath} ...\")\n trainingData = []\n trainingLabels = []\n validationData = []\n validationLabels = []\n firstRecord = True\n for row in csvData: #For each record in csv (apart from first headers row)\n row = row.split(\",\")\n if firstRecord:\n firstRecord = False\n else:\n record = {\"emotion\": int(row[0]), \"pixels\": row[1], \"usage\": row[2]}\n pixels = record['pixels'].split(\" \") # Split pixels into array, separated by space\n for i in range(len(pixels)): #Force conversion to integer\n pixels[i] = int(pixels[i])\n if record['usage'] == 'Training':\n trainingData.append(np.array(pixels))\n trainingLabels.append(int(record['emotion']))\n elif record['usage'] == 'PublicTest':\n validationLabels.append(int(record['emotion']))\n validationData.append(np.array(pixels))\n \n # Convert to numpy array\n trainingData = np.array(trainingData)\n trainingLabels = np.array(trainingLabels)\n validationData = np.array(validationData)\n validationLabels = np.array(validationLabels)\n\n trainingData = trainingData.reshape(trainingData.shape[0], 48, 48, 1)\n validationData = validationData.reshape(validationData.shape[0], 48, 48, 1)\n\n trainingLabels= keras.utils.to_categorical(trainingLabels, num_classes=7)\n validationLabels = keras.utils.to_categorical(validationLabels, num_classes=7)\n\n # Create augmented data\n augmentedData = keras.Sequential([\n layers.experimental.preprocessing.RandomFlip(\"horizontal\", input_shape=(imgHeight, imgWidth, 1)),\n layers.experimental.preprocessing.RandomRotation(0.5),\n layers.experimental.preprocessing.RandomZoom(0.5),\n ])\n\n # Create model (model shape and size to be investigated, maybe improved)\n model = Sequential()\n\n # 1st convolution layer\n model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same', 
input_shape=(48,48,1), bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same', input_shape=(48,48,1), bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2, 2)))\n model.add(Dropout(0.25))\n\n # 3rd convolution layer\n model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same', bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same', bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2, 2)))\n model.add(Dropout(0.25))\n \n # 5th convolution layer\n model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same', bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same', bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2, 2)))\n model.add(Dropout(0.25))\n\n # 7th convolution layer\n model.add(Conv2D(256, (3, 3), activation='relu', padding = 'same', bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(Conv2D(256, (3, 3), activation='relu', padding = 'same', bias_initializer=RandomNormal(stddev=1), kernel_initializer=RandomNormal(stddev=1)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2, 2)))\n model.add(Dropout(0.5))\n\n\n model.add(Flatten())\n # Fully connected layers\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(7, activation='softmax'))\n\n # Compile model\n model.compile(\n optimizer='adam',\n loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n )\n print(\"Model summary:\")\n print(model.summary())\n\n # Add callbacks\n cbLrReducer = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=3, verbose=1) # Reduce learning rate if there is no improvement on the value of the loss function\n cbEarlyStopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=8, verbose=1, mode='auto') # Stop training the model if it's overfitting\n cbCheckpoint = ModelCheckpoint(outputModelDirectory, monitor='val_accuracy', verbose=1, save_best_only=True) # Save model at the end of the epoch (if there's an improvement on the previous epoch's accuracy)\n\n # Train model\n epochs=30\n model.fit(\n trainingData,\n trainingLabels,\n validation_data=(validationData, validationLabels),\n batch_size=batchSize,\n epochs=epochs,\n callbacks=[cbLrReducer, cbEarlyStopper, cbCheckpoint]\n )\n scores = model.evaluate(np.array(validationData), np.array(validationLabels), batch_size=batchSize)\n print(f\"Loss: {scores[0]}\")\n print(f\"Accuracy: {int(scores[1])*100}%\")\n model.save(outputModelDirectory)\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"Training/trainModel.py","file_name":"trainModel.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"600230113","text":"import discord\nimport os\nfrom discord.ext import commands, tasks\nimport datetime\nimport asyncio\nimport random\nimport sqlite3\n\n\nclass 
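A note on the training script above: the network already ends in a softmax layer, yet the loss is built with from_logits=True, and the final accuracy print wraps scores[1] in int(), which truncates any value below 1.0 to zero. A hedged correction of just those two spots, everything else unchanged:

# the last Dense layer applies softmax, so the loss receives probabilities
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)

# scores[1] is already a float in [0, 1]; int(scores[1]) * 100 would print 0%
print(f"Accuracy: {scores[1] * 100}%")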
_190Cog(commands.Cog, name='190Cog'):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n @commands.has_role(703003853796409434)\n async def staterules(self, ctx):\n channelInfoLogs = self.client.get_channel(728238632175009863)\n currentDT = datetime.datetime.now()\n\n embed190 = discord.Embed(title='State Rules **Updated August 19th, amended by Governer**', colour=discord.Colour(29952), description=f\"1.) No Tile hitting, unless within your alliance boundary or Kill Event.\\n2.) No attacking of alliances during Trap, Horde and Fortress event.\\n3.) Facilities are free for all. Each alliance may hold only 1 Facility.\\n**4.) Each alliance may hold up to 2 Bunkers. Only Bunker 10 will be for T9s and below. (NO level 30 players and above).**\")\n embed190.set_footer(text='Missing or wrong information? \\nMissing or wrong rewards? \\nSuggestions or requests regarding the bot? \\nMessage OrionAF#6982')\n embed190.timestamp = datetime.datetime.utcnow()\n\n embed190Log = discord.Embed(colour=discord.Colour(29952), description=f'```Execution information:\\nTime & Date: {currentDT}\\nName: {ctx.author}\\nServer Name: {ctx.guild.name}\\nGuild ID: {ctx.guild.id}\\nServer Owner: {ctx.guild.owner}\\nMember Amount: {ctx.guild.member_count}\\nServer Icon:``` [Icon]({ctx.guild.icon_url_as(format=None, size=64)})')\n embed190Log.set_author(name='State 190 rules.', icon_url=f'{ctx.author.avatar_url}')\n embed190Log.set_footer(text=f'Info Logs', icon_url=f'{ctx.guild.icon_url_as(format=None)}')\n embed190Log.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed=embed190)\n await channelInfoLogs.send(embed=embed190Log)\n await channelInfoLogs.send((f'◆↓◆↓◆↓◆↓◆↓◆↓◆'))\n\n @staterules.error\n async def staterules_error(self, ctx, error):\n if isinstance(error, commands.MissingRole):\n await ctx.send(f'You are missing the required role to execute this command.')\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef setup(client):\n client.add_cog(_190Cog(client))","sub_path":"cogs/190.py","file_name":"190.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"640072717","text":"#!/usr/bin/env python\n\nimport sys\ndef sed(pattern, replacement, input_file, output_file):\n \"\"\" reads a source file and writes the destinations file. 
Each line is replaced\n with replace\"\"\"\n\n try:\n infile=open(input_file, 'r')\n outfile=open(output_file, 'w')\n for line in infile:\n line = line.replace(pattern, replacement)\n outfile.write(line)\n infile.close()\n outfile.close()\n except:\n print(\"There is an error\")\n\ndef main():\n pattern='bullshit'\n replacement='not nice to say'\n input_file='input.txt'\n output_file='output.txt'\n sed(pattern, replacement, input_file, output_file)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"students/enrique_silva/session04/practiceRunningScript.py","file_name":"practiceRunningScript.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"312095461","text":"print ('Hello, Django Girls!')\n\nif (3 > 2):\n print ('It works!')\n\nname = \"Juli\"\n\ngirls = ['Reka', 'Gabi', 'Julcsi', 'Juli']\n\n\ndef hi(name):\n print('Hello ' + name + '!')\n\nfor name in girls:\n hi(name)\n\nfor i in range(1, 9):\n print(i)\n","sub_path":"gyak/dg.py","file_name":"dg.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"330103249","text":"#from nltk.stem import WordNetLemmatizer\nimport spacy\nimport numpy\n\nimport xml.etree.ElementTree as ET\nimport demo_utils\nimport os\n\nclass TabularView(object):\n def __init__(self, spacy_model):\n super().__init__()\n \n self.ta = {\"corpusId\": \"\", \"id\": \"\"}\n self.views = {}\n self.current_outputs = {}\n self.sp =spacy.load(spacy_model)\n \n def update_sentence(self, srl_output):\n generator = \"srl_pipeline\"\n tokens = srl_output[\"tokens\"]\n text = \" \".join(srl_output[\"words\"])\n self.ta[\"text\"] = text\n self.ta[\"tokens\"] = tokens\n self.ta[\"tokenOffsets\"] = demo_utils.create_token_char_offsets(text, tokens)\n sentence_end_positions = [i+1 for i,x in enumerate(tokens) if x==\".\"]\n if len(tokens) not in sentence_end_positions:\n sentence_end_positions.append(len(tokens))\n sentences = {\"generator\": generator, \"score\": 1.0, \"sentenceEndPositions\": sentence_end_positions}\n self.ta[\"sentences\"] = sentences\n \n self.views = {}\n self.views[\"SENTENCE\"] = demo_utils.create_sentence_view(tokens)\n self.views[\"TOKENS\"] = demo_utils.create_tokens_view(tokens)\n self.ta[\"views\"] = self.views.values()\n\n def update_view(self, view_name, srl_output):\n output = srl_output[\"predicates\"]\n self.views[view_name] = self._create_srl_view(output, view_name)\n self.current_outputs[view_name] = output\n self.ta[\"views\"] = list(self.views.values())\n\n def remove_view(self, view_name):\n if view_name in self.views:\n del self.views[view_name]\n if view_name in self.current_outputs:\n del self.current_outputs[view_name]\n\n def clear_table(self):\n self.views = {}\n self.ta = {\"corpusId\": \"\", \"id\": \"\", \"text\": \"\", \"tokens\": [], \"tokenOffsets\": [], \"sentences\": {}, \"views\": []}\n self.current_outputs = {}\n\n\n def get_textannotation(self):\n sanitized = self._sanitize(self.ta)\n # print(sanitized)\n # print(type(sanitized))\n return sanitized\n\n \n def _sanitize(self,x):\n if isinstance(x, (str, float, int, bool)):\n return x\n elif isinstance(x, numpy.ndarray):\n return x.tolist()\n elif isinstance(x, numpy.number):\n return x.item()\n elif isinstance(x, dict):\n return {key:self._sanitize(value) for key, value in x.items()}\n elif isinstance(x, numpy.bool_):\n return bool(x)\n elif isinstance(x, (list, tuple)):\n return 
[self._sanitize(x_i) for x_i in x]\n elif x is None:\n return \"None\"\n elif hasattr(x, \"to_json\"):\n return x.to_json()\n else:\n print(x, ' IS THE HARD ONE WE CANOT SANITIZE, IT IS OF TYPE, ', type(x))\n\n\n def _create_srl_view(self, frames, view_name):\n srl_view = {\"viewName\": view_name}\n constituents = []\n relations = []\n for frame in frames:\n predicate = frame[\"predicate\"]\n tags = frame[\"tags\"]\n predicate_idx = frame[\"predicate_index\"]\n properties = {\"SenseNumber\": \"NA\", \"predicate\": predicate}\n constituent = {\"label\": \"Predicate\", \"score\": 1.0, \"start\": predicate_idx, \"end\": predicate_idx+1, \"properties\": properties}\n predicate_constituent_idx = len(constituents)\n constituents.append(constituent)\n active_tag = \"\"\n active_tag_start_idx = -1\n for tag_idx, tag in enumerate(tags):\n if tag in {\"O\", \"B-V\"}:\n if active_tag != \"\":\n constituent = {\"label\": active_tag, \"score\": 1.0, \"start\": active_tag_start_idx, \"end\": tag_idx}\n relation = {\"relationName\": active_tag, \"srcConstituent\": predicate_constituent_idx, \"targetConstituent\": len(constituents)}\n relations.append(relation)\n constituents.append(constituent)\n active_tag = \"\"\n active_tag_start_idx = -1\n continue\n if tag[2:] == active_tag:\n continue\n else:\n if active_tag != \"\":\n constituent = {\"label\": active_tag, \"score\": 1.0, \"start\": active_tag_start_idx, \"end\": tag_idx}\n relation = {\"relationName\": active_tag, \"srcConstituent\": predicate_constituent_idx, \"targetConstituent\": len(constituents)}\n relations.append(relation)\n constituents.append(constituent)\n active_tag = tag[2:]\n active_tag_start_idx = tag_idx\n # collect stragglers\n if active_tag != \"\":\n constituent = {\"label\": active_tag, \"score\": 1.0, \"start\": active_tag_start_idx, \"end\": len(tags)}\n relation = {\"relationName\": active_tag, \"srcConstituent\": predicate_constituent_idx, \"targetConstituent\": len(constituents)}\n relations.append(relation)\n constituents.append(constituent)\n view_data = [{\"viewType\": \"\", \"viewName\": view_name, \"generator\": \"multilingual_srl_pipeline\", \"score\": 1.0, \"constituents\": constituents, \"relations\": relations}]\n srl_view[\"viewData\"] = view_data\n return srl_view\n \n","sub_path":"tabular_view.py","file_name":"tabular_view.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"410956219","text":"import argparse\nimport tempfile\nimport shutil\nimport os\nimport json\n\nfrom git import Repo\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(\n description='Test few RAML Python parsers.')\n arg_parser.add_argument(\n '--parser', type=str, help='Parser to test',\n choices=['ramlfications', 'pyraml'],\n required=True)\n arg_parser.add_argument(\n '--verbose', help='Print errors or not',\n action='store_true')\n return arg_parser.parse_args()\n\n\ndef clone_tck_repo():\n repo_dir = os.path.join(tempfile.gettempdir(), 'raml-tck')\n if os.path.exists(repo_dir):\n print('Removing existing raml-tck repo directory')\n shutil.rmtree(repo_dir)\n os.mkdir(repo_dir)\n print('Cloning raml-tck repo to {}'.format(repo_dir))\n repo = Repo.init(repo_dir)\n origin = repo.create_remote(\n 'origin', 'git@github.com:raml-org/raml-tck.git')\n origin.fetch('refs/heads/rename-cleanup:refs/heads/origin')\n origin.pull(origin.refs[0].remote_head)\n return os.path.join(repo_dir, 'tests', 'raml-1.0')\n # return 
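The tag walk inside _create_srl_view above turns per-token BIO-style SRL tags into labelled (start, end) constituents, treating "O" and "B-V" as span boundaries. The same idea in isolation, as a hedged standalone sketch rather than this file's own API:

def bio_to_spans(tags):
    # "B-X"/"I-X" share the suffix X; "O" and "B-V" close any open span,
    # mirroring the loop in _create_srl_view above
    spans = []
    label, start = "", -1
    for i, tag in enumerate(tags):
        if tag in ("O", "B-V"):
            if label:
                spans.append((label, start, i))
            label, start = "", -1
        elif tag[2:] != label:
            if label:
                spans.append((label, start, i))
            label, start = tag[2:], i
    if label:  # flush a span that runs to the end of the sentence
        spans.append((label, start, len(tags)))
    return spans

# bio_to_spans(["B-ARG0", "I-ARG0", "B-V", "B-ARG1", "O"])
# -> [("ARG0", 0, 2), ("ARG1", 3, 4)]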
'/home/post/projects/raml-tck/tests/raml-1.0/' # DEBUG\n\ndef list_ramls(ex_dir):\n manifest_path = os.path.join(ex_dir, 'manifest.json')\n with open(manifest_path) as f:\n manifest = json.load(f)\n return [os.path.join(ex_dir, fp) for fp in manifest['filePaths']]\n\n\ndef should_fail(fpath):\n return 'invalid' in fpath.lower()\n","sub_path":"py/parsers_test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"624476445","text":"import BlynkLib\nimport time\nimport RPi.GPIO as GPIO\nfrom time import sleep\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n\nGPIO.setup(7,GPIO.OUT)\nGPIO.setup(13,GPIO.OUT)\n\nBLYNK_AUTH = '7e7202fb6b204a2686e7379f5907deae'\n\n# Initialize Blynk\nblynk = BlynkLib.Blynk(BLYNK_AUTH)\n\n# Register virtual pin handler\n@blynk.VIRTUAL_WRITE(1)\ndef v1_write_handler(value):\n GPIO.output(7,0)\n GPIO.output(13,0)\n\n@blynk.VIRTUAL_WRITE(2)\ndef v1_write_handler(value):\n GPIO.output(7,0)\n GPIO.output(13,1)\n\n@blynk.VIRTUAL_WRITE(3)\ndef v1_write_handler(value):\n GPIO.output(7,1)\n GPIO.output(13,0)\n\n@blynk.VIRTUAL_WRITE(4)\ndef v1_write_handler(value):\n GPIO.output(7,1)\n GPIO.output(13,1)\n\n# Start Blynk (this call should never return)\nblynk.run()\n","sub_path":"blynk/gpio_blynk.py","file_name":"gpio_blynk.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"270560353","text":"#!/usr/bin/env python3\n'''\n'''\nfrom collections import defaultdict\n\nimport sys\nimport re\nimport time\nimport pprint\nimport pygsheets\n# import pandas as pd\n\nimport urllib.request\n\nfrom helper.logger import *\nfrom helper.gsheet.gsheet_util import *\nfrom helper.gdrive.gdrive_util import *\n\nCOLUMNS = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',\n 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', 'AO', 'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY', 'AZ',\n 'BA', 'BB', 'BC', 'BD', 'BE', 'BF', 'BG', 'BH', 'BI', 'BJ', 'BK', 'BL', 'BM', 'BN', 'BO', 'BP', 'BQ', 'BR', 'BS', 'BT', 'BU', 'BV', 'BW', 'BX', 'BY', 'BZ']\n\ndef process(sheet, section_data, context):\n ws_title = section_data['link']\n\n # if the worksheet has already been read earlier, use the content from cache\n if ws_title in context['worksheet-cache'][sheet.title]:\n return context['worksheet-cache'][sheet.title][ws_title]\n\n info('processing ... {0} : {1}'.format(sheet.title, ws_title))\n try:\n ws = sheet.worksheet('title', ws_title)\n except:\n warn('No worksheet ... 
{0}'.format(ws_title))\n        return {}\n\n    ranges = ['{0}!B3:{1}{2}'.format(ws_title, COLUMNS[ws.cols-1], ws.rows)]\n    include_grid_data = True\n\n    wait_for = context['gsheet-read-wait-seconds']\n    try_count = context['gsheet-read-try-count']\n    request = context['service'].spreadsheets().get(spreadsheetId=sheet.id, ranges=ranges, includeGridData=include_grid_data)\n    response = None\n    for i in range(0, try_count):\n        try:\n            response = request.execute()\n            break\n        except:\n            warn('gsheet read request (attempt {0}) failed, waiting for {1} seconds before trying again'.format(i, wait_for))\n            time.sleep(float(wait_for))\n\n    if response is None:\n        error('gsheet read request failed, quitting')\n        sys.exit(1)\n\n    # if any of the cells have userEnteredValue of IMAGE or HYPERLINK, process it\n    row = 0\n    for row_data in response['sheets'][0]['data'][0]['rowData']:\n        val = 0\n        if 'values' in row_data:\n            for cell_data in row_data['values']:\n                if 'userEnteredValue' in cell_data:\n                    userEnteredValue = cell_data['userEnteredValue']\n                    if 'formulaValue' in userEnteredValue:\n                        formulaValue = userEnteredValue['formulaValue']\n\n                        # IMAGE/image\n                        m = re.match('=IMAGE\\((?P<name>.+)\\)', formulaValue, re.IGNORECASE)\n                        if m and m.group('name') is not None:\n                            row_height = response['sheets'][0]['data'][0]['rowMetadata'][row]['pixelSize']\n                            result = download_image(m.group('name'), context['tmp-dir'], row_height)\n                            if result:\n                                response['sheets'][0]['data'][0]['rowData'][row]['values'][val]['userEnteredValue']['image'] = result\n\n                        # HYPERLINK/hyperlink\n                        m = re.match('=HYPERLINK\\(\"#gid=(?P<ws_gid>.+)\",\"(?P<ws_title>.+)\"\\)', formulaValue, re.IGNORECASE)\n                        if m and m.group('ws_gid') is not None and m.group('ws_title') is not None:\n                            # debug(m.group('ws_gid'), m.group('ws_title'))\n                            if worksheet_exists(sheet, m.group('ws_title')):\n                                cell_data['contents'] = process(sheet, {'link': m.group('ws_title')}, context)\n\n                        val = val + 1\n        row = row + 1\n\n    context['worksheet-cache'][sheet.title][ws_title] = response\n    return response\n","sub_path":"src/processor/table_processor.py","file_name":"table_processor.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"490320902","text":"from eventmanager import *\n\n\nclass GameEngine(object):\n    \"\"\"\n    Tracks the game state.\n    \"\"\"\n\n    def __init__(self, evManager, cols, rows):\n        \"\"\"\n        evManager (EventManager): Allows posting messages to the event queue.\n\n        Attributes:\n            running (bool): True while the engine is online. 
Changed via QuitEvent().\n \"\"\"\n # Create a cols by row grid\n # 0 = ' ', 1 = black, 2 = red\n self.cols = cols\n self.rows = rows\n self.grid = [[0 for x in range(cols)] for y in range(rows)]\n self.player = 'black'\n self.winner = ' '\n self.evManager = evManager\n evManager.RegisterListener(self)\n self.running = False\n\n def _switch_player(self):\n self.player = 'red' if self.player == 'black' else 'black'\n return self.player\n\n def get_next_player(self):\n \"\"\"\n ...\n \"\"\"\n return self.player\n\n def _set_piece(self, pos):\n row, col = pos\n if self.winner == ' ' and self.is_valid_move(row, col):\n self.grid[row][col] = 1 if self.player == 'black' else 2\n if self.is_winner(self.player):\n self.winner = self.player\n else:\n # Update next player\n self._switch_player()\n\n # debug\n print(' %s' % range(self.cols))\n for row in range(self.rows):\n print(row, self.grid[row])\n\n def is_valid_move(self, row, col):\n \"\"\"\n ...\n \"\"\"\n try:\n result = self.grid[row][col] == 0\n except IndexError:\n result = False\n return result\n\n def get_piece(self, row, col):\n \"\"\"\n ...\n \"\"\"\n occupant = self.grid[row][col]\n if occupant == 1:\n return 'black'\n if occupant == 2:\n return 'red'\n else:\n return ' '\n\n def get_winner(self):\n \"\"\"\n ....\n \"\"\"\n return self.winner\n\n def is_winner(self, player):\n \"\"\"\n ...\n \"\"\"\n tile = 1 if player == 'black' else 2\n\n # check horizontal spaces\n for row in range(self.rows):\n for col in range(self.cols - 4):\n \"\"\"print ('check horizontal [%s,%s][%s,%s][%s,%s][%s,%s][%s,%s]' %(row, col, row, col+1, row, col+2, row, col+3, row, col+4))\"\"\"\n if self.grid[row][col] == tile and \\\n self.grid[row][col + 1] == tile and \\\n self.grid[row][col + 2] == tile and \\\n self.grid[row][col + 3] == tile and \\\n self.grid[row][col + 4] == tile:\n return True\n\n # check vertical spaces\n for col in range(self.cols):\n for row in range(self.rows - 4):\n \"\"\"print ('check vertical [%s,%s][%s,%s][%s,%s][%s,%s][%s,%s]' %(row, col, row+1, col, row+2, col, row+3, col, row+4, col))\"\"\"\n if self.grid[row][col] == tile and \\\n self.grid[row + 1][col] == tile and \\\n self.grid[row + 2][col] == tile and \\\n self.grid[row + 3][col] == tile and \\\n self.grid[row + 4][col] == tile:\n return True\n\n # check / diagonal spaces\n for row in range(self.rows - 4):\n for col in range(4, self.cols):\n \"\"\"print ('check / diagonal [%s,%s][%s,%s][%s,%s][%s,%s][%s,%s]' %(row, col, row+1, col-1, row+2, col-2, row+3, col-3, row+4, col-4))\"\"\"\n if self.grid[row][col] == tile and \\\n self.grid[row + 1][col - 1] == tile and \\\n self.grid[row + 2][col - 2] == tile and \\\n self.grid[row + 3][col - 3] == tile and \\\n self.grid[row + 4][col - 4] == tile:\n return True\n\n # check \\ diagonal spaces\n for row in range(self.rows - 4):\n for col in range(self.cols - 4):\n \"\"\"print ('check \\ diagonal [%s,%s][%s,%s][%s,%s][%s,%s][%s,%s]' %(row, col, row+1, col+1, row+2, col+2, row+3, col+3, row+4, col+4))\"\"\"\n if self.grid[row][col] == tile and \\\n self.grid[row + 1][col + 1] == tile and \\\n self.grid[row + 2][col + 2] == tile and \\\n self.grid[row + 3][col + 3] == tile and \\\n self.grid[row + 4][col + 4] == tile:\n return True\n\n return False\n\n def notify(self, event):\n \"\"\"\n Called by an event in the message queue.\n \"\"\"\n\n if isinstance(event, QuitEvent):\n self.running = False\n elif isinstance(event, MouseInputEvent):\n self._set_piece(event.clickpos)\n\n def run(self):\n \"\"\"\n Starts the game engine loop.\n 
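The is_winner() method above unrolls the same five-in-a-row test four times, once per direction. A hedged refactor sketch, not the author's code, that collapses the four loops into one scan over direction vectors:

def has_five_in_a_row(grid, rows, cols, tile):
    # right, down, down-right, down-left: the four lines checked above
    for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for r in range(rows):
            for c in range(cols):
                end_r, end_c = r + 4 * dr, c + 4 * dc
                if 0 <= end_r < rows and 0 <= end_c < cols and \
                        all(grid[r + k * dr][c + k * dc] == tile
                            for k in range(5)):
                    return True
    return False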
This pumps a Tick event into the message queue for each loop.\n The loop ends when this object hears a QuitEvent in notify().\n \"\"\"\n self.running = True\n self.evManager.Post(InitializeEvent())\n while self.running:\n newTick = TickEvent()\n self.evManager.Post(newTick)\n","sub_path":"app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"465998948","text":"from flask import Flask, render_template\nfrom flask_mail import Mail, Message\nimport os\nimport re\nimport sys\nimport requests\n\napp = Flask(__name__)\n\n# SMTP服务器配置\napp.config.update(\n MAIL_SERVER='smtp.qq.com',\n MAIL_PORT='465',\n MAIL_USE_SSL=True,\n MAIL_USERNAME='960423580@qq.com',\n MAIL_PASSWORD='hdpvhkswbhyvbdbd'\n)\n\nmail = Mail(app)\n\n\ndef sendout(content='错误,没有发送成功'):\n msg = Message(subject='来自flask的邮件', sender='960423580@qq.com', recipients=['mmlinfangzhi@163.com'])\n msg.html = content\n mail.send(msg)\n print('gaoding')\n return '
邮件发送成功
'\n\n\nif __name__ == '__main__':\n # ip_command = 'ifconfig'\n # b = os.popen(ip_command,'r',1).read()\n # pat1 = re.compile('inet.+?netmask')\n # content1 = ''.join(pat1.findall(b))\n # with app.app_context():\n # #sendout(content= content1)\n # pass\n sendout('emmmm')\n","sub_path":"自动发邮件/邮件.py","file_name":"邮件.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"586199401","text":"import logging\nimport os\n\nfrom nornir.core.task import Result\nfrom nornir.plugins.functions.text import print_result\nfrom nornir.plugins.functions.text import print_title\n\nfrom tests.wrapper import wrap_cli_test\n\noutput_dir = \"{}/output_data\".format(os.path.dirname(os.path.realpath(__file__)))\n\n\ndef echo_task(task, msg=\"Nornir\"):\n return Result(\n host=task.host,\n result=\"Hello from {}\".format(msg),\n output=\"Hello from {}\".format(msg),\n )\n\n\ndef load_data(task):\n data = {\"os\": \"Linux\", \"services\": [\"http\", \"smtp\", \"dns\"]}\n return Result(host=task.host, result=data)\n\n\ndef data_with_greeting(task):\n task.run(task=echo_task)\n task.run(task=load_data)\n\n\ndef parse_data(task):\n\n data = {}\n data[\"failed\"] = False\n data[\"changed\"] = False\n\n if \"dev1.group_1\" == task.host.name:\n data[\"values\"] = [1, 2, 3]\n data[\"changed\"] = True\n\n elif \"dev2.group_1\" == task.host.name:\n data[\"values\"] = [4, 5, 6]\n\n elif \"dev3.group_2\" == task.host.name:\n data[\"values\"] = [7, 8, 9]\n\n elif \"dev4.group_2\" == task.host.name:\n data[\"values\"] = [10, 11, 12]\n data[\"changed\"] = False\n data[\"failed\"] = True\n\n elif \"dev5.no_group\" == task.host.name:\n data[\"values\"] = [13, 14, 15]\n\n if data[\"failed\"]:\n raise Exception(\"Unknown Error -> Contact your system administrator\")\n\n return Result(host=task.host, changed=data[\"changed\"], result=data[\"values\"])\n\n\ndef read_data(task):\n task.run(task=echo_task, severity_level=logging.DEBUG)\n task.run(task=echo_task, msg=\"CRITICAL\", severity_level=logging.CRITICAL)\n task.run(task=parse_data, severity_level=logging.WARN)\n\n\nclass Test(object):\n @wrap_cli_test(output=\"{}/basic_single\".format(output_dir))\n def test_print_basic(self, nornir):\n filter = nornir.filter(name=\"dev1.group_1\")\n result = filter.run(echo_task)\n print_result(result, vars=\"result\")\n\n @wrap_cli_test(output=\"{}/basic_inventory\".format(output_dir))\n def test_print_basic_inventory(self, nornir):\n result = nornir.run(echo_task)\n print_result(result)\n\n @wrap_cli_test(output=\"{}/basic_inventory_one_host\".format(output_dir))\n def test_print_basic_inventory_one_host(self, nornir):\n result = nornir.run(data_with_greeting)\n print_result(result[\"dev2.group_1\"])\n\n @wrap_cli_test(output=\"{}/basic_inventory_one_task\".format(output_dir))\n def test_print_basic_inventory_one_task(self, nornir):\n result = nornir.run(data_with_greeting)\n print_result(result[\"dev2.group_1\"][1])\n\n @wrap_cli_test(output=\"{}/multiple_tasks\".format(output_dir))\n def test_print_multiple_tasks(self, nornir):\n result = nornir.run(data_with_greeting)\n print_title(\"Behold the data!\")\n print_result(result)\n\n @wrap_cli_test(output=\"{}/changed_host\".format(output_dir))\n def test_print_changed_host(self, nornir):\n filter = nornir.filter(site=\"site1\")\n result = filter.run(read_data, severity_level=logging.WARN)\n print_result(result)\n\n @wrap_cli_test(output=\"{}/failed_with_severity\".format(output_dir))\n def 
test_print_failed_with_severity(self, nornir):\n nornir.config.logging.configure()\n result = nornir.run(read_data)\n print_result(result, vars=[\"exception\", \"output\"], severity_level=logging.ERROR)\n","sub_path":"tests/plugins/functions/text/test_print_result.py","file_name":"test_print_result.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"315177795","text":"from common import *\n\nfrom millsim.HaloTreeDownloader import HaloTreeDownloader\nfrom millsim.HaloPlotter import HaloPlotter\n\ndl = HaloTreeDownloader()\ndl.set_count(HALO_COUNT)\n\nfigno = 1\n\nfor i in HALO_MASS_RANGES:\n print(\\\n \"Retrieving {0} halos from {1:e} to {2:e} Msun... \"\\\n .format(HALO_COUNT, i[0], i[1]), end=\"\", flush=True)\n \n try:\n dl.download_mass_range(i)\n halos = dl.get_halo_history()\n print(\"OK: {0} rows, {1} halos\".format(dl.row_count(), len(halos)))\n \n fig, axes = plt.subplots(1, 2)\n plotter = HaloPlotter(halos)\n plotter.set_axes(axes[0])\n plotter.plotHalos(\\\n figno, \\\n \"Halo DM mass evol. ({0} $M_\\odot$ - {1} $M_\\odot$)\".format(\\\n plotter.quantityToLatex(i[0]), plotter.quantityToLatex(i[1])))\n \n plotter.set_axes(axes[1])\n plotter.plotHaloMean(\\\n figno, \\\n \"Halo DM mass evol. (mean)\".format(\\\n plotter.quantityToLatex(i[0]), plotter.quantityToLatex(i[1])),\\\n True)\n\n figno += 1 \n \n except Exception as e:\n print(\"error: \" + str(e))\n traceback.print_last()\n \nplt.show()\n \n","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"358405703","text":"from django.shortcuts import render\nfrom django.template import RequestContext, loader\nfrom django import forms\nfrom django.utils.safestring import mark_safe\n\nfrom pull_scores import *\n\nALL_TRACTS = 802\n\nclass CAPP_Form(forms.Form):\n\tyes = forms.ChoiceField(\n\t\t\t\tlabel=\"Go to map?\",\n\t\t\t\tchoices=[(\"YES\", \"Gladly!\")],\n\t\t\t\twidget=forms.RadioSelect,\n\t\t\t\trequired=True)\n\ndef about(request):\n\tc = {}\n\treturn render(request, 'nmatch/about.html', c)\n\ndef home(request):\n\tc = {}\n\treturn render(request, 'nmatch/home.html', c)\n\ndef survey_CAPP(request):\n\tc = {}\n\targs = {}\n\t\n\tif request.method == 'GET':\n\t\tform = CAPP_Form(request.GET)\n\t\tif form.is_valid():\n\t\t\tto_map = form.cleaned_data['yes']\n\t\t\tif to_map:\n\t\t\t\targs['yes'] = to_map\n\n\t\t\t#add args to c dictionary\n\t\t\tc['args'] = args\n\t\t\t\n\t\t\t#get matching tracts and make javascript string\n\t\t\tif c['args'] is not None:\n\t\t\t\t#gets list of matching tracts\n\t\t\t\ttracts = go(ALL_TRACTS)\n\t\t\t\tc['tracts'] = tracts\n\t\t\t\tjava_str = get_string(tracts)\n\t\t\t\tc['java_str'] = java_str\n\t\t\t\treturn render(request, 'nmatch/map_only.html', c)\n\t\telse:\n\t\t\tform = CAPP_Form()\n\tc['form'] = form\n\treturn render(request, 'nmatch/survey_CAPP.html', c)\n\n\n#takes list of tracts and returns javascript string\ndef get_string(tracts):\n\ttract_list = list(str(c) for c in tracts)\n\n\tstring_1 = \"{ where: 'col0\\x3e\\x3e0 \\x3d \"\n\tstring_2 = \"',\\\n\t\tpolygonOptions: {\\\n\t\tfillColor: '#0000FF',\\\n\t\tfillOpacity: 1\\\n\t\t}\\\n\t\t}, \"\n\n\tfull_str = ''\n\n\t#displays only the first five tracts in ordered list\n\tfor tract in tract_list:\n\t\ttract_string = string_1 + tract + string_2\n\t\tfull_str = full_str + tract_string\n\t\n\tfull_str = 
full_str[:-2]\n\n\treturn full_str\n\n\t","sub_path":"initial_code/website/nmatch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"422461908","text":"import sqlalchemy as sa\nimport sqlalchemy.dialects.mssql\n\n\nengine = sa.create_engine('mssql+pyodbc://hq01db05/OrantaSch?driver=SQL+Server+Native+Client+11.0')\n\nengine.echo = False\nmetadata = sa.MetaData(engine)\n\ndbo_products = sa.Table('ProductTypes', metadata, autoload=True, schema='meta')\n\n\ndef run(smth):\n rs = smth.execute()\n for row in rs:\n print(row)\ntry:\n s = dbo_products.select(dbo_products.c.Code == '104')\n run(s)\nexcept Exception as Err:\n print('Ok', Err)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"509301751","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @Time:2020/9/1 17:13\n# @Author:JiahangGu\nfrom typing import List\n\n\nclass Solution:\n def PredictTheWinner(self, nums: List[int]) -> bool:\n \"\"\"\n 递归方法:两个玩家,可以求出先手和后手总分的差值,如果差值大于0则先手赢,否则后手赢。这样在一个玩家选取之后,\n 加上下一次分值的负数来表示差值。每个玩家可以选择第一个或最后一个,就形成了两个子节点,通过计算两个节点并选择\n 最大值可以优先保证先手获胜。\n 递归过程中需要注意,如果当前是对手回合,应该返回对手的最大分数,但由于对手回合得分是负数,需要乘-1求最大值之\n 后再乘-1变回原值,统一的话就使用flag标志当前回合是我(1)还是对手(-1)\n 时间复杂度对于每个点都有两种可能性,共n个点,所以是O(2^n),但n很小<=20所以可以通过。\n :param nums:\n :return:\n \"\"\"\n # def dfs(start, end, flag):\n # if start == end:\n # return nums[start] * flag\n # start_score = nums[start] * flag + dfs(start+1, end, -flag)\n # end_score = nums[end] * flag + dfs(start, end-1, -flag)\n # return max(start_score * flag, end_score * flag) * flag\n # return dfs(0, len(nums)-1, 1) >= 0\n \"\"\"\n 上述递归解法存在大量的重复子状态的问题,比如要求[2,4]所能得到的最大值,在拿走1时需要求一次,在拿走5时还要\n 再求一次,导致了重复状态。可以使用记忆化方法,存储[2,4]的值在下次求解时直接使用。\n 由于递归时自顶向下进行,在用dp时就需要逆向的自底向上,假设dp[i][j]表示在还剩[i,j]的数组时所能得到的最大值,\n 则当前状态dp[i][j]对应的情况是拿走i或拿走j所能得到的最大值,而拿走i对应得到的分数为nums[i]-dp[i+1][j],\n 拿走j对应得到的分数为nums[j]-dp[i][j-1],取最大值。\n 且dp是逐行更新的,所以可以使用一维数组表示,即数组是从右下角向左上角更新,所以当前行的第j位其实是dp[i][j],且\n dp[i][j-1]是已经求出来的,所以一维数组表示即可。\n \"\"\"\n n = len(nums)\n dp = [0] * n\n for i in range(n):\n dp[i] = nums[i]\n for i in range(n-2, -1, -1):\n for j in range(i+1, n):\n dp[j] = max(nums[i] - dp[j], nums[j] - dp[j-1])\n return dp[n-1] >= 0\n\n\ns = Solution()\nprint(s.PredictTheWinner([1, 5, 233, 2]))\n","sub_path":"DP/src/20-9-01-486-predict-the-winner.py","file_name":"20-9-01-486-predict-the-winner.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"373781545","text":"import os\nimport random\n\nclear = lambda: os.system('cls')\n\ncat = {\n 'name': 'Margo',\n 'breed': 'Cat',\n 'color': 'White',\n 'age': 2,\n 'weight': 5.5,\n 'hungry': True,\n 'phrases': [\"Meow!\", \"purrrr purrrr purrrr\", \"pypet is the best!\"]\n}\n\ndef startup_pypet():\n clear()\n print(\"#####PyPet - Game#####\")\n\ndef pypet_stats(pypet):\n clear()\n print(\"#####PyPet - Game#####\\n-----------------------\\n\")\n print(\"It's \" + pypet['name'] + \"!\")\n print('\\n' + pypet['name'] + \" weight \" + str(pypet['weight']) + \" pounds\")\n print('\\n' + pypet['name'] + \" is \" + str(pypet['age']) + \" years old\")\n if pypet['hungry']:\n print(\"\\nYour pet is hungry!\\n\")\n else:\n print(\"\\nYour pet is full!\\n\")\n\ndef chat_with_pypet(pypet):\n clear()\n print(\"Your pet says: \" + 
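The Chinese docstrings in the PredictTheWinner record above derive a score-difference formulation: let f(i, j) be the best margin the player to move can force on nums[i..j]; then f(i, j) = max(nums[i] - f(i+1, j), nums[j] - f(i, j-1)), and the first player wins exactly when f(0, n-1) >= 0. A hedged top-down rendering of that same recurrence, with memoized recursion in place of the file's bottom-up one-dimensional table:

from functools import lru_cache
from typing import List

def predict_the_winner(nums: List[int]) -> bool:
    @lru_cache(maxsize=None)
    def margin(i: int, j: int) -> int:
        # best score difference the current player can force on nums[i..j]
        if i == j:
            return nums[i]
        return max(nums[i] - margin(i + 1, j), nums[j] - margin(i, j - 1))
    return margin(0, len(nums) - 1) >= 0

# predict_the_winner([1, 5, 233, 2]) -> True, matching the print at the end
# of the original file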
random.choice(pypet['phrases']) + \"\\n\")\n\nstartup_pypet()\npypet = cat\nterminate = False\n\nwhile not terminate:\n\n print(\"-----------------------\")\n user_input = input('> ')\n if user_input == 'quit':\n terminate = True\n\n elif user_input == 'feed':\n clear()\n print(\"#####PyPet - Game#####\\n-----------------------\\n\")\n print(\"Omnomnom, you feed your pet!\\n\")\n print(\"Weight: +1 pounds\\n\")\n pypet['weight'] += 1\n pypet['hungry'] = False\n\n elif user_input == 'stats':\n pypet_stats(pypet)\n\n elif user_input == 'chat':\n chat_with_pypet(pypet)\n\n elif user_input == 'author':\n clear()\n print(\"#####PyPet - Game#####\\n-----------------------\\n\")\n print(\"Aslan Hadushkaev (EdwardM)\\n\")\n\n else:\n print(\"\\nCommand not found, please try again!\\n\")\n\nprint('\\nGoodbye!')\n","sub_path":"Python/Games/pypet.py","file_name":"pypet.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"601625476","text":"import numpy as np\nfrom glob import glob\nfrom tqdm import tqdm\n\ndef create_average_parcellation(labels):\n '''\n Creates average parcellation from set of labels,\n for every vertex chooses most frequent label.\n \n Parameters\n -----\n \n labels - ndarray,\n N x M array, N subject, M number of mesh vertices to label\n \n Returns\n -----\n average_label - ndarray,\n array of labels of length M\n '''\n n, m = labels.shape\n average_labels = np.zeros(m)\n for i in tqdm(range(m)):\n vals, freq = np.unique(labels[:, i], return_counts=True)\n ind = np.argmax(freq)\n average_labels[i] = vals[ind]\n return average_labels\n\nif __name__==\"__main__\":\n # Average Desikan parcellation\n labels = []\n files = glob('/data01/ayagoz/sparse_32_concon_HCP/parcellations/desikan_aparc/*.npy')\n for file in tqdm(files):\n label = np.load(file, allow_pickle=True)\n labels.append(label)\n labels= np.array(labels)\n\n aver_labels = create_average_parcellation(labels)\n np.save('/data01/ayagoz/sparse_32_concon_HCP/parcellations/desikan_aparc_average_1113.npy', aver_labels)\n\n # Average Destrieux parcellation\n labels = []\n files = glob('/data01/ayagoz/sparse_32_concon_HCP/parcellations/destrieux_aparc2009/*.npy')\n for file in tqdm(files):\n label = np.load(file, allow_pickle=True)\n labels.append(label)\n labels= np.array(labels)\n\n aver_labels = create_average_parcellation(labels)\n np.save('/data01/ayagoz/sparse_32_concon_HCP/parcellations/destrieux_aparc2009_average_1113.npy', aver_labels)","sub_path":"connective_parcellation/parcellation_processing/compute_aver_fs.py","file_name":"compute_aver_fs.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"302407606","text":"from __future__ import (absolute_import, division, print_function)\nimport inspect\nimport os\nimport shutil\nimport time\nimport unittest\n\nfrom fitbenchmarking import mock_problems\nfrom fitbenchmarking.utils.misc import get_problem_files\nfrom fitbenchmarking.utils.misc import get_css, get_js\nfrom fitbenchmarking.utils.options import Options\n\nclass CreateDirsTests(unittest.TestCase):\n\n def base_path(self):\n \"\"\"\n Helper function that returns the path to\n /fitbenchmarking/benchmark_problems\n \"\"\"\n bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))\n return bench_prob_dir\n\n def setUp(self):\n \"\"\"\n Create some datafiles to look for.\n \"\"\"\n self.dirname = 
os.path.join(self.base_path(),\n 'mock_datasets_{}'.format(time.time()))\n os.mkdir(self.dirname)\n\n expected = []\n for i in range(10):\n filename = 'file_{}.txt'.format(i)\n filepath = os.path.join(self.dirname, filename)\n expected.append(filepath)\n\n with open(filepath, 'w+') as f:\n f.write('This is a mock data file to check that finding files'\n 'is correct')\n\n self.expected = sorted(expected)\n\n def tearDown(self):\n \"\"\"\n Clean up created datafiles.\n \"\"\"\n shutil.rmtree(self.dirname)\n\n def test_getProblemFiles_get_correct_probs(self):\n \"\"\"\n Test that the correct files are found\n \"\"\"\n\n problems = get_problem_files(self.dirname)\n\n self.assertIsInstance(problems, list)\n self.assertEqual(self.expected, sorted(problems))\n\n def test_get_css(self):\n \n options = Options()\n print(options.results_dir)\n test_dir = os.path.join(options.results_dir,\"foo\")\n \n expected_css_dir = os.path.join(\"..\",\"css\")\n expected_main_css = os.path.join(expected_css_dir,\"main_style.css\")\n expected_table_css = os.path.join(expected_css_dir,\"table_style.css\")\n expected_custom_css = os.path.join(expected_css_dir,\"custom_style.css\")\n css = get_css(options,test_dir)\n\n self.assertEqual(css['main'], expected_main_css)\n self.assertEqual(css['table'], expected_table_css)\n self.assertEqual(css['custom'], expected_custom_css)\n\n def test_get_js(self):\n \n options = Options()\n print(options.results_dir)\n test_dir = os.path.join(options.results_dir,\"foo\")\n \n expected_js_dir = os.path.join(\"..\",\"js\")\n expected_mathjax_js = os.path.join(expected_js_dir,\"tex-mml-chtml.js\")\n js = get_js(options,test_dir)\n\n self.assertEqual(js['mathjax'], expected_mathjax_js)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"fitbenchmarking/utils/tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"310487619","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/5/7 1:35 PM\n# @Author : Yinghao Qin\n# @Email : y.qin@hss18.qmul.ac.uk\n# @File : plot_B_M.py\n# @Software: PyCharm\n\n# summarize statistics/ design comparisons\n# 1. B - B+aug (Goo, Res)\n# result structure\n# a. goo_b, goo_b + aug\n# b. res_b, res_b + aug\n# 2. B - M (Goo, Res)\n# result structure\n# a. goo_b, goo_m\n# b. res_b, res_m\n# 3. G - R (base)\n\n\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 1>. goo/res base\n# 2>. 
goo/res mod\ntry:\n cache_file = open('summary2b/tr_acc_epoch1.json', 'r')\n cache_contents = cache_file.read()\n goo_b_tr = json.loads(cache_contents)\n goo_b_tr = [goo_b_tr[str(i + 1)] for i in range(30)]\n\n cache_file = open('summary2b/te_acc_epoch1.json', 'r')\n cache_contents = cache_file.read()\n goo_b_te = json.loads(cache_contents)\n goo_b_te = [goo_b_te[str(i + 1)] for i in range(30)]\n\n cache_file = open('summary2b/tr_acc_epoch2.json', 'r')\n cache_contents = cache_file.read()\n goo_m_tr = json.loads(cache_contents)\n goo_m_tr = [goo_m_tr[str(i + 1)] for i in range(30)]\n\n cache_file = open('summary2b/te_acc_epoch2.json', 'r')\n cache_contents = cache_file.read()\n goo_m_te = json.loads(cache_contents)\n goo_m_te = [goo_m_te[str(i + 1)] for i in range(30)]\n\n cache_file.close()\nexcept:\n print(\"something bad happens!\")\n\nbase = [goo_b_tr, goo_b_te]\nmodi = [goo_m_tr, goo_m_te]\n# legends = ['Goo b Train', 'Goo b Test', 'Goo m Train', 'Goo m Test']\nlegends = ['Res b Train', 'Res b Test', 'Res m Train', 'Res m Test']\n\n\ndef plot_accuracy_curve(model1, model2, legend, show=True, path='plot.png'):\n num = 30\n x_axis = np.linspace(1, num, num, endpoint=True)\n plt.plot(x_axis, model1[0], color='r', label=legend[0])\n plt.plot(x_axis, model1[1], color='b', label=legend[1])\n plt.plot(x_axis, model2[0], color='darkgreen', linestyle='--', label=legend[2])\n plt.plot(x_axis, model2[1], color='orange', linestyle='--', label=legend[3])\n plt.legend()\n plt.title('Base vs Modi (ResNet)')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy(%)')\n\n plt.savefig(path)\n if show:\n plt.show()\n else:\n plt.close()\n\n\nplot_accuracy_curve(base, modi, legends, path='summary2b/plot.png')\n","sub_path":"code/summary_mnist/plot_B_M.py","file_name":"plot_B_M.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"274540248","text":"from pandas import HDFStore\nimport os\nimport time\n\nclass SafeHDFStore(HDFStore):\n \"\"\"\n write to HDFStore safely, queue if other process currently has access\n from: https://stackoverflow.com/questions/22522551/pandas-hdf5-as-a-database\n \"\"\"\n def __init__(self, *args, **kwargs):\n probe_interval = kwargs.pop(\"probe_interval\", 0.01)\n self._lock = \"%s.lock\" % args[0]\n while True:\n try:\n self._flock = os.open(self._lock, os.O_CREAT |\n os.O_EXCL |\n os.O_WRONLY)\n break\n except FileExistsError:\n time.sleep(probe_interval)\n except PermissionError:\n time.sleep(probe_interval)\n HDFStore.__init__(self, *args, **kwargs)\n\n def __exit__(self, *args, **kwargs):\n HDFStore.__exit__(self, *args, **kwargs)\n os.close(self._flock)\n os.remove(self._lock)","sub_path":"build/lib/FRETboard/SafeHDFStore.py","file_name":"SafeHDFStore.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"518949931","text":"__author__ = 'nrangrej'\n\n\n# template for \"Stopwatch: The Game\"\nimport simplegui\nimport random\n# define global variables\n# Global state\ncounter = 0\nminutes = 0\nseconds = 0\nmillseconds = 0\nsuccess_stops = 0\ntotal_stops = 0\nposition = [70, 100]\nwidth = 200\nheight = 200\ninterval = 100\nisRunning = True\n\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(time):\n global minutes, seconds, millseconds\n\n if time<600:\n seconds = time/10\n millseconds = time%10\n return str(minutes)+ \":\" + 
str(\"%02d\"%seconds) + \".\" + str(millseconds)\n else:\n minutes = time/600\n seconds = (time%600)/10\n millseconds = (time%600)%10\n return str(minutes)+ \":\" + str(\"%02d\"%seconds) + \".\" + str(millseconds)\n\n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef start():\n global isRunning\n timer.start()\n isRunning = True\n\ndef stop():\n timer.stop()\n global millseconds, success_stops, total_stops, isRunning\n if isRunning:\n total_stops = total_stops + 1\n if millseconds == 0:\n success_stops = success_stops + 1\n isRunning = False\n\n\n\ndef reset():\n global counter, success_stops, total_stops, isRunning\n if isRunning:\n timer.stop()\n counter = 0\n success_stops = 0\n total_stops = 0\n isRunning = False\n\n\n# define event handler for timer with 0.1 sec interval\ndef tick():\n global counter\n counter = counter + 1\n\n# define draw handler\ndef draw(canvas):\n canvas.draw_text(format(counter), position, 24, \"White\")\n canvas.draw_text(str(success_stops), [150, 30], 26, 'Green')\n canvas.draw_text('/', [165, 30], 26, 'White')\n canvas.draw_text(str(total_stops), [175, 30], 26, 'Blue')\n\n\n# create frame\nframe = simplegui.create_frame(\"StopWatch\", width, height)\n\n# register event handlers\nstart_button = frame.add_button('Start', start, 50)\nstop_btoon = frame.add_button('Stop', stop, 50)\nreset_button = frame.add_button('Reset', reset, 50)\n\n\nframe.set_draw_handler(draw)\ntimer = simplegui.create_timer(interval, tick)\n\n# start frame\nframe.start()\ntimer.start()\n\n# Please remember to review the grading rubric\n","sub_path":"Stopwatch.py","file_name":"Stopwatch.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"335430922","text":"\"\"\"\n===========================\nCreating annotated heatmaps\n===========================\n\nIt is often desirable to show data which depends on two independent\nvariables as a color coded image plot. This is often referred to as a\nheatmap. If the data is categorical, this would be called a categorical\nheatmap.\n\nMatplotlib's `~matplotlib.axes.Axes.imshow` function makes\nproduction of such plots particularly easy.\n\nThe following examples show how to create a heatmap with annotations.\nWe will start with an easy example and expand it to be usable as a\nuniversal function.\n\"\"\"\n\n\n##############################################################################\n#\n# A simple categorical heatmap\n# ----------------------------\n#\n# We may start by defining some data. What we need is a 2D list or array\n# which defines the data to color code. We then also need two lists or arrays\n# of categories; of course the number of elements in those lists\n# need to match the data along the respective axes.\n# The heatmap itself is an `~matplotlib.axes.Axes.imshow` plot\n# with the labels set to the categories we have.\n# Note that it is important to set both, the tick locations\n# (`~matplotlib.axes.Axes.set_xticks`) as well as the\n# tick labels (`~matplotlib.axes.Axes.set_xticklabels`),\n# otherwise they would become out of sync. 
The locations are just\n# the ascending integer numbers, while the ticklabels are the labels to show.\n# Finally we can label the data itself by creating a `~matplotlib.text.Text`\n# within each cell showing the value of that cell.\n\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n# sphinx_gallery_thumbnail_number = 2\n\nvegetables = [\"Class 1\", \"Class 2\", \"Class 3\", \"Class 4\",\n \"Class 5\", \"Class 6\", \"Class 7\",\"Class 8\", \"Class 9\",\"sum\"]\nfarmers = [\"Class 1\", \"Class 2\", \"Class 3\", \"Class 4\",\n \"Class 5\", \"Class 6\", \"Class 7\",\"Class 8\", \"Class 9\",\"sum\"]\n\n#16 kernal with 70 iterations\nharvest1 = np.array([[197, 1, 29, 0, 5, 12, 0, 0, 1],\n [4,428,8,2,1,13,3,0,18],\n [16,9,776,85,8,6,4,70,9],\n [3,4,58,332,1,1,0,50,5],\n [14,24,20,4,271,5,4,5,6],\n [2,9,1,1,4,322,14,0,3],\n [0,8,6,0,15,54,281,0,2],\n [0,1,12,2,0,4,0,180,0],\n [2,12,11,16,16,2,1,5,443]])\n# [238,496,921,442,321,419,307,310,487]\n#32 kernal with 70 iterations\nharvest2 = np.array([[190,4,26,2,11,7,6,0,1],\n [4,487,5,3,17,4,8,8,14],\n [10,7,780,78,14,4,8,52,4],\n [2,5,50,298,2,1,2,68,6],\n [10,13,11,2,328,1,17,0,2],\n [4,1,2,0,1,148,38,0,3],\n [0,5,4,1,8,2,441,0,4],\n [1,0,14,0,1,2,2,192,0],\n [1,11,11,10,22,2,7,0,423]])\n #[222,533,903,394,404,171,529,320,457]])\n#48 kernal with 70 iterations\nharvest3 = np.array([[186,0,31,1,3,6,0,0,2],\n [4,432,17,1,13,5,2,0,24],\n [15,6,760,69,18,5,4,5,2],\n [3,2,76,298,4,2,0,11,11],\n [18,10,14,2,307,3,11,0,7],\n [6,1,4,1,6,328,22,0,4],\n [0,5,5,1,24,45,275,0,2],\n [0,0,4,10,10,20,0,141,0],\n [2,8,6,6,6,6,2,0,468]])\n #[234,464,917,389,391,420,316,157,520]])\n#64 kernal with 70 iterations\nharvest4= np.array([[180,0,37,0,6,6,0,0,4],\n [2,441,11,1,8,11,2,0,14],\n [12,6,805,65,13,9,1,37,7],\n [0,1,40,344,3,3,0,17,7],\n [10,16,15,1,317,4,9,6,4],\n [1,4,3,1,0,333,29,0,3],\n [0,1,3,2,10,13,319,6,4],\n [0,0,8,7,0,5,0,240,0],\n [1,12,11,6,15,8,2,6,434]])\n #[206,481,938,427,372,392,362,312,477]])\n\n#precision=np.array([])\n#50 iterations with 16 kernels\nprecision1=[0.83,0.87,0.79,0.75,0.88,0.86,0.83,0.74,0.95]\nprecision2=[0.86,0.91,0.78,0.76,0.84,0.9,0.82,0.77,0.94]\nprecision3=[0.85,0.93,0.88,0.71,0.84,0.8,0.88,0.58,0.93]\n'''\nharvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],\n [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],\n [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],\n [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],\n [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],\n [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],\n [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])\n\n\n\nfig, ax = plt.subplots()\nim = ax.imshow(harvest)\n\n# Show all ticks and label them with the respective list entries\nax.set_xticks(np.arange(len(farmers)))\n#ax.set_yticks(np.arange(len(vegetables)), labels=vegetables)\nax.set_yticks(np.arange(len(vegetables)))\nax.set_xticklabels(farmers)\nax.set_yticklabels(vegetables)\n\n# Rotate the tick labels and set their alignment.\nplt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n# Loop over data dimensions and create text annotations.\nfor i in range(len(vegetables)):\n for j in range(len(farmers)):\n text = ax.text(j, i, harvest[i, j],\n ha=\"center\", va=\"center\", color=\"w\")\n\nax.set_title(\"Harvest of local farmers (in tons/year)\")\nfig.tight_layout()\nplt.show()\n'''\n\n#############################################################################\n# Using the helper function code style\n# ------------------------------------\n#\n# As discussed in the :ref:`Coding styles `\n# one might want to reuse such code to 
create some kind of heatmap\n# for different input data and/or on different axes.\n# We create a function that takes the data and the row and column labels as\n# input, and allows arguments that are used to customize the plot.\n#\n# Here, in addition to the above, we also want to create a colorbar and\n# position the labels above the heatmap instead of below it.\n# The annotations shall get different colors depending on a threshold\n# for better contrast against the pixel color.\n# Finally, we turn the surrounding axes spines off and create\n# a grid of white lines to separate the cells.\n\n\ndef heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw=None, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (M, N).\n row_labels\n A list or array of length M with the labels for the rows.\n col_labels\n A list or array of length N with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n if cbar_kw is None:\n # avoid a shared mutable default argument\n cbar_kw = {}\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # Show all ticks and label them with the respective list entries.\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n # Let the horizontal axes labeling appear on top.\n #ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)\n\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n # Rotate the tick labels and set their alignment.\n #plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",rotation_mode=\"anchor\")\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"left\", rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n #ax.spines[:].set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=1)\n # ax.tick_params(which=\"minor\", bottom=False, left=False)\n ax.tick_params(which=\"minor\", bottom=True, left=True)\n\n return im, cbar\n\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x}\",\n textcolors=(\"black\", \"white\"),\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A pair of colors. The first is used for values below a threshold,\n the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. 
Optional.\n **textkw\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), fontsize=8, **kw)\n texts.append(text)\n\n return texts\n\n\n##########################################################################\n# The above now allows us to keep the actual plot creation pretty compact.\n#\n'''\nfig, ax = plt.subplots()\n\nim, cbar = heatmap(harvest, vegetables, farmers, ax=ax,\n cmap=\"YlGn\", cbarlabel=\"harvest [t/year]\")\ntexts = annotate_heatmap(im, valfmt=\"{x:.1f}\")\n\nfig.tight_layout()\nplt.show()\n'''\nfig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(18, 12))\n\nharvest1 = np.transpose(harvest1)\nim, cbar = heatmap(harvest1, vegetables, farmers, ax=ax1, cmap=\"Greens\")\ntexts = annotate_heatmap(im, valfmt=\"{x}\")\n\nax1.set_title(\"16 kernels and 70 iterations\")\n\nharvest2 = np.transpose(harvest2)\nim, cbar = heatmap(harvest2, vegetables, farmers, ax=ax2, cmap=\"Blues\")\ntexts = annotate_heatmap(im, valfmt=\"{x}\")\nax2.set_title(\"32 kernels and 50 iterations\")\n\nharvest3 = np.transpose(harvest3)\nim, cbar = heatmap(harvest3, vegetables, farmers, ax=ax3, cmap=\"Purples\")\ntexts = annotate_heatmap(im, valfmt=\"{x}\")\nax3.set_title(\"48 kernels and 40 iterations\")\n\nharvest4 = np.transpose(harvest4)\nim, cbar = heatmap(harvest4, vegetables, farmers, ax=ax4, cmap=\"Wistia\")\ntexts = annotate_heatmap(im, valfmt=\"{x}\")\nax4.set_title(\"64 kernels and 40 iterations\")\n\nfig.tight_layout()\n# supported formats: eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff\nfig.savefig('./heatmap.svg', format='svg', dpi=600)\nfig.savefig('./heatmap.png', format='png', dpi=600)\nplt.show()\n#############################################################################\n# Some more complex heatmap examples\n# ----------------------------------\n#\n# In the following we show the versatility of the previously created\n# functions by applying them in different cases and using different arguments.\n#
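\n# For instance, the same helpers can render a row-normalized matrix with a\n# percent annotation format (a sketch: any `matplotlib.ticker.Formatter`\n# works as `valfmt`, and the \"PuBu\" colormap is an arbitrary choice):\n#\n# data = harvest1 / harvest1.sum(axis=1, keepdims=True)\n# fig, ax = plt.subplots()\n# im, _ = heatmap(data, vegetables, farmers, ax=ax, cmap=\"PuBu\")\n# annotate_heatmap(im, valfmt=matplotlib.ticker.StrMethodFormatter(\"{x:.1%}\"))\n#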
","sub_path":"image_annotated_heatmap.py","file_name":"image_annotated_heatmap.py","file_ext":"py","file_size_in_byte":12053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"286530681","text":"from selenium import webdriver\n\nbrowser = webdriver.Chrome()\n#browser.get(\"http://suninjuly.github.io/find_xpath_form.html\")\nbrowser.get(\"http://suninjuly.github.io/registration2.html\")\nelements = browser.find_elements_by_tag_name(\"input\")\nfor element in elements:\n element.send_keys(\"Im a memer boi\")\n\nbutton = browser.find_element_by_xpath('//button[text()=\"Отправить\"]')\nbutton.click()\n# don't forget to add a blank line at the end of every file in Python\n","sub_path":"SELENIUM/lesson5_step7.py","file_name":"lesson5_step7.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"337130388","text":"#!/usr/bin/env python3\n\nimport socketserver\nimport http.server\nimport binascii\nimport botocore\nimport argparse\nimport hashlib\nimport logging\nimport atexit\nimport signal\nimport boto3\nimport uuid\nimport json\nimport time\nimport sys\nimport os\n\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom base64 import b64encode, b64decode\n\n# {'/path/to/data.key': (time.time(), AESCipher)}\nCIPHERS = {}\n\n\ndef expunge_key(data_key_path):\n logger = logging.getLogger(__name__)\n if data_key_path in CIPHERS:\n del CIPHERS[data_key_path]\n logger.debug('expunged cipher for {}'.format(data_key_path))\n return True\n logger.debug('cipher for {} not found'.format(data_key_path))\n return False\n\n\ndef fetch_cipher(data_key_path):\n logger = logging.getLogger(__name__)\n logger.debug('fetching cipher for {}'.format(data_key_path))\n if data_key_path not in CIPHERS:\n with open(data_key_path, 'r') as dk_fd:\n data_key = json.loads(dk_fd.read())\n encrypted_data_key = b64decode(data_key['CiphertextBlob'])\n\n kms_client = boto3.client('kms')\n decrypted_data_key = kms_client.decrypt(CiphertextBlob=encrypted_data_key)\n if decrypted_data_key['ResponseMetadata']['HTTPStatusCode'] != 200:\n raise ValueError(\"KMS data key decrypt error\")\n\n CIPHERS[data_key_path] = (time.time(), AESCipher(decrypted_data_key['Plaintext']))\n\n logger.debug('found cipher for {}'.format(data_key_path))\n return CIPHERS[data_key_path][1]\n\n\ndef encrypt_data(data_key_path, plain_text):\n logger = logging.getLogger(__name__)\n logger.debug('encrypting data using {}.'.format(data_key_path))\n cipher = fetch_cipher(data_key_path)\n cipher_text = cipher.encrypt(plain_text)\n logger.debug('successful encrypt of data using {}.'.format(data_key_path))\n return cipher_text\n\n\ndef decrypt_data(data_key_path, cipher_text):\n logger = logging.getLogger(__name__)\n logger.debug('decrypting data using {}.'.format(data_key_path))\n cipher = fetch_cipher(data_key_path)\n plain_text = cipher.decrypt(cipher_text)\n logger.debug('successful decrypt of data using {}.'.format(data_key_path))\n return plain_text\n\n\ndef encrypt_file(data_key_path, source_filename, destination_dir, request_id, suffix='.enc'):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} encrypting {} to {} using {}.'.format(request_id, source_filename, destination_dir, data_key_path))\n rc = (False, None)\n filename = os.path.split(source_filename)[-1]\n destination_file = os.path.join(destination_dir, filename + suffix)\n\n try:\n with open(source_filename, 'rb') as input_file:\n input_data = input_file.read()\n\n output_data = {\"filename\": filename}\n output_data[\"SHA1Hash\"] = hashlib.sha1(input_data).hexdigest()\n cipher_text = encrypt_data(data_key_path, input_data)\n encoded_cipher_text = b64encode(cipher_text)\n output_data[\"payload\"] = encoded_cipher_text.decode('utf-8')\n\n with open(destination_file, 'w') as output_file:\n output_file.write(json.dumps(output_data))\n\n rc = (True, None)\n logger.info('RequestID:{} successfully encrypted {} using data key {}'.format(request_id, source_filename, data_key_path))\n except botocore.exceptions.ClientError as e:\n logger.exception(e)\n rc = (False, \"KMS error. 
Check key authorizations.\")\n except FileNotFoundError as e:\n logger.exception(e)\n rc = (False, \"{} not found\".format(e.filename))\n except IOError as e:\n logger.exception(e)\n rc = (False, str(e))\n except UnicodeDecodeError as e:\n logger.exception(e)\n except Exception as e:\n logger.exception(e)\n raise\n return rc\n\n\ndef decrypt_file(data_key_path, source_filename, destination_dir, request_id):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} decrypting {} to {} using {}.'.format(request_id, source_filename, destination_dir, data_key_path))\n rc = (False, None)\n\n try:\n with open(source_filename, 'r') as sf_fd:\n input_data = json.loads(sf_fd.read())\n filename = input_data['filename']\n encrypted_data = b64decode(input_data['payload'])\n decrypted_data = decrypt_data(data_key_path, encrypted_data)\n logger.debug(str(hashlib.sha1(decrypted_data).hexdigest()) + \" vs \" + str(input_data['SHA1Hash']))\n if hashlib.sha1(decrypted_data).hexdigest() != input_data['SHA1Hash']:\n return (False, 'Integrity check failure')\n with open(os.path.join(destination_dir, filename), 'wb') as of_fd:\n of_fd.write(decrypted_data)\n rc = (True, None)\n logger.info('RequestID:{} successfully decrypted {} using data key {}'.format(request_id, source_filename, data_key_path))\n except botocore.exceptions.ClientError as e:\n logger.exception(e)\n rc = (False, \"KMS error. Check key authorizations.\")\n except binascii.Error as e:\n logger.exception(e)\n rc = (False, \"Integrity error\")\n except FileNotFoundError as e:\n logger.exception(e)\n rc = (False, \"{} not found\".format(e.filename))\n except IOError as e:\n logger.exception(e)\n rc = (False, str(e))\n except Exception as e:\n logger.exception(e)\n raise\n return rc\n\n\nclass AESCipher:\n _block_size = 16\n\n def __init__(self, key):\n self.key = key\n\n def encrypt(self, raw):\n if type(raw) == str:\n padded = raw + (self._block_size - len(raw) % self._block_size) * \\\n chr(self._block_size - len(raw) % self._block_size)\n elif type(raw) == bytes:\n padded = raw + (self._block_size - len(raw) % self._block_size) * \\\n bytes([self._block_size - len(raw) % self._block_size])\n else:\n raise TypeError('AESCipher.encrypt() may handle str and bytes only')\n\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return iv + cipher.encrypt(padded)\n\n def decrypt(self, enc):\n iv = enc[:16]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n decrypted = cipher.decrypt(enc[16:])\n return decrypted[:-ord(decrypted[len(decrypted)-1:])]\n\n\nclass KMSCdHandler(http.server.SimpleHTTPRequestHandler):\n def __init__(self, req, client_addr, server):\n http.server.SimpleHTTPRequestHandler.__init__(self, req, client_addr, server)\n\n def do_expunge_key(self, data_key_path, request_id):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} starting expunge of {}'.format(request_id, data_key_path))\n response_data = {'RequestID': request_id}\n success = expunge_key(data_key_path)\n if success:\n response_data['ResponseMessage'] = 'key expunged'\n else:\n response_data['error'] = 'key not expunged'\n self.send_response(200)\n content = json.dumps(response_data)\n self.send_header(\"Content-type\", \"application/json;charset=utf-8\")\n self.send_header(\"Content-length\", len(content))\n self.end_headers()\n self.wfile.write(content.encode(\"utf-8\"))\n self.wfile.flush()\n logger.info('RequestID:{} expunging {}'.format(request_id, data_key_path))\n\n def do_encrypt_files(self, data_key_path, 
paths, destination_dir, request_id):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} starting encrypt of {} files using {}'.format(request_id, len(paths), data_key_path))\n response_data = {'RequestID': request_id}\n success_count = 0\n for path in paths:\n logger.debug(\"RequestID:{} attempting to encrypt {} to {} using data key {}\".format(request_id, path, destination_dir, data_key_path))\n response_data[path] = encrypt_file(data_key_path, path, destination_dir, request_id)\n if response_data[path][0]:\n success_count += 1\n logger.debug(\"RequestID:{} encrypt {}\".format(request_id, response_data[path]))\n self.send_response(200)\n content = json.dumps(response_data)\n self.send_header(\"Content-type\", \"application/json;charset=utf-8\")\n self.send_header(\"Content-length\", len(content))\n self.end_headers()\n self.wfile.write(content.encode(\"utf-8\"))\n self.wfile.flush()\n logger.info('RequestID:{} successful encrypting {} of {} files using {}'.format(request_id, success_count, len(paths), data_key_path))\n\n def do_decrypt_files(self, data_key_path, paths, destination_dir, request_id):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} starting decrypt of {} files using {}'.format(request_id, len(paths), data_key_path))\n response_data = {'RequestID': request_id}\n success_count = 0\n for path in paths:\n logger.debug(\"RequestID:{} attempting to decrypt {} to {} using data key {}\".format(request_id, path, destination_dir, data_key_path))\n response_data[path] = decrypt_file(data_key_path, path, destination_dir, request_id)\n if response_data[path][0]:\n success_count += 1\n logger.debug(\"RequestID:{} decrypt {}\".format(request_id, response_data[path]))\n self.send_response(200)\n content = json.dumps(response_data)\n self.send_header(\"Content-type\", \"application/json;charset=utf-8\")\n self.send_header(\"Content-length\", len(content))\n self.end_headers()\n self.wfile.write(content.encode(\"utf-8\"))\n self.wfile.flush()\n logger.info('RequestID:{} successful decrypting {} of {} files using {}'.format(request_id, success_count, len(paths), data_key_path))\n\n def do_encrypt_data(self, data_key_path, plain_text, request_id):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} start of encrypt data using {}'.format(request_id, data_key_path))\n response_data = {'RequestID': request_id}\n\n try:\n encrypted_data = encrypt_data(data_key_path, plain_text)\n response_data['CipherText'] = b64encode(encrypted_data).decode('utf-8')\n response_data['SHA1Hash'] = hashlib.sha1(plain_text.encode('utf-8')).hexdigest()\n self.send_response(200)\n logger.info('RequestID:{} successfully encrypted data using {}'.format(request_id, data_key_path))\n except botocore.exceptions.ClientError as e:\n logger.warning('RequestID:{} received botocore.exceptions.ClientError employing {}'.format(request_id, data_key_path))\n response_data['error'] = \"AWS client error employing {}\".format(data_key_path)\n self.send_response(400)\n except IOError as e:\n logger.warning('RequestID:{} I/O error accessing {}'.format(request_id, e.filename))\n response_data['error'] = \"I/O error accessing {}\".format(e.filename)\n self.send_response(400)\n except FileNotFoundError as e:\n logger.warning('RequestID:{} {} not found'.format(request_id, e.filename))\n response_data['error'] = \"{} not found\".format(e.filename)\n self.send_response(400)\n except Exception as e:\n logger.exception(e)\n raise\n\n content = json.dumps(response_data)\n 
self.send_header(\"Content-type\", \"application/json;charset=utf-8\")\n self.send_header(\"Content-length\", len(content))\n self.end_headers()\n self.wfile.write(content.encode(\"utf-8\"))\n self.wfile.flush()\n\n def do_decrypt_data(self, data_key_path, cipher_text, request_id, sha1=None):\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} start of decrypt data using {}'.format(request_id, data_key_path))\n response_data = {'RequestID': request_id}\n\n try:\n decrypted_data = decrypt_data(data_key_path, b64decode(cipher_text))\n response_data['PlainText'] = decrypted_data.decode('utf-8')\n response_data['SHA1Hash'] = hashlib.sha1(response_data['PlainText'].encode('utf-8')).hexdigest() == sha1\n self.send_response(200)\n logger.info('RequestID:{} successfully decrypted data using {}'.format(request_id, data_key_path))\n except botocore.exceptions.ClientError as e:\n logger.warning('RequestID:{} received botocore.exceptions.ClientError employing {}'.format(request_id, data_key_path))\n response_data['error'] = \"AWS client error employing {}\".format(data_key_path)\n self.send_response(400)\n except IOError as e:\n logger.warning('RequestID:{} I/O error accessing {}'.format(request_id, e.filename))\n response_data['error'] = \"I/O error accessing {}\".format(e.filename)\n self.send_response(400)\n except FileNotFoundError as e:\n logger.warning('RequestID:{} {} not found'.format(request_id, e.filename))\n response_data['error'] = \"{} not found\".format(e.filename)\n self.send_response(400)\n except Exception as e:\n logger.exception(e)\n raise\n\n content = json.dumps(response_data)\n self.send_header(\"Content-type\", \"application/json;charset=utf-8\")\n self.send_header(\"Content-length\", len(content))\n self.end_headers()\n self.wfile.write(content.encode(\"utf-8\"))\n self.wfile.flush()\n\n def do_GET(self):\n self.send_response(405)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(\"kmscd\")\n self.wfile.write(\"
POST that stuff.
\")\n self.wfile.flush()\n\n def do_POST(self):\n request_id = str(uuid.uuid4())\n logger = logging.getLogger(__name__)\n logger.debug('RequestID:{} POST to {} {}'.format(request_id, self.server, self.path))\n try:\n content_len = int(self.headers['Content-Length'])\n post_body = self.rfile.read(content_len).decode('utf-8')\n request = json.loads(post_body)\n\n data_key_path = request['DataKeyPath']\n\n if request['Action'] == 'EncryptData' and self.path in ['/latest', '/v1.0']:\n self.do_encrypt_data(data_key_path, request['PlainText'], request_id)\n elif request['Action'] == 'DecryptData' and self.path in ['/latest', '/v1.0']:\n self.do_decrypt_data(data_key_path, request['CipherText'], request_id, request['SHA1Hash'])\n elif request['Action'] == 'EncryptFiles' and self.path in ['/latest', '/v1.0']:\n self.do_encrypt_files(data_key_path, request['FilePaths'], request['DestinationDir'], request_id)\n elif request['Action'] == 'DecryptFiles' and self.path in ['/latest', '/v1.0']:\n self.do_decrypt_files(data_key_path, request['FilePaths'], request['DestinationDir'], request_id)\n elif request['Action'] == 'ExpungeKey' and self.path in ['/latest', '/v1.0']:\n self.do_expunge_key(data_key_path, request_id)\n else:\n self.send_response(400)\n content = '{\"error\":\"unsupported request\", \"RequestID\":\"' + str(request_id) + '\"}'\n self.send_header(\"Content-type\", \"application/json;charset=utf-8\")\n self.send_header(\"Content-length\", len(content))\n self.end_headers()\n self.wfile.write(content.encode(\"utf-8\"))\n self.wfile.flush()\n logger.info('RequestID:{} posted unsupported request'.format(request_id))\n\n except BrokenPipeError:\n logger.error('BrokenPipeError for requestid {} talking to client'.format(request_id))\n raise\n\n except Exception as e:\n logger.error('handling unanticipated exception')\n logger.exception(e)\n raise
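\n\n# A request sketch for the handler above (the data key path and plaintext are\n# hypothetical; port 23223 is the argument-parser default further down):\n#\n# curl -s -X POST http://localhost:23223/v1.0 -d '{\"Action\": \"EncryptData\", \"DataKeyPath\": \"/etc/kmscd/data.key\", \"PlainText\": \"secret\"}'\n#\n# The JSON response carries a RequestID plus CipherText and SHA1Hash on\n# success, or an \"error\" field with HTTP status 400 on failure.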
\n\n\"\"\"Generic linux daemon base class for python 3.x.\"\"\"\n\n\nclass daemon:\n \"\"\"A generic daemon class.\n\n Usage: subclass the daemon class and override the run() method.\"\"\"\n\n def __init__(self, pidfile, host, port):\n self.pidfile = pidfile\n self.host = host\n self.port = port\n\n def daemonize(self):\n \"\"\"Daemonize class. UNIX double fork mechanism.\"\"\"\n logger = logging.getLogger(__name__)\n\n try:\n pid = os.fork()\n if pid > 0:\n # exit first parent\n sys.exit(0)\n except OSError as err:\n logger.error('fork #1 failed: {0}\\n'.format(err))\n sys.exit(1)\n\n # decouple from parent environment\n os.chdir('/')\n os.setsid()\n os.umask(0)\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n\n # exit from second parent\n sys.exit(0)\n except OSError as err:\n logger.error('fork #2 failed: {0}\\n'.format(err))\n sys.exit(1)\n\n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n si = open(os.devnull, 'r')\n so = open(os.devnull, 'a+')\n se = open(os.devnull, 'a+')\n\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n\n # write pidfile\n atexit.register(self.delpid)\n\n pid = str(os.getpid())\n with open(self.pidfile, 'w+') as f:\n f.write(pid + '\\n')\n\n def delpid(self):\n os.remove(self.pidfile)\n\n def start(self):\n \"\"\"Start the daemon.\"\"\"\n logger = logging.getLogger(__name__)\n\n # Check for a pidfile to see if the daemon already runs\n try:\n with open(self.pidfile, 'r') as pf:\n\n pid = int(pf.read().strip())\n except IOError:\n pid = None\n\n if pid:\n message = \"pidfile {0} already exists. \" + \\\n \"Daemon already running?\\n\"\n logger.error(message.format(self.pidfile))\n sys.exit(1)\n\n # Start the daemon\n self.daemonize()\n self.run()\n\n def stop(self):\n \"\"\"Stop the daemon.\"\"\"\n logger = logging.getLogger(__name__)\n\n # Get the pid from the pidfile\n try:\n with open(self.pidfile, 'r') as pf:\n pid = int(pf.read().strip())\n except IOError:\n pid = None\n\n if not pid:\n message = \"pidfile {0} does not exist. \" + \\\n \"Daemon not running?\\n\"\n logger.error(message.format(self.pidfile))\n # not an error in a restart\n return\n\n # Try killing the daemon process\n try:\n while True:\n os.kill(pid, signal.SIGTERM)\n time.sleep(0.1)\n except OSError as err:\n e = str(err.args)\n if e.find(\"No such process\") > 0:\n if os.path.exists(self.pidfile):\n os.remove(self.pidfile)\n else:\n logger.warning(str(err.args))\n sys.exit(1)\n\n def restart(self):\n \"\"\"Restart the daemon.\"\"\"\n self.stop()\n self.start()\n\n def run(self):\n \"\"\"You should override this method when you subclass Daemon.\n\n It will be called after the process has been daemonized by\n start() or restart().\"\"\"\n\n\nclass KMSCDaemon(daemon):\n def run(self):\n logger = logging.getLogger(__name__)\n logger.info(\"starting kmscd\")\n try:\n httpd = socketserver.TCPServer((self.host, self.port), KMSCdHandler)\n logger.info(\"serving at {}:{}\".format(self.host, self.port))\n httpd.serve_forever()\n except Exception as e:\n logger.error(\"Unexpected Exception in KMSCDaemon.run()\")\n logger.exception(e)\n raise\n\nif __name__ == \"__main__\":\n logger = logging.getLogger(__name__)\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', help=\"verbose\")\n parser.add_argument('-l', '--logfile', action='store', dest='logfile', default='kmscd.log', help=\"log file\")\n parser.add_argument('-P', '--pidfile', action='store', dest='pidfile', default='/var/tmp/.kmscd.pid', help=\"Process ID (PID) file\")\n parser.add_argument('-H', '--host', action='store', dest='host', default='localhost', help=\"Changing this to something other than localhost may result in severe security risks\")\n parser.add_argument('-p', '--port', action='store', 
dest='port', default=23223, type=int, help=\"TCP port to listen on\")\n parser.add_argument('command', action='store', default='foreground', help=\"start, stop or foreground\")\n\n args = parser.parse_args()\n\n if args.verbose:\n loglevel = logging.DEBUG\n else:\n loglevel = logging.INFO\n\n if args.command == 'start':\n logging.basicConfig(filename=args.logfile, level=loglevel)\n logger.info('starting server')\n server = KMSCDaemon(pidfile=args.pidfile, host=args.host, port=args.port)\n server.start()\n elif args.command == 'stop':\n logging.basicConfig(filename=args.logfile, level=loglevel)\n logger.info('stopping server')\n server = KMSCDaemon(pidfile=args.pidfile, host=None, port=None)\n server.stop()\n elif args.command == 'foreground':\n logging.basicConfig(level=loglevel)\n logger.info('starting server in foreground')\n httpd = socketserver.TCPServer((args.host, args.port), KMSCdHandler)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n","sub_path":"bin/kmscd.py","file_name":"kmscd.py","file_ext":"py","file_size_in_byte":21789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"518000885","text":"from flask import jsonify, make_response, request, url_for, abort\nfrom flask_httpauth import HTTPBasicAuth\nimport random\n\nfrom start_flask import app\nfrom start_flask.models import *\n\n\nauth = HTTPBasicAuth()\n\n@auth.get_password\ndef get_password(username):\n\tif username == 'httptest':\n\t\treturn 'httppwd'\n\treturn None\n\n@auth.error_handler\ndef unauthorized():\n\treturn make_response(jsonify({'error': 'Unauthorized access'}), 403)\n\n@app.errorhandler(404)\ndef not_found(error):\n\treturn make_response(jsonify({'error': 'Not found'}), 404)\n\ntasks = {\n\t1: {\n\t\t'id': 1,\n\t\t'title': u'Java',\n\t\t'description': u'Java is a general-purpose, class-based, object-oriented programming language designed for having lesser implementation dependencies', \n\t\t'done': False\n\t},\n\t2: {\n\t\t'id': 2,\n\t\t'title': u'Python',\n\t\t'description': u'Python is an interpreted, object-oriented, high-level programming language with dynamic semantics.', \n\t\t'done': True\n\t},\n\t3: {\n\t\t'id': 3,\n\t\t'title': u'JavaScript',\n\t\t'description': u\"JavaScript (often shortened to JS) is a lightweight, interpreted, object-oriented language with first-class functions, and is best known as the scripting language for Web pages, but it's used in many non-browser environments as well\",\n\t\t'done': True\n\t},\n\t4: {\n\t\t'id': 4,\n\t\t'title': u'MySQL',\n\t\t'description': u\"MySQL is an Oracle-backed open source relational database management system (RDBMS) based on Structured Query Language (SQL).\",\n\t\t'done': True\n\t}\n}\n\n\ndef public_uri(task):\n\tnew_task = {}\n\tfor field in task:\n\t\tnew_task[field] = task[field]\n\n\t\tif field == 'id':\n\t\t\tnew_task['uri'] = url_for('get_task', task_id=task['id'], _external=True)\n\t\n\treturn new_task\n\n\n@app.route('/todo/api/v1.0/tasks', methods=['GET'])\n@auth.login_required\ndef get_tasks():\n\t# curl -i http://localhost:5000/todo/api/v1.0/tasks\n\t# curl -u httptest:httppwd -i http://localhost:5000/todo/api/v1.0/tasks\n\treturn jsonify({'tasks': [public_uri(task) for task in tasks.values()]})\n\n\n@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n\t# curl -i http://localhost:5000/todo/api/v1.0/tasks/1\n\ttask = tasks.get(task_id)\n\tprint(task)\n\tif not task or len(task.keys()) == 0:\n\t\tabort(404)\n\t\n\treturn 
jsonify({'task': task})\n\n\n@app.route('/todo/api/v1.0/tasks', methods=['POST'])\ndef create_task():\n\t# curl -i -H \"Content-Type: application/json\" -X POST -d '{\"title\":\"HTML/CSS\"}' http://localhost:5000/todo/api/v1.0/tasks\n\tif not request.json or not 'title' in request.json:\n\t\tabort(400)\n\t\n\tnewID = random.getrandbits(32)\n\ttask = {\n\t\t'id': newID,\n\t\t'title': request.json['title'],\n\t\t'description': request.json.get('description', \"\"),\n\t\t'done': False\n\t}\n\ttasks.setdefault(newID, task)\n\t\n\treturn jsonify({'task': task}), 201\n\n\n@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])\ndef update_task(task_id):\n\t# curl -i -H \"Content-Type: application/json\" -X PUT -d '{\"title\":\"HTML5/CSS\"}' http://localhost:5000/todo/api/v1.0/tasks/taskID\n\ttask = tasks.get(task_id)\n\n\tif not task or len(task.keys()) == 0 or \\\n\t\tnot request.json or \\\n\t\t('title' in request.json and type(request.json['title']) is not str) or \\\n\t\t('description' in request.json and type(request.json['description']) is not str) or \\\n\t\t('done' in request.json and type(request.json['done']) is not bool):\n\t\tabort(404)\n\t\n\ttask['title'] = request.json.get('title', task['title'])\n\ttask['description'] = request.json.get('description', task['description'])\n\ttask['done'] = request.json.get('done', task['done'])\n\n\treturn jsonify({'task': task})\n\n\n@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['DELETE'])\ndef delete_task(task_id):\n\t# curl -i -X DELETE http://localhost:5000/todo/api/v1.0/tasks/3\n\ttask = tasks.get(task_id)\n\tif not task or len(task.keys()) == 0:\n\t\tabort(404)\n\ttasks.pop(task_id)\n\t\n\treturn jsonify({'result': True})\n\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=5000, threaded=True)","sub_path":"start_flask/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"353344512","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route(\"/index\")\ndef index():\n data = {\n \"name\": \"python\",\n \"age\": 18,\n \"my_dict\": {\"city\": \"sz\"},\n \"my_list\": [1,2,3,4,5],\n \"my_int\": 0\n }\n return render_template(\"index.html\", **data)\n\n\n\ndef list_step_2(li):\n \"\"\"Custom filter.\"\"\"\n return li[::2]\n\n\n# register the filter\napp.add_template_filter(list_step_2,\"li2\")\n\n@app.template_filter(\"li3\")\ndef list_step_3(li):\n \"\"\"Custom filter.\"\"\"\n return li[::3]\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"Flask/11_template.py","file_name":"11_template.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"426449959","text":"from django import template\nfrom ..models import Profile, Series, Season, Episode\n\nregister = template.Library()\n\n@register.filter\ndef is_favorite(series, profile):\n if profile is None:\n return False\n else:\n result = profile.series_favorite.filter(pk=series.pk).first() \n if result is None:\n return False\n else:\n return True\n\n@register.filter\ndef is_watched(series, profile):\n if profile is None:\n return False\n else:\n result = profile.series_watched.filter(pk=series.pk).first() \n if result is None:\n return False\n else:\n return True \n\n@register.filter\ndef is_watchlisted(series, profile):\n if profile is None:\n return False\n else:\n result = profile.series_watchlist.filter(pk=series.pk).first() \n if result is None:\n return False\n else:\n return True \n\n\n@register.simple_tag\ndef get_seasons(*args):\n seasons = Season.objects.filter(series=args[0])\n return seasons\n\n@register.simple_tag\ndef get_episodes(*args):\n episodes = Episode.objects.filter(season=args[0])\n return episodes\n 
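\n# Template usage sketch (names are illustrative; assumes the module is loaded\n# with `{% load series_tags %}` at the top of the template):\n#\n# {% if series|is_favorite:profile %} ... {% endif %}\n# {% get_seasons series as seasons %}\n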
","sub_path":"movies/templatetags/series_tags.py","file_name":"series_tags.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"453906114","text":"def maior_primo_menor_que(n):\n # largest prime <= n; returns -1 when there is none (n < 2)\n if n < 2:\n return -1\n i = n\n while i >= 2:\n divisores = 0\n for divisor in range(2, i):\n if i % divisor == 0:\n divisores += 1\n if divisores == 0:\n return i\n i -= 1\n return -1","sub_path":"backup/user_191/ch34_2020_04_01_13_57_19_398026.py","file_name":"ch34_2020_04_01_13_57_19_398026.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"170809662","text":"import numpy as np\nfrom numpy import matlib\n\nfrom scipy import misc\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom skimage import io\n\n# Homework 1\n# @Eduardo Gonzalez\n\nm = np.array([[1,2, 3],[4, 5, 6],[7, 8, 9]])\n\na = np.array([1, 2, 3])\nb = np.array([4, 5, 6])\nc = np.array([7, 8, 9])\n\nprint(\"m: \")\nprint(m, \"\\n\")\n\nprint(\"a:\", a, \"\\n\")\nprint(\"b:\", b, \"\\n\")\nprint(\"c:\", c, \"\\n\")\n\n\n# Dot product of a and b\naDotb = np.dot(a, b)\nprint(\"Dot Product of a and b:\", aDotb, \"\\n\")\n\n\n# Element Wise Product of a and b\nprint(\"Element Wise Product of a and b:\", np.multiply(a, b), \"\\n\")\n\n\n# Multiply each row of M by a (no for loop)\no = np.matlib.repmat(a, 3, 1) # Creates a 3x1 of [1, 2, 3]\n\nnew = np.multiply(o, m)\n\nprint(\"m * a:\", \"\\n\", new, \"\\n\")\n\n\nprint(np.sort(new))\n\n#-------------------------------------------------------------------\n\n# Part 2\n\n#-------------------------------------------------------------------\n\n\nprint(\"OpenCV\\n\")\n\n# Found that double precision = np.float64()\nimage1 = np.float64(cv2.imread('/Users/edu/github/ComputerVision/im/image1.jpg'))\nimage2 = np.float64(cv2.imread('/Users/edu/github/ComputerVision/im/image2.jpg'))\n\ncv2.imshow(\"image 1\",image1) # Display image\ncv2.imshow(\"image 2\",image2) # Display image\n\nim1 = cv2.normalize(image1, np.zeros((500, 500)), 0, 1, cv2.NORM_MINMAX)\nim2 = cv2.normalize(image2, np.zeros((500, 500)), 0, 1, cv2.NORM_MINMAX)\n\n# Cut the images in halves\ni1 = im1[:, :250]\ni2 = im2[:, 250:]\n\nhalves = np.concatenate((i1, i2), axis=1)\n\ncv2.imwrite('halves.jpg', halves)\n\n# Alternate rows, one and one\nnew = np.empty_like(im1) # same shape as the colour images\nfor row in range(len(im1)):\n if row % 2 == 1: # odd rows come from image 1\n new[row] = im1[row]\n else: # even rows come from image 2\n new[row] = im2[row]\n\n# the original call was missing the output filename; 'alternating.jpg' is a placeholder name\ncv2.imwrite('alternating.jpg', new)\n\n#-------------------------------------------------------------------\n\n# Part 3\n\n#-------------------------------------------------------------------\n\nfolder = os.listdir('/Users/edu/github/ComputerVision/George_W_Bush/')\n\n# to store average later\nblank = np.zeros((250, 250, 3))\nblank = np.float64(blank)\n\n# an array of a lot of arrays for the pictures\npics = np.array([np.array(io.imread('George_W_Bush/' + fname)) for fname in folder])\n# double precision like in exercise 2\nallpics = np.float64(pics)\n\n# We average all the pictures of bush\naverage = np.array(np.mean(allpics, 
axis=0))\n\nio.imsave('average.jpg', average)\n\n\nplt.show()\n","sub_path":"Gonzalez_Eduardo_ps1.py","file_name":"Gonzalez_Eduardo_ps1.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"167679556","text":"from lmfit import minimize, Parameters\nimport math\nimport numpy as np\nimport statsmodels.api as sm\n\n# x = np.linspace(0, 15, 10)\n# x = \n# x_ols = sm.add_constant(x)\n# y = range(0,10)\n\n# model = sm.OLS(y,x_ols)\n# results = model.fit()\n# print \"OLS: \", format(results.params[0], '.10f'), format(results.params[1], '.10f')\n\n\n\ncrdlist = []\nlat_map = None\nlng_map = None\n\nxxx_map = None\nzzz_map = None\n\ndef initcoords():\n global crdlist\n global lat_map,lng_map,xxx_map,zzz_map\n\n crdlist = []\n lat_map = None\n lng_map = None\n xxx_map = None\n zzz_map = None\n\n\n\ndef mapcoord(lat,lng,x,z):\n global crdlist\n crd = (lat,lng,x,z)\n crdlist.append(crd)\n\ndef calcmap():\n global lat_map,lng_map,xxx_map,zzz_map\n (lat,lng,xxx,zzz) = ([],[],[],[])\n for crd in crdlist:\n lat.append(crd[0])\n lng.append(crd[1])\n xxx.append(crd[2])\n zzz.append(crd[3])\n\n llstack = np.column_stack([lng,lat])\n xxx_ols = sm.add_constant(llstack)\n zzz_ols = sm.add_constant(llstack)\n xzstack = np.column_stack([zzz,xxx])\n lat_ols = sm.add_constant(xzstack)\n lng_ols = sm.add_constant(xzstack)\n\n lat_model = sm.OLS(lat,lat_ols)\n lng_model = sm.OLS(lng,lng_ols)\n xxx_model = sm.OLS(xxx,xxx_ols)\n zzz_model = sm.OLS(zzz,zzz_ols)\n\n lat_map = lat_model.fit()\n lng_map = lng_model.fit()\n xxx_map = xxx_model.fit()\n zzz_map = zzz_model.fit()\n\n latp = lat_map.params\n print(\"lat OLS: {} {} {} \".format(latp[0],latp[1],latp[2]))\n lngp = lng_map.params\n print(\"lng OLS: {} {} {} \".format(lngp[0],lngp[1],lngp[2]))\n xxxp = xxx_map.params\n print(\"xxx OLS: {} {} {} \".format(xxxp[0],xxxp[1],xxxp[2]))\n zzzp = zzz_map.params\n print(\"zzz OLS: {} {} {} \".format(zzzp[0],zzzp[1],zzzp[2]))\n\n\ndef ll2xz(lat,lng):\n global lat_map,lng_map,xxx_map,zzz_map\n #print(\"lat:{} lng:{}\".format(lat,lng))\n x = xxx_map.params[2]*lat + xxx_map.params[1]*lng + xxx_map.params[0]\n z = zzz_map.params[2]*lat + zzz_map.params[1]*lng + zzz_map.params[0]\n return (x,z)\n\n\ndef xz2ll(x,z):\n global lat_map,lng_map,xxx_map,zzz_map\n #print(\"x:{} z:{}\".format(x,z))\n lat = lat_map.params[2]*x + lat_map.params[1]*z + lat_map.params[0]\n lng = lng_map.params[2]*x + lng_map.params[1]*z + lng_map.params[0]\n return (lat,lng)\n\ndef esterrors():\n i = 1\n for crd in crdlist:\n (lat,lng,x,z) = crd\n (xn,zn) = ll2xz(lat,lng)\n (dx,dz) = (xn-x,zn-z)\n err = math.sqrt((dx*dx + dz*dz))\n print(\"{} err:{}\".format(i,err))\n i += 1\n\n\ndef initmsft():\n initcoords()\n mapcoord(47.640490, -122.133797, -149.1, 0.2)\n mapcoord(47.639079, -122.134960, 28.0, -31.4)\n mapcoord(47.638526, -122.134519, 75.4, 19.9)\n mapcoord(47.639368, -122.133926, -29.4, 30.8)\n mapcoord(47.641066, -122.136018, -155.44, -177.96)\n calcmap()\n\ndef initeb12():\n initcoords()\n mapcoord(49.993313, 8.678353, 0, 0) # EB12 orgin streetlamp\n mapcoord(49.993472, 8.677981, 18.45, 27.90) # EB12 doorway\n mapcoord(49.995560, 8.676101, 260.80, 167.7) # SW corner of Rewe\n mapcoord(49.995788, 8.676752, 287.25, 118.35) # SE corner of Rewe\n calcmap()\n\ndef initmsftdublin():\n initcoords()\n mapcoord(53.268396, -6.195296, -103.5, 75.2)\n mapcoord(53.269369, -6.196511, -12.4,-47.8)\n mapcoord(53.269212, -6.194816, -139.0, -27.5)\n 
calcmap()\n\n\nprint(\"\\n\\nMicrosoft Campus\")\ninitmsft()\n\n(x,z) = ll2xz(47.640490,-122.133797)\nprint(\"x:{} z:{}\".format(x,z))\n\n(lat,lng) = xz2ll(28.0, -31.4)\nprint(\"lat:{} lng:{}\".format(lat,lng))\n\nesterrors()\n\nprint(\"\\n\\nEb12\")\niniteb12()\nesterrors()\n\n(x,z) = ll2xz(49.993313, 8.678353)\nprint(\"x:{} z:{}\".format(x,z))\n(x,z) = ll2xz(49.993472, 8.677981)\nprint(\"x:{} z:{}\".format(x,z))\n(x,z) = ll2xz(49.995560, 8.676101)\nprint(\"x:{} z:{}\".format(x,z))\n(x,z) = ll2xz(49.995788, 8.676752)\nprint(\"x:{} z:{}\".format(x,z))\n\n\n(lat,lng) = xz2ll(0,0)\nprint(\"lat:{} lng:{}\".format(lat,lng))\n\n\n\n\n# define objective function: returns the array to be minimized\n# def fcn2min(params, x, data):\n# a = params['a'].value\n# b = params['b'].value\n\n# model = a + b * x\n# return model - data\n\n# for i in range(-2,3):\n# # create a set of Parameters\n# params = Parameters()\n# params.add('a', value= i)\n# params.add('b', value= 20)\n\n# # do fit, here with leastsq model\n# result = minimize(fcn2min, params, args=(x, y))\n# print \"lmfit: \",result.params.values","sub_path":"old/ols1.py","file_name":"ols1.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32912322","text":"'''\nCreated on 07-Sep-2018\n\n@author: skanchi\n'''\n\nimport numpy as np\nimport pandas as pd\nimport psycopg2 as pg\nimport pandas.io.sql as psql\n\nfrom config import config\n\ndef task1(conn, engine, conn1):\n print(\"Task1 initiated\")\n \n #Get the last offset populated\n offset_cmd = \"select count(id) from public.transactions\"\n cur = conn1.cursor()\n cur.execute(offset_cmd)\n offset = int(cur.fetchone()[0])\n \n chunk_size = 40000\n dfs = []\n \n #Reading the table in chunks of 40,000, storing each chunk in a dataframe, and writing it to the new destination table\n #Writing in batches helps it perform better\n while True:\n sql = \"SELECT * FROM public.transactions offset %d limit %d\" % (offset, chunk_size) \n \n dataframe = pd.read_sql_query(sql, conn)\n dfs.append(dataframe)\n psql.to_sql(dataframe, 'transactions', engine, schema='public', if_exists='append', index=False)\n# dataframe.to_sql(\"transactions\", engine, \"public\", if_exists='append', index=False)\n \n offset += chunk_size\n print(\"%d rows done\\r\" % offset) \n if len(dataframe) < chunk_size:\n break\n \n ###Tried this trigger to handle any new transactions/updates that arrive after the complete read is done, \n ###It works locally, but not against this server since I have only reader access to the source db \n #trigger(conn) \n print(\"Task1 completed\") \n return pd.concat(dfs) \n\ndef test(conn):\n offset = 0\n chunk_size = 50000\n dfs = []\n while True:\n sql = \"SELECT * FROM public.transactions offset %d limit %d\" % (offset, chunk_size) \n \n dataframe = pd.read_sql_query(sql, conn)\n dfs.append(dataframe)\n \n offset += chunk_size\n if len(dataframe) < chunk_size:\n break\n \n return pd.concat(dfs) \n \ndef task2(full_df, engine): \n \n print(\"Task 2 initiated\")\n \n #Filtering transactions that are certified by user and are not in status BLOCKED\n #If certified-by and last-updated time are the same, then it is certified by the user, since it cannot be modified once the user certifies it\n filtered_df = full_df[(full_df.status != 'BLOCKED') & (full_df.certified_by_user == full_df.updated)]\n \n #Group by user id and sum on amount to calculate the balance for a given user\n rows = filtered_df.groupby('user_id')\n newdf = 
rows['amount'].agg(np.sum)\n \n #Writing into another table as asked for task 2.\n #Creates a table and writes the values; if the table already exists, drops it and writes it again\n psql.to_sql(newdf, 'user_balances', engine, schema='public', if_exists='replace')\n \n print(\"Task 2 completed\")\n \ndef trigger(conn): \n \n \n cur = conn.cursor()\n \n params = config(\"destination\")\n \n create_extension=\"\"\"\n CREATE EXTENSION postgres_fdw;\n \"\"\"\n \n # note: postgres_fdw normally takes user/password via CREATE USER MAPPING;\n # they are kept as server options here to match the original attempt\n create_server=\"\"\"\n CREATE SERVER rome_server \n FOREIGN DATA WRAPPER postgres_fdw\n OPTIONS (host '%s', user '%s', password '%s', port '%s', dbname '%s');\n \"\"\" % (params['host'], params['user'],params['password'],params['port'],params['database'])\n \n create_foreign_table=\"\"\"\n CREATE FOREIGN TABLE foreign_rome_table (\n id uuid primary key,\n user_id uuid not null,\n certified_by_user bigint,\n amount numeric(18,2) not null,\n status text,\n created bigint not null,\n updated bigint not null\n )\n SERVER rome_server\n OPTIONS (schema_name 'public', table_name 'transactions');\n \"\"\"\n \n trigger_function=\"\"\"\n CREATE OR REPLACE FUNCTION rec_insert_to_rome_table()\n RETURNS trigger AS\n $$\n BEGIN\n INSERT INTO foreign_rome_table(id, user_id, certified_by_user, amount, status, created, updated)\n VALUES(NEW.id, NEW.user_id, NEW.certified_by_user, NEW.amount, NEW.status, NEW.created, NEW.updated);\n RETURN NEW;\n END;\n $$\n LANGUAGE 'plpgsql';\"\"\"\n \n create_trigger=\"\"\"\n CREATE TRIGGER ins_same_rec_to_remote\n AFTER INSERT\n ON public.transactions\n FOR EACH ROW\n EXECUTE PROCEDURE rec_insert_to_rome_table();\n \"\"\"\n \n cur.execute(create_extension)\n cur.execute(create_server)\n cur.execute(create_foreign_table)\n cur.execute(trigger_function) \n cur.execute(create_trigger)\n \n print(\"trigger executed\")\n\ndef task3(conn):\n print(\"Task 3 initiated\")\n cur = conn.cursor()\n \n #index for count of all transactions that are blocked\n index_on_status = \"CREATE INDEX index_on_status ON public.transactions (status)\"\n cur.execute(index_on_status)\n \n #Index to get last 10 certified transactions of any given user.\n index_on_transaction = \"CREATE INDEX index_on_transaction ON public.transactions (user_id, certified_by_user, updated)\"\n cur.execute(index_on_transaction)\n \n #Index to get transactions from yesterday with minimum 1000 euro\n index_transaction_timed = \"CREATE INDEX index_transaction_timed ON public.transactions (certified_by_user, amount, created, updated)\"\n cur.execute(index_transaction_timed)\n \n #Sample queries\n #3.a. Count of all transactions that are blocked.\n sql = \"select count(*) from public.transactions where status='BLOCKED'\"\n cur.execute(sql)\n \n #3.b. Get last 10 certified transactions of user '028797ea-8766-4ed1-8440-3f9e10b9f9c0'\n sql = \"select * from public.transactions where user_id='028797ea-8766-4ed1-8440-3f9e10b9f9c0' and certified_by_user=updated order by certified_by_user desc limit 10\"\n cur.execute(sql)\n \n #3.c. 
Get all transactions from yesterday with absolute value at least 1000 Euro that took longer than 5 minutes to certify after creation.\n sql= \"select * from public.transactions where TO_TIMESTAMP(created / 1000) > (current_timestamp - interval '1 day') and abs(amount)>1000 and (certified_by_user-created) > 5*60*1000\"\n cur.execute(sql)\n print(\"Task 3 completed\")","sub_path":"src/com/n26/assignment/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"88499501","text":"import numpy as np\nimport json\n\nimport argparse\nimport streamlit as st\n\nimport os\nimport glob\n\nimport time\nfrom collections import defaultdict\nimport plotly.express as px\n\nimport plotly.graph_objects as go\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(MyEncoder, self).default(obj)\n\ndef run_and_measure(func, arg, n_times=1):\n times = []\n outputs = []\n for i in range(0, n_times):\n start = time.time()\n result = func(*arg)\n end = time.time()\n times.append(end - start)\n outputs.append(result)\n return outputs, times\n\ndef compare_function_performances(funcs, args):\n st.markdown(\"## Performance\")\n process_times = defaultdict(list)\n outputs = []\n\n for func in funcs:\n func_name_base = func.__name__\n func_name = func_name_base\n i = 1\n while func_name in process_times:\n func_name = func_name_base + \"_\" + str(i)\n for arg in args:\n start_time = time.time()\n output = func(*arg)\n process_times[func_name].append(time.time() - start_time)\n outputs.append(output)\n\n fig = go.Figure()\n for func_name, times in process_times.items():\n fig.add_trace(go.Box(y=times, name=func_name))\n fig.update_layout(\n yaxis_title='Running Time[ms]',\n )\n st.write(fig)\n\ndef write_as_json(obj):\n json_str = json.dumps(obj, indent=4, cls = MyEncoder)\n st.markdown(f\"```json\\n{json_str}\\n```\")\n\ndef inspect_function(func, arg, n_times=1):\n st.markdown(f\"## **Inspect Function**/{func.__name__}\")\n outputs, times = run_and_measure(func, arg, n_times=n_times)\n st.write(f\"**Input: ** {arg}\")\n st.write(f\"**Running Time: **{np.mean(times)}\")\n st.write(f\"**Output Type: ** {type(outputs[0])}\")\n st.write(\"**Output**\")\n write_as_json(outputs[0])\n return outputs[0]","sub_path":"pikapi/utils/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"94119687","text":"\"\"\"TenSEAL is a library for doing homomorphic encryption operation on tensors.\n\"\"\"\n\ntry:\n import _tenseal_cpp as _ts_cpp\nexcept ImportError:\n import tenseal._tenseal_cpp as _ts_cpp\n\nfrom tenseal.tensors import bfv_vector, bfv_vector_from, ckks_vector, ckks_vector_from\nfrom tenseal.version import __version__\n\n\nSCHEME_TYPE = _ts_cpp.SCHEME_TYPE\nPublicKey = _ts_cpp.PublicKey\nSecretKey = _ts_cpp.SecretKey\nRelinKeys = _ts_cpp.RelinKeys\nGaloisKeys = _ts_cpp.GaloisKeys\n\n# Vectors\nBFVVector = _ts_cpp.BFVVector\nCKKSVector = _ts_cpp.CKKSVector\n\n# utils\nim2col_encoding = _ts_cpp.im2col_encoding\nenc_matmul_encoding = _ts_cpp.enc_matmul_encoding\n\n\ndef context(\n scheme, poly_modulus_degree, plain_modulus=None, coeff_mod_bit_sizes=None, 
n_threads=None\n):\n \"\"\"Construct a context that holds keys and parameters needed for operating\n encrypted tensors using either BFV or CKKS scheme.\n\n Args:\n scheme: define the scheme to be used, either SCHEME_TYPE.BFV or SCHEME_TYPE.CKKS.\n poly_modulus_degree (int): The degree of the polynomial modulus, must be a power of two.\n plain_modulus (int): The plaintext modulus. Should not be passed when the scheme is CKKS.\n coeff_mod_bit_sizes (list of int): List of bit size for each coefficient modulus.\n Can be an empty list for BFV; a default value will be given.\n\n Returns:\n A TenSEALContext object.\n \"\"\"\n if scheme == SCHEME_TYPE.BFV:\n if plain_modulus is None:\n raise ValueError(\"plain_modulus must be provided\")\n if coeff_mod_bit_sizes is None:\n coeff_mod_bit_sizes = []\n\n elif scheme == SCHEME_TYPE.CKKS:\n # must be int, but the value doesn't matter for ckks\n plain_modulus = 0\n if coeff_mod_bit_sizes is None:\n raise ValueError(\"coeff_mod_bit_sizes must be provided\")\n\n else:\n raise ValueError(\"Invalid scheme type, use either SCHEME_TYPE.BFV or SCHEME_TYPE.CKKS\")\n\n # We can't pass None here, everything should be set prior to this call\n if isinstance(n_threads, int) and n_threads > 0:\n return _ts_cpp.TenSEALContext.new(\n scheme, poly_modulus_degree, plain_modulus, coeff_mod_bit_sizes, n_threads\n )\n\n return _ts_cpp.TenSEALContext.new(\n scheme, poly_modulus_degree, plain_modulus, coeff_mod_bit_sizes\n )\n\n\ndef context_from(buff, n_threads=None):\n \"\"\"Construct a context from a serialized buffer.\n\n Args:\n buff: bytes buffer from the original context.\n\n Returns:\n A TenSEALContext object.\n \"\"\"\n if n_threads:\n return _ts_cpp.TenSEALContext.deserialize(buff, n_threads)\n return _ts_cpp.TenSEALContext.deserialize(buff)
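\n\n# A minimal usage sketch (the parameter values are illustrative only; note\n# that, depending on the TenSEAL version, CKKS also needs a scale, e.g. a\n# `global_scale` attribute on the context, before encrypting):\n#\n# import tenseal as ts\n# ctx = ts.context(ts.SCHEME_TYPE.CKKS, 8192, coeff_mod_bit_sizes=[60, 40, 40, 60])\n# enc = ts.ckks_vector(ctx, [1.0, 2.0, 3.0])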
\n\n\n__all__ = [\n \"bfv_vector\",\n \"bfv_vector_from\",\n \"ckks_vector\",\n \"ckks_vector_from\",\n \"context\",\n \"context_from\",\n \"im2col_encoding\",\n \"__version__\",\n]\n","sub_path":"tenseal/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"379072702","text":"import numpy as np\nimport pandas as pd\nimport os\nimport librosa\nfrom resemblyzer import normalize_volume\nimport scipy\n\ndef extract_mfccs_df(y, sr=22050, n_mfccs=13):\n\t\"\"\"\n\tExtracts `n_mfccs` mel-frequency cepstral coefficients.\n\t\"\"\"\n\tmfccs = librosa.feature.mfcc(y, sr=sr, n_mfcc=n_mfccs)\n\tmfccs_mean = np.mean(mfccs, axis=1)\n\tmfccs_std = np.std(mfccs, axis=1)\n\tmfccs_df = pd.DataFrame()\n\t# create the columns first, then fill the single row below\n\tfor i in range(n_mfccs):\n\t\tmfccs_df['mfccs ' + str(i) + ' mean'] = mfccs_mean[i]\n\t\tmfccs_df['mfccs ' + str(i) + ' std'] = mfccs_std[i]\n\tmfccs_df.loc[0] = np.concatenate((mfccs_mean, mfccs_std), axis=0)\n\treturn mfccs_df\n\ndef extract_f0_df(y):\n\t\"\"\"\n\tExtracts the mean f0 value from the wav chunk\n\t\"\"\"\n\tf_0 = librosa.pyin(y, librosa.note_to_hz('C2'), librosa.note_to_hz('C7'))[0]\n\tmean_f_0 = np.nanmean(f_0)\n\tif np.isnan(mean_f_0):\n\t\tmean_f_0 = 0\n\t# build a one-row frame (assigning a bare scalar to an empty frame would create no rows)\n\tf0_df = pd.DataFrame({'f0': [mean_f_0]})\n\treturn f0_df\n\ndef extract_rms_df(y):\n\t\"\"\"\n\tExtracts the Root-Mean-Square value for each frame in y\n\t\"\"\"\n\trms = librosa.feature.rms(y)\n\trms_mean = np.mean(rms)\n\trms_std = np.std(rms)\n\trms_skew = scipy.stats.skew(rms, axis=1)[0]\n\n\trms_df = pd.DataFrame()\n\trms_df['rms mean'] = rms_mean\n\trms_df['rms std'] = rms_std\n\trms_df['rms skew'] = rms_skew\n\trms_df.loc[0] = np.array([rms_mean, rms_std, rms_skew])\n\n\treturn rms_df\n\ndef extract_zcr_df(y):\n\t\"\"\"\n\tExtracts the zero crossing rate of y\n\t\"\"\"\n\tzrate = librosa.feature.zero_crossing_rate(y)\n\tzrate_mean = np.mean(zrate)\n\tzrate_std = np.std(zrate)\n\tzrate_skew = scipy.stats.skew(zrate, axis=1)[0]\n\n\tzrate_df = pd.DataFrame()\n\tzrate_df['zrate mean'] = 0\n\tzrate_df['zrate std'] = 0\n\tzrate_df['zrate skew'] = 0\n\tzrate_df.loc[0] = [zrate_mean, zrate_std, zrate_skew]\n\n\treturn zrate_df\n\ndef extract_chroma_df(y, sr):\n\t\"\"\"\n\tExtracts the Chroma Energy Normalized values of y\n\t\"\"\"\n\tchroma_cens = librosa.feature.chroma_cens(y, sr)\n\tchroma_cens_mean = np.mean(chroma_cens, axis=1)\n\tchroma_cens_std = np.std(chroma_cens, axis=1)\n\n\tchroma_stft = librosa.feature.chroma_stft(y, sr)\n\tchroma_stft_mean = np.mean(chroma_stft, axis=1)\n\tchroma_stft_std = np.std(chroma_stft, axis=1)\n\n\tchroma_cqt = librosa.feature.chroma_cqt(y, sr)\n\tchroma_cqt_mean = np.mean(chroma_cqt, axis=1)\n\tchroma_cqt_std = np.std(chroma_cqt, axis=1)\n\n\tchroma_df = pd.DataFrame()\n\tfor i in range(12):\n\t\tchroma_df['chroma_cens ' + str(i) + ' mean'] = chroma_cens_mean[i]\n\t\tchroma_df['chroma_cens ' + str(i) + ' std'] = chroma_cens_std[i]\n\t\tchroma_df['chroma_stft ' + str(i) + ' mean'] = chroma_stft_mean[i]\n\t\tchroma_df['chroma_stft ' + str(i) + ' std'] = chroma_stft_std[i]\n\t\tchroma_df['chroma_cqt ' + str(i) + ' mean'] = chroma_cqt_mean[i]\n\t\tchroma_df['chroma_cqt ' + str(i) + ' std'] = chroma_cqt_std[i]\n\t\n\tchroma_df.loc[0] = np.concatenate((chroma_cens_mean, chroma_cens_std, chroma_stft_mean, chroma_stft_std, chroma_cqt_mean, chroma_cqt_std), axis=0)\n\n\treturn chroma_df\n\ndef extract_spectral_df(y, sr):\n\tcent = librosa.feature.spectral_centroid(y, sr=sr)\n\tflatness = librosa.feature.spectral_flatness(y)\n\tcontrast = librosa.feature.spectral_contrast(y, sr=sr)\n\trolloff = librosa.feature.spectral_rolloff(y, sr=sr)\n\tbandwidth = librosa.feature.spectral_bandwidth(y, sr=sr)\n\tmel = librosa.feature.melspectrogram(y)\n\n\tband_mean = np.mean(bandwidth)\n\tband_std = np.std(bandwidth)\n\tband_skew = scipy.stats.skew(bandwidth, axis=1)[0]\n\n\t# spectral centroids values\n\tcent_mean = np.mean(cent)\n\tcent_std = np.std(cent)\n\tcent_skew = scipy.stats.skew(cent, axis=1)[0]\n\n\t# mel spectrogram values\n\tmel_mean = np.mean(mel)\n\tmel_std = np.std(mel)\n\tmel_skew = scipy.stats.skew(mel, 
axis=1)[0]\n\n\t# spectral contrasts values\n\tcontrast_mean = np.mean(contrast, axis = 1)\n\tcontrast_std = np.std(contrast, axis = 1)\n\n\t# spectral rolloff points values\n\trolloff_mean = np.mean(rolloff)\n\trolloff_std = np.std(rolloff)\n\trolloff_skew = scipy.stats.skew(rolloff, axis = 1)[0]\n\n\t# spectral flatness values\n\tflat_mean = np.mean(flatness)\n\tflat_std = np.std(flatness)\n\tflat_skew = scipy.stats.skew(flatness, axis = 1)[0]\n\n\tspectral_df = pd.DataFrame()\n\tcollist = ['cent mean','cent std','cent skew',\n\t\t\t\t'flat mean', 'flat std', 'flat skew',\n\t\t\t\t'rolloff mean', 'rolloff std', 'rolloff skew',\n\t\t\t\t'mel mean', 'mel std', 'mel skew',\n\t\t\t\t'band mean', 'band std', 'band skew']\n\n\tfor c in collist:\n\t\tspectral_df[c] = 0\n\tdata = np.concatenate((\n\t\t[cent_mean, cent_std, cent_skew], \n\t\t[flat_mean, flat_std, flat_skew],\n\t\t[rolloff_mean, rolloff_std, rolloff_skew],\n\t\t[mel_mean, mel_std, mel_skew],\n\t\t[band_mean, band_std, band_skew]),\n\t\taxis = 0)\n\tspectral_df.loc[0] = data\n\n\treturn spectral_df\n\ndef extract_tonnetz_df(y, sr):\n\ttonnetz = librosa.feature.tonnetz(y, sr)\n\ttonnetz_mean = np.mean(tonnetz, axis=1)\n\ttonnetz_std = np.std(tonnetz, axis=1)\n\ttonnetz_df = pd.DataFrame()\n\tfor i in range(6):\n\t\ttonnetz_df['tonnetz ' + str(i) + ' mean'] = tonnetz_mean[i]\n\t\ttonnetz_df['tonnetz ' + str(i) + ' std'] = tonnetz_std[i]\n\ttonnetz_df.loc[0] = np.concatenate((tonnetz_mean, tonnetz_std), axis=0)\n\treturn tonnetz_df\n\ndef extract_features_from_chunk(wav_chunk):\n\t\"\"\"\n\twav_chunk: a numpy array where all the entries correspond to the waveform values\n\t\tof an original \".wav\" file\n\treturn - features: a set of features extracted from the wav_chunk\n\t\"\"\"\n\tsr = 22050\n\tif wav_chunk.shape[0] == 0:\n\t\treturn pd.DataFrame()\n\tmfccs_df = extract_mfccs_df(wav_chunk)\n\trms_df = extract_rms_df(wav_chunk)\n\tzcr_df = extract_zcr_df(wav_chunk)\n\tf0_df = extract_f0_df(wav_chunk)\n\tspectral_df = extract_spectral_df(wav_chunk, sr)\n\tchroma_df = extract_chroma_df(wav_chunk, sr)\n\ttonnetz_df = extract_tonnetz_df(wav_chunk, sr)\n\tfeatures_df = pd.concat((mfccs_df, rms_df, zcr_df, spectral_df, chroma_df, f0_df, tonnetz_df), axis=1)\n\treturn features_df.replace(np.nan, 0)\n\ndef extract_features_from_csv(csv_file):\n\t\"\"\"\n\tcsv_file: a \".csv\" file containing a numpy array with waveform values for the specified chunk,\n\t\tas well as the last entry, which is a binary value marking the engagement/disengagement for that chunk\n\treturns - \n\tfeatures_array: a numpy array of extracted features for the specified chunk\n\tlabel: 0 for disengaged, 1 for engaged\n\t\"\"\"\n\twhole_array = np.loadtxt(csv_file, delimiter=\",\")\n\twav_chunk = whole_array[:-1]\n\tlabel = whole_array[-1]\n\tfeatures_df = extract_features_from_chunk(wav_chunk)\n\tif not features_df.empty:\n\t\tfeatures_df['Engaged'] = label\n\treturn features_df\n\ndef create_all_features_df(csv_dir):\n\t\"\"\"\n\tcsv_dir: the directory of \".csv\" files to loop through and extract features, which will then be used in the classifier\n\treturns - \n\tfeatures: a numpy array concatenating all of the extracted features\n\tlabels: a numpy array concatenating all of the labels for the chunks\n\t\"\"\"\n\tall_features = []\n\tfor file in os.listdir(csv_dir):\n\t\tprint(\"extracting features for\", file)\n\t\tfile_path = os.path.join(csv_dir, file)\n\t\tfeatures_df = extract_features_from_csv(file_path)\n\t\tif 
features_df.empty:\n\t\t\tcontinue\n\t\tall_features.append(features_df)\n\treturn pd.concat(all_features)","sub_path":"src/feature_extractors.py","file_name":"feature_extractors.py","file_ext":"py","file_size_in_byte":6928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"587137407","text":"import os\t# os.path is used in runs() below\nimport sys\nfrom abstract_step import *\nimport misc\nimport process_pool\nimport yaml\n\n\nclass PicardMarkDuplicates(AbstractStep):\n    '''\n    Documentation:\n    http://picard.sourceforge.net/command-line-overview.shtml#MarkDuplicates\n\n    Examines aligned records in the supplied SAM or BAM file to locate duplicate\n    molecules. All records are then written to the output file with the\n    duplicate records flagged.\n    '''\n\n    def __init__(self, pipeline):\n        super(PicardMarkDuplicates, self).__init__(pipeline)\n\n        self.set_cores(12)\n        \n        self.add_connection('in/alignments')\n        self.add_connection('out/alignments')\n        self.add_connection('out/metrics')\n        \n        self.require_tool('picard-tools')\n\n        self.add_option('PROGRAM_RECORD_ID', str, optional = True)\n        self.add_option('PROGRAM_GROUP_VERSION', str, optional = True)\n        self.add_option('PROGRAM_GROUP_COMMAND_LINE', str, optional = True)\n        self.add_option('PROGRAM_GROUP_NAME', str, optional = True)\n        self.add_option('COMMENT', str, optional = True)\n\n        self.add_option('ASSUME_SORTED', bool, optional = True)\n        self.add_option('MAX_FILE_HANDLES', int, optional = True)\n        self.add_option('SORTING_COLLECTION_SIZE_RATIO', float, optional = True)\n        self.add_option('READ_NAME_REGEX', str, optional = True)\n        self.add_option('OPTICAL_DUPLICATE_PIXEL_DISTANCE', int, optional = True)\n\n    def runs(self, run_ids_connections_files):\n        \n        for run_id in run_ids_connections_files.keys():\n\n            with self.declare_run(run_id) as run:\n                input_paths = run_ids_connections_files[run_id]['in/alignments']\n\n                if input_paths == [None]:\n                    run.add_empty_output_connection(\"alignments\")\n                elif len(input_paths) != 1:\n                    raise StandardError(\"Expected exactly one alignments file.\")\n                elif os.path.splitext(input_paths[0])[1] not in ['.sam', '.bam']:\n                    raise StandardError(\n                        \"The file %s seems not to be a SAM or BAM file. 
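# A minimal, self-contained sketch (not part of feature_extractors.py or the Picard step
# below) of the one-row-per-chunk pattern the extractors above build: summary statistics
# become columns whose order matches the concatenated data row. The sine test signal and
# the column names here are illustrative assumptions only.
import librosa
import numpy as np
import pandas as pd

sr = 22050
t = np.linspace(0, 1, sr, endpoint=False)
y = (0.5 * np.sin(2 * np.pi * 440.0 * t)).astype(np.float32)   # one second of A4

mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)            # shape (13, n_frames)
row = np.concatenate((np.mean(mfccs, axis=1), np.std(mfccs, axis=1)))
cols = ['mfccs %d mean' % i for i in range(13)] + ['mfccs %d std' % i for i in range(13)]
demo_df = pd.DataFrame([row], columns=cols)                    # one feature row per chunk
print(demo_df.shape)                                           # (1, 26)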
At \"\n \"least the suffix is wrong.\" % input_paths[0]\n )\n else:\n with run.new_exec_group() as exec_group:\n alignments = run.add_output_file(\n 'alignments', '%s-rm-dup.bam' % run_id, input_paths)\n metrics = run.add_output_file(\n \"metrics\", '%s-rm-dup-metrics.txt' % run_id,\n input_paths)\n mark_duplicates = [\n self.get_tool('picard-tools'), 'MarkDuplicates',\n 'INPUT=%s' % input_paths[0],\n 'OUTPUT=%s' % alignments,\n 'METRICS_FILE=%s' % metrics,\n 'REMOVE_DUPLICATES=true' \n ]\n exec_group.add_command(mark_duplicates)\n","sub_path":"include/steps/picard_markduplicates.py","file_name":"picard_markduplicates.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"599037762","text":"#!/usr/bin/env python3\nimport time\nfrom decimal import Decimal\nfrom hificon import Target\n\n\nPROMPT = \"Enter new volume: \"\n\n\ndef on_feature_change(key, value, prev_value):\n name = target.features[key].name\n\n if prev_value is None: # initial call\n print(\"Initially setting %s\"%name)\n else:\n print(\"Changed %s to %s.\"%(name, value))\n print(PROMPT)\n \n\ndef print_volume():\n print(\"Current volume: %.1f\"%target.volume)\n print(PROMPT)\n\n\nif __name__ == \"__main__\":\n #target = Target()\n ## for testing:\n target = Target(\"emulator:denon\")\n\n target.features.volume.bind(on_set = lambda: print(\"Initially setting volume\"))\n target.bind(on_feature_change = on_feature_change)\n with target:\n target.connect()\n target.schedule(print_volume, requires=(\"volume\",)) # this function needs target.volume -> schedule call\n while True:\n print(PROMPT)\n newvol = input()\n if newvol: target.volume = Decimal(newvol)\n\n","sub_path":"examples/custom_client.py","file_name":"custom_client.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"394130099","text":"\"\"\"\r\nBy: Smayan Das\r\n Jayant Choudhary\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport mne\r\nfrom mne.time_frequency import psd_welch\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import cohen_kappa_score\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import FunctionTransformer\r\nfrom sklearn import model_selection\r\n\r\n# The Sleep Physionet dataset is annotated using `8 labels `:\r\n# Wake (W), Stage 1, Stage 2, Stage 3, Stage 4 corresponding to the range from\r\n# light sleep to deep sleep, REM sleep (R) where REM is the abbreviation for\r\n# Rapid Eye Movement sleep, movement (M), and Stage (?) for any none scored\r\n# segment.\r\n#\r\n# We will work only with 5 stages: Wake (W), Stage 1, Stage 2, Stage 3/4, and\r\n# REM sleep (R). 
To do so, we use the ``event_id`` parameter in\r\n# :func:`mne.events_from_annotations` to select which events we are\r\n# interested in and to associate an event identifier with each of them.\r\n\r\n\r\n#Setting the channel types : We have only used the EEG(fpz-pz) and EOG(eog_horizontal) channels\r\nmapping = {'EOG horizontal': 'misc',\r\n           'Resp oro-nasal': 'misc',\r\n           'EMG submental': 'misc',\r\n           'Temp rectal': 'misc',\r\n           'Event marker': 'misc'}\r\n\r\n# Mapping the Annotations to event_ids\r\nannotation_desc_2_event_id = {'Sleep stage W': 1,\r\n                              'Sleep stage 1': 2,\r\n                              'Sleep stage 2': 3,\r\n                              'Sleep stage 3': 4,\r\n                              'Sleep stage 4': 4,\r\n                              'Sleep stage R': 5}\r\n\r\n# Creating a new event_id that unifies stages 3 and 4 according to AASM standards\r\nevent_id = {'Sleep stage W': 1,\r\n            'Sleep stage 1': 2,\r\n            'Sleep stage 2': 3,\r\n            'Sleep stage 3/4': 4,\r\n            'Sleep stage R': 5}\r\n\r\n# Using Pandas, the filenames are extracted from which the data processing starts.\r\ndf = pd.read_csv(\"data_records_sc.csv\")\r\ndf.columns=[\"file_name\"]\r\nrawlist=list(df.file_name)\r\n\r\nitems = len(rawlist)\r\nprint(items)\r\n\r\n#Creating an empty list to store the training epochs data\r\nepochs_data_train = []\r\n\r\nfor item in range(0,240,2):\r\n    \"\"\"\r\n    The below operations read the edf PSG files, add the annotations via event markers to the raw edf\r\n    \"\"\"\r\n    raw = mne.io.read_raw_edf(f\"{rawlist[item]}\")\r\n    annot = mne.read_annotations(f\"{rawlist[item + 1]}\")\r\n    raw.set_annotations(annot, emit_warning=False)\r\n    raw.set_channel_types(mapping)\r\n    # raw.resample(1)\r\n    # raw.plot(duration=60, scalings='auto')\r\n    # print(raw)\r\n    # print(raw.info)\r\n    # print(annot)\r\n    events, _ = mne.events_from_annotations(\r\n        raw, event_id=annotation_desc_2_event_id, chunk_duration=30.)\r\n\r\n    # Create Epochs from the data based on the events found in the annotations\r\n    tmax = 30. - 1. / raw.info['sfreq'] # tmax is included\r\n    # Try Except method was used as some files were showing errors while getting parsed by MNE and we have ignored them for now.\r\n    try:\r\n        epochs = mne.Epochs(raw=raw, events=events,event_id=event_id, tmin=0., tmax=tmax, baseline=None)\r\n    except:\r\n        continue\r\n    # It was observed that there were no bad annotations in the whole dataset so we commented out the following line\r\n    # epochs.drop_bad()\r\n    print(len(epochs))\r\n    #Adding the epoch to the list of training epochs\r\n    epochs_data_train.append(epochs)\r\n    print(len(epochs_data_train))\r\n\r\n#This function concatenates all the training epochs as metadata without any explicit markers.\r\nepochs_total_train = mne.concatenate_epochs(epochs_list=epochs_data_train)\r\n\r\n# The above method for creating Training data is followed as it is for creating the Test data\r\n\r\n#Creating an empty list to store the test epochs.\r\n\r\nepochs_data_test = []\r\n\r\n# The indices 2 and 30 can be randomized which we are yet to implement.\r\nfor item in range(240,300,2):\r\n    raw = mne.io.read_raw_edf(f\"{rawlist[item]}\")\r\n    annot = mne.read_annotations(f\"{rawlist[item + 1]}\")\r\n    raw.set_annotations(annot, emit_warning=False)\r\n    raw.set_channel_types(mapping)\r\n    # raw.resample(1)\r\n    # raw.plot(duration=60, scalings='auto')\r\n    # print(raw)\r\n    # print(raw.info)\r\n    # print(annot)\r\n    events, _ = mne.events_from_annotations(\r\n        raw, event_id=annotation_desc_2_event_id, chunk_duration=30.)\r\n\r\n    tmax = 30. - 1. 
/ raw.info['sfreq'] # tmax is included\r\n\r\n    try:\r\n        epochs = mne.Epochs(raw=raw, events=events,event_id=event_id, tmin=0., tmax=tmax, baseline=None)\r\n    except:\r\n        continue\r\n\r\n    # epochs.drop_bad()\r\n\r\n    epochs_data_test.append(epochs)\r\n    print(len(epochs_data_test))\r\n\r\n#This function concatenates all the test epochs as metadata without any explicit markers.\r\nepochs_total_test = mne.concatenate_epochs(epochs_list=epochs_data_test)\r\n# print(epochs_total)\r\n# print(\"\\n\\n\\n\\n\\n\")\r\n# print(epochs_data)\r\n# print(type(epochs_data))\r\n# print(len(epochs_data))\r\n# print(type(epochs_set))\r\n# print(epochs_set)\r\n\r\ndef eeg_power_band(epochs):\r\n    \"\"\"EEG relative power band feature extraction.\r\n\r\n    This function takes an ``mne.Epochs`` object and creates EEG features based\r\n    on relative power in specific frequency bands that are compatible with\r\n    scikit-learn.\r\n\r\n    Parameters\r\n    ----------\r\n    epochs : Epochs\r\n        The data.\r\n\r\n    Returns\r\n    -------\r\n    X : numpy array of shape [n_samples, 5]\r\n        Transformed data.\r\n    \"\"\"\r\n    # specific frequency bands\r\n    FREQ_BANDS = {\"delta\": [0.5, 4.5],\r\n                  \"theta\": [4.5, 8.5],\r\n                  \"alpha\": [8.5, 11.5],\r\n                  \"sigma\": [11.5, 15.5],\r\n                  \"beta\": [15.5, 30]}\r\n\r\n    psds, freqs = psd_welch(epochs, picks='eeg', fmin=0.5, fmax=30.)\r\n    # Normalize the PSDs\r\n    psds /= np.sum(psds, axis=-1, keepdims=True)\r\n\r\n    X = []\r\n    for fmin, fmax in FREQ_BANDS.values():\r\n        psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].mean(axis=-1)\r\n        X.append(psds_band.reshape(len(psds), -1))\r\n\r\n    return np.concatenate(X, axis=1)\r\n\r\n\r\n# Multiclass Classifications using Function Transformer.\r\n\r\n\"\"\"\r\nScikit-learn pipeline composes an estimator as a sequence of transforms\r\nand a final estimator, while the FunctionTransformer converts a Python\r\nfunction into an estimator-compatible object. 
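# A self-contained sketch (synthetic data, not the EEG pipeline below) of the same
# FunctionTransformer trick: any plain function that maps raw input to a feature matrix
# becomes a pipeline step that the final classifier can consume. The toy feature function
# and random data here are assumptions for illustration.
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.neighbors import KNeighborsClassifier

def toy_power_band(X):
    # stand-in feature extractor: per-row mean and standard deviation
    return np.column_stack([X.mean(axis=1), X.std(axis=1)])

rng = np.random.RandomState(0)
X = rng.randn(100, 50)
y = (X.mean(axis=1) > 0).astype(int)

toy_pipe = make_pipeline(FunctionTransformer(toy_power_band, validate=False),
                         KNeighborsClassifier())
toy_pipe.fit(X, y)
print(toy_pipe.score(X, y))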
In this manner a\r\nscikit-learn estimator is created that takes :class:`mne.Epochs` using\r\n`eeg_power_band` function.\r\n\"\"\"\r\n\r\npipe = make_pipeline(FunctionTransformer(eeg_power_band, validate=False),\r\n                     KNeighborsClassifier())\r\n\r\n\r\ny_train = epochs_total_train.events[:, 2]\r\npipe.fit(epochs_total_train, y_train)\r\n\r\ny_pred_train = pipe.predict(epochs_total_train)\r\ny_pred_test = pipe.predict(epochs_total_test)\r\ny_test = epochs_total_test.events[:, 2]\r\n\r\n\r\nacc_train = accuracy_score(y_train,y_pred_train)\r\nacc_test = accuracy_score(y_test, y_pred_test)\r\nprint(\"Training Accuracy score: {}\\n\".format(acc_train))\r\nprint(\"Test Accuracy score: {}\\n\".format(acc_test))\r\n\r\nkappa = cohen_kappa_score(y_test, y_pred_test)\r\nprint(\"Cohen Kappa Score: {}\".format(kappa))\r\n\r\nprint(confusion_matrix(y_test, y_pred_test))\r\n\r\nprint(classification_report(y_test, y_pred_test, target_names=event_id.keys()))\r\n\r\n# cv_score = model_selection.cross_val_score(pipe, epochs_total_train, y_train, cv=3)\r\n#\r\n# print(cv_score)\r\n\r\n\r\n","sub_path":"test_KNN.py","file_name":"test_KNN.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"434216364","text":"from rest_framework import serializers\nfrom AplicacionCiclo3.models.user import User\nfrom AplicacionCiclo3.models.account import Account\nfrom AplicacionCiclo3.serializers.accountSerializers import AccountSerializer\n \nclass UserSerializers(serializers.ModelSerializer):\n    account = AccountSerializer()\n    class Meta:\n        model = User\n        fields = ['id', 'username', 'password', 'name', 'email', 'account']\n    \n    def create(self, validated_data):\n        accountData = validated_data.pop('account')\n        userInstance = User.objects.create(**validated_data)\n        Account.objects.create(user=userInstance, **accountData)\n        return userInstance\n    \n    def to_representation(self, obj):\n        user = User.objects.get(id=obj.id)\n        account = Account.objects.get(user=obj.id) \n        return {\n            'id': user.id, \n            'username': user.username,\n            'name': user.name,\n            'email': user.email,\n            'account': {\n                'id': account.id,\n                'balance': account.balance,\n                'lastChangeDate': account.lastChangeDate,\n                'isActive': account.isActive\n            }\n        }","sub_path":"AplicacionCiclo3/serializers/userSerializers.py","file_name":"userSerializers.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"322801235","text":"from glob import glob\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\n# Cell that imports each library.\nimport numpy as np\nimport tensorflow as tf\n\n'''\n# Code that mounts Google Drive in Colab\nfrom google.colab import drive\n\ndrive.mount('/content/drive')\n'''\n\n# Cell that sets the base path. Written to keep the code simple.\nbase_dir = Path('./Data/image')\n\n# Check that the base path is set correctly.\nprint(f'{base_dir}/mri_train_input')\n\n# Use the glob module to build lists of file paths.\nmri_train_input_files = glob(f'{base_dir}/mri_train_input/*.npy')\nmri_train_label_files = glob(f'{base_dir}/mri_train_label/*.npy')\n\nmri_val_input_files = glob(f'{base_dir}/mri_val_input/*.npy')\nmri_val_label_files = glob(f'{base_dir}/mri_val_label/*.npy')\n\nmri_test_input_files = glob(f'{base_dir}/mri_test_input/*.npy')\nmri_test_label_files = glob(f'{base_dir}/mri_test_label/*.npy')\n\n# Use TensorFlow's dataset module to build tensor datasets that slice the path lists one element at a time.\nmri_train_data_files = tf.data.Dataset.from_tensor_slices((mri_train_input_files, mri_train_label_files))\nmri_val_data_files = tf.data.Dataset.from_tensor_slices((mri_val_input_files, mri_val_label_files))\nmri_test_data_files = tf.data.Dataset.from_tensor_slices((mri_test_input_files, mri_test_label_files))\n\n# Check that the expected number of files landed in the lists.\nprint(len(mri_train_input_files), len(mri_train_label_files))\n\n\n# Build a map function that loads the actual data from the path tensor dataset.\ndef map_func(inp_path, targ_path):\n    inp = []\n    targ = []\n    for i, t in zip(inp_path, targ_path):\n        temp_i = np.load(i)\n        temp_i = temp_i.astype(np.float32).tolist()\n        inp.append(temp_i)\n        temp_t = np.load(t)\n        temp_t = temp_t.astype(np.float32).tolist()\n        targ.append(temp_t)\n    inp = tf.constant(np.expand_dims(inp, 3), dtype=tf.float32)\n    targ = tf.constant(np.expand_dims(targ, 3), dtype=tf.float32)\n    return inp, targ\n\n\n# Map function that fixes any shape drift that may occur.\ndef _fixup_shape(images, labels):\n    images.set_shape([None, 384, 384, 1])\n    labels.set_shape([None, 384, 384, 1])\n    return images, labels\n\n\n# Set the batch size, i.e. how often the weights are updated.\nBATCH = 1\n\n# Cell that turns the path tensor datasets into tensor datasets holding the data.\n# Uses the map function together with Python lambdas.\n# Prefetching and CPU parallelism are both handled with autotune.\ntrain_data = mri_train_data_files.batch(BATCH)\ntrain_data = train_data.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.float32]),\n                            num_parallel_calls=tf.data.experimental.AUTOTUNE)\ntrain_data = train_data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\ntrain_data = train_data.map(_fixup_shape)\n\nval_data = mri_val_data_files.batch(BATCH)\nval_data = val_data.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.float32]),\n                        num_parallel_calls=tf.data.experimental.AUTOTUNE)\nval_data = val_data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\nval_data = val_data.map(_fixup_shape)\n\ntest_data = mri_test_data_files.batch(BATCH)\ntest_data = test_data.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.float32]),\n                          num_parallel_calls=tf.data.experimental.AUTOTUNE)\ntest_data = test_data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\ntest_data = test_data.map(_fixup_shape)\n\n# Check that the data to feed the network has the right shape.\nprint(next(iter(train_data))[0].shape, next(iter(train_data))[1].shape, next(iter(val_data))[0].shape,\n      next(iter(val_data))[1].shape)\n\n# Define the network (U-Net).\nfrom tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\nfrom tensorflow.keras import Model\n\ndef UNet(input_size=(None, None, 1)):\n    inp = Input(input_size)\n    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inp)\n    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\n    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\n    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\n    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\n    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\n    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\n    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\n    drop4 = Dropout(0.5)(conv4)\n    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\n    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\n    drop5 = Dropout(0.5)(conv5)\n\n    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n        UpSampling2D(size=(2, 2))(drop5))\n    merge6 = concatenate([drop4, up6], axis=3)\n    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\n    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\n\n    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n        UpSampling2D(size=(2, 2))(conv6))\n    merge7 = concatenate([conv3, up7], axis=3)\n    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\n    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\n\n    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n        UpSampling2D(size=(2, 2))(conv7))\n    merge8 = concatenate([conv2, up8], axis=3)\n    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\n    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\n\n    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n        UpSampling2D(size=(2, 2))(conv8))\n    merge9 = concatenate([conv1, up9], axis=3)\n    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\n    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n\n    model = Model(inputs=inp, outputs=[conv10])\n\n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['mae'])\n\n    return model\n\n\n# Instantiate the model.\nmodel = UNet(input_size=(384, 384, 1))\n\nmodel.load_weights('./Data/UNet_model.h5')\n\n\n# tf.keras.utils.plot_model(model, show_shapes=True, dpi=42)\n\n# Display the resulting images\npred = model.predict(test_data)\n\ni=10\nplt.subplot(1, 3, 1).axis(\"off\")\nimg1 = np.load(f'{base_dir}/mri_test_input/{i}.npy')\nplt.imshow(img1, cmap='gray')\nplt.subplot(1, 3, 2).axis(\"off\")\nplt.imshow(pred[i, :, :, 0], cmap='gray')\nplt.subplot(1, 3, 3).axis(\"off\")\nimg2 = np.load(f'{base_dir}/mri_test_label/{i}.npy')\nplt.imshow(img2, cmap='gray')\nplt.show()\n\nplt.figure(figsize=(11, 9))\nfor i in range(20):\n    plt.subplot(5, 4, i+1).axis(\"off\")\n    plt.imshow(pred[i, :, :, 0], cmap='gray')\n    plt.title(f'pred_{i}')\nplt.show()\n\ndef maximum(data):\n    max = np.max(data)\n    return 
max\n\nprint(tf.image.ssim(tf.expand_dims(img1, 2), pred[i], max(maximum(img1), maximum(pred[i]))))\nprint(max(maximum(img1), maximum(pred[i])))","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"208383515","text":"#!/usr/bin/env python\n\nimport glob\nimport time\nimport RPi.GPIO\nimport sys\n\nfrom subprocess import Popen\n\ndef read_temp():\n l_yes = False\n while (not l_yes):\n with open(device_file) as f:\n lns = f.readlines()\n if (lns[0].strip()[-3:] == 'YES'):\n l_yes = True\n equals_pos = lns[1].find('t=')\n if equals_pos != -1:\n T_str = lns[1][equals_pos+2:]\n T_C = float(T_str) / 1000.0\n #print(lns[0])\n #print(lns[1])\n time.sleep(0.25)\n return T_C\n\n\nclass crockPID:\n def __init__(self, T_set, dt, logfilename=\"T_log.txt\"):\n self.Kp = -1.0\n self.Ki = 0.0\n self.Kd = -100.0\n\n self.T_set = T_set\n self.dt = dt\n\n self.T = read_temp()\n self.T_err = self.calc_err()\n self.time = 0.0\n\n self.T_err_sum = self.T_err\n self.T_oldErr = self.T_err\n self.dT_err = 0.0\n self.pidVal = 0.0\n\n self.l_on = False\n\n self.p_term = 0.0\n self.i_term = 0.0\n self.d_term = 0.0\n\n self.logfile = open(logfilename, \"w\")\n print(\"Initializing crockPID.\")\n print(\" T_set = {T_set:.2f}\".format(T_set=self.T_set))\n print(\" Initial T = {T}\".format(T=self.T))\n\n self.write_header()\n self.write_vals()\n\n def set_log_list(self):\n self.log_list = {\"time\":self.time/60.0,\n \"Temperature\":self.T,\n \"T_set\": self.T_set,\n \"T_error\":self.T_err,\n \"dT_err\":self.dT_err,\n \"p_term\":self.p_term,\n \"i_term\":self.i_term,\n \"d_term\":self.d_term,\n \"pidVal\":self.pidVal,\n \"l_on\":self.l_on }\n\n def write_header(self):\n self.set_log_list()\n txt = \"\"\n for k, v in self.log_list.items():\n txt += k + \",\"\n txt = txt[:-1]+'\\n'\n self.logfile.write(txt)\n\n def write_vals(self):\n self.set_log_list()\n txt = \"\"\n for k, v in self.log_list.items():\n txt += str(v) + \",\"\n txt = txt[:-1]+'\\n'\n self.logfile.write(txt)\n\n def print_vals(self):\n self.set_log_list()\n txt = \"\"\n for k, v in self.log_list.items():\n txt += '{k}={v}, '.format(k=k, v=v)\n print(txt)\n\n def calc_err(self):\n return self.T - self.T_set\n\n def calc_PID(self, dt=0.0):\n\n self.T = read_temp()\n self.T_err = self.calc_err()\n self.T_err_sum += self.T_err\n self.dT_err = (self.T_err - self.T_oldErr) / self.dt\n self.time += self.dt\n\n self.p_term = self.Kp * self.T_err\n self.i_term = self.Ki * self.T_err_sum\n self.d_term = self.Kd * self.dT_err\n\n self.pidVal = self.p_term + self.i_term + self.d_term\n\n self.T_oldErr = self.T_err\n\n return self.pidVal\n\n def get_switch(self):\n\n self.pidVal = self.calc_PID()\n if self.pidVal > 0:\n self.l_on = True\n else:\n self.l_on = False\n\n self.write_vals()\n self.print_vals()\n\n return self.l_on\n\n def close(self):\n self.logfile.close()\n\n\n\n\n\n\n\ncrockPin = 17\nT_set = float(sys.argv[1])\n\nRPi.GPIO.setmode(RPi.GPIO.BCM)\nRPi.GPIO.setup(crockPin, RPi.GPIO.OUT)\n\nPopen(['modprobe', 'w1-gpio'])\nPopen(['modprobe', 'w1-therm'])\n\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n#print(device_file)\n\n#setup\ndt = 10.0\npid = crockPID(T_set = T_set, dt = dt)\n\n\n#dt = 10.0\ntm = 0.0\nwhile True:\n if (pid.get_switch()):\n RPi.GPIO.output(crockPin, True)\n else:\n RPi.GPIO.output(crockPin, False)\n time.sleep(dt)\n 
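    # Loop bookkeeping below: tm accumulates the elapsed control time. Each pass above
    # read the probe via get_switch(), recomputed the PID terms, drove the relay pin
    # high or low accordingly, and slept for one dt-second control period.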
tm += dt\n\n# note: with the endless control loop above, this line is never reached\npid.close()\n","sub_path":"sousVide/1.1_sousVide/logs/chicken_thighs/18-3-24/sous.py","file_name":"sous.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"567427689","text":"a = [4, 2, 6]\nb = [-3, 0, -1, 2]\n# Concatenation (joins the two lists)\nc = a + b\nprint(c)\n\n# Multiplication (repeats the elements of the list)\n# X number of times\na = [1]\nb = a * 3\nprint(b)","sub_path":"Laboratorio de la Computacion/Python/16-listas.py","file_name":"16-listas.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"594698462","text":"import pickle\r\n\r\n# define the path to the pickled model\r\nmodel_path = 'rf-model.pkl'\r\n\r\n# unpickle the random forest model\r\nwith open(model_path, 'rb') as file:\r\n    unpickled_rf = pickle.load(file)\r\n\r\n# define a single row of X variables to test the prediction\r\nX_example = [[5.0, 2.0, 3.5, 1.0]]\r\n\r\n# run the unpickled model and print the answer\r\ny_example = unpickled_rf.predict(X_example)\r\nprint(y_example)\r\n","sub_path":"Docker Example/Example 003 ML/load_model.py","file_name":"load_model.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"439996407","text":"from django import forms\n\nfrom CatsManagement.models import Cat\n\nimport datetime\n\nclass SelectCatsForm(forms.Form):\n    selectc = forms.ModelMultipleChoiceField(\n        label= 'Select cat(s)',\n        queryset = Cat.objects.all(), # not optional, use .all() if unsure\n        widget = forms.CheckboxSelectMultiple(attrs={\"checked\":\"\"}),\n    )\n    fromdate = forms.DateField(\n        label='From',\n        required=False,\n        widget=forms.TextInput(attrs=\n            {\n                'class':'datepick',\n                'id':'datepicker1'\n            }\n        )\n    )\n    todate = forms.DateField(\n        label='To',\n        required=False,\n        widget=forms.TextInput(attrs=\n            {\n                'class':'datepick',\n                'id':'datepicker2'\n            }\n        )\n    )\n\n    def clean_todate(self):\n        todate = self.cleaned_data[\"todate\"]\n        currentdate = datetime.datetime.now().date()\n        if todate:\n            if todate > currentdate:\n                raise forms.ValidationError(\"This cannot be a future date!\")\n        return todate\n\n    def clean(self):\n        cleaned_data = super(SelectCatsForm, self).clean()\n        if not self.errors:\n            fromdate = self.cleaned_data[\"fromdate\"]\n            todate = self.cleaned_data[\"todate\"]\n            if todate and fromdate:\n                if todate < fromdate:\n                    raise forms.ValidationError({'fromdate': [\"End time cannot be earlier than start time!\", ]})\n        return cleaned_data","sub_path":"tables/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"69138841","text":"from GUI.MultiplayerMenu import MultiplayerMenu\nfrom Helpers.image_helper import get_image_path\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nfrom GUI.SinglePlayer import SinglePlayer\nfrom GUI.Tournament import Tournament\n\nimport sys\nfrom Styles.ButtonStyles import button_style\n\n\n\n\nclass InitialWindow(QMainWindow):\n    def __init__(self):\n        super().__init__()\n        self.setGeometry(200, 200, 1000, 600)\n        self.setFixedSize(1000, 600)\n        self.setWindowTitle(\"Menu\")\n        self.multiplayers = []\n\n        self.init_UI()\n\n    def init_UI(self):\n        self.init_window()\n        self.init_buttons()\n\n    def 
init_buttons(self):\n        self.singleplayer_button = QtWidgets.QPushButton(self)\n        self.singleplayer_button.setText(\"Singleplayer\")\n        self.singleplayer_button.setGeometry(400, 100, 250, 50)\n        self.singleplayer_button.setStyleSheet(button_style)\n        self.singleplayer_button.clicked.connect(self.on_singleplayer_button)\n        self.dialog = SinglePlayer()\n\n        self.multiplayer_button = QtWidgets.QPushButton(self)\n        self.multiplayer_button.setText(\"Multiplayer\")\n        self.multiplayer_button.setGeometry(400, 200, 250, 50)\n        self.multiplayer_button.setStyleSheet(button_style)\n        self.multiplayer_button.clicked.connect(self.on_multiplayer_button)\n\n        self.tournament_button = QtWidgets.QPushButton(self)\n        self.tournament_button.setText(\"Tournament\")\n        self.tournament_button.setGeometry(400, 300, 250, 50)\n        self.tournament_button.setStyleSheet(button_style)\n        self.tournament_button.clicked.connect(self.on_tournament_button)\n        self.dialog2 = Tournament()\n\n        self.exit_button = QtWidgets.QPushButton(self)\n        self.exit_button.setText(\"Exit\")\n        self.exit_button.setGeometry(400, 400, 250, 50)\n        self.exit_button.setStyleSheet(button_style)\n        self.exit_button.clicked.connect(self.exit)\n\n    def init_window(self):\n        self.BackGround = QPixmap(get_image_path(\"../Sources/Images/Other/background.jpg\"))\n\n        self.BackGroundLabel = QtWidgets.QLabel(self)\n        self.BackGroundLabel.setPixmap(self.BackGround.scaled(1000, 600))\n        self.BackGroundLabel.resize(1000, 600)\n        self.BackGroundLabel.setGeometry(0, 0, 1000, 600)\n\n    def exit(self):\n        app = QApplication.instance()\n        app.closeAllWindows()\n\n    def on_singleplayer_button(self):\n        self.hide()\n        self.dialog.show()\n\n    def on_multiplayer_button(self):\n        self.multiplayers.append(MultiplayerMenu())\n        self.multiplayers[-1].show()\n\n    def on_tournament_button(self):\n        self.hide()\n        self.dialog2.show()\n\n\ndef display_menu():\n    app = QApplication(sys.argv)\n    win = InitialWindow()\n\n    win.show()\n    sys.exit(app.exec_())\n","sub_path":"space_invaders/GUI/InitialWindow.py","file_name":"InitialWindow.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"187567638","text":"from google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nproviders = {\n    'Google' : 'www.google.com/accounts/o8/id',\n    'Yahoo' : 'yahoo.com',\n    'MySpace' : 'myspace.com'\n}\n\nclass Main(webapp.RequestHandler):\n    def get(self):\n        user = users.get_current_user()\n        #are they signed in already?\n        if user:\n            self.response.out.write('Hello %s! <a href=\"%s\">Sign Out Here</a>' % ( user.nickname(), users.create_logout_url(self.request.uri)))\n        else:\n            self.response.out.write('Hi! Please sign in at: ')\n            for name, uri in providers.items():\n                self.response.out.write('<a href=\"%s\">%s</a> ' % ( users.create_login_url(federated_identity=uri), name))\n    \napplication = webapp.WSGIApplication([('/', Main)], debug=True)\n\ndef main():\n    run_wsgi_app(application)\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"webapp/second/first_app.py","file_name":"first_app.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"70513620","text":"# Copyright 2019 Lorna Authors. 
All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\ndef bag(goods, size):\n cell = [[0 for col in range(size)] for row in range(len(goods))]\n\n package_size = [i + 1 for i in range(size)]\n\n for j in range(size):\n if goods[0][1] <= package_size[j]:\n cell[0][j] = goods[0][0]\n\n for i in range(1, len(goods)):\n for j in range(size):\n if (package_size[j] - goods[i][1] > 0) and (goods[i][0] + cell[i - 1][package_size[j] - 1 - goods[i][1]]) > \\\n cell[i - 1][j]:\n cell[i][j] = goods[i][0] + cell[i - 1][package_size[j] - goods[i][1] - 1]\n elif (package_size[j] - goods[i][1] == 0) and (goods[i][0] > cell[i - 1][j]):\n cell[i][j] = goods[i][0]\n else:\n cell[i][j] = cell[i - 1][j]\n print(cell) # [[1500, 1500, 1500, 1500], [1500, 1500, 1500, 3000], [1500, 1500, 2000, 3500]]\n\n return cell[len(goods) - 1][size - 1]\n\n\nif __name__ == '__main__':\n goods = [[1500, 1], [3000, 4], [2000, 3]]\n print(bag(goods, 4)) # 3500\n","sub_path":"09-dynamic-planning/dynamic_planning.py","file_name":"dynamic_planning.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"413646049","text":"#import sys\n#sys.path.append(\"./\")\nimport os\nfrom sopel import module\nfrom sopel import tools\n\nfrom .VERSION import *\nfrom .Global import *\nfrom .Admin import Admin\nfrom .Game import Game\nfrom .Player import Player\n\n# convert version string to string array\nversion = VERSION.split(\"\\n\")\n\n# game setup\ndef setup(bot):\n bot.cap_req('igor', 'twitch.tv/tags')\n bot.cap_req('igor', 'twitch.tv/commands')\n if (not bot.memory.contains('players')):\n bot.memory['players'] = tools.SopelMemory()\n\n# all chat lines\n@module.rule('.*')\ndef command(bot, trigger):\n # skip common chat\n if (not trigger.group(0).startswith(bot.config.core.prefix)):\n return\n\n _command(bot, trigger)\n return\n\n\n# all whispers to bot\n@module.event('WHISPER')\n@module.rule('.*')\ndef event(bot, trigger):\n _command(bot, trigger)\n return\n\n\n# common command\ndef _command(bot, trigger):\n g = trigger.group(0)\n if (g.startswith(bot.config.core.prefix)):\n g = g[1:]\n cmds = g.split()\n cmd = cmds[0]\n p = None\n\n # we do initial command thing here to avoid circular deps in\n # Game -> Player -> Battle -> Game\n # start new game\n if (cmd in [ 'start', 'restart' ]):\n p = initPlayer(bot, trigger)\n Game.intro(p)\n\n # ANY: game version and changes\n elif (cmd in [ 'changes' ]):\n for msg in version:\n say(bot, trigger.nick, msg)\n\n # ANY: game version\n elif (cmd in [ 'version' ]):\n say(bot, trigger.nick, version[0])\n\n # ADMIN: admin entry point\n elif (cmd in [ 'admin' ]):\n p = getPlayer(bot, trigger)\n if (p == None):\n say(bot, trigger.nick, 'You must \";start\" a new game first.')\n return\n\n cmds.pop(0)\n Admin.command(p, cmds)\n\n # any other command\n else:\n p = getPlayer(bot, trigger)\n if (p == None):\n say(bot, 
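# A compact standalone cross-check (not part of igor or dynamic_planning.py) for the
# bag() function above, using the textbook one-dimensional 0/1-knapsack recurrence;
# the goods list and the expected answer are the ones from that file.
def knapsack(goods, size):
    best = [0] * (size + 1)            # best[c] = max value achievable with capacity c
    for value, weight in goods:
        for c in range(size, weight - 1, -1):   # descending, so each item is used at most once
            best[c] = max(best[c], value + best[c - weight])
    return best[size]

print(knapsack([[1500, 1], [3000, 4], [2000, 3]], 4))   # 3500, matching bag(goods, 4)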
trigger.nick, 'You must \";start\" a new game first.')\n return\n\n Game.command(p, cmds)\n\n # actually say stuff\n if (p != None and len(p.msgs) > 0):\n say(bot, trigger.nick, ' '.join(p.msgs))\n p.msgs = []\n\n\n############################################\n\n# whisper to user\ndef say(bot, nick, msg):\n bot.say('/w ' + nick + ' ' + msg)\n\n\n# init new player instance\ndef initPlayer(bot, trigger):\n globals['bot'] = bot\n\n # new player instance\n p = Player(trigger.nick)\n\n # set player record\n bot.memory['players'][trigger.nick] = p\n\n return p\n\n\n# returns the player instance for this user\ndef getPlayer(bot, trigger):\n globals['bot'] = bot\n\n # init memory if needed\n if (not bot.memory.contains('players')):\n return None\n\n # player exists, return it\n if (bot.memory['players'].contains(trigger.nick)):\n return bot.memory['players'][trigger.nick]\n\n return None\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__, verbose = False)\n","sub_path":"igor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"605472830","text":"from html.parser import HTMLParser\nfrom urllib.request import urlopen\n\nclass Crawler(HTMLParser):\n def __init__(self):\n super().__init__()\n\n def handle_starttag(self, tag, attrs):\n print(tag)\n\n def error(self, message):\n pass\n\n @staticmethod\n def gather_links(page_url):\n html_string = ''\n print(page_url)\n try:\n response = urlopen(page_url)\n print(response)\n if response.getheader('Content-Type') == 'text/html; charset=utf-8':\n print(\"hello\")\n html_bytes = response.read()\n html_string = html_bytes.decode(\"utf-8\")\n except:\n print(\"error\")\n return html_string","sub_path":"crawler/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"321388944","text":"#!/usr/bin/env python\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nimport sys\n\nimport numpy as np\nnp.random.seed(5)\nnp.set_printoptions(threshold=sys.maxsize)\nimport pandas as pd\nimport glob\nimport os\nimport time\nimport json\nfrom sklearn import preprocessing\nfrom sklearn import ensemble\nfrom sklearn import linear_model\nfrom sklearn import svm\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import explained_variance_score\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\n#from keras.models import Sequential\n#from keras.layers.core import Dense, Dropout, Activation\n#from keras.layers.advanced_activations import PReLU, SReLU, LeakyReLU\n\nfrom sklearn.preprocessing import FunctionTransformer\ntransformer = FunctionTransformer(func=np.log1p, inverse_func=np.expm1)\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import neighbors\nfrom sklearn import tree\nfrom sklearn import gaussian_process\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\n\n#from keras.layers import Input, Dense\n#from keras.models import Model\n#import keras.backend as K\nfrom sklearn.metrics import r2_score, mean_squared_error\nimport sklearn.dummy\nimport math\nfrom sklearn.multioutput import MultiOutputRegressor\n\n# Methods\ndef mean_absolute_percentage_error(y_true, y_pred): \n 
y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\ndef coeff_determination(y_pred, y_true): #Order of function inputs is important here \n SS_res = K.sum(K.square( y_true-y_pred )) \n SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )\n return ( 1 - SS_res/(SS_tot + K.epsilon()) )\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\noutputs = ['time']\nfold = ['nfold', 'fold', 'id','percentage','nfold']\n\nPP_OUT_FLAG = True\nLOG_FLAG = False\n\nclass Regression():\n\n def __init__(self, trainFilename, testFilename, resultsDir, num_trees, run_case=True):\n # assert len(trainFilenames) == len(testFilenames)\n self.resultsDir = resultsDir\n #ntrees = 1000\n self.trainFilename = trainFilename\n self.testFilename = testFilename\n self.regressors = {\n 'lm': MultiOutputRegressor(linear_model.LinearRegression()),\n 'rg': MultiOutputRegressor(linear_model.Ridge()),\n 'svm': MultiOutputRegressor(svm.SVR(kernel='rbf')),\n 'gp': MultiOutputRegressor(gaussian_process.GaussianProcessRegressor()),\n 'knn': MultiOutputRegressor(neighbors.KNeighborsRegressor(n_neighbors=5)),\n 'dt': MultiOutputRegressor(tree.DecisionTreeRegressor()),\n 'br': MultiOutputRegressor(ensemble.BaggingRegressor(n_jobs=-1)),\n 'etr': MultiOutputRegressor(ensemble.ExtraTreesRegressor(n_jobs=-1)),\n 'rfr': MultiOutputRegressor(ensemble.RandomForestRegressor(n_jobs=-1,n_estimators=num_trees)),\n 'abr': MultiOutputRegressor(ensemble.AdaBoostRegressor()),\n 'gbr': MultiOutputRegressor(ensemble.GradientBoostingRegressor()),\n }\n\n if run_case:\n self.load_data()\n self.preprocess_data()\n for key in self.regressors.keys():\n self.fit_model(key)\n\n def load_data(self):\n filename = self.trainFilename\n print(self.trainFilename)\n if os.path.exists(filename):\n train_data = pd.read_csv(filename,header=None,encoding = \"ISO-8859-1\")\n filename = self.testFilename\n if os.path.exists(filename):\n test_data1 = pd.read_csv(filename,header=None,encoding = \"ISO-8859-1\")\n\n out_df = train_data.iloc[:,-1].values.reshape(-1,1)\n inp_df = train_data.iloc[:,:-1]\n\n test_out_df1 = test_data1.iloc[:,-1].values.reshape(-1,1)\n test_inp_df1 = test_data1.iloc[:,:-1]\n\n self.train_X = inp_df\n self.train_y = out_df\n self.test_X = test_inp_df1\n self.test_y = test_out_df1\n\n def preprocess_data(self):\n self.preproc_X = Pipeline([('stdscaler', StandardScaler()),('minmax', MinMaxScaler(feature_range=(-1, 1)))])\n self.preproc_y = Pipeline([('stdscaler', StandardScaler()),('minmax', MinMaxScaler(feature_range=(-1, 1)))])\n self.train_X_p = self.preproc_X.fit_transform(self.train_X)#.as_matrix()\n self.train_y_p = self.preproc_y.fit_transform(self.train_y)#.as_matrix()\n self.test_X_p = self.preproc_X.transform(self.test_X)#.as_matrix()\n self.test_y_p = self.preproc_y.transform(self.test_y)#.as_matrix()\n\n def build_model(self, model_type):\n start = time.time()\n model = self.regressors[model_type]\n end = time.time()\n build_time = (end-start)\n return model, build_time\n\n def train_model(self, model, model_type):\n start = time.time()\n model.fit(self.train_X_p, self.train_y_p)\n end = time.time()\n training_time = (end - start)\n return model, training_time\n\n def test_model(self, model):\n start = time.time()\n test_yhat_p = model.predict(self.test_X_p)\n end = time.time()\n inference_time = (end - start)\n return test_yhat_p , inference_time\n\n def compute_metric(self, test_y, test_yhat):\n results = []\n test_y = test_y.reshape(-1,1)\n test_yhat = test_yhat\n for out_index in 
range(test_y.shape[1]):\n y_true = test_y[:,out_index]\n y_pred = test_yhat[:,out_index]\n r2 = r2_score(y_true, y_pred) \n evs = explained_variance_score(y_true, y_pred) \n mae = mean_absolute_error(y_true, y_pred)\n rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n rho = np.corrcoef(y_true, y_pred)[0][1]\n # mape = mean_absolute_percentage_error(y_true, y_pred) \n result = [r2, rho, evs, mae, rmse]\n results.append(result)\n res_df = pd.DataFrame(results)\n res_df.columns = ['r2','rho','evs', 'mae', 'rmse']\n return(res_df)\n\n def fit_model(self, model_type):\n\n res_dict = {}\n outputFilename = os.path.basename(self.trainFilename).replace('train', 'meta_%s' % (model_type))\n output_base = os.path.splitext(outputFilename)[0]\n outputFilename = '%s/%s.json' % (self.resultsDir,output_base)\n\n if not os.path.exists(outputFilename):\n model, build_time = self.build_model(model_type)\n model, train_time = self.train_model(model, model_type)\n \n test_yhat_p , inference_time = self.test_model(model)\n test_yhat = self.preproc_y.inverse_transform(test_yhat_p)\n res_df = self.compute_metric(self.test_y,test_yhat)\n \n res_dict['build_time'] = build_time\n res_dict['train_time'] = train_time\n res_dict['inference_time'] = inference_time\n res_dict['model'] = model_type\n\n outputFilename = os.path.basename(self.trainFilename).replace('train', 'meta_%s' % (model_type))\n output_base = os.path.splitext(outputFilename)[0]\n outputFilename = '%s/%s.json' % (self.resultsDir,output_base)\n \n with open(outputFilename, 'w') as fp:\n json.dump(res_dict, fp)\n\n output_base = output_base.replace('meta', 'pred')\n outputFilename = '%s/%s.csv' % (self.resultsDir,output_base)\n np.savetxt(outputFilename, test_yhat, delimiter=\",\")\n\n output_base = output_base.replace('pred', 'metric')\n outputFilename = '%s/%s.csv' % (self.resultsDir,output_base)\n res_df.to_csv(outputFilename)\n\nif __name__ == '__main__':\n\n # Load variable names\n import argparse\n parser = argparse.ArgumentParser(description='Arguments for run_regressors')\n parser.add_argument('num_trees', metavar='num_trees', type=str, help='number of decision trees')\n args = parser.parse_args()\n\n\n import os\n if not os.path.exists('results/'):\n os.mkdir('results/')\n resultsDir = 'results/'\n \n trainFilenames = []\n testFilenames = []\n pattern = 'folds/train_*.csv'\n trainFiles = glob.glob(pattern)\n \n for trainFilename in trainFiles:\n trainFilenames.append(trainFilename)\n testFilename = trainFilename.replace('train', 'test')\n testFilenames.append(testFilename)\n\n Regression(trainFilename, testFilename, resultsDir, int(args.num_trees))\n\n","sub_path":"Other_Python/Basic_Regression/run_regressors.py","file_name":"run_regressors.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"282468541","text":"from CMGTools.HNL.hn3l_cfg import *\n\n# specify the samples considered\n# from CMGTools.HNL.samples.signal import all_signals_e as samples\n# from CMGTools.HNL.samples.signal_new import signals_e as samples\n# from CMGTools.HNL.samples.signal_13sept18 import all_signals_e as samples\n# from CMGTools.HNL.samples.samples_data_2017_noskim import Single_ele_2017, Single_ele_2017B, Single_ele_2017C, Single_ele_2017D, Single_ele_2017E, Single_ele_2017F\n# from CMGTools.HNL.samples.samples_data_2017_noskim import Single_ele_2017B\n# from CMGTools.HNL.samples.localsignal import WJetsToLNu_ext\nfrom CMGTools.HNL.samples.samples_mc_2017 import 
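# A self-contained miniature (synthetic data; not the CSV-fold driver above) of the
# scale-then-fit recipe the Regression class uses: the same StandardScaler+MinMaxScaler
# pipeline on X and y, a wrapped single-target regressor, and inverse-transformed
# predictions. The data shape and tree count here are illustrative assumptions.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler

rng = np.random.RandomState(0)
X = rng.rand(200, 4)
y = (X @ rng.rand(4, 1)) + 0.01 * rng.randn(200, 1)   # single output, kept 2-D like the class does

prep_X = Pipeline([('std', StandardScaler()), ('mm', MinMaxScaler(feature_range=(-1, 1)))])
prep_y = Pipeline([('std', StandardScaler()), ('mm', MinMaxScaler(feature_range=(-1, 1)))])
Xp, yp = prep_X.fit_transform(X), prep_y.fit_transform(y)

reg = MultiOutputRegressor(RandomForestRegressor(n_estimators=50, random_state=0))
reg.fit(Xp, yp)
yhat = prep_y.inverse_transform(reg.predict(Xp))
print(yhat.shape)   # (200, 1), back on the original target scale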
TTJets_amcat\n# from CMGTools.HNL.samples.samples_mc_2017 import DYBB,DYJetsToLL_M10to50\n# samples = [Single_ele_2017B,Single_ele_2017C,Single_ele_2017D,Single_ele_2017E,Single_ele_2017F]\n#samples = [DYBB, DYJetsToLL_M10to50, DYJetsToLL_M50, DYJetsToLL_M10to50_ext, DYJetsToLL_M50_ext]\n#samples = [DYBB, DYJetsToLL_M10to50, DYJetsToLL_M50, DYJetsToLL_M50_ext]\n# from CMGTools.HNL.samples.samples_mc_2017_noskim import WJetsToLNu \n# samples = [WJetsToLNu]\nsamples = [TTJets_amcat]\n# from CMGTools.HNL.samples.localsignal import HN3L_M_2_V_0p00244948974278_e_massiveAndCKM_LO\n# samples = [HN3L_M_2_V_0p00244948974278_e_massiveAndCKM_LO]\n# samples = [DYJetsToLL_M10to50]\n\n\n# edit the lines here to specify your ntuple production mode \nproduction = False # state whether you're running production mode or not\nisData = False\nisSignal = True\npromptLeptonType = \"ele\" # choose from 'ele', 'mu'\nL1L2LeptonType = \"ee\" # choose from 'ee', 'mm', 'em'\n\n\n# this calls the master cfg file with the proper settings\nconfig = generateKeyConfigs(samples,production, promptLeptonType, L1L2LeptonType, isData = isData, isSignal = isSignal)\n","sub_path":"cfg/hn3l_eee_cfg.py","file_name":"hn3l_eee_cfg.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"444648296","text":"def closure_fragment(name, **kwargs):\n native.closure_fragment(name = name, **kwargs)\n\n # Android\n defs = kwargs.get(\"defines\", [])\n defs = defs + [\n \"goog.userAgent.ASSUME_MOBILE_WEBKIT=true\",\n \"goog.userAgent.product.ASSUME_ANDROID=true\",\n ]\n args = dict(**kwargs)\n args[\"defines\"] = defs\n fragment_name = name + \"-android\"\n native.closure_fragment(name = fragment_name, **args)\n\n # Chrome\n defs = kwargs.get(\"defines\", [])\n defs = defs + [\n \"goog.userAgent.ASSUME_WEBKIT=true\",\n \"goog.userAgent.product.ASSUME_CHROME=true\",\n ]\n args = dict(**kwargs)\n args[\"defines\"] = defs\n fragment_name = name + \"-chrome\"\n native.closure_fragment(name = fragment_name, **args)\n\n # Edge and IE\n defs = kwargs.get(\"defines\", [])\n defs = defs + [\n \"goog.userAgent.ASSUME_IE=true\",\n ]\n args = dict(**kwargs)\n args[\"defines\"] = defs\n fragment_name = name + \"-ie\"\n native.closure_fragment(name = fragment_name, **args)\n\n # iOS\n defs = kwargs.get(\"defines\", [])\n defs = defs + [\n # We use the same fragments for iPad and iPhone, so just compile a\n # generic mobile webkit.\n \"goog.userAgent.ASSUME_MOBILE_WEBKIT=true\",\n ]\n args = dict(**kwargs)\n args[\"defines\"] = defs\n fragment_name = name + \"-ios\"\n native.closure_fragment(name = fragment_name, **args)\n\n # Firefox\n defs = kwargs.get(\"defines\", [])\n defs = defs + [\n \"goog.userAgent.ASSUME_GECKO=true\",\n \"goog.userAgent.product.ASSUME_FIREFOX=true\",\n ]\n args = dict(**kwargs)\n args[\"defines\"] = defs\n fragment_name = name + \"-firefox\"\n native.closure_fragment(name = fragment_name, **args)\n\ndef closure_test_suite(name, data):\n browsers = {\n \"firefox\": (\"ff\", \"//java/client/src/org/openqa/selenium/firefox\"),\n \"chrome\": (\"chrome\", \"//java/client/src/org/openqa/selenium/chrome\"),\n \"ie\": (\"ie\", \"//java/client/src/org/openqa/selenium/ie\"),\n \"safari\": (\"safari\", \"//java/client/src/org/openqa/selenium/safari\"),\n }\n\n data = data + [\n \"@com_google_javascript_closure_library//:com_google_javascript_closure_library\",\n ]\n\n tests = []\n for browser in browsers.keys():\n spec = browsers[browser]\n test_name 
= \"%s-%s\" % (name, browser)\n native.java_test(\n name = test_name,\n test_class = \"org.openqa.selenium.javascript.ClosureTestSuite\",\n jvm_flags = [\n \"-Dselenium.browser=%s\" % spec[0],\n \"-Djs.test.timeout=20\",\n \"-Djs.test.dir=%s\" % native.package_name(),\n ],\n data = data,\n runtime_deps = [\n \"//java/client/test/org/openqa/selenium/javascript:javascript\",\n spec[1],\n ],\n tags = [\"no-sandbox\"],\n )\n tests.append(\":\" + test_name)\n\n native.test_suite(\n name = name,\n tests = tests,\n tags = [\"manual\", \"no-sandbox\"],\n )\n\n native.java_binary(\n name = name + \"_debug_server\",\n main_class = \"org.openqa.selenium.environment.webserver.JettyAppServer\",\n data = data,\n testonly = 1,\n runtime_deps = [\n \"//java/client/test/org/openqa/selenium/environment\",\n ],\n )\n","sub_path":"javascript/rules.bzl","file_name":"rules.bzl","file_ext":"bzl","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"181551507","text":"# Uses python3\n# Problem Description\n\n# Task. The goal in this problem is to find the minimum number of coins needed to change the input value\n# (an integer) into coins with denominations 1, 5, and 10.\n\n# Input Format. The input consists of a single integer m.\n\n# Constraints. 1 ≤ m ≤ 10**3 .\n\n# Output Format. Output the minimum number of coins with denominations 1, 5, 10 that changes m.\n\n# Example :\n# 24 <--- input\n# 6 <--- output\n\n# 26 <-- input\n#4 <-- output\n\ndef get_change(m):\n \n count = 0\n while(m > 0):\n # By Greedy approach, first we subtract highest value from denominations ( 10 here ) \n # if value of m (i.e. money) is greater than 10 \n if m > 10:\n m -= 10\n \n # if m < 10 but greater than 5, then we will subtract '5' ( second largest value of denomination here)\n elif m > 5:\n m -= 5\n \n # if m < 5, then we subtract '1' ( lowest value of denomination )\n else:\n m -= 1\n\n # finally we increase count by 1 \n count += 1\n\n return count\n\ndef main():\n m = int(input()) \n print(get_change(m))\n\nmain()","sub_path":"Greedy_Algorithm/money_change_problem.py","file_name":"money_change_problem.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"623981620","text":"# Uses python3\nimport sys\n\ndef gcd_naive(a, b):\n if b>a:\n \ttemp = b\n \tb = a\n \ta = temp\n while(b!=0):\n \ttemp = b\n \tb = a%b\n \ta = temp\n return a\n\ntoken = input()\ntoken = token.split()\na = token[0]\nb = token[1]\nprint(gcd_naive(int(a), int(b)))","sub_path":"week2_algorithmic_warmup/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"616569012","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport gconvutils as gutils\nfrom groupy.gconv.pytorch_gconv import P4ConvZ2, P4ConvP4\n\nclass P4Net(nn.Module):\n def __init__(self):\n super(P4Net, self).__init__() \n self.conv1 = P4ConvZ2(in_channels=1, out_channels=8, kernel_size=5, stride=1)\n self.conv2 = P4ConvP4(in_channels=8, out_channels=16, kernel_size=5, stride=1)\n self.conv3 = P4ConvP4(in_channels=16, out_channels=32, kernel_size=5)\n\n self.fc1 = nn.Linear(64, 16)\n self.fc2 = nn.Linear(16, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = gutils.plane_group_spatial_max_pooling(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = 
gutils.plane_group_spatial_max_pooling(x, 2, 2)\n x = F.relu(self.conv3(x))\n x = gutils.plane_group_spatial_max_pooling(x, 1, 1)\n x = torch.max(x, dim=2)[0]\n x = torch.flatten(x, 1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\nclass P4NetC(nn.Module):\n def __init__(self):\n super(P4NetC, self).__init__() \n self.conv1 = P4ConvZ2(in_channels=3, out_channels=16, kernel_size=3, stride=1)\n self.conv2 = P4ConvP4(in_channels=16, out_channels=32, kernel_size=3, stride=1)\n self.conv3 = P4ConvP4(in_channels=32, out_channels=64, kernel_size=3, stride=1)\n\n self.fc1 = nn.Linear(64 * 4 * 4, 256)\n self.fc2 = nn.Linear(256, 64)\n self.fc3 = nn.Linear(64, 10)\n\n self.dropout = nn.Dropout(p=0.2)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = gutils.plane_group_spatial_max_pooling(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = gutils.plane_group_spatial_max_pooling(x, 2, 2)\n x = F.relu(self.conv3(x))\n x = torch.max(x, dim=2)[0]\n x = torch.flatten(x, 1)\n x = self.dropout(F.relu(self.fc1(x)))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x","sub_path":"p4_conv.py","file_name":"p4_conv.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"151735452","text":"import matplotlib.pyplot as plt\n\nimport numpy as np;\nimport os;\nimport pandas as pd\nimport csv;\nimport glob;\n\n\nimport plotly.graph_objects as go\nimport plotly.express as px\n\n\n\n\nlabels = [\"100\", \"1000\", \"5000\", \"10000\", \"50000\", \"100000\", \"500000\", \"1000000\", \"1500000\"]\n#labels=[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\"]\nrun_time_data= [ ]\nlatency_data = [ ]\ncontext_switch_delta=[]\nfilenames=[\"python-services-100recordExperiment-0MBs-run0.csv\", \"python-services-1000recordExperiment-0MBs-run0.csv\", \"python-services-5000recordExperiment-0MBs-run0.csv\", \"python-services-10000recordExperiment-0MBs-run0.csv\", \"python-services-50000recordExperiment-0MBs-run0.csv\", \"python-services-100000recordExperiment-0MBs-run0.csv\", \"python-services-500000recordExperiment-0MBs-run0.csv\", \"python-services-1000000recordExperiment-0MBs-run0.csv\", \"python-services-1500000recordExperiment-0MBs-run0.csv\"]\n\n\nevery_single_stat_in_zall=[[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [],[] ,[] ,[] , [], [], [], [], [], [], [], [] ]\nevery_single_stat_metric_title =[];\n\ncontext_data = [ ]\n\n\n\ndef export_graphs_as_images(fig, file_name, title):\n\tfile_name=file_name.split('.',1)[0]\n\tif not os.path.exists(file_name +\"_images\"):\n\t\tos.mkdir(file_name +\"_images\")\n\tfig.write_image(file_name +\"_images/\"+title +\".png\")\n\tprint(\"saved image: \" +title +\".png to \" + os.path.abspath(file_name +\"_images\"))\n\t\n\n\n\ndef get_names(file_to_open):\n with open(file_to_open, 'r') as csvfile:\n reader = csv.reader(csvfile)\n num = []\n for i, row in enumerate(reader):\n if i ==0:\n firstline = ''.join(row).split()\n lenfirstline = len(firstline)\n #print firstline, lenfirstline\n num.append(len(''.join(row).split()))\n m = max(num)\n rng = range(1, m - lenfirstline + 2)\n #remove )\n rng = firstline[:-1] + rng\n return rng\n\ndef plot_bar_all(labels, all_data, all_title):\n\tcount = 0;\n\tfor sublist in all_data:\n\t\tindex = np.arange(len(labels))\n\t\tplt.bar(index, sublist)\n\t\tplt.xlabel(\"Csv Size\", fontsize=5)\n\t\tplt.xticks(index, labels, fontsize=5, rotation=15)\n\t\tplt.title(all_title[count])\n\n\n\t\tprint (sublist)\n\t\tfig = 
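# A standalone illustration (random tensor, no GrouPy dependency) of the orientation
# pooling both networks above perform: a P4 feature map carries an extra 4-way rotation
# axis, and torch.max over that axis yields rotation-invariant channels. The tensor
# sizes here are illustrative assumptions.
import torch

feat = torch.randn(8, 16, 4, 10, 10)        # (batch, channels, 4 rotations, H, W)
pooled = torch.max(feat, dim=2)[0]          # collapse the rotation axis
print(pooled.shape)                         # torch.Size([8, 16, 10, 10])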
go.Figure([go.Bar(x=labels, y=sublist)])\n\t\tfig.update_layout(xaxis=dict(type='category'))\n\t\tfig.update_layout(title = { 'text':all_title[count]})\n\t\tfig.update_xaxes(title=\"Csv Row Count\")\n\t\texport_graphs_as_images(fig, \"Service1_images\", all_title[count])\n\n\t\tcount +=1\n\n\ndef plot_bar_x(label, all_data, all_Title):\n    # use the function's own parameters; the previous version mixed in module globals\n    index = np.arange(len(label))\n    plt.bar(index, all_data)\n    plt.xlabel(\"Csv Size\", fontsize=15)\n    plt.ylabel(\"Run Time (ms)\")\n    plt.xticks(index, label, fontsize=15, rotation=30)\n    plt.title(all_Title);\n    plt.show();\n\n\n\npath = os.getcwd();\n\nall_data = [];\nfor file_name in filenames:\n\tcsv_file = str(path) + '/' + file_name;\n\tprint(csv_file)\n\tdata = list(csv.reader(open(csv_file)))\n\tall_data.append(data);\n\n\n\n\nfor sublist in all_data:\n\tcounter=0;\n\tfor i in range(0, len(sublist)):\n\t\tif len(sublist[i]) > 0:\n\n\t\t\tif sublist[i][0] == \"zAll\":\n\t\t\t\t#goes to 30\n\t\t\t\tprint(\"found it on row {}\".format(i));\t\n\t\t\t\t#run_time_data.append(sublist[i+1][23])\n\t\t\t\t#latency_data.append(sublist[i+1][16])\n\t\t\t\t#context_switch_data.append(sublist[i+1][4])\n\t\t\t\tevery_single_stat_metric_title = sublist[i][3:17]\n\t\t\t\tfor j in range(3, 17):\n\n\t\t\t\t\tevery_single_stat_in_zall[counter].append(sublist[i+1][j])\n\t\t\t\t\tcounter +=1;\n\n\nplot_bar_all(labels, every_single_stat_in_zall, every_single_stat_metric_title)\n#print (every_single_stat_metric_title)\n\n#print every_single_stat_in_zall[1]\n#plot_bar_x(label, run_time_data, \"Runtime Graph\");\n\n\n\n","sub_path":"python/individual services/service_one/test/history/quick-graph.py","file_name":"quick-graph.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"110904054","text":"from twisted.internet import defer\n\nimport deliver_formatters\nimport base\n\n\nclass XmppNotifier(object):\n    #@defer.inlineCallbacks\n    def notify(self, user, event_type, event):\n        if event_type == 'message' and not user.get('off', False):\n            message, recommender, recocomment, sfrom = event\n            if recommender:\n                formatter = deliver_formatters.parsers[\n                    user.get('interface', 'redeye')]['recommendation']\n            else:\n                formatter = deliver_formatters.parsers[\n                    user.get('interface', 'redeye')]['message']\n            formatted = formatter(DummyRequest(user),\n                                  dict(message=message,\n                                       recommender=recommender,\n                                       recocomment=recocomment)\n                                  )\n            user.send_plain(formatted, sfrom)\n            return 1 # defer.returnValue(1)\n        elif event_type == 'comment' and not user.get('off', False):\n            comment, sfrom = event\n            formatter = deliver_formatters.parsers[user.get(\n                'interface', 'redeye')]['comment']\n            formatted = formatter(DummyRequest(user),\n                                  dict(comment=comment)\n                                  )\n            user.send_plain(formatted, sfrom)\n            return 1 # defer.returnValue(1)\n        return 0 # defer.returnValue(0)\n\n\nclass DummyRequest(object):\n    \"\"\"Dummy request class.\n    Used for storing user object and passing it to formatters.\n    \"\"\"\n\n    def __init__(self, user):\n        self.user = user\n","sub_path":"bnw/xmpp/xmpp_notifier.py","file_name":"xmpp_notifier.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"157494921","text":"# -*-coding: utf-8 -*-\nimport json\nimport datetime\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\nfrom sqlalchemy.orm import class_mapper, defer\n\n\nclass AlchemyEncoder(json.JSONEncoder):\n\n    def 
default(self, obj):\n if isinstance(obj.__class__, DeclarativeMeta):\n # an SQLAlchemy class\n fields = {}\n for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:\n data = obj.__getattribute__(field)\n try:\n json.dumps(data, ensure_ascii=False) # this will fail on non-encodable values, like other classes\n fields[field] = data\n except TypeError: # handle datetime values that json.dumps cannot encode\n if isinstance(data, datetime.datetime):\n fields[field] = data.isoformat()\n elif isinstance(data, datetime.date):\n fields[field] = data.isoformat()\n elif isinstance(data, datetime.timedelta):\n fields[field] = (datetime.datetime.min + data).time().isoformat()\n else:\n fields[field] = None\n # a json-encodable dict\n return fields\n return json.JSONEncoder.default(self, obj)\n\n\ndef defer_everything_but(entity, cols):\n m = class_mapper(entity)\n return [defer(k) for k in\n set(p.key for p\n in m.iterate_properties\n if hasattr(p, 'columns')).difference(cols)]\n\n\ndef object_to_dict(obj, found=None):\n if found is None:\n found = set()\n mapper = class_mapper(obj.__class__)\n columns = [column.key for column in mapper.columns]\n get_key_value = lambda c: (c, getattr(obj, c).isoformat()) if isinstance(getattr(obj, c), (datetime.datetime, datetime.date)) else (c, getattr(obj, c))\n out = dict(map(get_key_value, columns))\n for name, relation in mapper.relationships.items():\n if relation not in found:\n found.add(relation)\n related_obj = getattr(obj, name)\n if related_obj is not None:\n if relation.uselist:\n out[name] = [object_to_dict(child, found) for child in related_obj]\n else:\n out[name] = object_to_dict(related_obj, found)\n return out","sub_path":"expansion/AlchemyEncoder.py","file_name":"AlchemyEncoder.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"277416388","text":"import numpy as np\nimport torch\n\n\ndef pairwise_distances(x, y=None):\n '''\n Input: x is a Nxd matrix\n y is an optional Mxd matrix\n Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]\n if y is not given then use 'y=x'.\n i.e. 
dist[i,j] = ||x[i,:]-y[j,:]||^2\n '''\n x_norm = (x ** 2).sum(1).view(-1, 1) # square every element, sum each row, keep as a column vector so broadcasting yields an NxM grid\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y ** 2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n\n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n # Ensure diagonal is zero if x=y\n # if y is None:\n # dist = dist - torch.diag(dist.diag)\n return torch.clamp(dist, 0.0, np.inf)\n\n\ntest = np.ones((10, 10))\ntest[0][0] = 3\nprint(test)\nholder = pairwise_distances(torch.from_numpy(test)) # convert to a tensor first; the function uses torch ops\n","sub_path":"Playground.py","file_name":"Playground.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"558539520","text":"#!/usr/bin/env python\n\nimport sys\nimport numpy as np\nimport cv2 as cv\n\ndef elipse(img):\n\n\n hsv_min = np.array((0, 135, 123), np.uint8)\n hsv_max = np.array((255, 255, 255), np.uint8)\n\n #fn = './pictures/number2/img3.jpg'\n #img = cv.imread(fn)\n\n cv.imwrite('./pictures/hvost.jpg', img)\n\n points = []\n\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n thresh = cv.inRange(hsv, hsv_min, hsv_max)\n contours0, hierarchy = cv.findContours(thresh.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n for cnt in contours0:\n point = [0,0]\n if len(cnt) > 5: #10\n ellipse = cv.fitEllipse(cnt)\n\n x = int(ellipse[0][0])\n y = int(ellipse[0][1])\n\n point[0] = x\n point[1] = y\n points.append(point)\n\n cv.circle(img, (x, y), 2, (255, 0, 0), 2)\n\n cv.ellipse(img, ellipse, (0, 255, 0), 2)\n\n #cv.imshow('contours', img)\n cv.imwrite('./pictures/number3/img_with_point.jpg', img)\n\n #cv.waitKey()\n #cv.destroyAllWindows()\n\n return points\n\n\n\n\n","sub_path":"elipse.py","file_name":"elipse.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"257977957","text":"\n\"\"\"\nA sample showing how to make a Python script as an app.\n\"\"\"\n\n__version__ = \"0.0.8\"\n\n__copyright__ = \"Copyright 2015, Aldebaran Robotics\"\n__author__ = 'YOURNAME'\n__email__ = 'YOUREMAIL@aldebaran.com'\n\nimport json\nimport time\n\nimport stk.runner\nimport stk.events\nimport stk.services\nimport stk.logging\n\nKEY_TABLETSETATE = \"app-with-tablet/TabletState\"\n\nclass ALMyService(object):\n \"A sample standalone app, that demonstrates simple Python usage\"\n APP_ID = \"com.aldebaran.dx-team-presentation\"\n def __init__(self, qiapp):\n self.qiapp = qiapp\n self.events = stk.events.EventHelper(qiapp.session)\n self.s = stk.services.ServiceCache(qiapp.session)\n self.logger = stk.logging.get_logger(qiapp.session, self.APP_ID)\n\n def show(self, command):\n self.events.set(KEY_TABLETSETATE, json.dumps(command))\n\n def on_start(self):\n \"Ask to be touched, waits, and exits.\"\n self.show({\"title\": \"intro\"})\n self.s.ALTextToSpeech.say(\"This is the first page\")\n time.sleep(1.0)\n self.show({\"title\": \"other\"})\n self.s.ALTextToSpeech.say(\"This is another page\")\n time.sleep(1.0)\n self.show({\"title\": \"last\"})\n self.s.ALTextToSpeech.say(\"This is the last page, I'm done\")\n self.stop()\n\n def stop(self):\n \"Standard way of stopping the application.\"\n self.qiapp.stop()\n\n def on_stop(self):\n \"Cleanup\"\n self.show({})\n self.logger.info(\"Application finished.\")\n self.events.clear()\n\nif __name__ == \"__main__\":\n 
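# note (added): stk.runner, imported above, appears to bootstrap the qi application and drive on_start/on_stop\n 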
stk.runner.run_service(ALMyService)\n","sub_path":"templates/app-with-tablet/app/scripts/myservice.py","file_name":"myservice.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"265432246","text":"#!/bin/python3\n''' Python 3 script to recover scores after maps.db corruption in unnamed-sdvx-clone. '''\n\nimport sqlite3\n\ngood_db = sqlite3.connect( 'maps.db' )\nbad_db = sqlite3.connect( 'corrupt.db' )\n\nbc = bad_db.cursor()\nbc2 = bad_db.cursor()\ngc = good_db.cursor()\n\nbc.execute( 'select count(*) from difficulties' )\ncount = bc.fetchone()[0]\n\nbc.execute( 'select rowid, path from difficulties order by rowid' )\ntotal_scores = 0\n\nfor _ in range(count):\n b_diffid, path = bc.fetchone()\n gc.execute( 'select rowid from difficulties where path=:path', {'path': path} )\n g_diffid = gc.fetchone()[0]\n #print( 'map old %i to %i' %(b_diffid, g_diffid) )\n bc2.execute( 'select count(*) from scores where diffid=:diffid', {'diffid': b_diffid} )\n score_count = bc2.fetchone()[0]\n bc2.execute( 'select * from scores where diffid=:diffid', {'diffid': b_diffid} )\n for __ in range(score_count):\n score, crit, near, miss, gauge, gameflags, diffid, hitstats, timestamp = bc2.fetchone()\n new_score_row = ( score, crit, near, miss, gauge, gameflags, g_diffid, hitstats, timestamp )\n gc.execute( 'insert into scores values ( ?, ?, ?, ?, ?, ?, ?, ?, ? )', new_score_row )\n total_scores += 1\n #print( 'found score %i from time %i' %(score, timestamp) )\n\n\ngood_db.commit()\nprint( 'migrated %i scores successfully' %total_scores )\n\nbc.close()\nbc2.close()\ngc.close()\n","sub_path":"fix.py","file_name":"fix.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"500570300","text":"from SoundMusic.Music import Note\nfrom SoundMusic.EventExtraction import Event\nfrom SoundMusic.utils import Envelope as envl\nimport numpy as np\nfrom pysndfx import AudioEffectsChain as Fx\nimport librosa as lr\nimport math\nfrom math import sin, pi, floor, ceil\nfrom SoundMusic.utils.Random import maybe\nimport random\nfrom SoundMusic.utils.Wave import get_strong_freq, stitch_nwaves\n\nclass IInstrument:\n def add_event(self, event: Event): raise NotImplementedError()\n def play(self, note: Note, smpRt: int): raise NotImplementedError()\n def range(self): return (0, math.inf)\n\nclass MelodicSample(IInstrument):\n def __init__(self):\n self.event_map = {}\n\n def add_event(self, event: Event):\n p = event.get_pitch()\n if not p in self.event_map:\n self.event_map[p] = [event]\n else:\n self.event_map[p] += [event]\n\n def get_event(self, pitch):\n if pitch in self.event_map:\n return (random.choice(self.event_map[pitch]), pitch)\n else:\n c_pitch = min(self.event_map.keys(), key=lambda k: abs(k-pitch))\n return (random.choice(self.event_map[c_pitch]), c_pitch)\n\n def play(self, note: Note, smpRt: int) -> np.ndarray:\n event, c_pitch = self.get_event(note.pitch)\n ratio = (len(event.data) / smpRt) / note.duration\n ratio = np.clip(ratio, 0.5, 100)\n shift = note.pitch - c_pitch\n wave = lr.effects.harmonic(event.data)\n wave = (\n Fx()\n .pitch(shift * 100)\n .tempo(ratio)\n .highpass(lr.midi_to_hz(note.pitch))\n )(wave)\n wave = envl.adsr(len(wave))(wave)\n return wave\n\n def range(self):\n s = math.inf\n b = 0\n for p in self.event_map:\n s = min(p, s)\n b = max(p, b)\n return (s,b)\n\nclass Oscillator(IInstrument):\n def 
__init__(self):\n self.sample = None\n \n def add_event(self, event: Event):\n if maybe(0.5): self.sample = event.data\n\n def play(self, note: Note, smpRt: int):\n b_wave = np.array([])\n freq = lr.midi_to_hz(note.pitch)\n oscs = floor(note.duration * freq)\n for _ in range(oscs):\n l = round(0.005 * smpRt)\n p = random.randint(0, len(self.sample) - l)\n p_wave = self.sample[p:p+l]\n b_wave = np.concatenate((b_wave, p_wave))\n l = round(0.005 * smpRt)\n p = random.randint(0, len(self.sample) - l)\n wave = self.sample[p:p+l]\n wave = np.tile(wave, oscs)\n b_wave = (\n Fx()\n .gain(-10)\n )(b_wave)\n wave = wave + b_wave\n factor = (len(wave) / smpRt) / note.duration\n wave = (\n Fx()\n .speed(factor)\n .highpass(100)\n .lowpass(2000)\n )(wave)\n wave = envl.adsr(len(wave))(wave)\n return wave\n\nclass Granulator(IInstrument):\n def __init__(self):\n self.grans = []\n self.size = 300\n self.smoothing = 10\n self.limit = 100\n self.break_limit = 5\n self.gran_len = self.size + self.smoothing\n self.cached_range = None\n\n def add_event(self, event):\n n_grans = len(event.data) / self.gran_len\n grans = np.array_split(event.data, n_grans)\n self.grans += grans\n\n def gen_wave(self, dur, smpRt):\n n_grans = ceil(math.floor(dur * smpRt / self.size))\n grans = random.choices(self.grans, k=n_grans)\n random.shuffle(grans)\n return stitch_nwaves(grans, self.smoothing)\n \n def play(self, note: Note, smpRt):\n wave = None\n shift = math.inf\n for _ in range(self.limit):\n twave = self.gen_wave(note.duration, smpRt)\n tc_pitch = get_strong_freq(twave)\n if tc_pitch == - math.inf: continue\n tshift = note.pitch - tc_pitch\n if abs(tshift) < abs(shift):\n wave = twave\n shift = tshift\n if abs(shift) < 30: break\n wave = (\n Fx()\n .pitch(shift * 100)\n .highpass(lr.midi_to_hz(note.pitch))\n )(wave)\n wave = envl.adsr(len(wave))(wave)\n return wave\n\n def range(self):\n if self.cached_range == None:\n s = math.inf\n b = 0\n r = 0\n for _ in range(self.limit):\n p = get_strong_freq(self.gen_wave(random.uniform(0.5, 1), 22050))\n if p == - math.inf: continue\n if s < p < b: r += 1\n if r > self.break_limit: break\n s = min(s, p)\n b = max(b, p)\n self.cached_range = (s,b)\n return self.cached_range\n\nall_instruments = [\n MelodicSample,\n Oscillator,\n Granulator,\n]","sub_path":"old/SoundMusic/Instruments.py","file_name":"Instruments.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"483057438","text":"import urllib.parse\n\nimport requests\nfrom adminsortable.models import SortableMixin\nfrom django.db import models\nfrom django.db.models import Max\nfrom enumfields import EnumField\nfrom parler.models import TranslatableModel, TranslatedFields\n\nfrom utils.auth import BearerAuth\nfrom utils.models import SerializableMixin\n\nfrom .enums import ServiceType\nfrom .exceptions import MissingGDPRUrlException\n\n\ndef get_next_data_field_order():\n try:\n return AllowedDataField.objects.all().aggregate(Max(\"order\"))[\"order__max\"] + 1\n except TypeError:\n return 1\n\n\nclass AllowedDataField(TranslatableModel, SortableMixin):\n field_name = models.CharField(max_length=30, unique=True)\n translations = TranslatedFields(label=models.CharField(max_length=64))\n order = models.PositiveIntegerField(\n default=get_next_data_field_order, editable=False, db_index=True\n )\n\n class Meta:\n ordering = [\"order\"]\n\n def __str__(self):\n return self.safe_translation_getter(\"label\", 
super().__str__())\n\n\nclass Service(TranslatableModel):\n service_type = EnumField(\n ServiceType, max_length=32, blank=False, null=True, unique=True\n )\n name = models.CharField(max_length=200, blank=False, null=False, unique=True)\n translations = TranslatedFields(\n title=models.CharField(max_length=64),\n description=models.TextField(max_length=200, blank=True),\n )\n allowed_data_fields = models.ManyToManyField(AllowedDataField)\n created_at = models.DateTimeField(auto_now_add=True)\n gdpr_url = models.CharField(\n max_length=2000,\n blank=True,\n help_text=(\n \"Enter the URL of the service. Final URLs are generated by concatenating the url with the profile UUID.\"\n ),\n )\n gdpr_query_scope = models.CharField(\n max_length=200, blank=True, help_text=\"GDPR API query operation scope\"\n )\n gdpr_delete_scope = models.CharField(\n max_length=200, blank=True, help_text=\"GDPR API delete operation scope\"\n )\n implicit_connection = models.BooleanField(\n default=False,\n help_text=\"If enabled, this service doesn't require explicit service connections to profiles\",\n )\n\n class Meta:\n permissions = (\n (\"can_manage_profiles\", \"Can manage profiles\"),\n (\"can_view_profiles\", \"Can view profiles\"),\n (\"can_manage_sensitivedata\", \"Can manage sensitive data\"),\n (\"can_view_sensitivedata\", \"Can view sensitive data\"),\n (\n \"can_view_verified_personal_information\",\n \"Can view verified personal information\",\n ),\n )\n\n def save(self, *args, **kwargs):\n # Convenience for saving Services with only service_type and no name.\n # When service_type is removed from the code base, this should be\n # removed as well and every Service creation requires a name at that point.\n if not self.name and self.service_type:\n self.name = self.service_type.value\n\n return super().save(*args, **kwargs)\n\n def __str__(self):\n return self.safe_translation_getter(\"title\", super().__str__())\n\n def has_connection_to_profile(self, profile, allow_implicit=True):\n \"\"\"Has this service an implicit or explicit connection to a profile\n\n Checking can be limited only to explicit connection by setting\n allow_implicit to False. 
By default, implicit connection is checked.\"\"\"\n if allow_implicit and self.implicit_connection:\n return True\n\n return self.serviceconnection_set.filter(profile=profile).exists()\n\n\nclass ServiceClientId(models.Model):\n service = models.ForeignKey(\n Service, on_delete=models.CASCADE, related_name=\"client_ids\"\n )\n client_id = models.CharField(max_length=256, null=False, blank=False, unique=True)\n\n\nclass ServiceConnection(SerializableMixin):\n profile = models.ForeignKey(\n \"profiles.Profile\", on_delete=models.CASCADE, related_name=\"service_connections\"\n )\n service = models.ForeignKey(Service, on_delete=models.PROTECT)\n created_at = models.DateTimeField(auto_now_add=True)\n enabled = models.BooleanField(default=True)\n\n class Meta:\n unique_together = (\"profile\", \"service\")\n\n def __str__(self):\n return \"{} {} - {}\".format(\n self.profile.first_name, self.profile.last_name, self.service\n )\n\n serialize_fields = (\n {\"name\": \"service\", \"accessor\": lambda x: getattr(x, \"name\")},\n {\"name\": \"created_at\", \"accessor\": lambda x: x.strftime(\"%Y-%m-%d\")},\n )\n\n def download_gdpr_data(self, api_token: str):\n \"\"\"Download service specific GDPR data by profile.\n\n API token needs to be for a user that can access information for the related profile\n on the related GDPR API.\n \"\"\"\n if self.service.gdpr_url:\n url = urllib.parse.urljoin(self.service.gdpr_url, str(self.profile.pk))\n try:\n response = requests.get(url, auth=BearerAuth(api_token), timeout=5)\n response.raise_for_status()\n return response.json()\n except requests.RequestException:\n return {}\n return {}\n\n def delete_gdpr_data(self, api_token: str, dry_run=False):\n \"\"\"Delete service specific GDPR data by profile.\n\n API token needs to be for a user that can access information for the related profile\n on the related GDPR API.\n\n Dry run parameter can be used for asking the service if delete is possible.\n An exception will be raised by this method if deletion response from the\n service indicates an error or if GDPR related URLs have not been configured\n for the related service.\n \"\"\"\n data = {}\n if dry_run:\n data[\"dry_run\"] = True\n\n if self.service.gdpr_url:\n url = urllib.parse.urljoin(self.service.gdpr_url, str(self.profile.pk))\n response = requests.delete(\n url, auth=BearerAuth(api_token), timeout=5, data=data\n )\n response.raise_for_status()\n return True\n\n raise MissingGDPRUrlException(\n f\"Service {self.service.name} does not define a URL for GDPR removal.\"\n )\n","sub_path":"services/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"122398527","text":"\nimport math, random\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport visdom\nimport time\nimport os\n\nvis = visdom.Visdom(port = 8097)\nimport torch.multiprocessing as mp\n#win_4 = vis.line(Y=torch.tensor([0]),opts=dict(title='reward'))\n\n\nimport time\n\nttime= time.time()\ndef time_check(num=0):\n global ttime\n print(f'{num} time:{time.time()-ttime}')\n ttime = time.time()\n\n\"\"\"\n+double\n+dueling\n+episodic mem\n+nstep\n+per\n+image version\n+frame_stack\n+RND\n+lstm\n\"\"\"\nmax_shared_q_size = 5\nframe_stack = 1\nn_step = 5\nPER_alpha = 0.9 # 0 is uniform per\ncount_episode = False\nRND_const = 0\nstart_frame = 1000\nnum_frames = 50000\nbatch_size 
=32\nvis_render=True\nEPS_CONST = 1\nlr = 0.0006\nrnd_lr = 0.00001\nburn_in_len = 5\nmem_size = 20000\nseq_len = 7\nenv_id = 'CartPole-v0'\n\n#env = gym.make(env_id)\ncnn_enable=True\ns_dim = 1*frame_stack\na_dim = 2\nstate_shape = (1,1,84,84)\nimport torchvision\ntogray = torchvision.transforms.Grayscale()\ntoten = torchvision.transforms.ToTensor()\nresize = torchvision.transforms.Resize((84,84))\ntopil = torchvision.transforms.ToPILImage()\ndef obs_preproc(x):\n xten = toten(togray(resize(topil(x))))\n return xten.reshape(state_shape)\n\nclass env_cover():\n def __init__(self,env_id):\n self.env = gym.make(env_id)\n def reset(self):\n ss = self.env.reset()\n #ss = np.delete(ss,[1,3])\n return torch.from_numpy(ss).float().view(1,s_dim).to(dev)\n #return obs_preproc(env.render(mode='rgb_array')).to(dev)\n def step(self,act):\n ss,rr,dd,_ = self.env.step(act)\n #ss = np.delete(ss,[1,3])\n return torch.from_numpy(ss).float().view(1,s_dim).to(dev),rr,dd,0\n\n def close(self):\n self.env.close()\n\n\ncnn_enable = False\nvis_render=False\ns_dim = 2\nstate_shape = (1,1,s_dim)\na_dim = 3\n\nenv_id = 'MountainCar-v0'\n\n\n\nenv = env_cover(env_id)\n\n\nuse_cuda = False\nuse_cuda = torch.cuda.is_available()\ndev = torch.device('cuda' if use_cuda else 'cpu')\nprint(dev)\n\nimport torch.utils.data\n\nfrom collections import deque\n\nclass ReplayBuffer():\n def __init__(self,capacity, mainQ, targetQ, shared_state):\n self.win_bar = vis.bar(X=torch.rand([10]))\n self.win_bar_td = vis.bar(X=torch.rand([10]))\n\n self.count = 0\n self.capacity = capacity\n self.buffer = deque(maxlen= capacity)\n self.mainQ = mainQ\n self.targetQ= targetQ\n self.shared_state = shared_state\n def push(self, data ):\n \n# [[state ,action,reward,gamma,ireward,igamma ],state_mem]\n with torch.no_grad():\n state = data[0].to(dev)\n action = data[1].to(dev)\n reward = data[2].to(dev)\n gamma = data[3].to(dev)\n ireward = data[4].to(dev)\n igamma = data[5].to(dev)\n self.mainQ.reset_state()\n self.targetQ.reset_state()\n mhx, mcx = self.mainQ.get_state()\n thx, tcx = self.targetQ.get_state()\n b_len = state.size(0)\n \n td_loss, state_mem = calc_td(self.mainQ,self.targetQ,state,action,reward,gamma,ireward,igamma,\n mhx.to(dev),mcx.to(dev),thx.to(dev),tcx.to(dev), \n b_len-n_step, stored_state=True)\n \n \n \n \n self.count += data[0].size(0) if not count_episode else 1\n priority = []\n eta = 0.9\n td_loss = td_loss.view(-1)\n for i in range(len(td_loss)-seq_len):\n p = (eta*td_loss[i:i+seq_len].max()+(1.-eta)*td_loss[i:i+seq_len].mean())**PER_alpha\n priority.append(p)\n \n priority = torch.stack(priority).view(-1)\n# td_loss_total = sum(priority)/len(priority)\n td_loss_total = priority.max()\n with self.shared_state[\"vis\"].get_lock():\n vis.bar(X=td_loss.cpu().view(-1,1), win= self.win_bar_td, opts=dict(title='push td_loss'))\n self.buffer.append([data,td_loss,priority,td_loss_total,state_mem])\n while self.count > self.capacity:\n self.count -= self.buffer.popleft()[0][0].size(0) if not count_episode else 1\n\n def sample(self,batch_size):\n weight = [self.buffer[i][3] for i in range(len(self.buffer))]\n batch_epi = list(torch.utils.data.WeightedRandomSampler(torch.stack(weight),batch_size, True))\n s = []\n for episode_idx in batch_epi:\n episode = self.buffer[episode_idx][0]\n priority = self.buffer[episode_idx][2]\n state_mem = self.buffer[episode_idx][4]\n \n ii = list(torch.utils.data.WeightedRandomSampler(priority , 1, True))[0]\n \n start = ii - burn_in_len if ii-burn_in_len>=0 else 0\n brun_state = 
episode[0][start:ii].to(dev)\n mhx = state_mem[start][0].to(dev)\n mcx = state_mem[start][1].to(dev)\n thx = state_mem[start][2].to(dev)\n tcx = state_mem[start][3].to(dev)\n \n state =episode[0][ii:ii+seq_len+n_step] \n action =episode[1][ii:ii+seq_len+n_step]\n reward =episode[2][ii:ii+seq_len+n_step]\n gamma =episode[3][ii:ii+seq_len+n_step] \n ireward =episode[4][ii:ii+seq_len+n_step]\n igamma =episode[5][ii:ii+seq_len+n_step]\n \n s.append([episode_idx,ii,state,action,reward,gamma,ireward,igamma,mhx,mcx, thx,tcx ,brun_state])\n\n epi_idx,seq_idx,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, burn_state = zip(*s)\n \n shape = (batch_size,-1)\n state = torch.cat(state,1).to(dev)\n action = torch.cat(action,1).to(dev)\n reward = torch.cat(reward,1).to(dev)\n gamma = torch.cat(gamma,1).to(dev)\n ireward = torch.cat(ireward,1).to(dev)\n igamma = torch.cat(igamma,1).to(dev)\n \n epi_idx = torch.LongTensor(epi_idx).reshape(shape).to(dev)\n seq_idx = torch.LongTensor(seq_idx).reshape(shape).to(dev)\n \n mhx = torch.cat(mhx,0).reshape((batch_size,1,-1 )).to(dev)\n mcx = torch.cat(mcx,0).reshape((batch_size,1,-1 )).to(dev)\n thx = torch.cat(thx,0).reshape((batch_size,1,-1 )).to(dev)\n tcx = torch.cat(tcx,0).reshape((batch_size,1,-1 )).to(dev)\n \n return epi_idx,seq_idx,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, burn_state\n \n def priority_update(self,epi_idx,seq_idx,loss):\n td_array = self.buffer[epi_idx][1]\n# priority = self.buffer[epi_idx][2]\n# total_priority = self.buffer[epi_idx][3]\n \n for i in range(seq_len):\n td_array[seq_idx+i] = loss[i].abs()\n# for i in range(seq_len):\n# priority[seq_idx+i] = loss[i]\n \n start = seq_idx-seq_len \n start = start if start>=0 else 0\n end = seq_idx+seq_len\n end = end if end<= len(td_array)-seq_len else len(td_array)-seq_len\n\n eta = 0.9\n for i in range(start, end):\n p = (eta*td_array[i:i+seq_len].max()+(1.-eta)*td_array[i:i+seq_len].mean())**PER_alpha\n self.buffer[epi_idx][2][i] = p.view(-1)\n self.buffer[epi_idx][3] = sum(self.buffer[epi_idx][2])/len(self.buffer[epi_idx][2])\n bar = []\n for i in range(len(self.buffer)):\n bar.append(self.buffer[i][3])\n\n with self.shared_state[\"vis\"].get_lock():\n vis.bar(X=torch.stack(bar), win= self.win_bar, opts=dict(title='total priority'))\n \n def __len__(self):\n return self.count\n def __repr__(self):\n return '\\rmem size: {}/{} ' .format(self.count, self.capacity)\n\n\n\nclass Flatten(nn.Module):\n def forward(self,inputs):\n return inputs.view(inputs.size(0),-1)\n\nclass DQN(nn.Module):\n def __init__(self, num_inputs, num_outputs, dev ):\n super(DQN,self).__init__()\n if cnn_enable:\n size=7*7*64\n self.feature = nn.Sequential(\n nn.Conv2d(num_inputs,64,8,stride= 4),nn.ReLU(),\n nn.Conv2d(64,64,4,stride=2),nn.ReLU(),\n nn.Conv2d(64,64,3,stride=1),nn.ReLU(),\n Flatten(),\n nn.Linear(size,128),nn.ReLU(),\n )\n else :\n self.feature = nn.Sequential(\n nn.Linear(s_dim,128),nn.ReLU(),\n )\n\n self.lstm_size = 128\n self.lstm = nn.LSTMCell(self.lstm_size, self.lstm_size)\n \n self.advantage = nn.Sequential(\n nn.Linear(self.lstm_size,128),nn.ReLU(),\n nn.Linear(128,128),nn.ReLU(),\n nn.Linear(128,num_outputs),\n )\n self.value = nn.Sequential(\n nn.Linear(self.lstm_size,128),nn.ReLU(),\n nn.Linear(128,128),nn.ReLU(),\n nn.Linear(128,1),\n )\n self.iadvantage = nn.Sequential(\n nn.Linear(self.lstm_size,128),nn.ReLU(),\n nn.Linear(128,128),nn.ReLU(),\n nn.Linear(128,num_outputs),\n )\n self.ivalue = nn.Sequential(\n nn.Linear(self.lstm_size,128),nn.ReLU(),\n 
nn.Linear(128,128),nn.ReLU(),\n nn.Linear(128,1),\n )\n self.hx = None\n self.cx = None\n\n self.dev = dev\n \n def forward(self,x):\n \n x = self.feature(x)\n \n if self.hx is None: \n self.hx = torch.zeros((x.size(0) ,self.lstm_size)).to(self.dev)\n self.cx = torch.zeros((x.size(0) ,self.lstm_size)).to(self.dev)\n \n self.hx, self.cx = self.lstm(x , (self.hx, self.cx))\n \n x= self.hx\n \n adv = self.advantage(x)\n val = self.value(x)\n iadv = self.iadvantage(x)\n ival = self.ivalue(x)\n \n Q = val + adv - adv.mean()\n iQ = ival + iadv - iadv.mean()\n Qa = Q.argmax(1).view(-1,1)\n iQa = iQ.argmax(1).view(-1,1)\n return Q,Qa,iQ,iQa\n\n \n def set_state(self, hx, cx):\n self.hx = hx\n self.cx = cx\n \n def reset_state(self):\n self.hx = None\n self.cx = None\n\n def get_state(self):\n if self.hx is None:\n return torch.zeros((1 ,self.lstm_size)).to(self.dev), torch.zeros((1 ,self.lstm_size)).to(self.dev)\n else:\n return self.hx.detach(), self.cx.detach()\n \n\nclass RND(nn.Module):\n def __init__(self,num_inputs):\n super(RND,self).__init__()\n self.target= nn.Sequential(\n nn.Conv2d(num_inputs*2,64,8,stride=4),nn.ReLU(),\n nn.Conv2d(64,64,4,stride=2),nn.ReLU(),\n nn.Conv2d(64,64,3,stride=1),nn.ReLU(),\n Flatten(),\n nn.Linear(64*7*7,128),nn.ReLU(),\n nn.Linear(128,128),\n )\n self.predictor = nn.Sequential(\n nn.Conv2d(num_inputs*2,64,8,stride=4),nn.ReLU(),\n nn.Conv2d(64,64,4,stride=2),nn.ReLU(),\n nn.Conv2d(64,64,3,stride=1),nn.ReLU(),\n Flatten(),\n nn.Linear(64*7*7,128),nn.ReLU(),\n nn.Linear(128,128),nn.ReLU(),\n nn.Linear(128,128),\n )\n for m in self.modules():\n if isinstance(m,nn.Linear):\n nn.init.orthogonal_(m.weight,np.sqrt(2))\n m.bias.data.zero_()\n for param in self.target.parameters():\n param.requires_grad =False\n\n def forward(self, obs, next_obs):\n Tobs = torch.cat([obs,next_obs],dim=1)\n target_feature = self.target(Tobs)\n predict_feature = self.predictor(Tobs)\n return predict_feature*RND_const, target_feature*RND_const\n\n\n\n\n\n\n\n\ndef update_target(tar,cur):\n tar.load_state_dict(cur.state_dict())\n\n\ndef calc_td(main_model,target_model,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, story_len, stored_state =False): \n y_t_hat = []\n iy_t_hat = []\n state_mem = []\n with torch.no_grad():\n main_model.set_state(mhx,mcx)\n target_model.set_state(thx,tcx)\n if stored_state:\n mhx,mcx = main_model.get_state()\n thx,tcx = target_model.get_state()\n state_mem.append([mhx,mcx,thx,tcx])\n \n for i in range(n_step):\n _,_,_,_ = main_model(state[i])\n _,_,_,_ = target_model(state[i])\n if stored_state:\n mhx,mcx = main_model.get_state()\n thx,tcx = target_model.get_state()\n state_mem.append([mhx,mcx,thx,tcx])\n for i in range(story_len):\n qv,_,iqv,_ = main_model(state[i+n_step])\n _,tqa,_,tiqa = target_model(state[i+n_step])\n if stored_state:\n mhx,mcx = main_model.get_state()\n thx,tcx = target_model.get_state()\n state_mem.append([mhx,mcx,thx,tcx])\n y_t_hat.append(reward[i] + (gamma[i+n_step]**n_step)*qv.gather(1,tqa))\n iy_t_hat.append(ireward[i] + (igamma[i+n_step]**n_step)*iqv.gather(1,tiqa))\n \n losses=[]\n main_model.reset_state()\n target_model.reset_state()\n main_model.set_state(mhx,mcx)\n target_model.set_state(thx,tcx)\n for i in range(story_len):\n q,_,iq,_ = main_model(state[i])\n td = q.gather(1,action[i]) - y_t_hat[i]\n itd = iq.gather(1,action[i]) - iy_t_hat[i]\n losses.append(td+itd)\n \n return torch.cat(losses,1).abs(), state_mem\n \n\n\n\n\ndef actor_process(a_id,num_frames,shared_state,shared_queue,block=True, eps=0.1):\n 
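# actor loop (added note): each actor rolls out episodes with epsilon-greedy actions, folds rewards into n-step returns at episode end, and pushes the whole sequence to the shared queue for the learner\n 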
print(f'#{a_id} start')\n win_epsil = vis.line(Y=torch.tensor([0]),opts=dict(title='epsilon'+str(a_id)))\n win_r = vis.line(Y=torch.tensor([0]),opts=dict(title='reward'+str(a_id)))\n win_exp_q = vis.line(Y=torch.tensor([0]),opts=dict(title='exp_q'+str(a_id)))\n\n \n mainQ = DQN(s_dim, a_dim, dev ).to(dev)\n rnd_model = RND(s_dim).to(dev)\n mainQ.load_state_dict(shared_state[\"mainQ\"].state_dict())\n \n episode_reward=0\n local_mem = []\n epsilon = 1\n state_mem = []\n done = True\n gamma = 0.997\n state = env.reset()\n q_val=[]\n for frame_idx in range(num_frames):\n if done:\n if len(local_mem)!=0:\n with shared_state[\"vis\"].get_lock():\n vis.line(X=torch.tensor([frame_idx]), Y=torch.tensor([episode_reward]), win = win_r, update='append')\n # vis.line(X=torch.tensor([frame_idx]), Y=torch.tensor([epsilon]), win = win_epsil, update='append')\n vis.line(Y=torch.cat(q_val,0), win= win_exp_q, opts=dict(title='exp_q'+str(a_id))) \n for i in range(n_step):\n local_mem.append([torch.zeros(state.size()).to(dev),0,0,0,0,0])\n \n # for i in range(len(local_mem)-n_step):\n # local_mem[i][5] = 0.99 if local_mem[i][3]!=0 else 0 \n # state = local_mem[i][0]\n # next_state = local_mem[i+n_step][0]\n # \n ## state = torch.cat([local_mem[j if j>=0 else 0][0] for j in range(i-frame_stack+1,i+1)],1)\n ## next_state = torch.cat([local_mem[j if j>=0 else 0][0] for j in range(i-frame_stack+1+n_step,i+1+n_step)],1)\n # pred , targ = rnd_model(state.to(dev),next_state.to(dev))\n # i_reward = ((pred-targ)**2).mean().item()\n # local_mem[i][4] = i_reward\n \n for i in range(len(local_mem)-n_step):\n local_mem[i][2] = sum([local_mem[i+j][2] *(local_mem[i+j][3]**j) for j in range(n_step)])\n # local_mem[i][4] = sum([local_mem[i+j][4] *(0.99**j) for j in range(n_step)])\n \n # ll = []\n # for i in range(len(local_mem)-n_step):\n # ll.append(local_mem[i][4])\n # win_ir = vis.line(Y=torch.tensor(ll),win= win_ir)\n with torch.no_grad():\n mainQ.reset_state()\n# targetQ.reset_state()\n mhx,mcx= mainQ.get_state()\n# thx,tcx= targetQ.get_state()\n state,action,reward,gamma,ireward,igamma = zip(*local_mem)\n \n b_len = len(local_mem)\n state = torch.stack(state)\n action = torch.LongTensor(action).reshape((b_len,1,1))\n reward = torch.Tensor(reward).reshape((b_len,1,1))\n gamma = torch.Tensor(gamma).reshape((b_len,1,1))\n ireward = torch.Tensor(ireward).reshape((b_len,1,1))\n igamma = torch.Tensor(igamma).reshape((b_len,1,1))\n \n blocking = True if shared_queue.qsize()>max_shared_q_size and block else False\n shared_queue.put([state.cpu() ,action,reward,gamma,ireward,igamma ],block=blocking)\n \n \n while True:\n with shared_state[\"wait\"].get_lock():\n if shared_state[\"wait\"].value > 0:\n shared_state[\"wait\"].value -=1\n break\n time.sleep(0.01)\n \n if block == False:\n return 0\n \n state = env.reset()\n episode_reward=0\n gamma = 0.997\n local_mem = []\n state_mem = []\n mainQ.reset_state()\n# targetQ.reset_state()\n q_val = []\n \n \n \n \n # epsilon= 0.01**(EPS_CONST*frame_idx/num_frames)\n epsilon= eps\n \n with torch.no_grad():\n mhx,mcx = mainQ.get_state()\n# thx,tcx = targetQ.get_state()\n# state_mem.append([mhx,mcx,thx,tcx])\n# state_mem.append([mhx,mcx])\n qv,qa,iqv,iqa = mainQ(state)\n# _,_,_,_ = targetQ(state)\n \n action = qa.item() if random.random() > epsilon else random.randrange(a_dim)\n \n q_val.append(qv.detach())\n# if vis_render:\n# vis.image(state.view(84,84),win = win_img)\n \n next_state , reward, done ,_ = env.step(action)\n local_mem.append([state, action ,reward, gamma, 0 , 0])\n \n 
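# advance to the next observation; the two zeros appended above are placeholders for the intrinsic reward/gamma fields filled in later\n 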
state = next_state\n episode_reward += reward\n \n \n if shared_state[\"update\"][a_id]:\n mainQ.load_state_dict(shared_state[\"mainQ\"].state_dict())\n# targetQ.load_state_dict(shared_state[\"targetQ\"].state_dict())\n shared_state[\"update\"][a_id]=False\n \n print('actor_update',mainQ.value[0].weight[0][0:5].detach())\n \n \n print('done')\n env.close()\n \n\n \n \ndef learner_process(max_id,num_frames,shared_state,shared_queue,block=True):\n try:\n win_ir = vis.line(Y=torch.tensor([0]),opts=dict(title='ireward'))\n win_l0 = vis.line(Y=torch.tensor([0]),opts=dict(title='loss'))\n win_l1 = vis.line(Y=torch.tensor([0]),opts=dict(title='rnd_loss'))\n \n mainQ = DQN(s_dim, a_dim, dev ).to(dev)\n targetQ = DQN(s_dim, a_dim, dev ).to(dev)\n rnd_model = RND(s_dim).to(dev)\n \n mainQ.load_state_dict(shared_state[\"mainQ\"].state_dict())\n targetQ.load_state_dict(shared_state[\"targetQ\"].state_dict())\n \n optimizer = optim.Adam(mainQ.parameters(),lr)\n rnd_optimizer = optim.Adam(rnd_model.parameters(),rnd_lr)\n \n \n replay_buffer = ReplayBuffer(mem_size,mainQ , targetQ,shared_state)\n def soft_update(target_model, model, tau):\n for target_param, param in zip(target_model.parameters(), model.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)\n def update():\n epi_idx,seq_idx,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, burn_state = replay_buffer.sample(batch_size)\n \n burned_hx = []\n burned_cx = []\n burned_thx = []\n burned_tcx = []\n with torch.no_grad():\n for i in range(batch_size):\n mainQ.reset_state()\n targetQ.reset_state()\n \n mainQ.set_state(mhx[i],mcx[i])\n targetQ.set_state(thx[i],tcx[i])\n \n for j in range(len(burn_state[i])):\n _,_,_,_ = mainQ(burn_state[i][j])\n _,_,_,_ = targetQ(burn_state[i][j])\n \n t_mhx,t_mcx = mainQ.get_state()\n burned_hx.append(t_mhx)\n burned_cx.append(t_mcx)\n \n t_thx,t_tcx = targetQ.get_state()\n burned_thx.append(t_thx)\n burned_tcx.append(t_tcx)\n \n mhx = torch.cat(burned_hx,0).to(dev)\n mcx = torch.cat(burned_cx,0).to(dev)\n thx = torch.cat(burned_thx,0).to(dev)\n tcx = torch.cat(burned_tcx,0).to(dev)\n \n loss,_ = calc_td(mainQ,targetQ,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx,seq_len) \n optimizer.zero_grad()\n loss.pow(2).mean().backward()\n optimizer.step()\n # pm,tm = rnd_model(state,nstate)\n # rnd_loss = ((pm-tm)**2).mean()\n # rnd_optimizer.zero_grad()\n # rnd_loss.backward()\n # rnd_optimizer.step()\n \n for i in range(len(epi_idx)):\n replay_buffer.priority_update(epi_idx[i],seq_idx[i],loss[i].detach())\n \n return loss.pow(2).mean().item(),0\n \n # if len(replay_buffer)==0:\n if block==False:\n if shared_queue.qsize()<2 :\n print('return shared q size > 2 ')\n return 0\n data = shared_queue.get(block=True)\n replay_buffer.push(data)\n \n \n while len(replay_buffer) < start_frame and block:\n \n data = shared_queue.get(block=True)\n replay_buffer.push(data)\n print(repr(replay_buffer),end='\\r')\n \n \n for frame_idx in range(num_frames):\n print(repr(replay_buffer),end='\\r')\n if shared_queue.qsize()!=0:\n# while shared_queue.qsize() != 0:\n data = shared_queue.get()\n replay_buffer.push(data)\n \n loss, rnd_loss = update()\n print(f'#learner l:{loss:.5f}')\n with shared_state[\"vis\"].get_lock():\n vis.line(X=torch.tensor([frame_idx]),Y=torch.tensor([loss]),win=win_l0,update ='append')\n vis.line(X=torch.tensor([frame_idx]),Y=torch.tensor([rnd_loss]),win=win_l1,update ='append')\n \n with shared_state[\"wait\"].get_lock():\n shared_state[\"wait\"].value 
+=1\n \n \n if frame_idx % 4 == 0:\n # if random.random() < 1/10 :\n soft_update(targetQ,mainQ,0.3)\n# update_target(targetQ,mainQ)\n if frame_idx % 3 == 0:\n # if random.random() < 1/20 :\n shared_state[\"mainQ\"].load_state_dict(mainQ.state_dict())\n shared_state[\"targetQ\"].load_state_dict(targetQ.state_dict())\n for i in range(max_id):\n shared_state[\"update\"][i]=True\n if block == False:\n return 0\n except Exception as e: \n print(e)\n \n\n\nif __name__ == '__main__':\n os.system('cls')\n \n vis.close()\n \n num_processes = 2\n \n shared_queue = mp.Queue()\n shared_state = dict()\n \n shared_state[\"mainQ\"] = DQN(s_dim, a_dim, dev ).share_memory()\n shared_state[\"targetQ\"] = DQN(s_dim, a_dim, dev ).share_memory()\n \n shared_state[\"update\"] = mp.Array('i', [0 for i in range(num_processes)])\n# shared_state[\"wait\"] = mp.Array('i', [0 for i in range(num_processes)])\n shared_state[\"vis\"] = mp.Value('i',0)\n shared_state[\"wait\"] = mp.Value('i',0)\n shared_state[\"wait\"].value = start_frame//10\n \n \n# for i in range(100):\n# actor_process(0,num_frames,shared_state,shared_queue,False)\n# actor_process(0,num_frames,shared_state,shared_queue,False)\n# learner_process(1,num_frames,shared_state,shared_queue,False)\n# time.sleep(10)\n## \n proc_list = []\n proc_list.append(mp.Process(target=learner_process, args=(num_processes,num_frames,shared_state,shared_queue)))\n eps = [0.1,0.2,0.4,0.3,0.2,0.6,0.4,0.6,0.2,0.4]\n for i in range(num_processes):\n proc_list.append( mp.Process(target=actor_process, args=(i,num_frames,shared_state,shared_queue,eps[i])) )\n\n\n for proc in proc_list:\n proc.start()\n \n try:\n for proc in proc_list:\n proc.join()\n except:\n print('qclose')\n shared_queue.close()\n print('process close')\n for proc in proc_list:\n proc.terminate()\n \n \n shared_queue.join_thread()\n \n ","sub_path":"ddqn_nstep_per_verup_lstm.py","file_name":"ddqn_nstep_per_verup_lstm.py","file_ext":"py","file_size_in_byte":25313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"526754100","text":"import pandas\nimport pickle\nimport io\nimport csv\nfrom sklearn import linear_model\n\n\ndef analyze_qbs():\n '''Function that analyzes the QB data and creates a model from it.'''\n players = {}\n teams = {}\n opponents = {}\n with open('data/qb_data.csv') as f:\n reader = csv.DictReader(f, delimiter=',')\n for row in reader:\n players[row['name']] = 0\n teams[row['team']] = 0\n opponents[row['opponent']] = 0\n f.close()\n\n with io.open('computed/qb_data.csv', 'w', newline='') as stat_file:\n stat_writer = csv.writer(stat_file)\n cats = ['year', 'week']\n for k, v in players.items():\n cats.append(k)\n for k, v in teams.items():\n cats.append(k)\n for k, v in opponents.items():\n cats.append(k)\n cats.extend(['home', 'pa_att', 'pa_cmp', 'pa_yds', 'pa_tds', 'pa_int',\n 'pa_2pts', 'ru_att', 'ru_yds', 'ru_tds', 'ru_2pts',\n 'rec_receptions', 'rec_yds', 'rec_tds', 'rec_2pts',\n 'fumb', 'fantasy points'])\n stat_writer.writerow(cats)\n with open('data/qb_data.csv') as f:\n reader = csv.DictReader(f, delimiter=',')\n for row in reader:\n players[row['name']] = 1\n teams[row['team']] = 1\n opponents[row['opponent']] = 1\n stats = [row['year'], row['week']]\n for k, v in players.items():\n stats.append(v)\n for k, v in teams.items():\n stats.append(v)\n for k, v in opponents.items():\n stats.append(v)\n stats.extend([row['home'], row['pa_att'], row['pa_cmp'],\n row['pa_yds'], row['pa_tds'], row['pa_int'],\n row['pa_2pts'], 
row['ru_att'], row['ru_yds'],\n row['ru_tds'], row['ru_2pts'],\n row['rec_receptions'], row['rec_yds'],\n row['rec_tds'], row['rec_2pts'], row['fumb'],\n row['fantasy points']])\n stat_writer.writerow(stats)\n players[row['name']] = 0\n teams[row['team']] = 0\n opponents[row['opponent']] = 0\n\n f.close()\n stat_file.close()\n # loading the data as a panda\n df = pandas.read_csv('computed/qb_data.csv', delimiter=\",\")\n\n # getting the dvs\n labels = ['pa_yds', 'pa_tds', 'pa_int', 'pa_2pts', 'ru_yds', 'ru_tds',\n 'ru_2pts', 'rec_receptions', 'rec_yds', 'rec_tds', 'rec_2pts',\n 'fumb']\n\n # getting the ivs\n features = df.drop(['year', 'pa_att', 'pa_cmp', 'pa_yds', 'pa_tds',\n 'pa_int', 'pa_2pts', 'ru_att', 'ru_yds', 'ru_tds',\n 'ru_2pts', 'rec_receptions', 'rec_yds', 'rec_tds',\n 'rec_2pts', 'fumb', 'fantasy points'], axis=1)\n\n for label in labels:\n dv = df[label]\n # defining the linear regression estimator for each iv, and\n # training it with the data\n regr = linear_model.LinearRegression()\n regr.fit(features, dv)\n\n # serializing the model to a file\n pickle.dump(regr, open(\"models/qb_\" + label + \".pkl\", \"wb\"))\n","sub_path":"analyzers/analyze_qbs.py","file_name":"analyze_qbs.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"614042971","text":"'''\n@Author: your name\n@Date: 2020-06-15 09:38:08\n@LastEditTime: 2020-06-15 09:55:04\n@LastEditors: Please set LastEditors\n@Description: In User Settings Edit\n@FilePath: /opt/websocket/websocketClient_2.py\n'''\n#!/usr/bin/env python\n\nimport ssl\nimport time\nimport asyncio\nimport pathlib\nimport websockets\n\n#ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\nssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\nlocalhost_pem = pathlib.Path(__file__).with_name(\"server.pem\")\nssl_context.load_verify_locations(localhost_pem)\n\nasync def hello():\n #uri = \"wss://10.255.175.109:4500/api/v1/device/ptd/remote_access/test_1\"\n uri = \"wss://10.255.175.109:4500/api/internal/remote_access/device/ptd/data/test_1/123123\"\n async with websockets.connect(\n uri, ssl=ssl_context\n ) as websocket:\n print(\"open\")\n name = input(\"What's your name? 
\")\n print(f\"> {name}\")\n await websocket.send(name)\n greeting = await websocket.recv()\n print(greeting)\n \n print(\"close\")\n time.sleep(100)\n \n\nasyncio.get_event_loop().run_until_complete(hello())\nprint(dir(asyncio.get_event_loop()))\n","sub_path":"websocket/websocketClient_2.py","file_name":"websocketClient_2.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"515445622","text":"# coding=utf-8\n\nfrom enum import Enum, IntEnum\nfrom os import environ\n\nfrom flask_wtf import CSRFProtect\n\n\nclass ErrorCodes(IntEnum):\n unknown_route = 0\n unauthorized = 1\n invalid_api_key = 2\n incorrect_parameters = 3\n bad_data_format = 4\n\n\nclass ValidationTypes(Enum):\n json = \"json\"\n params = \"params\"\n\n\nOWNER_ROLE = 267627879762755584\nADMIN_ROLE = 267628507062992896\nMODERATOR_ROLE = 267629731250176001\nDEVOPS_ROLE = 409416496733880320\nHELPER_ROLE = 267630620367257601\n\nALL_STAFF_ROLES = (OWNER_ROLE, ADMIN_ROLE, MODERATOR_ROLE, DEVOPS_ROLE)\n\nSERVER_ID = 267624335836053506\n\nDISCORD_API_ENDPOINT = \"https://discordapp.com/api\"\n\nDISCORD_OAUTH_REDIRECT = \"/auth/discord\"\nDISCORD_OAUTH_AUTHORIZED = \"/auth/discord/authorized\"\nDISCORD_OAUTH_ID = environ.get('DISCORD_OAUTH_ID', '')\nDISCORD_OAUTH_SECRET = environ.get('DISCORD_OAUTH_SECRET', '')\nDISCORD_OAUTH_SCOPE = 'identify email guilds.join'\nOAUTH_DATABASE = \"oauth_data\"\n\nPREFERRED_URL_SCHEME = environ.get(\"PREFERRED_URL_SCHEME\", \"https\") # Change this in testing to \"http\"\n\nERROR_DESCRIPTIONS = {\n # 5XX\n 500: \"The server encountered an unexpected error ._.\",\n 501: \"Woah! You seem to have found something we haven't even implemented yet!\",\n 502: \"This is weird, one of our upstream servers seems to have experienced an error.\",\n 503: \"Looks like one of our services is down for maintenance and couldn't respond to your request.\",\n 504: \"Looks like an upstream server experienced a timeout while we tried to talk to it!\",\n 505: \"You're using an old HTTP version. It might be time to upgrade your browser.\",\n # 4XX\n 400: \"You sent us a request that we don't know what to do with.\",\n 401: \"Nope! You'll need to authenticate before we let you do that.\",\n 403: \"No way! You're not allowed to do that.\",\n 404: \"We looked, but we couldn't seem to find that page.\",\n 405: \"That's a real page, but you can't use that method.\",\n 408: \"We waited a really long time, but never got your request.\",\n 410: \"This used to be here, but it's gone now.\",\n 411: \"You forgot to tell us the length of the content.\",\n 413: \"No way! That payload is, like, way too big!\",\n 415: \"The thing you sent has the wrong format.\",\n 418: \"I'm a teapot, I can't make coffee. 
(._.)\",\n 429: \"Please don't send us that many requests.\"\n}\n\n# PaperTrail logging\nPAPERTRAIL_ADDRESS = environ.get(\"PAPERTRAIL_ADDRESS\") or None\nPAPERTRAIL_PORT = int(environ.get(\"PAPERTRAIL_PORT\") or 0)\n\n# DataDog logging\nDATADOG_ADDRESS = environ.get(\"DATADOG_ADDRESS\") or None\nDATADOG_PORT = int(environ.get(\"DATADOG_PORT\") or 0)\n\n# CSRF\n\nCSRF = CSRFProtect()\n","sub_path":"pysite/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"329133470","text":"\"\"\"update Weekly with week number and approved attr\n\nRevision ID: 44d96416e314\nRevises: 0e56107ed903\nCreate Date: 2017-07-20 16:06:46.009417\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '44d96416e314'\ndown_revision = '0e56107ed903'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('weekly', sa.Column('approved', sa.Boolean(), nullable=True))\n op.add_column('weekly', sa.Column('weeknum', sa.Integer(), nullable=True))\n op.create_unique_constraint(None, 'weekly', ['weeknum'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'weekly', type_='unique')\n op.drop_column('weekly', 'weeknum')\n op.drop_column('weekly', 'approved')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/44d96416e314_update_weekly_with_week_number_and_.py","file_name":"44d96416e314_update_weekly_with_week_number_and_.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"59630462","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('factura', '0007_auto_20151228_1622'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='detallefactura',\n name='factura',\n field=models.ForeignKey(related_name=b'factura', db_column=b'factura_id', to='factura.Factura'),\n ),\n ]\n","sub_path":"Semana14Hackaton/jquispe/farmacia/apps/factura/migrations/0008_auto_20200902_1623.py","file_name":"0008_auto_20200902_1623.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"35093533","text":"#-*- coding:utf-8 _*-\n\n\"\"\"\n@version: \n@author: CharlesXu\n@license: Q_S_Y_Q \n@file: Add_Two_Numbers.py\n@time: 2018/1/25 19:22\n@desc: Leetcode第二题,需要回头再看\n\"\"\"\nfrom Cython.Compiler.ExprNodes import ListNode\n\n'''\nExample:\n Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n Output: 7 -> 0 -> 8\n Explanation: 342 + 465 = 807.\n'''\n'''\n 思路:\n 先构建一个空的头结点不动,然后尾节点从头结点开始向后不断生成薪的节点,遍历两条链的公共部分\n 每次相加相应位数字和进位,分配到结果的链表中,公共部分遍历完后再确定长的链表剩余的部分,同样的方式遍历完。\n'''\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n '''\n :param l1:\n :param l2:\n :return:\n '''\n p = dummy = ListNode(-1)\n carry = 0\n while l1 or l2 or carry:\n val = (l1 and l1.val or 0) + (l2 and l2.val or 0) + carry\n carry = val / 10\n p.next = ListNode(val % 10)\n l1 = l1 and l1.next\n l2 = l2 and l2.next\n p = p.next\n return 
dummy.next\n\n","sub_path":"MachineLearning/Leetcode/002.add_two_numbers/Add_Two_Numbers.py","file_name":"Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"124412570","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 9 18:22:58 2020\n\n@author: sportyjames\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\norigin_url = 'http://xiaohua.zol.com.cn/lengxiaohua/' # 'lengxiaohua' section: cold jokes\n\ndef get_url_msg(origin_url):\n head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)\\\n Chrome/77.0.3865.120 Safari/537.36'}\n response = requests.get(origin_url, headers=head) # fetch the HTTP response for the URL\n bsobj = BeautifulSoup(response.text, 'lxml') # parse it with BeautifulSoup\n return bsobj\n\nbsobj = get_url_msg(origin_url)\nlink_list = []\nfor a_tag in bsobj.find_all('a', string = re.compile('查看全文')):\n link = a_tag.get('href')\n if link:\n link_list.append(link)\n\norigin_url = origin_url.split('/')\n\nwords_list = []\nfor link in link_list:\n article_link = origin_url[0]+\"//\"+origin_url[2]+link\n bsobj = get_url_msg(article_link)\n\n for article_text in bsobj.find_all(name=\"div\", attrs={\"class\" :\"article-text\"}):\n article = article_text.get_text()\n article = article.replace(\" \",\"\")\n #article = article.split()\n article = re.sub(\"[\\n]+\",\"\",article)\n #print(article)\n words_list.append(article)\n\n\n# create a txt file named by `name` and write msg into it\ndef text_create(name, msg):\n desktop_path = \"\" # directory where the new txt file will be stored\n full_path = desktop_path + name + '.txt' # a .doc Word document could be created the same way\n file = open(full_path, \"wb+\")\n file.write(msg.encode(\"utf-8\")) # msg is the joined article text passed in below\n file.close()\n\n\ntext_create('mytxtfile', \"\\n\".join(words_list))\n# call the function to create mytxtfile.txt and write msg into it\n","sub_path":"爬笑话.py","file_name":"爬笑话.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"488554838","text":"#\n# @lc app=leetcode.cn id=209 lang=python3\n#\n# [209] Minimum Size Subarray Sum\n#\nfrom typing import List\n# @lc code=start\n\n\nclass Solution:\n def minSubArrayLen(self, target: int, nums: List[int]) -> int:\n size = len(nums)\n ans, left, sums = 100000, 0, 0\n for i in range(size):\n sums += nums[i]\n while sums >= target:\n ans = min(ans, i+1-left)\n sums -= nums[left]\n left += 1\n return ans if ans != 100000 else 0\n\n\n# @lc code=end\nnums = [1, 4, 4]\nprint(Solution().minSubArrayLen(4, nums))\n","sub_path":"209.长度最小的子数组.py","file_name":"209.长度最小的子数组.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"340270442","text":"import torch\nimport torch.nn as nn\nimport sys\nimport argparse\n\nfrom utils import get_test_cifar\nfrom attack import ArchTransferAttack\nfrom attack import BarrierMethodAttack\nfrom attack import BetterSecondOrderAttack\nfrom attack import ChihaoHappyAttack\nfrom attack import DeepFoolAttack\nfrom attack import FWAdampAttack\nfrom attack import FWAdampAttackPlus\nfrom attack import PGDAttack\nfrom attack import SobolHappyAttack\nfrom models import WideResNet\nfrom model import get_model_for_attack\nfrom models import WideResNet28\nfrom eval_model import eval_model_with_attack\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Test Robust Accuracy')\n parser.add_argument('--batch_size', type=int, default=128, metavar='N',\n 
help='input batch size for training (default: 128)')\n parser.add_argument('--step_size', type=int, default=0.003,\n help='step size for pgd attack(default:0.003)')\n parser.add_argument('--epsilon', type=float, default=8/255.0,\n help='max distance for pgd attack (default: 8/255)')\n parser.add_argument('--perturb_steps', type=int, default=20,\n help='iterations for pgd attack (default pgd20)')\n parser.add_argument('--model_name', type=str, default=\"model2\")\n parser.add_argument(\n '--model_path', type=str,\n default=\"./models/weights/model-wideres-pgdHE-wide10.pt\"\n )\n parser.add_argument('--device', type=str, default=\"cuda:0\")\n parser.add_argument(\n '--attacker',\n choices=[\n 'pgd', 'fw', 'arch_transfer', 'barrier',\n 'stochastic_sample', 'sobol_sample',\n 'deepfool', 'second_order'\n ],\n default='fw')\n return parser.parse_args()\n\n\ndef merge_state_dicts(dicts):\n refd = dicts[0]\n result = dict()\n if isinstance(refd, dict):\n for k in refd.keys():\n result[k] = merge_state_dicts([d[k] for d in dicts])\n return result\n else:\n return sum(dicts)\n\n\ndef divide_state_dict(d, n):\n result = dict()\n if isinstance(d, dict):\n for k in d.keys():\n result[k] = divide_state_dict(d[k], n)\n return result\n else:\n return d / n\n\n\ndef get_attacker(attacker, step_size, epsilon, perturb_steps):\n if attacker == 'fw':\n print('Using FW-AdAmp', file=sys.stderr)\n return FWAdampAttackPlus(\n step_size, epsilon, perturb_steps)\n elif attacker == 'pgd':\n print('Using PGD', file=sys.stderr)\n return PGDAttack(\n step_size, epsilon, perturb_steps)\n elif attacker == 'arch_transfer':\n print('Using Arch Transfer', file=sys.stderr)\n return ArchTransferAttack(\n step_size, epsilon, perturb_steps)\n elif attacker == 'barrier':\n print('Using Barrier Method Attack', file=sys.stderr)\n return BarrierMethodAttack(\n step_size, epsilon, perturb_steps)\n elif attacker == 'stochastic_sample':\n print('Using Random Sampling', file=sys.stderr)\n return ChihaoHappyAttack(\n step_size, epsilon, perturb_steps)\n elif attacker == 'sobol_sample':\n print('Using Sobol Sampling', file=sys.stderr)\n return SobolHappyAttack(\n step_size, epsilon, perturb_steps)\n elif attacker == 'deepfool':\n print('Using DeepFool', file=sys.stderr)\n return DeepFoolAttack(\n step_size, epsilon, perturb_steps)\n elif attacker == 'second_order':\n print('Using Second Order Attack', file=sys.stderr)\n return BetterSecondOrderAttack(\n step_size, epsilon, perturb_steps)\n\n\nif __name__ == '__main__':\n args = parse_args()\n device = torch.device(args.device)\n if args.model_name != \"\":\n model = get_model_for_attack(args.model_name).to(device)\n # 根据model_name, 切换要攻击的model\n else:\n # 防御任务, Change to your model here\n model = WideResNet28().to(device)\n checkpoint = torch.load(\n './models/weights/WideResNet28TRADE_FWAWP-best.pt')\n model.load_state_dict(checkpoint['model'])\n # 攻击任务:Change to your attack function here\n # Here is a attack baseline: PGD attack\n model = nn.DataParallel(model, device_ids=[0])\n attack = get_attacker(\n args.attacker, args.step_size, args.epsilon, args.perturb_steps)\n model.eval()\n test_loader = get_test_cifar(args.batch_size)\n natural_acc, robust_acc, distance = eval_model_with_attack(\n model, test_loader, attack, args.epsilon, device)\n print(\n \"Natural Acc: %.5f, Robust acc: %.5f, distance: %.5f\" %\n (natural_acc, robust_acc, distance)\n 
)\n","sub_path":"adv/attack_main.py","file_name":"attack_main.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"569843442","text":"#! -*- coding: utf-8 -*-\n\n#---------------------------------\n# モジュールのインポート\n#---------------------------------\nimport os\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\n#---------------------------------\n# クラス; 学習モジュール基底クラス\n#---------------------------------\nclass Trainer():\n\t# --- コンストラクタ ---\n\tdef __init__(self, output_dir=None, model_file=None):\n\t\t# --- 出力ディレクトリ作成 ---\n\t\tself.output_dir = output_dir\n\t\tif (output_dir is not None):\n\t\t\tos.makedirs(output_dir, exist_ok=True)\n\t\t\n\t\t# --- モデル構築 ---\n\t\tdef _load_model(model_file):\n\t\t\tif (model_file is not None):\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\treturn None\n\t\t\n\t\tself.model = _load_model(model_file)\n\t\treturn\n\t\n\t# --- 学習 ---\n\tdef fit(self, x_train, y_train, x_test=None, y_test=None, epochs=5):\n\t\t# --- 学習 ---\n\t\tself.model.fit(x_train, y_train, epochs=epochs)\n\t\t\n\t\t# --- 学習結果を評価 ---\n\t\tif ((x_test is not None) and (y_test is not None)):\n\t\t\ttest_loss, test_acc = self.model.evaluate(x_test, y_test, verbose=2)\n\t\t\tprint('Test Accuracy: {}'.format(test_acc))\n\t\t\tprint('Test Loss: {}'.format(test_loss))\n\t\t\n\t\treturn\n\t\n\t# --- 推論 ---\n\tdef predict(self, x_test):\n\t\tpredictions = self.model.predict(x_test)\n\t\treturn predictions\n\t\t\n\t# --- ラベルインデックス取得 ---\n\tdef GetLabelIndex(self, label, onehot=True):\n\t\tif (onehot):\n\t\t\tlabel = np.argmax(label, axis=1)\n\t\tn_category = max(label)+1\n\t\t\n\t\treturn np.array([np.arange(len(label))[label==i] for i in range(n_category)])\n\n#---------------------------------\n# クラス; CNN学習モジュール\n#---------------------------------\nclass TrainerCNN(Trainer):\n\t# --- コンストラクタ ---\n\tdef __init__(self, input_shape, output_dir=None):\n\t# --- モデル構築 ---\n\t\tdef _load_model(input_shape):\n\t\t\tmodel = keras.models.Sequential()\n\t\t\tmodel.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\n\t\t\tmodel.add(keras.layers.MaxPooling2D((2, 2)))\n\t\t\tmodel.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))\n\t\t\tmodel.add(keras.layers.MaxPooling2D((2, 2)))\n\t\t\tmodel.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))\n\t\t\tmodel.add(keras.layers.MaxPooling2D((2, 2)))\n\t\t\tmodel.add(keras.layers.Flatten(input_shape=input_shape))\n\t\t\tmodel.add(keras.layers.Dense(64, activation='relu'))\n\t\t\tmodel.add(keras.layers.Dense(10, activation='softmax'))\n\t\t\t\n\t\t\tmodel.summary()\n\t\t\t\n\t\t\tmodel.compile(\n\t\t\t\toptimizer='adam',\n\t\t\t\tloss = 'sparse_categorical_crossentropy',\n\t\t\t\tmetrics=['accuracy'])\n\t\t\t\n\t\t\treturn model\n\t\t\n\t\t# --- 基底クラスの初期化 ---\n\t\tsuper().__init__(output_dir)\n\t\t\n\t\t# --- モデル構築 ---\n\t\tself.model = _load_model(input_shape)\n\t\tif (self.output_dir is not None):\n\t\t\tkeras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)\n\t\t\n\t\treturn\n\t\n\n#---------------------------------\n# クラス; MLP学習モジュール\n#---------------------------------\nclass TrainerMLP(Trainer):\n\t# --- コンストラクタ ---\n\tdef __init__(self, input_shape, output_dir=None):\n\t# --- モデル構築 ---\n\t\tdef _load_model(input_shape):\n\t\t\tmodel = 
keras.models.Sequential()\n\t\t\tmodel.add(keras.layers.Flatten(input_shape=input_shape))\n\t\t\tmodel.add(keras.layers.Dense(128, activation='relu'))\n\t\t\tmodel.add(keras.layers.Dense(10, activation='softmax'))\n\t\t\t\n\t\t\tmodel.summary()\n\t\t\t\n\t\t\tmodel.compile(\n\t\t\t\toptimizer='adam',\n\t\t\t\tloss = 'sparse_categorical_crossentropy',\n\t\t\t\tmetrics=['accuracy'])\n\t\t\t\n\t\t\treturn model\n\t\t\n\t\t# --- 基底クラスの初期化 ---\n\t\tsuper().__init__(output_dir)\n\t\t\n\t\t# --- モデル構築 ---\n\t\tself.model = _load_model(input_shape)\n\t\tif (self.output_dir is not None):\n\t\t\tkeras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)\n\t\t\n\t\treturn\n\t\n","sub_path":"python/tensorflow_sample/Ver2.x/02_cnn_cifar10/trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"15647988","text":"\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\n\n# load dataset\nimport pandas as pd\ndataset = pd.read_csv(\"breastcancer.csv\")\n\n\nX = dataset.iloc[:, 2:32].values\ny = dataset.iloc[:, 1].values\n\nprint(dataset.iloc[:, 1].value_counts())\n\n# Encoding categorical data\nlabelencoder_X_1 = LabelEncoder()\n# Fit label encoder and return encoded labels M=1, B=0\ny = labelencoder_X_1.fit_transform(y)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=0)\n\n\nmy_first_nn = Sequential() # create model\nmy_first_nn.add(Dense(20, input_dim=30, activation='relu')) # hidden layer\nmy_first_nn.add(Dense(1, activation='sigmoid')) # output layer\nmy_first_nn.compile(loss='binary_crossentropy',\n optimizer='adam', metrics=['accuracy'])\nmy_first_nn_fitted = my_first_nn.fit(X_train, y_train, epochs=100, verbose=0,\n initial_epoch=0)\n\nprint(my_first_nn.summary())\nprint(my_first_nn.evaluate(X_test, y_test))\n","sub_path":"DL ICP1/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"221568415","text":"import os\nfrom time import sleep\n\nimport unittest\n\nfrom appium import webdriver\nimport warnings\n\n\n\nclass SimpleAndroidTests(unittest.TestCase):\n def setUp(self):\n warnings.filterwarnings('ignore')\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = r\"F:\\自动化测试学习\\App\\apks\\ApiDemos-debug.apk\"\n\n self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\n\n def tearDown(self):\n self.driver.quit()\n\n \n\n\n def test_simple_actions(self):\n el = self.driver.find_element_by_accessibility_id('Graphics')\n el.click()\n\n el = self.driver.find_element_by_accessibility_id('Arcs')\n el.click()\n\n self.driver.find_element_by_android_uiautomator('new UiSelector().text(\"Graphics/Arcs\")')\n # self.driver.find_element_by_android_uiautomator('new UiSelector().text(\"Graphics/Arcs\")')\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(SimpleAndroidTests)\n 
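# verbosity=2 makes the runner print each test method name as it executes\n 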
unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"APP_autotest/android_simple.py","file_name":"android_simple.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"227304577","text":"#Expresiones regulares\nimport re\n\n\"\"\"\ntexto = \"This car is too fast\"\npatron = \"car\"\nencontrado = re.search(patron, texto)\nif encontrado:\n print(\"Patron {} encontrado en el texto \".format(patron))\n ini = encontrado.start()\n fin = encontrado.end()\n print(f\"El patron empieza en {ini} y termina en {fin}\")\nelse:\n print(f\"Patron {patron} no encontrado\")\n\n#Juventud, divino tesoro,¡ya te vas para no volver! Cuando quiero llorar, no lloro,y a veces lloro sin querer\n\n\n\"\"\"\n\"\"\"\ntexto = input(\"Ingrese un texto que desea saber cuantas veces aparece una palabra\")\npatron = input(\"Ingrese el patron de busqueda:\")\nresultado = re.findall(patron, texto) #Genera una lista con las palabras repetidas\ncantidad = len(resultado) #Imprime la longitud de la lista \nprint(resultado)\nprint(cantidad)\n\"\"\"\nlistaPatrones = []\nwhile True:\n try:\n texto = input(\"Ingrese un texto que desea saber cuantas veces aparece una palabra: -->\").lower()\n cantidadPatrones = int(input(\"Ingrese la cantidad de patrones que usted va a utilizar para la busqueda: -->\"))\n break\n\n except ValueError:\n print(\"Error, en Valor, por favor ingrese un valor valido\")\n cantidadPatrones = int(input(\"Ingrese la cantidad de patrones que usted va a utilizar para la busqueda: -->\"))\n break\n\nfor element in range(0,cantidadPatrones):\n palabra = str(input(f\"Ingrese el {element} patron: -->\"))\n listaPatrones.append(palabra.lower())\n\nfor patron in listaPatrones:\n print(\" \")\n print(f\"Se busca el patron: {patron}\")\n resultado = re.findall(patron, texto)\n veces = len(resultado)\n print(resultado)\n print(f\"La cantidad de veces que aparece la palabra son:{veces}\")\n print(\" \")\n","sub_path":"Plan de Estudios/Software Engineering Career/0. Python and Flask/Avanzado/1. 
ExpresionesRegulares/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"88110902","text":"'''\nFile: simLVcircuit_alignEStime.py\nDescription: simulate circuit with ngspice\nHistory:\n Date Programmer SAR# - Description\n ---------- ---------- ----------------------------\n Author: w.x.chan@gmail.com 08MAR2021 - Created\n Author: w.x.chan@gmail.com 13APR2021 - v2.0.0\n Author: w.x.chan@gmail.com 21APR2021 - v2.1.0\n Author: w.x.chan@gmail.com 09JUN2021 - v3.0.0\n -adjust EStime to within 0.1% od minimum volume\n -adjust try_initLVvol=initLVvol*1.05\n'''\n########################################################################\n_version='3.0.0'\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport sys\nimport vtk\nimport os\nimport inspect\nfrom heartFEM import ngspice_py\nimport numpy as np\nfrom scipy import interpolate\n########################################################################\n\nsuffixDict={4:'T ',3:'g ',2:'meg',1:'k ',0:' ',-1:'m ',-2:'u ',-3:'n ',-4:'p ',-5:'f '}\n\ndef simLVcircuit_alignEStime(casename,stopTime,lvufile,period,targetEStime,init_timetopeaktension,try_timetopeaktension=None,lvinputvar='V',initLAvol=0,initRAvol=0,initLVvol=0,initRVvol=0,vla0=None,vra0=None,init_file=None,init_time=None,iterationNumber=100,verbose=True):\n\n if try_timetopeaktension is None:\n try_timetopeaktension=init_timetopeaktension\n adj_timetopeaktension=0.1*init_timetopeaktension\n elif isinstance(try_timetopeaktension,(int,float)):\n adj_timetopeaktension=0.1*init_timetopeaktension\n else:\n adj_timetopeaktension=try_timetopeaktension[1]\n try_timetopeaktension=try_timetopeaktension[0]\n tune_timetopeaktension=None\n last_EStime=0\n for n in range(iterationNumber):\n logger.info(\" Trying timetopeak=\"+repr(try_timetopeaktension))\n case_dir,lvufilename = os.path.split(lvufile)\n timetopeak_from_to=[init_timetopeaktension,try_timetopeaktension]\n ngspice_py.simLVcircuit(casename,stopTime,lvufile,lvinputvar=lvinputvar,initLAvol=initLAvol,initRAvol=initRAvol,initLVvol=initLVvol,initRVvol=initRVvol,vla0=vla0,vra0=vra0,init_file=init_file,init_time=init_time,timetopeak_from_to=timetopeak_from_to,verbose=verbose)\n \n cir_results=np.loadtxt(case_dir+'/'+'circuit_results.txt',skiprows=1)[:,2:4]\n cir_results[:,0]*=1000. 
#set to ms\n while cir_results[-1,0]>=(2.*period):\n cir_results[:,0]-=period\n cir_results=cir_results[cir_results[:,0]>=0]\n cir_results=cir_results[cir_results[:,0]=(2.*period):\n cir_results[:,0]-=period\n cir_results=cir_results[cir_results[:,0]>=0]\n cir_results=cir_results[cir_results[:,0] n:\r\n print(\"Your guess is too high, guess again\")\r\n \r\n found = False\r\n else:\r\n found = True\r\n\r\n \r\nif found and count < 4:\r\n print(\"You guessed the correct number in %d counts\" % count)\r\n print(\"Now all the bananas are yours!\")\r\nelse:\r\n print(\"You failed! Try again next time\")\r\n","sub_path":"L2/Q6_ivanchau.py","file_name":"Q6_ivanchau.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"421176122","text":"# recursion\nnumber = int(input('enter a positive integer'))\n\n\ndef factorial(n):\n if n == 0 or n == 1:\n return 1\n elif n > 1:\n # recursive step: n! = n * (n - 1)!\n return n * factorial(n - 1)\n\n\n\nprint(factorial(number))","sub_path":"task_8_1.py","file_name":"task_8_1.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"386950871","text":"import os\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\ndef read_feature_csv(filename):\n \n time = []\n feat = []\n feedback = None\n backbutton = None\n with open('data/allfeature/'+filename, mode='r') as csv_file:\n \n csv_reader = csv.reader(csv_file)\n \n for ii, row in enumerate(csv_reader):\n cur_feat = []\n if ii == 0:\n feedback = int(row[-3])\n backbutton = row[-1]\n elif ii == 1:\n headers = row\n else:\n for jj, entry in enumerate(row):\n if jj == 0:\n time.append(float(entry))\n elif jj == 1:\n if entry == 'listen':\n cur_feat.append(0.0)\n else:\n cur_feat.append(1.0)\n elif jj == 8:\n if entry == 'left':\n cur_feat.append(0.0)\n else:\n cur_feat.append(1.0)\n else:\n cur_feat.append(float(entry))\n feat.append(cur_feat)\n \n# if ii % 1000 == 0:\n# print('Processed', ii)\n \n time = np.array(time)\n feat = np.array(feat)\n \n return time, feat, feedback, backbutton, headers[1:]\n\ndef remove_outliers(x):\n upper_quartile = np.percentile(x, 75)\n lower_quartile = np.percentile(x, 25)\n IQR = (upper_quartile - lower_quartile) * 2.0\n quartileSet = (lower_quartile - IQR, upper_quartile + IQR)\n \n result1 = np.where(x <= quartileSet[0])[0]\n result2 = np.where(x >= quartileSet[1])[0]\n \n x[result1] = 0.0\n x[result2] = 0.0\n \n return x\n\ndef get_start_end_inds(start_val, end_val, t):\n \n if not start_val == 0:\n start_ind = np.argmin(abs(t[-1] - t - start_val))\n else:\n start_ind = 0\n \n if not end_val == -1:\n end_ind = np.argmin(abs(t - t[0] - end_val))\n else:\n end_ind = t.shape[0]\n \n if start_ind > end_ind:\n start_ind, end_ind = end_ind, start_ind\n \n return start_ind, end_ind\n\ndef get_model_data(T,X,Y1,Y2,info,headers,start_val=0,end_val=-1):\n \n T_arr = []\n X_arr = []\n Y1_arr = []\n Y2_arr = []\n \n for ii in range(len(X)):\n if info[ii][2][:8] == 'writewrd':\n activity_type = 0\n elif info[ii][2][:9] == 'storyhear':\n activity_type = 1\n else:\n activity_type = 2\n \n start_ind, end_ind = get_start_end_inds(start_val, end_val, T[ii])\n \n cur_X = X[ii][start_ind:end_ind,:]\n cur_Y1 = Y1[ii]*np.ones((cur_X.shape[0],1)) + 1.0\n \n if Y2[ii] == 'True':\n cur_Y2 = np.ones((cur_X.shape[0],1))\n else:\n cur_Y2 = np.zeros((cur_X.shape[0],1))\n \n \n cur_T = 
T[ii][start_ind:end_ind]\n cur_T = cur_T - cur_T[0]\n cur_X = np.hstack((cur_X, np.ones_like(cur_Y1)*activity_type))\n \n ind = headers.index('Lines')\n cur_X[:,ind] = cur_X[:,ind] - cur_X[0,ind]\n \n ind = headers.index('Picture Side')\n if activity_type == 0:\n cur_X[:,ind] = 0\n \n ind = headers.index('Eye Aspect Ratio')\n cur_X[:,ind] = np.maximum(cur_X[:,ind], -0.1)\n cur_X[:,ind] = np.minimum(cur_X[:,ind], 0.1)\n \n ind = headers.index('Pupil Ratio')\n cur_X[:,ind] = np.maximum(cur_X[:,ind], -0.3)\n cur_X[:,ind] = np.minimum(cur_X[:,ind], 0.3)\n \n ind = headers.index('Head Orientation')\n change_inds = np.where(cur_X[:,ind] > np.pi)\n cur_X[change_inds,ind] -= 2*np.pi\n \n T_arr.append(cur_T)\n X_arr.append(cur_X)\n Y1_arr.append(cur_Y1)\n Y2_arr.append(cur_Y2)\n \n return T_arr, X_arr, Y1_arr, Y2_arr\n\ndef plot_cdf(z, title):\n \n fig, ax = plt.subplots(2,1,figsize=(8, 8))\n x = np.sort(z)\n y = np.array(range(len(z)))/float(len(z))\n \n ax[0].plot(x, y, 'o-b')\n \n perc = [np.percentile(x, ii) for ii in [20, 40, 60, 80]]\n \n text = ' Percentiles:\\n 0% {:.1f}\\n 20% {:.1f}\\n 40% {:.1f}\\n 60% {:.1f}\\n 80% {:.1f}\\n 100% {:.1f}'.format(0, perc[0], perc[1], perc[2], perc[3], x[-1])\n \n ax[0].set_xlim(0,800)\n ax[0].set_title(title, fontsize=20)\n ax[0].set_ylabel('CDF', fontsize=14)\n ax[0].text(0.75, 0.1, text, color=\"k\", fontsize=14,\n transform=ax[0].transAxes)\n \n ax[1].boxplot(z, vert=False, showfliers=True, widths = 0.65)\n ax[1].set_ylabel('Boxplot', fontsize=14)\n ax[1].set_xlim(0,800)\n ax[1].set_yticklabels([])\n ax[1].set_xlabel('Time (s)', fontsize=14)\n plt.show()\n\ndef create_cdfs(T, X, Y1, Y2):\n \n bail_len = []\n completed_len = []\n all_len = []\n \n for ii in range(len(X)):\n \n if Y2[ii][0] == 0:\n completed_len.append(T[ii][-1])\n else:\n bail_len.append(T[ii][-1])\n all_len.append(T[ii][-1])\n \n plot_cdf(bail_len, 'Bailed')\n plot_cdf(completed_len, 'Completed')\n plot_cdf(all_len, 'All')\n \n\ndef main():\n \n T = []\n X = []\n Y1 = []\n Y2 = []\n info = []\n \n for filename in os.listdir('data/allfeature'):\n res = filename.split('_')\n vid_ind = res[0]\n activity_ind = res[1]\n activity_name = res[2] + '.' 
+ res[3]\n \n if vid_ind:\n time, feat, feedback, backbutton, headers = read_feature_csv(filename)\n if time.shape[0] > 1: \n T.append(time)\n X.append(feat)\n Y1.append(feedback)\n Y2.append(backbutton)\n info.append([vid_ind, activity_ind, activity_name])\n \n model_T, model_X, model_Y1, model_Y2 = get_model_data(T,X,Y1,Y2,info,headers)\n \n create_cdfs(model_T, model_X, model_Y1, model_Y2)\n #np.savez('all_data.npz', X=model_X, Y1=model_Y1, Y2=model_Y2, T=model_T)\n\n \nif __name__ == \"__main__\":\n main()","sub_path":"log_file_scripts_archive/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"598277091","text":"import os\nimport pickle\nimport http.client\n\ndata_file_name = 'football_api_data_Leagues2021.pickle'\ndata_txt_file_name = 'football_api_data_Leagues2021.txt'\n\n\nif os.path.exists(data_file_name):\n with open(data_file_name, 'rb') as f:\n data = pickle.load(f)\n with open(data_txt_file_name, \"r\") as f:\n data_text = f.read()\nelse:\n with open(\"key.txt\", \"r\") as data_file:\n api_key = data_file.read()\n print(api_key)\n\n headers = {\n 'x-rapidapi-host': \"v3.football.api-sports.io\",\n 'x-rapidapi-key': api_key}\n print(headers)\n\n import requests\n api_url = 'https://v3.football.api-sports.io/leagues?season=2021'\n data = requests.get(api_url, headers=headers)\n\n # conn = http.client.HTTPSConnection(\"v3.football.api-sports.io\")\n # conn.request(\"GET\", \"/fixtures?live=all\", headers=headers)\n # conn.request(\"GET\", \"/fixtures?date=2022-04-05\", headers=headers)\n # res = conn.getresponse()\n # data = res.read()\n\n print(data)\n with open(data_file_name, 'wb') as f:\n pickle.dump(data, f)\n with open(data_txt_file_name, \"w\") as f:\n f.write(data.text)\n\n # decoded_data = data.decode(\"utf-8\")\n # with open(decoded_data_file_name, 'wb') as f:\n # pickle.dump(decoded_data, f)\n #\n # with open(decoded_data_txt_file_name, \"w\") as data_file:\n # data_file.write(decoded_data)\n\nprint(data)\nprint(data.text)\n","sub_path":"WebServices/HelloWorld.football.api-sports.leagues2021.py","file_name":"HelloWorld.football.api-sports.leagues2021.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"75993495","text":"import logging\r\nimport sys\r\n\r\nimport smpplib.gsm\r\nimport smpplib.client\r\nimport smpplib.consts\r\n\r\n# if you want to know what's happening\r\nlogging.basicConfig(level='DEBUG')\r\nsmpplib.gsm.ENCODINGS\r\n# Two parts, GSM default / UCS2, SMS with UDH\r\nparts, encoding_flag, msg_type_flag = smpplib.gsm.make_parts(u'Hello World PJTINHA')\r\n\r\nclient = smpplib.client.Client('10.168.13.187', 9800)\r\n\r\n# Print when obtain message_id\r\nclient.set_message_sent_handler(\r\n lambda pdu: sys.stdout.write('sent {} {}\\n'.format(pdu.sequence, pdu.message_id)))\r\n\r\n# Handle delivery receipts (and any MO SMS)\r\ndef handle_deliver_sm(pdu):\r\n sys.stdout.write('delivered {}\\n'.format(pdu.receipted_message_id))\r\n return 0 # cmd status for deliver_sm_resp\r\n\r\nclient.set_message_received_handler(lambda pdu: handle_deliver_sm(pdu))\r\n\r\nclient.connect()\r\nclient.bind_transceiver(system_id='DYNAMO', password='dynamo', system_type='dyn' )\r\n\r\nfor part in parts:\r\n pdu = client.send_message(\r\n source_addr_ton=smpplib.consts.SMPP_TON_NWSPEC,\r\n #source_addr_ton=smpplib.consts.SMPP_TON_ALNUM,\r\n 
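# TON (type of number) and NPI (numbering plan indicator) flags tell the\r\n # SMSC how to interpret the source/destination addresses; the constants\r\n # come straight from smpplib.consts\r\n 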
source_addr_npi=smpplib.consts.SMPP_NPI_UNK,\r\n # Make sure it is a byte string, not unicode:\r\n source_addr='5838',\r\n #source_addr='UPSTREAM',\r\n #smpplib.consts.SMPP_TON_UNK\r\n #smpplib.consts.SMPP_TON_ALNUM\r\n\r\n dest_addr_ton=smpplib.consts.SMPP_TON_INTL,\r\n dest_addr_npi=smpplib.consts.SMPP_NPI_ISDN,\r\n # Make sure thease two params are byte strings, not unicode:\r\n destination_addr='5524988398508',\r\n short_message=part,\r\n\r\n data_coding=encoding_flag,\r\n esm_class=msg_type_flag,\r\n registered_delivery=True,\r\n )\r\n print(pdu.sequence)\r\n\r\n# Enters a loop, waiting for incoming PDUs\r\nclient.listen()","sub_path":"sendsms.py","file_name":"sendsms.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"198389052","text":"import numpy as np\nimport math\nimport os\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches as patches\nfrom astropy.io import fits as pyfits\nfrom astropy.visualization import (PercentileInterval, ImageNormalize,\n SqrtStretch, LogStretch, LinearStretch)\nfrom copy import copy\nfrom .utils import PyKEArgumentHelpFormatter\nfrom . import kepio, kepmsg, kepplot\n\n\ninfile = False; aperfile = False; maskfile = 'maskfile.txt'\nplotfile = 'kepmask.png'; pxdim = 0; pydim = 0; pimg = None; mask = []\nzscale = False; xmin = 0.0; xmax = 1000.0; ymin = 0.0; ymax = 1000.0\nzmin = False; zmax = False; norm=None; kepid = ''; ra = ''; dec = ''; kepmag = ''\nseason = ''; quarter = -1; skygroup = ''; channel = ''; module = ''\noutput = ''; column = ''; row = ''; colmap='jet'; aid = None; bid = None\ncid = None; fid = None; pkepmag = None; pkepid = None\npra = None; pdec = None\nax = None\n\n__all__ = ['kepmask']\n\n\ndef kepmask(infile, frameno=100, maskfile='maskfile.txt', plotfile='kepmask.png',\n imin=None, imax=None, iscale='linear', cmap='bone',\n verbose=False, logfile='kepmask.log'):\n \"\"\"\n kepmask - plots, creates or edits custom target masks for target pixel\n files.\n\n The product from this task is a target mask definition file which\n can be used by kepextract to extract a light curve from target pixel data.\n This tool is a GUI interface for defining a pixel mask by moving a mouse\n over image pixels and selecting them by pressing the left-button of your\n mouse/keypad.\n\n Parameters\n ----------\n infile : str\n The name of a target pixel file from the MAST Kepler archive,\n containing a standard mask definition image in the second data\n extension.\n frameno : int\n Frame number in the target pixel file.\n maskfile : str\n The name of an ASCII mask definition file. This is either the name of\n a file to be plotted, a file to be created, or a file to be edited.\n plotfile : str\n The name of a PNG plot file containing a record of the mask defined or\n uploaded by this task.\n imin : float or None\n Minimum intensity value (in electrons per cadence) for the image\n display. The default minimum intensity level is the median of the\n faintest 10% of pixels in the image.\n imax : float or None\n Maximum intensity value (in electrons per cadence) for the image\n display. 
The default maximum intensity level is the median of the\n brightest 10% of pixels in the image.\n iscale : str\n Type of intensity scaling for the image display.\n * linear\n * log\n * sqrt\n cmap : str\n Color intensity scheme for the image display.\n verbose : bool\n Print informative messages and warnings to the shell and logfile?\n logfile : str\n Name of the logfile containing error and warning messages.\n\n Examples\n --------\n .. code-block:: bash\n\n $ kepmask ktwo202933888-c02_lpd-targ.fits.gz\n\n .. image:: ../_static/images/api/kepmask.png\n :align: center\n \"\"\"\n\n global pimg, zscale, zmin, zmax, xmin, xmax, ymin, ymax, quarter, norm\n global pxdim, pydim, kepmag, skygroup, season, channel\n global module, output, row, column, mfile, pfile\n global pkepid, pkepmag, pra, pdec, colmap\n global pxdim, pydim\n\n # input arguments\n zmin = imin; zmax = imax; zscale = iscale; colmap = cmap\n mfile = maskfile; pfile = plotfile\n\n # log the call\n hashline = '--------------------------------------------------------------'\n kepmsg.log(logfile, hashline, verbose)\n call = ('KEPMASK -- '\n + ' infile={}'.format(infile)\n + ' maskfile={}'.format(mfile)\n + ' plotfile={}'.format(pfile)\n + ' frameno={}'.format(frameno)\n + ' imin={}'.format(imin)\n + ' imax={}'.format(imax)\n + ' iscale={}'.format(iscale)\n + ' cmap={}'.format(cmap)\n + ' verbose={}'.format(verbose)\n + ' logfile={}'.format(logfile))\n\n kepmsg.log(logfile, call + '\\n', verbose)\n kepmsg.clock('KEPMASK started at', logfile, verbose)\n\n # open TPF FITS file and check whether or not frameno exists\n try:\n tpf = pyfits.open(infile, mode='readonly')\n except:\n errmsg = ('ERROR -- KEPIO.OPENFITS: cannot open ' +\n infile + ' as a FITS file')\n kepmsg.err(logfile, errmsg, verbose)\n\n try:\n naxis2 = tpf['TARGETTABLES'].header['NAXIS2']\n except:\n errmsg = ('ERROR -- KEPMASK: No NAXIS2 keyword in ' + infile +\n '[TARGETTABLES]')\n kepmsg.err(logfile, errmsg, verbose)\n\n if frameno > naxis2:\n errmsg = ('ERROR -- KEPMASK: frameno is too large. 
There are'\n ' {} rows in the table.'.format(naxis2))\n kepmsg.err(logfile, errmsg, verbose)\n\n tpf.close()\n\n # read TPF data pixel image\n kepid, channel, skygroup, module, output, quarter, season, \\\n ra, dec, column, row, kepmag, xdim, ydim, pixels = \\\n kepio.readTPF(infile, 'FLUX', logfile, verbose)\n img = pixels[frameno]\n pkepid = copy(kepid)\n pra = copy(ra)\n pdec = copy(dec)\n pkepmag = copy(kepmag)\n pxdim = copy(xdim)\n pydim = copy(ydim)\n pimg = copy(img)\n\n # print target data\n print('')\n print(' KepID: {}'.format(kepid))\n print(' RA (J2000): {}'.format(ra))\n print('Dec (J2000): {}'.format(dec))\n print(' KepMag: {}'.format(kepmag))\n print(' SkyGroup: {}'.format(skygroup))\n print(' Season: {}'.format(season))\n print(' Channel: {}'.format(channel))\n print(' Module: {}'.format(module))\n print(' Output: {}'.format(output))\n print('')\n\n # subimage of channel for plot\n ymin = copy(row)\n ymax = ymin + ydim\n xmin = copy(column)\n xmax = xmin + xdim\n\n # intensity scale\n if imin is None and imax is None:\n imin, imax = PercentileInterval(95.).get_limits(pimg)\n else:\n if imin is None:\n imin, _ = PercentileInterval(95.).get_limits(pimg)\n else:\n _, imax = PercentileInterval(95.).get_limits(pimg)\n\n if zscale == 'sqrt':\n norm = ImageNormalize(vmin=imin, vmax=imax, stretch=SqrtStretch())\n elif zscale == 'linear':\n norm = ImageNormalize(vmin=imin, vmax=imax, stretch=LinearStretch())\n elif zscale == 'log':\n norm = ImageNormalize(vmin=imin, vmax=imax, stretch=LogStretch())\n\n zmin = copy(imin)\n zmax = copy(imax)\n\n # plot limits\n ymin = float(ymin) - 0.5\n ymax = float(ymax) - 0.5\n xmin = float(xmin) - 0.5\n xmax = float(xmax) - 0.5\n\n # plot style\n plt.rcParams['figure.dpi'] = 80\n plt.figure(figsize=[10, 7])\n\n global mask, aid, bid, cid, did, fid\n\n aid = plt.connect('button_press_event', clicker1)\n bid = plt.connect('button_press_event', clicker2)\n cid = plt.connect('button_press_event', clicker3)\n did = plt.connect('button_press_event', clicker4)\n fid = plt.connect('button_press_event', clicker6)\n\n redraw()\n plt.show()\n\ndef redraw():\n global ax\n\n plt.clf()\n # side panel with the target metadata\n plt.axes([0.73, 0.09, 0.25, 0.4])\n plt.text(0.1, 1.0, ' KepID: {}'.format(pkepid), fontsize=12)\n plt.text(0.1, 0.9, ' RA (J2000): {}'.format(pra), fontsize=12)\n plt.text(0.1, 0.8, 'Dec (J2000): {}'.format(pdec), fontsize=12)\n plt.text(0.1, 0.7, ' KepMag: {}'.format(pkepmag), fontsize=12)\n plt.text(0.1, 0.6, ' SkyGroup: {}'.format(skygroup), fontsize=12)\n plt.text(0.1, 0.5, ' Season: {}'.format(season), fontsize=12)\n plt.text(0.1, 0.4, ' Channel: {}'.format(channel), fontsize=12)\n plt.text(0.1, 0.3, ' Module: {}'.format(module), fontsize=12)\n plt.text(0.1, 0.2, ' Output: {}'.format(output), fontsize=12)\n plt.text(0.1, 0.1, ' Column: {}'.format(column), fontsize=12)\n plt.text(0.1, 0.0, ' Row: {}'.format(row), fontsize=12)\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\n plt.xlim(0.0, 1.0)\n plt.ylim(-0.05, 1.12)\n # clear button\n plt.axes([0.73, 0.86, 0.25, 0.11])\n plt.text(0.5, 0.5, 'CLEAR', fontsize=24,\n horizontalalignment='center', verticalalignment='center')\n plt.setp(plt.gca(), 
xticklabels=[], xticks=[], yticklabels=[], yticks=[])\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0],[0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\n plt.xlim(0.0, 1.0)\n plt.ylim(0.0, 1.0)\n # dump custom aperture to file button\n plt.axes([0.73, 0.62, 0.25, 0.11])\n plt.text(0.5, 0.5, 'DUMP', fontsize=24,\n horizontalalignment='center', verticalalignment='center')\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\n plt.xlim(0.0, 1.0)\n plt.ylim(0.0, 1.0)\n # print window to png file button\n plt.axes([0.73, 0.50, 0.25, 0.11])\n plt.text(0.5, 0.5, 'PRINT', fontsize=24,\n horizontalalignment='center', verticalalignment='center')\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\n plt.xlim(0.0, 1.0)\n plt.ylim(0.0, 1.0)\n # set the image window location and size\n ax = plt.axes([0.07, 0.09, 0.63, 0.88])\n # force tick labels to be absolute rather than relative\n plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))\n plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))\n plt.subplots_adjust(0.06, 0.1, 0.93, 0.88)\n # plot the image window\n imgsum = pimg.reshape((pydim, pxdim))\n plt.imshow(imgsum, aspect='auto', interpolation='nearest', origin='lower',\n extent=(xmin, xmax, ymin, ymax), cmap=colmap, vmin=zmin,\n vmax=zmax, norm=norm)\n plt.gca().set_autoscale_on(False)\n plt.xlabel('Pixel Column Number', {'color' : 'k'}, fontsize=14)\n plt.ylabel('Pixel Row Number', {'color' : 'k'}, fontsize=14)\n plt.tick_params(labelsize=12)\n\n plt.draw()\n\n# -----------------------------------------------------------\n# clear all pixels from pixel mask\ndef clicker1(event):\n global mask\n if event.inaxes:\n if event.button == 1:\n if (event.x > 601 and event.x < 801 and\n event.y > 492 and event.y < 522):\n print(\"Masked pixels cleared!\")\n mask = []\n redraw()\n return\n\n# -----------------------------------------------------------\n# load mask from file\ndef clicker2(event):\n global mask, mfile, colmap\n\n if colmap in ['Greys','binary','bone','gist_gray','gist_yarg',\n 'gray','pink','RdGy']:\n sqcol = 'g'\n else:\n sqcol = '#ffffee'\n\n if event.inaxes:\n if event.button == 1:\n if (event.x > 601 and event.x < 801 and\n event.y > 422 and event.y < 482):\n plt.clf()\n redraw()\n try:\n lines = kepio.openascii(mfile, 'r', None, False)\n for line in lines:\n mask = []\n work = line.strip().split('|')\n y0 = int(work[3])\n x0 = int(work[4])\n work = work[5].split(';')\n for i in range(len(work)):\n n = int(work[i].split(',')[0]) + y0\n m = int(work[i].split(',')[1]) + x0\n mask.append(str(m) + ',' + str(n))\n x = [m - 0.5, m + 0.5, m + 0.5, m - 0.5, m - 0.5]\n y = [n - 0.5, n - 0.5, n + 0.5, n + 0.5, n - 0.5]\n ax.add_patch(patches.Rectangle((x[0], y[0]), 1, 1,\n color='red', lw=4,\n fill=True, alpha=0.3))\n plt.draw()\n print(\"Mask definition loaded successfully!\")\n except:\n errmsg = ('ERROR -- KEPMASK: Cannot open or read mask '\n 'file ' + mfile)\n kepmsg.err('kepmask.log', errmsg, True)\n return\n\n# -----------------------------------------------------------\n# dump custom aperture definition file\ndef clicker3(event):\n global mfile\n\n if event.inaxes:\n if event.button == 1:\n if (event.x > 601 and event.x < 801 and\n event.y > 354 and event.y < 415):\n masktxt = 'NEW|'\n masktxt += skygroup + '|'\n masktxt += str(pkepid)\n masktxt += 
',TAD_NO_HALO,TAD_NO_UNDERSHOOT_COLUMN|'\n masktxt += str(int(row)) + '|'\n masktxt += str(int(column)) + '|'\n for coord in sorted(set(mask)):\n masktxt += str(int(coord.split(',')[1]) - int(row)) + ','\n masktxt += str(int(coord.split(',')[0]) - int(column)) + ';'\n if os.path.isfile(mfile):\n os.remove(mfile)\n out = open(mfile, 'a')\n out.write(masktxt[:-1] + '\\n')\n out.close()\n print('Wrote custom aperture definition to {0}'.format(mfile))\n return\n\n# -----------------------------------------------------------\n# print plot to png with left-mouse click\ndef clicker4(event):\n\n if event.inaxes:\n if event.button == 1:\n if (event.x > 601 and event.x < 801 and\n event.y > 285 and event.y < 347):\n plt.savefig(pfile)\n print('Wrote plot hardcopy file {0}'.format(pfile))\n return\n\n# -----------------------------------------------------------\n# this function will be called with every click of the mouse\ndef clicker6(event):\n global mask\n if event.inaxes:\n if event.button == 1:\n if (event.x > 75 and event.x < 580 and\n event.y > 53 and event.y < 550):\n redraw()\n m = event.xdata + 0.5\n n = event.ydata + 0.5\n txt = str(int(m)) + ',' + str(int(n))\n if txt in mask:\n tmpmask = []\n for pixel in mask:\n if pixel != txt:\n tmpmask.append(pixel)\n mask = tmpmask\n else:\n mask.append(txt)\n if colmap in ['Greys','binary','bone','gist_gray','gist_yarg',\n 'gray','pink','RdGy']:\n sqcol = 'g'\n else:\n sqcol = '#ffffee'\n for pixel in mask:\n m = int(pixel.split(',')[0])\n n = int(pixel.split(',')[1])\n x = [m - 0.5, m + 0.5, m + 0.5, m - 0.5, m - 0.5]\n y = [n - 0.5, n - 0.5, n + 0.5, n + 0.5, n - 0.5]\n ax.add_patch(patches.Rectangle((x[0], y[0]), 1, 1,\n color='red', lw=4,\n fill=True, alpha=0.3))\n plt.draw()\n\ndef kepmask_main():\n import argparse\n parser = argparse.ArgumentParser(\n description=(\"Plot, create or edit custom light curve \"\n \"extraction masks for target pixel files \"),\n formatter_class=PyKEArgumentHelpFormatter)\n parser.add_argument('infile', help='name of input target pixel FITS file',\n type=str)\n parser.add_argument('--frameno', default=100,\n help='The number of the frame to plot',\n type=int)\n parser.add_argument('--maskfile', default='maskfile.txt',\n help='name of ASCII custom aperture definition file',\n type=str)\n parser.add_argument('--plotfile', default='kepmask.png',\n help='name of output PNG plot file', type=str)\n parser.add_argument('--imin', default=None, type=float,\n help='minimum of image intensity scale [e-]')\n parser.add_argument('--imax', default=None, type=float,\n help='maximum of image intensity scale [e-]')\n parser.add_argument('--iscale', default='linear',\n help='type of image intensity scale',\n type=str,\n choices=['linear', 'log', 'sqrt'])\n parser.add_argument('--cmap', default='bone', help='image colormap',\n type=str)\n parser.add_argument('--verbose', action='store_true',\n help='Write to a log file?')\n parser.add_argument('--logfile', '-l', help='Name of ascii log file',\n default='kepmask.log', dest='logfile', type=str)\n args = parser.parse_args()\n kepmask(args.infile, args.frameno, args.maskfile, args.plotfile, args.imin,\n args.imax, args.iscale, args.cmap, args.verbose, args.logfile)\n","sub_path":"pyke/kepmask.py","file_name":"kepmask.py","file_ext":"py","file_size_in_byte":17514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"443039134","text":"\"\"\"\n-------------------------------------------------------\nCode that demonstrates how to run of 
the Jackknife code\n-------------------------------------------------------\n\"\"\"\n\" Preliminary setup \"\nimport sys\nimport os\nfrom pylab import cm\n\n# parameters file\nimport jackknife_params as par\n\n# import the main Class\njck_dir = par.source_dir\nsys.path.append(jck_dir)\n\n# home directory for code\nif par.beautify:\n code_home_dir = par.code_home_dir\nelse:\n code_home_dir = None\n\nimport jackknife_run_setup as jackk_run\njack_run = jackk_run.Jack_knife_Run(code_home_dir=code_home_dir, param_dir=par.here)\n\n# import the main Class\nimport jackknife as jackk\njack = jackk.Jack_knife(code_home_dir=code_home_dir, param_dir=par.here)\n\n# import the plotting Class\nimport jackknife_plots as jackkplt\njackplt = jackk.Jack_knife_Plot(code_home_dir=code_home_dir, param_dir=par.here)\n#------------------------------------------------#\n\n\"\"\"\n*******************\nDo the calculations\n*******************\nFor info on the Jackknife method see:\nhttp://people.bu.edu/aimcinto/jackknife.pdf\n\"\"\"\n\ndef generate_jackknife_pathches():\n \"\"\"\n Generates the Jackknife patches\n \"\"\"\n jack_run.create_jck_regions()\n return\n\ndef calculate_gammat():\n \"\"\"\n Calculates gamma_t as a function of theta\n \"\"\"\n jack_run.run_gamma_t()\n return\n\ndef calculate_jackknife_covariance():\n \"\"\"\n Run code to get covariance matrix from Jackknife\n \"\"\"\n jack_run.jck_covariance()\n return\n\ndef calculate_jackknife_covariance_MPI():\n \"\"\"\n Run code to get covariance matrix from Jackknife, parallelized\n \"\"\"\n jack_run.jck_covariance_MPI()\n return\n\ndef plt_lns(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[3.5,3.], fig_format='.png'):\n \"\"\"\n Plots Jackknife patches with lenses\n \"\"\"\n jackplt.plot_lenses_jck(show=show, filename=filename, fontsize=fontsize, save_dpi=save_dpi, figsize=figsize, fig_format=fig_format)\n return\n\ndef plt_rnd(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[3.5,3.], fig_format='.png'):\n \"\"\"\n Plots Jackknife patches with random points\n \"\"\"\n jackplt.plot_randoms_jck(show=show, filename=filename, fontsize=fontsize, save_dpi=save_dpi, figsize=figsize, fig_format=fig_format)\n return\n\ndef plt_src(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[3.5,3.], fig_format='.png',\n marker='*', ms=4, alpha=1., color='gray'):\n \"\"\"\n Plots the sources\n \"\"\"\n jackplt.plot_sources(show=show, filename=filename, fontsize=fontsize, save_dpi=save_dpi, figsize=figsize, fig_format=fig_format,\n marker=marker, ms=ms, alpha=alpha, color=color)\n\ndef plt_ls(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[3.5,3.], fig_format='.png',\n marker='*', ms=4, alpha=1., color='gray'):\n \"\"\"\n Plots (l)ensed and (s)ources in the same plot\n \"\"\"\n jackplt.plot_lens_sources(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[3.5,3.], fig_format=fig_format,\n\t marker_len='o', ms_len=4, alpha_len=1.,\n\t\t\t\t\t marker_src='*', ms_src=4, alpha_src=1., color_src='gray')\n\n return\n\ndef plt_lens_hpix(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[3.5,3.], fig_format='.png', xsize=400):\n \"\"\"\n Plots lensed as an healpix map\n \"\"\"\n jackplt.plot_lenses_hpix(show=show, filename=filename, fontsize=fontsize, save_dpi=save_dpi, figsize=figsize, fig_format=fig_format, xsize=xsize)\n\n return\n\n# def plt_jack(show=False, filename=None, fontsize=8, save_dpi=1000, figsize=[15,2.5], plt_centers=True, fig_format='.png',\n# lns_marker='+', rnd_marker='+', 
lns_cmap=cm.rainbow, rnd_cmap=cm.rainbow, cen_marker='o', cen_color='k'):\n# \"\"\"\n# Plots lenses as a healpix map\n# \"\"\"\n# jackplt.plot_jack(show=show, filename=filename, fontsize=fontsize, save_dpi=save_dpi, figsize=figsize, plt_centers=plt_centers, fig_format=fig_format,\n# lns_marker=lns_marker, rnd_marker=rnd_marker, lns_cmap=lns_cmap, rnd_cmap=rnd_cmap, cen_marker=cen_marker, cen_color=cen_color)\n\n# return\n\n################################################################################\n# if the file is directly called\nif __name__ == \"__main__\":\n \" Generate Jackknife patches \"\n # generate_jackknife_pathches()\n\n \" Calculate gamma_t \"\n # calculate_gammat()\n\n \" Calculate Jackknife covariance matrix \"\n # calculate_jackknife_covariance()\n\n \" Calculate Jackknife covariance matrix, parallelized \"\n # calculate_jackknife_covariance_MPI()\n\n \" Plot Jackknife lens patches \"\n # plt_lns()\n\n \" Plot Jackknife random patches \"\n # plt_rnd()\n\n \" Plot sources \"\n # plt_src()\n\n \" Plot lenses together with sources \"\n # plt_ls()\n\n \" Plot healpix map of lenses \"\n # plt_lens_hpix()\n\n \" Plot nicer lenses and randoms \"\n # plt_jack()\n\n exit()\n","sub_path":"measurements/Jackknife/run/jackknife_run.py","file_name":"jackknife_run.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"360232609","text":"import socket\nimport sys\nimport datetime\nimport random\nimport re\nimport math\n\n\nserverAddress = 'localhost'\nport = 12000\n\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\ntry:\n serverSocket.bind((serverAddress, port))\nexcept socket.error:\n print(\"Startimi i serverit deshtoi! Binding Failed.\")\n sys.exit()\n\n\nprint('Serveri eshte i gatshem te pranoje kekresa...' 
+\"\\n\")\n\ndef IPAddress(address):\n return address[0]\n\ndef PortNumber(address):\n return str(address[1])\n\ndef Consonants(text):\n consonantNr = 0\n for i in text.upper():\n if (i == 'B' or i == 'C' or i == 'D' or i == 'F' or i == 'G' or i == 'H' or i == 'J' or i == 'K'\n or i=='L' or i=='M' or i=='N' or i=='P' or i=='Q' or i=='R' or i=='S' or i=='T' or i=='V'\n or i == 'W' or i=='X' or i=='Z'):\n consonantNr += 1\n return str(consonantNr)\n\n\ndef Print(text):\n text = text.strip()\n return text\n\n\ndef PCName():\n pcName = socket.gethostname()\n if not pcName:\n return \"Emri i hostit nuk mund te gjendet!\"\n else:\n return pcName\n\n\ndef getTime():\n now = datetime.datetime.now()\n return \"Data dhe koha tani: \" + now.strftime(\"%Y-%m-%d , %H:%M:%S\")\n\n\ndef randNumber():\n numbersArray = []\n for i in range(7):\n number = random.randint(1, 49)\n numbersArray.append(number)\n numbers = str(numbersArray)\n numbers = numbers.replace('[', '(').replace(']', ')')\n return numbers\n\n\ndef fibonacci(member):\n try:\n if member.isdigit():\n memberNo = int(member)\n fibSeq = []\n fibSeq.append(0)\n fibSeq.append(1)\n for i in range(2, memberNo):\n fibSeq.append(fibSeq[i-1]+fibSeq[i-2])\n return \"Anetari i \" + member + \" i vargut Fibonacci eshte numri: \" + str(fibSeq[memberNo-1])\n else:\n return \"Komande jo valide!\"\n except MemoryError:\n print(\"Numri qe ka derguar klienti ka qene shume i madh dhe i paprocesueshem\")\n return \"Numri qe keni derguar ka qene shume i madh dhe i paprocesueshem\"\n\ndef convert(urdheri):\n vlera = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", urdheri)\n vlera = str(vlera).replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\").replace(\"'\", \"\")\n vlera = float(vlera) # ekstraktimi i numrit nga stringu i marre\n if urdheri[11:31].upper()=='KILOWATTTOHORSEPOWER':\n return str(vlera) + \" KiloWatt = \" + str(vlera*1.34102) + \"HorsePower\"\n elif urdheri[11:31].upper()=='HORSEPOWERTOKILOWATT':\n return str(vlera) + \" HorsePower = \" +str(vlera/1.341) +\" KiloWatt\"\n elif urdheri[11:27].upper() == 'DEGREESTORADIANS':\n return str(vlera) + \" degrees = \"+ str(vlera*math.pi/180) + \" rad\"\n elif urdheri[11:27].upper() == 'RADIANSTODEGREES':\n return str(vlera) +\" rad = \" + str(vlera*180/math.pi) + \" degrees\"\n elif urdheri[11:26].upper() == 'GALLONSTOLITERS':\n return str(vlera) + \" gallons = \" + str(vlera * 3.785) + \" Liters\"\n elif urdheri[11:26].upper() == 'LITERSTOGALLONS':\n return str(vlera) + \" Liters = \" + str(vlera / 3.785) + \" Gallons\"\n else:\n return \"Keni dhene formatin gabim!\"\n\n\ndef prim(number):\n if number.isdigit():\n number = int(number)\n nrPjest = 0\n for i in range(1, number+1):\n if number % i == 0:\n nrPjest += 1\n\n if nrPjest==2:\n return \"Numri \" + str(number) + \" eshte numer prim\"\n else:\n return \"Numri \" + str(number) + \" nuk eshte numer prim\"\n else:\n return \"Keni dhene komande jo valide!\"\n\n\ndef rockpapersci(human):\n aksioni = (\"Rock\", \"paper\", \"scissors\")\n pc = aksioni[random.randint(0, 2)]\n\n human = human.lower()\n pc = pc.lower()\n\n if human == pc:\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nDraw\"\n elif human == 'paper':\n if pc == 'rock':\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nYou win\"\n elif pc == 'scissors':\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nYou lose\"\n\n elif human == 'scissors':\n if pc == 'rock':\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nYou lose\"\n elif pc == 
'paper':\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nYou win\"\n\n elif human == 'rock':\n if pc == 'paper':\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nYou lose\"\n if pc == 'scissors':\n return \"(You) \" + human + \" vs \" + pc + \" (PC)\" + \"\\nYou win\"\n else:\n return \"Keni dhene komande jo valide!\"\n\n\nwhile True:\n methodByte, client_address = serverSocket.recvfrom(128)\n method = methodByte.decode(\"utf-8\")\n print(\"Serveri u lidh me klientin \" + client_address[0] + \" me port \"+ str(client_address[1]) +\"\\n\")\n try:\n if not method:\n break\n if method.upper() == 'IPADRESA':\n answer = \"IP adresa e Klientit eshte: \" + IPAddress(client_address)\n elif method.upper() == 'NUMRIIPORTIT':\n answer = \"Klienti eshte duke perdorur portin \" + PortNumber(client_address)\n elif method[0:16].upper() == 'BASHKETINGELLORE':\n answer = \"Teksti i shenuar ka \" + Consonants(method[17:]) + \" bashketingellore\"\n elif method[0:8].upper() == 'PRINTIMI':\n answer = \"Teksti i printuar: \" + Print(method[8:])\n elif method.upper() == 'EMRIIKOMPJUTERIT':\n answer = \"Emri i hostit: \" + PCName()\n elif method.upper() == 'KOHA':\n answer = getTime()\n elif method.upper() == 'LOJA':\n answer = randNumber()\n elif method[0:9].upper() == 'FIBONACCI':\n answer = fibonacci(method[10:])\n elif method[0:10].upper() == 'KONVERTIMI':\n answer = convert(method)\n elif method[0:4].upper() == 'PRIM':\n answer = prim(method[5:])\n elif method[0:17].upper() == 'ROCKPAPERSCISSORS':\n answer = rockpapersci(method[18:])\n else:\n answer = \"Keni dhene nje komande jo valide!\"\n print(\"Klienti ka dhene komande jo valide!\")\n serverSocket.sendto(str.encode(answer), client_address)\n print(\"Klientit \" + client_address[0] + \" iu dergua pergjigja: \" + str(answer) + \"\\n\")\n except ConnectionResetError:\n print(\"Serveri u shkeput me klient!\")\n except ConnectionAbortedError:\n print(\"Serveri u shkeput me klient!\")","sub_path":"FIEK_UDP_Serveri.py","file_name":"FIEK_UDP_Serveri.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"160121949","text":"import discord\nfrom discord.ext import commands\n\nimport datetime\n\n\"\"\"Personal note: these are the limits for embeds: Number of fields = 25, field name = 256, value = 1024, description = 2048\"\"\"\n\n\nclass UserCog:\n \"\"\"Cog meant to interact with the user data type in Discord. 
\"\"\"\n\n def __init__(self, client):\n self.client = client\n\n @commands.command(pass_context=True)\n async def userinfo(self, ctx, member: discord.User = None):\n \"\"\"Function meant to return information about a user, or if none is specified, the user who called the command.\"\"\"\n\n if member is None:\n member = ctx.message.author\n\n userinfoembed = discord.Embed(color=member.color)\n\n userinfoembed.set_author(name=\"{0}#{1}\".format(member.name, member.discriminator), icon_url=member.avatar_url)\n\n userinfoembed.set_thumbnail(url=member.avatar_url)\n\n role_list = []\n for x in member.roles:\n role_list.append(x)\n\n for x in role_list:\n if x.is_everyone:\n role_list.pop(role_list.index(x))\n\n userroles = \"\"\n for x in role_list:\n userroles += x.name + \"\\n\"\n\n # Personal note: inline defaults to true, if set to false will put the field on a separate line.\n userinfoembed.add_field(name=\"**Username:**\", value=\"{0}#{1}\".format(member.name, member.discriminator))\n userinfoembed.add_field(name=\"**Nickname:**\", value=member.display_name)\n\n userinfoembed.add_field(name=\"**User ID:**\", value=member.id)\n userinfoembed.add_field(name=\"**Status:**\", value=member.status)\n\n userinfoembed.add_field(name=\"**Join Date:**\", value=member.joined_at.strftime(\"%Y-%m-%d %H:%M:%S%Z\")) # can add .%f for microseconds but that messes up formatting :p\n userinfoembed.add_field(name=\"**Creation Date:**\", value=str(member.created_at)[0:19]) # For ex, 2016-01-29 16:21:03.342000 will be cut down to 2016-01-29 16:21:03\n\n userinfoembed.add_field(name=\"**Game:**\", value=member.game)\n userinfoembed.add_field(name=\"**Roles:**\", value=userroles)\n\n userinfoembed.set_footer(text=datetime.datetime.now().strftime(\"Generated on: %Y-%m-%d, At: %H:%M:%S%Z\"))\n\n await self.client.say(embed=userinfoembed)\n\n @commands.command(pass_context=True)\n async def serverinfo(self, ctx):\n \"\"\"Returns information on the server the command is called in.\"\"\"\n\n serverinfoembed = discord.Embed(color=14434903)\n\n voicechannels = 0\n textchannels = 0\n\n for x in ctx.message.server.channels:\n if x.type == discord.ChannelType.text:\n textchannels += 1\n elif x.type == discord.ChannelType.voice:\n voicechannels += 1\n\n # Only prints up to 20, causes errors if too many roles or emojis.\n emojistring = \"\"\n for x in ctx.message.server.emojis[0:10]:\n emojistring += x.name + \": \" + str(x) + \"\\n\"\n\n role_list = []\n for x in ctx.message.server.roles[0:11]:\n role_list.append(x)\n\n for x in role_list:\n if x.is_everyone:\n role_list.pop(role_list.index(x))\n\n rolestring = \"\"\n for x in role_list:\n rolestring += x.name + \"\\n\"\n\n serverinfoembed.set_author(name=\"Server: {0}\".format(ctx.message.server.name), icon_url=ctx.message.server.icon_url)\n\n serverinfoembed.set_thumbnail(url=ctx.message.server.icon_url)\n\n serverinfoembed.add_field(name=\"**Server Name:**\", value=ctx.message.server.name)\n serverinfoembed.add_field(name=\"**Server Region:**\", value=ctx.message.server.region)\n\n serverinfoembed.add_field(name=\"**Server ID:**\", value=ctx.message.server.id)\n serverinfoembed.add_field(name=\"**Creation Date:**\", value=str(ctx.message.server.created_at)[0:19])\n\n serverinfoembed.add_field(name=\"**Owner:**\", value=\"{0}#{1}\".format(ctx.message.server.owner.name, ctx.message.server.owner.discriminator))\n serverinfoembed.add_field(name=\"**Server Icon:**\", value=\"[Click me!]({0})\".format(ctx.message.server.icon_url))\n\n 
serverinfoembed.add_field(name=\"**Channels (Voice/Text)**\", value=\"{0}/{1}\".format(voicechannels, textchannels))\n serverinfoembed.add_field(name=\"**Member Count:**\", value=str(len(ctx.message.server.members)))\n\n serverinfoembed.add_field(name=\"**Roles({0}) (This list only contains the first 10! Use the roles command to see all of them.):**\".format(len(ctx.message.server.roles)), value=rolestring)\n serverinfoembed.add_field(name=\"**Emojis({0}) (This list only contains the first 10! Use the emojis command to see all of them.):**\".format(len(ctx.message.server.emojis)), value=emojistring)\n\n serverinfoembed.set_footer(text=datetime.datetime.now().strftime(\"Generated on: %Y-%m-%d, At: %H:%M:%S%Z\"))\n\n await self.client.say(embed=serverinfoembed)\n\n @commands.command(pass_context=True)\n async def emojis(self, ctx):\n \"\"\"Will return all of the emojis on the server, split into 1-25 and 26-50 to avoid hitting the character limit.\"\"\"\n\n emojistring1 = \"\"\n for x in ctx.message.server.emojis[0:25]:\n emojistring1 += x.name + \" \" + str(x) + \"\\n\"\n\n emojistring2 = \"\"\n for x in ctx.message.server.emojis[25:50]:\n emojistring2 += x.name + \" \" + str(x) + \"\\n\"\n\n emojistring3 = \"\"\n for x in ctx.message.server.emojis[50:75]:\n emojistring3 += x.name + \" \" + str(x) + \"\\n\"\n\n emojistring4 = \"\"\n for x in ctx.message.server.emojis[75:]:\n emojistring4 = x.name + \" \" + str(x) + \"\\n\"\n\n if emojistring1 is \"\" and emojistring2 is \"\":\n await self.client.say(\"No emojis currently exist on the server!\")\n\n if emojistring1 is not \"\":\n await self.client.say(\"**Emojis 1-25:**\\n{0}\".format(emojistring1))\n if emojistring2 is not \"\":\n await self.client.say(\"**Emojis 25-50:**\\n{0}\".format(emojistring2))\n if emojistring3 is not \"\":\n await self.client.say(\"**Emojis 50-75:**\\n{0}\".format(emojistring3))\n if emojistring4 is not \"\":\n await self.client.say(\"**Emojis 75-100:**\\n{0}\".format(emojistring4))\n elif emojistring1 is \"\":\n await self.client.say(\"No emojis currently exist on the server!\")\n\n @commands.command(pass_context=True)\n async def roles(self, ctx):\n role_list = []\n\n for x in ctx.message.server.roles:\n role_list.append(x)\n\n for x in role_list:\n if x.is_everyone:\n role_list.pop(role_list.index(x))\n\n rolestring = \"\"\n for role in role_list[0:25]:\n rolestring += role.name + \"\\n\"\n\n rolestring2 = \"\"\n for role in role_list[25:50]:\n rolestring2 += role.name + \"\\n\"\n\n rolestring3 = \"\"\n for role in role_list[50:75]:\n rolestring3 += role.name + \"\\n\"\n\n rolestring4 = \"\"\n for role in role_list[75:100]:\n rolestring4 += role.name + \"\\n\"\n\n if rolestring is not \"\":\n await self.client.say(\"**Roles 1-25:**\\n{0}\".format(rolestring))\n if rolestring2 is not \"\":\n await self.client.say(\"**Roles 25-50:**\\n{0}\".format(rolestring2))\n if rolestring3 is not \"\":\n await self.client.say(\"**Roles 50-75:**\\n{0}\".format(rolestring3))\n if rolestring4 is not \"\":\n await self.client.say(\"**Roles 75-100:**\\n{0}\".format(rolestring4))\n if rolestring is \"\":\n await self.client.say(\"There are currently no roles on this server!\")\n\n @commands.command(pass_context=True)\n async def profilepicture(self, ctx, member: discord.User = None):\n \"\"\"Returns profile picture of the mentioned user, or the message author themself if none is specified.\"\"\"\n\n if member is None:\n member = ctx.message.author\n\n await self.client.say(member.avatar_url)\n\n\ndef setup(client):\n 
client.add_cog(UserCog(client))","sub_path":"main/cogs/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"533069727","text":"#!/usr/bin/env python2.7\n#\n# Unittest module for schedutils.py\n#\n\nimport unittest\n# import schedinc\nfrom utils import schedutils\n\n\nclass TestMotherClass(object):\n pass\n\n\nclass TestDaughterClass(TestMotherClass):\n pass\n\n\nclass TestGranddaughterClass(TestDaughterClass):\n pass\n\n\nclass TestSisterClass(TestMotherClass):\n pass\n\n\nclass TestGreatGrandDaughterClass(TestGranddaughterClass):\n pass\n\nclass TestSecondGrandDaughterClass(TestSisterClass):\n pass\n\n\nclass SchedUtilsTest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_get_all_subclasses(self):\n \"Find all subclasses of a given class\"\n # the expected reprs were destroyed by markup stripping in this dump;\n # they are reconstructed here assuming depth-first order and this\n # module's name; adjust if the real implementation orders differently\n self.assertEqual(str(schedutils.get_all_subclasses(TestMotherClass)),\n \"\"\"[<class 'schedutils_test.TestDaughterClass'>, <class 'schedutils_test.TestGranddaughterClass'>, <class 'schedutils_test.TestGreatGrandDaughterClass'>, <class 'schedutils_test.TestSisterClass'>, <class 'schedutils_test.TestSecondGrandDaughterClass'>]\"\"\")\n\n self.assertEqual(str(schedutils.get_all_subclasses(TestSisterClass)),\n \"\"\"[<class 'schedutils_test.TestSecondGrandDaughterClass'>]\"\"\")\n\n self.assertEqual(str(schedutils.get_all_subclasses(TestDaughterClass)),\n \"\"\"[<class 'schedutils_test.TestGranddaughterClass'>, <class 'schedutils_test.TestGreatGrandDaughterClass'>]\"\"\")\n\n def test_is_subclass(self):\n self.assertTrue(schedutils.is_subclass(TestSecondGrandDaughterClass,\n TestMotherClass))\n\n self.assertTrue(schedutils.is_subclass(TestSisterClass,\n TestMotherClass))\n\n self.assertFalse(schedutils.is_subclass(TestSecondGrandDaughterClass,\n TestDaughterClass))\n\n # False - class is subclass of itself\n self.assertFalse(schedutils.is_subclass(TestMotherClass,\n TestMotherClass))\n # False - class is subclass of its own subclass\n self.assertFalse(schedutils.is_subclass(TestMotherClass, TestSecondGrandDaughterClass))\n \n def test_strip_tags(self):\n \"Stripping HTML tags\"\n # the original HTML fixtures were mangled when this file was dumped;\n # the tags below are plausible placeholders around the surviving text\n self.assertEquals(schedutils.strip_tags(\"<p><b>MSJ\"),\n \"MSJ\")\n self.assertEquals(schedutils.strip_tags(\"<p><b>OFF</b></p>\"),\n \"OFF\")\n self.assertEquals(schedutils.strip_tags(\"<p><b>OFF\"),\n \"OFF\")\n self.assertEquals(schedutils.strip_tags(\"<div><span>OFF\"),\n \"OFF\")\n # self.assertEquals(schedutils.strip_tags(\"OFF<br></div>\"),\n # \"OFF\")\n self.assertEquals(schedutils.strip_tags(\"<div><span>OFF</span></div>\"),\n \"OFF\")\n self.assertEquals(schedutils.strip_tags(\"<p><b>&OFF</b></p>\"),\n \"OFF\")\n self.assertEquals(schedutils.strip_tags(\"<p><b>&lt;OFF&gt;</b></p>
\"),\n \"OFF\")\n self.assertEquals(schedutils.strip_tags(\"\"),\n \"\")\n\n def test_subtract_lists(self):\n a = [1, 2, 3, 4, 5]\n b = [1, 2, 3]\n self.assertItemsEqual(schedutils.subtract_lists(a, b),\n [4, 5])\n\n self.assertItemsEqual(schedutils.subtract_lists(b, a),\n [])\n self.assertItemsEqual(schedutils.subtract_lists(a, a),\n [])\n\n c = ['a', 'b', 'c','d', 'e']\n d = ['c', 'd', 'e']\n\n self.assertItemsEqual(schedutils.subtract_lists(c, d),\n ['a','b'])\n\n self.assertItemsEqual(schedutils.subtract_lists([], b),\n [])\n self.assertItemsEqual(schedutils.subtract_lists(b, []),\n b)\n\n def test_list_intersection(self):\n a = [1, 2, 3, 4, 5]\n b = [1, 2, 3]\n self.assertItemsEqual(schedutils.list_intersection(a, b),\n [1,2,3])\n self.assertItemsEqual(schedutils.list_intersection(b, a),\n [1,2,3])\n\n self.assertItemsEqual(schedutils.list_intersection(a, []),\n [])\n self.assertItemsEqual(schedutils.list_intersection([], []),\n [])\n \n def test_sort_dict_by_val(self):\n d = {'a': 1,\n 'b': 100,\n 'c': 50,\n 'd': 43,\n 'e': -120,\n 'f': 0\n }\n\n self.assertItemsEqual(schedutils.sort_dict_by_val(d),\n [('e', -120),\n ('f', 0),\n ('a', 1),\n ('d', 43),\n ('c', 50),\n ('b', 100)\n ])\n self.assertItemsEqual(schedutils.sort_dict_by_val(d, reverse=True),\n [\n ('b', 100),\n ('c', 50),\n ('d', 43),\n ('a', 1),\n ('f', 0),\n ('e', -120), \n ])\n d2 = {}\n self.assertItemsEqual(schedutils.sort_dict_by_val(d2),\n [])\n self.assertItemsEqual(schedutils.sort_dict_by_val(None),\n [])\n\n \n def test_sort_dict_by_key(self):\n d = {'a': 1,\n 'f': 0,\n 'b': 100,\n 'c': 50,\n 'd': 43,\n 'e': -120,\n }\n self.assertItemsEqual(schedutils.sort_dict_by_key(d),\n [('a', 1), ('b', 100), ('c', 50), ('d', 43), ('e', -120),\n ('f', 0)])\n self.assertItemsEqual(schedutils.sort_dict_by_key(d, reverse=True),\n [('f', 0), ('e', -120), ('d', 43), ('c', 50),\n ('b', 100), ('a', 1)])\n self.assertItemsEqual(schedutils.sort_dict_by_key({}),\n [])\n\n def test_merging_dicts(self):\n # >>> d1 = {'A':1,'B':2,'C':3}\n # >>> d2 = {'B':22, 'C':33, 'Z':9}\n # >>> print merge_dicts(d1, d2)\n # {'A': (1, 0), 'C': (3, 33), 'B': (2, 22), 'Z': (0, 9)}\n\n d1 = {'A':1,'B':2,'C':3}\n d2 = {'B':22, 'C':33, 'Z':9}\n self.assertDictEqual(schedutils.merge_dicts(d1, d2),\n {'A': (1, 0), 'C': (3, 33), 'B': (2, 22), 'Z': (0, 9)})\n\n def test_flatten(self):\n seq = []\n for x in schedutils.flatten( [(1,2,3),(3,4,5),8, (9, (10,11))]):\n seq.append(x)\n self.assertItemsEqual(seq, [1,2,3,3,4,5,8,9,10,11])\n\n\n def test_dictFromSequence(self):\n seq = [1,2,3,4,5,6,7,8]\n self.assertDictEqual(schedutils.dictFromSequence(seq),\n {1:2,\n 3:4,\n 5:6,\n 7:8})\n seq2 = ['a', 1, 'b', 2, \"c\", 3]\n self.assertDictEqual(schedutils.dictFromSequence(seq2),\n {'a':1,\n 'b':2,\n 'c':3})\n seq3 = ['a', \"foo\", 'b', 2, \"c\", (1,2,3)]\n self.assertDictEqual(schedutils.dictFromSequence(seq3),\n {'a':\"foo\",\n 'b':2,\n 'c': (1,2,3,)})\n\n\nclass InvertDictTest(unittest.TestCase):\n def test_invert_dict(self):\n d1 = {'a':1,\n 'b':2,\n 'c':3}\n self.assertDictEqual(schedutils.invert_dict(d1),\n {1: 'a', 2: 'b', 3: 'c'})\n\n def test_invert_dict_of_lists_error(self):\n \"Should raise type error because values need to be lists\"\n d1 = {'a':1,\n 'b':2,\n 'c':3}\n self.assertRaises(TypeError, schedutils.invert_dict_of_lists,d1)\n\n def test_invert_dict_of_lists_normal(self):\n d1 = {'a':[1,2],\n 'b':[3,4],\n 'c':[5,6]}\n self.assertDictEqual({1: ['a'], 2: ['a'], 3: ['b'], \n 4: ['b'], 5: ['c'], 6: ['c']},\n schedutils.invert_dict_of_lists(d1))\n\n def 
test_invert_dict_of_lists_dupes(self):\n d1 = {'a':[1,2],\n 'b':[2,3,4],\n 'c':[4,6]}\n self.assertDictEqual({1: ['a'], 2: ['a', 'b'], 3: ['b'], 4: ['c', 'b'], \n 6: ['c']},\n schedutils.invert_dict_of_lists(d1))\n \n\nclass SchedUtilsReorderTest(unittest.TestCase):\n def test_reorder_list_basic(self):\n elems = ['one', 'two', 'three', 'four', 'five']\n unordered = ['two', 'five', 'three']\n\n self.assertListEqual(['two', 'three', 'five'],\n schedutils.reorder_list(unordered, elems))\n\n self.assertListEqual(['two', 'five','three'],\n schedutils.reorder_list(elems, unordered))\n\n def test_reorder_list_silent_drop(self):\n elems = ['one', 'two', 'three', 'four', 'five']\n dropped = ['five', 'one', 'ninetynine']\n\n # drop 'ninetytine' because not in template\n self.assertListEqual(['one', 'five'],\n schedutils.reorder_list(dropped, elems))\n \n\n def test_reorder_list_malformed(self):\n elems = ['one', 'two', 'three', 'four', 'five']\n self.assertListEqual([],\n schedutils.reorder_list([], elems))\n\n self.assertListEqual([],\n schedutils.reorder_list([], []))\n self.assertListEqual([],\n schedutils.reorder_list(None, []))\n self.assertListEqual([],\n schedutils.reorder_list([], None))\n \n\nclass MonthYearSchedUtilsTest(unittest.TestCase):\n\n def test_month_year_iter(self):\n l = []\n for (y,m) in schedutils.month_year_iter(11,2009,7,2012):\n l.append( (y,m) )\n self.assertListEqual([(2009, 11),\n (2009, 12),\n (2010, 1),\n (2010, 2),\n (2010, 3),\n (2010, 4),\n (2010, 5),\n (2010, 6),\n (2010, 7),\n (2010, 8),\n (2010, 9),\n (2010, 10),\n (2010, 11),\n (2010, 12),\n (2011, 1),\n (2011, 2),\n (2011, 3),\n (2011, 4),\n (2011, 5),\n (2011, 6),\n (2011, 7),\n (2011, 8),\n (2011, 9),\n (2011, 10),\n (2011, 11),\n (2011, 12),\n (2012, 1),\n (2012, 2),\n (2012, 3),\n (2012, 4),\n (2012, 5),\n (2012, 6)],\n l)\n\n def test_month_year_iter_degenerate_input(self):\n l = []\n # end is before start\n for (y,m) in schedutils.month_year_iter(11,2009,7,2008):\n l.append( (y,m) )\n self.assertListEqual([],\n l)\n\n def test_month_year_iter_degenerate_input(self):\n l = []\n # start and end the same\n for (y,m) in schedutils.month_year_iter(11,2009,11,2009):\n l.append( (y,m) )\n self.assertListEqual([],\n l)\n\n def test_month_year_iter_single_month(self):\n l = []\n # just one month span\n for (y,m) in schedutils.month_year_iter(11,2009,12,2009):\n l.append( (y,m) )\n self.assertListEqual( [(2009, 11)],\n l)\n def test_tableify(self):\n \"Table Tests\"\n t = {(\"a1\", \"b1\"): 1,\n (\"a2\",\"b2\"): 2,\n (\"a3\",\"b3\"): 3,\n }\n t2 = schedutils.tableify(t)\n\n self.assertEqual(t2,[['', 'b1', 'b2', 'b3'],\n ['a1', 1, '', ''],\n ['a2', '', 2, ''],\n ['a3', '', '', 3]])\n\n d1 = {\"b1\":10,\n \"b2\": 9,\n \"b3\": 8}\n d2 = {\"a1\":10,\n \"a2\": 9,\n \"a3\": 8}\n t3 = schedutils.tableify(t, col_sorting_dict=d1)\n self.assertEqual(t3,[['', 'b3', 'b2', 'b1'],\n ['a1', '', '', 1],\n ['a2', '', 2, ''],\n ['a3', 3, '', '']])\n\n t4 = schedutils.tableify(t, row_sorting_dict=d2)\n self.assertEqual(t4,[['', 'b1', 'b2', 'b3'],\n ['a3', '', '', 3],\n ['a2', '', 2, ''],\n ['a1', 1, '', '']])\n\n t5 = schedutils.tableify(t, col_sorting_dict=d1, row_sorting_dict=d2)\n self.assertEqual(t5,[['', 'b3', 'b2', 'b1'],\n ['a3', 3, '', ''],\n ['a2', '', 2, ''],\n ['a1', '', '', 1]])\n\n t6 = schedutils.tableify(t, transpose=True, col_sorting_dict=d1, row_sorting_dict=d2)\n self.assertEqual(t6,[['', 'a3', 'a2', 'a1'],\n ['b3', 3, '', ''],\n ['b2', '', 2, ''],\n ['b1', '', '', 1]])\n\nclass 
SchedUtilsFilterTest(unittest.TestCase):\n def test_filter_dict_basic(self):\n d1 = {'a':1,\n 'b':2,\n 'c':3}\n self.assertDictEqual( {'a': 1, 'b': 2},\n schedutils.filter_dict(d1, ['a','b']))\n \n self.assertDictEqual( {},\n schedutils.filter_dict(d1, ['e','d']))\n\n self.assertDictEqual( {'a': 1},\n schedutils.filter_dict(d1, ['a']))\n\n self.assertDictEqual( {},\n schedutils.filter_dict(d1, []))\n\n self.assertDictEqual( {},\n schedutils.filter_dict({}, []))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/unit/utils/schedutils_test.py","file_name":"schedutils_test.py","file_ext":"py","file_size_in_byte":14673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"563730819","text":"from cv2 import cv2\nimport numpy as np\n\n# 2. 모델 불러오기\nfrom keras.models import load_model\ndef prediction(img,model):\n model = model\n img = cv2.resize(img, dsize=(100, 100), interpolation=cv2.INTER_AREA)\n x=[]\n x.append(img/256)\n img = np.array(x)\n\n print(\"예측\")\n k=model.predict(img)\n\n for i in k:\n if i[0] > i[1]:\n return \"아이유\",k\n else:\n return \"수지\",k\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"538398686","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\nclass Movie:\n\n type= [\"爱情片\" , \"动作片\", \"科幻片\"]\n time= {f\"120min\"}\n pingfen=0\n def __init__(self,pingfen,name,type):\n self.pingfen = pingfen\n self.name = name\n self.type = type\n\n def yingping(self):\n if self.pingfen >= 8:\n print(f\"影评:好看\")\n elif self.pingfen <= 4:\n print(f\"影评:难看\")\n else:\n print(f\"影评:一般\")\n\n\nprint(Movie.type)\nM=Movie(8,\"绣春刀\",\"动作片\")\nprint(f\"电影:{M.name} 评分: {M.pingfen} 类型: {M.type}\")\nM.yingping()\n\n","sub_path":"Movie.py","file_name":"Movie.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"95148309","text":"from selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom base_classes.component import Component\n\n\nclass CreateTaskForm(Component):\n CONTAINER = '//div[contains(@class, \"js-addNewTask\")]'\n\n TITLE_INPUT = '//input[@id=\"inputNewTaskTitle\"]'\n CLOSE_BUTTON = None\n SUBMIT_BUTTON = None\n CLOSED_FORM = '//div[contains(@class, \"task-list-add-task-button\")' \\\n ' and contains(@class,\"js-addNewTask\")]'\n\n def __init__(self, driver: WebDriver, column_id: int):\n Component.__init__(self, driver)\n self.column_id = column_id\n self.CONTAINER = f'//div[contains(@class, \"js-addNewTask\") and @data-column-id=\"{column_id}\"]'\n self.SUBMIT_BUTTON = f'//div[@id=\"addTaskButton{column_id}\"]'\n self.CLOSE_BUTTON = f'//div[@id=\"closeNewTaskFormButton{column_id}\"]'\n self.CLOSED_FORM = f'//div[contains(@class, \"task-list-add-task-button\")' \\\n f' and contains(@class,\"js-addNewTask\")' \\\n f' and @data-column-id=\"{column_id}\"]'\n\n def open(self):\n self.driver.find_element_by_xpath(self.CONTAINER).click()\n\n def close(self):\n self.driver.find_element_by_xpath(self.CLOSE_BUTTON).click()\n\n def set_title(self, title: str):\n self.driver.find_element_by_xpath(self.TITLE_INPUT).send_keys(title)\n\n def submit(self):\n self.driver.find_element_by_xpath(self.SUBMIT_BUTTON).click()\n\n def wait_for_closed(self):\n WebDriverWait(self.driver, 10).until(\n lambda d: 
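The schedutils test suite above fixes the expected behavior of subtract_lists and invert_dict_of_lists without showing the module itself. A minimal sketch that would satisfy those assertions (inferred from the tests, not taken from the real schedutils) is:

def subtract_lists(a, b):
    # Keep the elements of a that never appear in b.
    b_set = set(b)
    return [x for x in a if x not in b_set]

def invert_dict_of_lists(d):
    # {'a': [1, 2], 'b': [2]} -> {1: ['a'], 2: ['a', 'b']}; iterating a
    # non-list value raises TypeError, which is what the error test expects.
    inverted = {}
    for key, values in d.items():
        for v in values:
            inverted.setdefault(v, []).append(key)
    return inverted

print(subtract_lists([1, 2, 3, 4, 5], [1, 2, 3]))        # [4, 5]
print(invert_dict_of_lists({'a': [1, 2], 'b': [2, 3]}))  # {1: ['a'], 2: ['a', 'b'], 3: ['b']}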
d.find_element_by_xpath(self.CLOSED_FORM)\n )\n","sub_path":"components/board/tasks/create_task_form.py","file_name":"create_task_form.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"626113807","text":"\nimport sys,subprocess\nfrom math import *\n#The file passed by argument\ninput_file=open(sys.argv[1],'r')\n\nreport_file=\"./reports/\"+sys.argv[1].split(\".\")[0]+\"_GRA-NACC.csv\"\n#Output strings\nlast_acc=[0.0,0.0,0.0]\nlast_grav=[0.0,0.0,0.0]\nclean=\"current time,Gx,Gy,Gz,Ax,Ay,Az\\n\"\n\nNANOINSEC=1000000000\n\ni=0\n#Turns the lineal acc into lineal vel, and standarize the format of the rotational vel\nfor line in input_file:\n\ti=i+1\n\tfirst_split=line.split(\",\",1)\n\tsecond_split=first_split[1].split(\",\")\n\tcurrent_time=float(second_split[0])/NANOINSEC\n\tif (first_split[0]==\"GRA\"):\t\t\n\t\tlast_grav=[float(second_split[1]),float(second_split[2]),float(second_split[3])]\n\t\tif(i%25==0):\n\t\t\tclean+=str(current_time)+\",\"+str(last_grav[0])+\",\"+str(last_grav[1])+\",\"+str(last_grav[2])+\",\"+str(last_acc[0])+\",\"+str(last_acc[1])+\",\"+str(last_acc[2])+\"\\n\"\n\t\t\n\tif(first_split[0]==\"NACC\"):\n\t\tlast_acc=[float(second_split[1]),float(second_split[2]),float(second_split[3])]\n\t\tif(i%25==0):\n\t\t\tclean+=str(current_time)+\",\"+str(last_grav[0])+\",\"+str(last_grav[1])+\",\"+str(last_grav[2])+\",\"+str(last_acc[0])+\",\"+str(last_acc[1])+\",\"+str(last_acc[2])+\"\\n\"\n\t\t\ninput_file.close()\n\n\noutput_file=open(report_file,'w')\noutput_file.write(clean)\noutput_file.close()","sub_path":"tests/trajectoryCalculator/reporterGRA-NACC.py","file_name":"reporterGRA-NACC.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"20109415","text":"# 1.4 Пространства имён и области видимости – Step 8\n# v.1\n\n\nclass NameSpace:\n\n general_list = []\n\n def __init__(self, arg_name, arg_parent_ns):\n self.name = arg_name\n self.parent_namespace = arg_parent_ns\n self.var_list = []\n NameSpace.general_list.append(self)\n\n @staticmethod\n def get_ns_by_name(name):\n for ns in NameSpace.general_list:\n if ns.name == name:\n return ns\n return None\n\n\ndef main():\n # Global namespace should always exist\n NameSpace('global', None)\n\n n = int(input())\n for i in range(n):\n\n # get next command\n command, name_space, var = input().split(' ')\n\n # create \n if command == 'create':\n ns = NameSpace.get_ns_by_name(var)\n if ns is not None:\n NameSpace(name_space, ns)\n\n # add \n elif command == 'add':\n ns = NameSpace.get_ns_by_name(name_space)\n if ns is not None:\n ns.var_list.append(var)\n\n # get \n elif command == 'get':\n ns = NameSpace.get_ns_by_name(name_space)\n found = 'None'\n while ns is not None and found == 'None':\n for v in ns.var_list:\n if var == v:\n found = ns.name\n break\n else:\n ns = ns.parent_namespace\n print(found)\n\n else:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Step1-4-8.py","file_name":"Step1-4-8.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"345212686","text":"def shifttext(inp, shift):\n data = []\n #strs = 'abcdefghijklmnopqrstuvwxyz'\n strs = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n for i in inp:\n if i.strip() and i in strs:\n data.append(strs[(strs.index(i) + shift) % 26])\n else:\n data.append(i)\n output = 
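The namespace exercise above resolves a variable by walking parent_namespace links from the innermost scope toward 'global'. Reduced to the lookup itself (the class and function names here are illustrative, not part of the original solution):

class NS:
    def __init__(self, name, parent):
        self.name, self.parent, self.vars = name, parent, []

def resolve(ns, var):
    # Climb the chain of enclosing namespaces until the variable is found.
    while ns is not None:
        if var in ns.vars:
            return ns.name
        ns = ns.parent
    return 'None'

glob = NS('global', None); glob.vars.append('a')
foo = NS('foo', glob);     foo.vars.append('b')
print(resolve(foo, 'b'))   # foo
print(resolve(foo, 'a'))   # global (found in the parent scope)
print(resolve(foo, 'c'))   # None (not defined anywhere up the chain)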
''.join(data)\n return output\n\ncip = 'EVIRE'\n\nfor i in range(26):\n output = shifttext(cip, i)\n print(\"%d & %s\" % (i, output) + r'\\\\')\n","sub_path":"C1-Cryptography-Overview/shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"130583624","text":"import tensorflow as tf\n\nidx1 = [1,2,3]\nidx2 = [2,4,5]\n\nintersection = tf.sets.intersection(tf.expand_dims(tf.convert_to_tensor(idx1), 0), tf.expand_dims(tf.convert_to_tensor(idx2), 0))\nsess = tf.compat.v1.Session()\nwith sess.as_default():\n assert tf.compat.v1.get_default_session() is sess\n print(intersection.eval())","sub_path":"results/StackOverflowRepaired/s57917398_repaired_1.py","file_name":"s57917398_repaired_1.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"652526665","text":"import pickle\nimport argparse\nimport matplotlib.pyplot as plt\nimport pylab as pl\nimport numpy as np\nfrom powerplant_classes import PowerPlant\n\n# read args\nparser = argparse.ArgumentParser(description='Create Histogram from '\\\n + 'power plant database.')\nparser.add_argument('PowerWatch', type=str, help='Powerplant database.')\nparser.add_argument('CARMA', type=str, \n\thelp=\"optional another database for comparison\")\nparser.add_argument('GEO', type=str, \n\thelp=\"optional another database for comparison\")\nargs = parser.parse_args()\n\n# capacity list is a list of lists, one for each database\ncapacity_list = []\n# read databases\nfor database in [args.PowerWatch, args.CARMA, args.GEO]:\n\twith open(database, 'rb') as f:\n\t\tpower_plant_list = pickle.load(f)\n\t\tcapacity_list.append(sorted(filter(None,\n\t\t\t[plant.capacity for plant in power_plant_list])))\n\ncumulative = [np.cumsum(database) for database in capacity_list]\n\n# create histogram\nbins = np.logspace(-2,4,51)\nlinbins = np.arange(10,200,10)\n\n# plt.hist(capacity_list, bins, histtype='step',\n# \tlabel = ['PowerWatch', 'CARMA', 'GEO'], cumulative=True)\n\n# plt.plot(capacity_list[0], xrange(len(capacity_list[0])), label = 'PowerWatch')\n# plt.plot(capacity_list[1], xrange(len(capacity_list[1])), label = 'CARMA')\n# plt.plot(capacity_list[2], xrange(len(capacity_list[2])), label = 'GEO')\n\nplt.plot(capacity_list[0], cumulative[0], label = 'PowerWatch')\nplt.plot(capacity_list[1], cumulative[1], label = 'CARMA')\nplt.plot(capacity_list[2], cumulative[2], label = 'GEO')\n# plt.gca().set_xscale(\"log\")\n# plt.gca().set_yscale(\"log\")\n\n# pl.hist([capacity_list, capacity_list2, capacity_list3], linbins, histtype='step')\n\nplt.xlabel('Capacity in MW')\nplt.ylabel('Cumulative Numbers')\nplt.title('Cumulative Power Plant Number Distribution')\n# plt.axis([0.1, 28000, 1, 52000])\nplt.grid(True)\nplt.legend(loc='upper left')\nplt.show()","sub_path":"visualization/make_histograms.py","file_name":"make_histograms.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"365803505","text":"\"\"\"This module is for functions that create plots for analysing the data from\nfrunning a kernel on a graph. 
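The make_histograms script above draws each database's cumulative capacity curve by sorting the capacities and pairing them with np.cumsum. Stripped of the power-plant pickles, the core idiom looks like this (the lognormal sample is synthetic stand-in data):

import numpy as np
import matplotlib.pyplot as plt

capacities = np.sort(np.random.lognormal(mean=3.0, sigma=1.5, size=500))  # fake MW values
plt.plot(capacities, np.cumsum(capacities), label='synthetic database')
plt.xlabel('Capacity in MW')
plt.ylabel('Cumulative capacity')
plt.legend(loc='upper left')
plt.grid(True)
plt.show()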
The module paper_figures uses these functions for\nmaking figures with titles labels and captions for the paper about this research\n\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport numpy as np\n\nBINCOUNT = 50\n# #Traces\n# ============================================\ndef plot_kernel_traces(dframe, rows, **kwargs):\n \"\"\" Makes a plot of the kernel values for the rows over time\n\n Arguments:\n - `dframe`: the DataFrame of kernel values at time steps in the columns and vertices as rows\n - `rows`: the set of vertices to display\n - `**kwargs`: matplotlib keyword args passed to plot\n\n Returns:\n - `percs`: the kernel values restricted to the rows transposed for plotting\n \"\"\"\n # TODO: normalize\n percs = dframe.ix[rows].T\n ax = percs.plot(**kwargs)\n return percs\n\n\ndef random_targets_trace(dframe, nplots, nseries, pool=None, **kwargs):\n \"\"\" Make nplots figures that each display nseries samples from the data\n in dframe\n\n draws from pool or the index of the dframe if no pool is given\n \"\"\"\n if pool==None:\n pool = dframe.index\n targetcollection = [np.random.permutation(pool)[:nseries]\n for i in range(nplots)]\n [t.sort() for t in targetcollection]\n data = [plot_kernel_traces(dframe, t, **kwargs)\n for t in targetcollection]\n return data\n\n\ndef show_PCA(df):\n pca = mlab.PCA(df)\n plt.plot(pca.Y)\n return pca\n\n# # Density Estimation\n# ================================================================\n\n# ## Nonparametric\n# ----------------\ndef distribution_describe(df, colindex=None, plot=False,\n transform=None, **kwargs):\n \"\"\"Makes a simple description using pandas.describe\n If df is DataFrame with vertices in rows and time steps in columns\n then this will give a rough picture of how the distribution is changing over\n time.\n\n Arguments:\n - `df`:\n - `colindex`:\n - `plot`:\n\n Returns:\n - `desc`: the description frame\n\n Note:\n You can select elements from the description frame that is return if you\n would rather manipulate the data by hand.\n \"\"\"\n if transform is None:\n lf = df\n else:\n lf = transform(df)\n if colindex == None:\n colindex = df.columns\n fr = lf[colindex]\n desc = fr.describe().ix[[1, 2, 4, 5, 6]]\n if plot:\n desc.T.plot(**kwargs)\n return desc\n\n# ## Parametric\n# -------------\ndef cdf_plot(df, fitter=stats.norm,\n cdf=True, colors = ['b','g','r','c','m','y','k','w'],):\n \"\"\"\n\n Arguments:\n - `df`:\n - `fitter`: a statistical distribution with a fit method\n - `cdf`: defaults to True, showing the CDF version.\n If false use the survival function.\n \"\"\"\n ords = map(lambda s: df[s].dropna().order(ascending=cdf), df.columns)\n names = [s.name for s in ords]\n oframes = map(lambda s: pd.DataFrame({'x':s,\n 'CDF(x)':s.rank(ascending=cdf)/s.count()\n }).set_index('x'),\n ords)\n fig, ax = plt.subplots(1,1,1)\n figs = [ax.plot(seq.index, seq, color=col, label='%s empirical'%name)\n for seq, col, name in zip(oframes, colors, names)]\n #using a model if one is suggested to us.\n if fitter is not None:\n models = map(lambda s: fitter(*fitter.fit(s)), ords)\n domains = map(lambda s: np.linspace(s.min(),s.max(), 1000), ords)\n if cdf:\n yvals = [model.cdf(domain) for model, domain in zip(models,domains)]\n else:\n yvals = [model.sf(domain) for model, domain in zip(models,domains)]\n for x, y, col, name in zip(domains, yvals, colors, names):\n ax.plot(x, y, label='%s model' %name,\n color=col, linestyle='--')\n for model in models:\n print(model.args)\n return fig, 
ax\n\ndef show_histogram_parameteric_fit(seq, t, quantile=0, fitter=stats.norm):\n \"\"\" Show the histogram of a sequence along with a parametric fit. Allows for\n filtering using a quantile in case the fit only applies to the tail of the\n distibution.\n\n Arguments:\n - `seq`:\n - `t`:\n - `fitter`:\n \"\"\"\n seq.hist(bins=BINCOUNT, normed=True)\n scaling = 1\n if quantile:\n plt.axvline(x=seq.quantile(quantile), color='k',\n label='quantile %.2f' % quantile)\n filtered = seq[seq>seq.quantile(quantile)]\n scaling = 1-quantile\n else:\n filtered = seq\n if fitter is not None:\n params = fitter.fit(filtered)\n rv = fitter(*params)\n r = (filtered.min(), filtered.max())\n domain = np.arange(*r,step=(r[1]-r[0])/1000)\n pdf = pd.Series(rv.pdf(domain)*scaling, index=domain, name='pdf')\n pdf.plot(color='r')\n\n\n# # Correlation\n#======================================================\ndef correlation_changes_over_time(df, times, log=True,\n condition=np.median, color1='b', color2='r'\n ):\n \"\"\"\n Show how we have a strong correlation between two adjacent batches but a\n weaker correlation as the time goes further away. The condition and colors\n are to show that the correlation is better when condition=True and worse\n when it is False. greater than condition is color1. The colors will blend\n for records that change their conditional\n\n Arguments:\n - `df`:\n - `times`: the batches to show\n - `log`:default=True\n - `condition`:=np.median\n - `color1`:='b'\n - `color2`:='r'\n\n \"\"\"\n\n frame = df[times]\n if log:\n frame = np.log(frame)\n func = lambda s: condition(s.dropna())\n color_mask = np.where(frame > frame.apply(func), color1, color2)\n fig, axes = plt.subplots(len(times)-1,1)\n for i,t in enumerate(frame.columns[1:]):\n axis = axes[i]\n axis.scatter(x=frame[frame.columns[0]], y=frame[t],\n s=10, alpha=.25, c=color_mask[:,0])\n if color2:\n axis.scatter(x=frame[frame.columns[0]], y=frame[t],\n s=10, alpha=.7, c=color_mask[:,i])\n return fig, axes\n\n\n\ndef polyfit_plot(frame, degree=1, residuals=True,):\n \"\"\" Fit a dataframe and return the polynomials then show them on a plot.\n\n Arguments:\n - `frame`: the data you want to fit\n - `degree`: of the polynomial\n - `residuals`: do you want to residuals defaults to True\n \"\"\"\n\n paramframe = pd.DataFrame({s:np.polyfit(y=frame[s].ix[s::],\n x=frame[s].ix[s::].index,deg=degree)\n for s in frame})\n modelframe = pd.DataFrame({s:pd.Series(np.polyval(p=paramframe[s],\n x=frame[s].ix[s::].index),\n index=frame[s].ix[s::].index)\n for s in frame})\n ax = modelframe.plot(style='--', legend=False)\n frame.plot(ax=ax, style='+-', legend=False)\n ax.legend(ncol=2)\n if residuals:\n resids = (modelframe-frame)\n rax = resids.plot(kind='bar')\n return ax\n\n# # Putting vertices into Feature Space\ndef scatter_vertices(df, alpha=.3):\n \"\"\" make a scatter plot whose data elements are vertices and whos axes\n are summary statistics of kernel values over time.\n See kernel_analysis.summararize_vertices for a description of arguments.\n \"\"\"\n plt.scatter(df[df.columns[0]], df[df.columns[1]], alpha=alpha)\n fig = plt.gcf()\n ax = fig.axes[0]\n ax.set_xlabel(df.columns[0])\n ax.set_ylabel(df.columns[1])\n return fig, ax\n\n# # To see if we can apply a model\ndef auto_correlate(df, columns='diff'):\n \"\"\"makes a autocorrelation plot of the columns\n\n Arguments:\n - `df`:\n - `columns`:\n \"\"\"\n # TODO: autocorrelation\n correlogram = pd.tools.plotting.autocorrelation_plot\n plt.figure()\n ax = correlogram(df[columns].dropna())\n 
return ax\n","sub_path":"code/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"629794170","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport abc\nfrom typing import Awaitable, Callable, Dict, Optional, Sequence, Union\n\nimport google.api_core\nfrom google.api_core import exceptions as core_exceptions\nfrom google.api_core import gapic_v1, operations_v1\nfrom google.api_core import retry as retries\nimport google.auth # type: ignore\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.cloud.location import locations_pb2 # type: ignore\nfrom google.longrunning import operations_pb2 # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\nfrom google.cloud.edgecontainer_v1 import gapic_version as package_version\nfrom google.cloud.edgecontainer_v1.types import resources, service\n\nDEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(\n gapic_version=package_version.__version__\n)\n\n\nclass EdgeContainerTransport(abc.ABC):\n \"\"\"Abstract transport class for EdgeContainer.\"\"\"\n\n AUTH_SCOPES = (\"https://www.googleapis.com/auth/cloud-platform\",)\n\n DEFAULT_HOST: str = \"edgecontainer.googleapis.com\"\n\n def __init__(\n self,\n *,\n host: str = DEFAULT_HOST,\n credentials: Optional[ga_credentials.Credentials] = None,\n credentials_file: Optional[str] = None,\n scopes: Optional[Sequence[str]] = None,\n quota_project_id: Optional[str] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n always_use_jwt_access: Optional[bool] = False,\n api_audience: Optional[str] = None,\n **kwargs,\n ) -> None:\n \"\"\"Instantiate the transport.\n\n Args:\n host (Optional[str]):\n The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is mutually exclusive with credentials.\n scopes (Optional[Sequence[str]]): A list of scopes.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. 
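cdf_plot above gets an empirical CDF without binning: order the series, then divide each value's rank by the sample count. The same two-line trick in isolation, on made-up data:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

s = pd.Series(np.random.normal(size=1000)).sort_values()
ecdf = s.rank() / s.count()       # rank()/count() is F(x) for an ascending sort
plt.plot(s.values, ecdf.values)
plt.xlabel('x')
plt.ylabel('CDF(x)')
plt.show()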
If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n always_use_jwt_access (Optional[bool]): Whether self signed JWT should\n be used for service account credentials.\n \"\"\"\n\n scopes_kwargs = {\"scopes\": scopes, \"default_scopes\": self.AUTH_SCOPES}\n\n # Save the scopes.\n self._scopes = scopes\n\n # If no credentials are provided, then determine the appropriate\n # defaults.\n if credentials and credentials_file:\n raise core_exceptions.DuplicateCredentialArgs(\n \"'credentials_file' and 'credentials' are mutually exclusive\"\n )\n\n if credentials_file is not None:\n credentials, _ = google.auth.load_credentials_from_file(\n credentials_file, **scopes_kwargs, quota_project_id=quota_project_id\n )\n elif credentials is None:\n credentials, _ = google.auth.default(\n **scopes_kwargs, quota_project_id=quota_project_id\n )\n # Don't apply audience if the credentials file passed from user.\n if hasattr(credentials, \"with_gdch_audience\"):\n credentials = credentials.with_gdch_audience(\n api_audience if api_audience else host\n )\n\n # If the credentials are service account credentials, then always try to use self signed JWT.\n if (\n always_use_jwt_access\n and isinstance(credentials, service_account.Credentials)\n and hasattr(service_account.Credentials, \"with_always_use_jwt_access\")\n ):\n credentials = credentials.with_always_use_jwt_access(True)\n\n # Save the credentials.\n self._credentials = credentials\n\n # Save the hostname. Default to port 443 (HTTPS) if none is specified.\n if \":\" not in host:\n host += \":443\"\n self._host = host\n\n def _prep_wrapped_messages(self, client_info):\n # Precompute the wrapped methods.\n self._wrapped_methods = {\n self.list_clusters: gapic_v1.method.wrap_method(\n self.list_clusters,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.get_cluster: gapic_v1.method.wrap_method(\n self.get_cluster,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.create_cluster: gapic_v1.method.wrap_method(\n self.create_cluster,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.update_cluster: gapic_v1.method.wrap_method(\n self.update_cluster,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.delete_cluster: gapic_v1.method.wrap_method(\n self.delete_cluster,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.generate_access_token: gapic_v1.method.wrap_method(\n self.generate_access_token,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.list_node_pools: gapic_v1.method.wrap_method(\n self.list_node_pools,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.get_node_pool: gapic_v1.method.wrap_method(\n self.get_node_pool,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n 
client_info=client_info,\n ),\n self.create_node_pool: gapic_v1.method.wrap_method(\n self.create_node_pool,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.update_node_pool: gapic_v1.method.wrap_method(\n self.update_node_pool,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.delete_node_pool: gapic_v1.method.wrap_method(\n self.delete_node_pool,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.list_machines: gapic_v1.method.wrap_method(\n self.list_machines,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.get_machine: gapic_v1.method.wrap_method(\n self.get_machine,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.list_vpn_connections: gapic_v1.method.wrap_method(\n self.list_vpn_connections,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.get_vpn_connection: gapic_v1.method.wrap_method(\n self.get_vpn_connection,\n default_retry=retries.Retry(\n initial=1.0,\n maximum=10.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.create_vpn_connection: gapic_v1.method.wrap_method(\n self.create_vpn_connection,\n default_timeout=60.0,\n client_info=client_info,\n ),\n self.delete_vpn_connection: gapic_v1.method.wrap_method(\n self.delete_vpn_connection,\n default_timeout=60.0,\n client_info=client_info,\n ),\n }\n\n def close(self):\n \"\"\"Closes resources associated with the transport.\n\n .. 
warning::\n Only call this method if the transport is NOT shared\n with other clients - this may cause errors in other clients!\n \"\"\"\n raise NotImplementedError()\n\n @property\n def operations_client(self):\n \"\"\"Return the client designed to process long-running operations.\"\"\"\n raise NotImplementedError()\n\n @property\n def list_clusters(\n self,\n ) -> Callable[\n [service.ListClustersRequest],\n Union[service.ListClustersResponse, Awaitable[service.ListClustersResponse]],\n ]:\n raise NotImplementedError()\n\n @property\n def get_cluster(\n self,\n ) -> Callable[\n [service.GetClusterRequest],\n Union[resources.Cluster, Awaitable[resources.Cluster]],\n ]:\n raise NotImplementedError()\n\n @property\n def create_cluster(\n self,\n ) -> Callable[\n [service.CreateClusterRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def update_cluster(\n self,\n ) -> Callable[\n [service.UpdateClusterRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_cluster(\n self,\n ) -> Callable[\n [service.DeleteClusterRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def generate_access_token(\n self,\n ) -> Callable[\n [service.GenerateAccessTokenRequest],\n Union[\n service.GenerateAccessTokenResponse,\n Awaitable[service.GenerateAccessTokenResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def list_node_pools(\n self,\n ) -> Callable[\n [service.ListNodePoolsRequest],\n Union[service.ListNodePoolsResponse, Awaitable[service.ListNodePoolsResponse]],\n ]:\n raise NotImplementedError()\n\n @property\n def get_node_pool(\n self,\n ) -> Callable[\n [service.GetNodePoolRequest],\n Union[resources.NodePool, Awaitable[resources.NodePool]],\n ]:\n raise NotImplementedError()\n\n @property\n def create_node_pool(\n self,\n ) -> Callable[\n [service.CreateNodePoolRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def update_node_pool(\n self,\n ) -> Callable[\n [service.UpdateNodePoolRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_node_pool(\n self,\n ) -> Callable[\n [service.DeleteNodePoolRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def list_machines(\n self,\n ) -> Callable[\n [service.ListMachinesRequest],\n Union[service.ListMachinesResponse, Awaitable[service.ListMachinesResponse]],\n ]:\n raise NotImplementedError()\n\n @property\n def get_machine(\n self,\n ) -> Callable[\n [service.GetMachineRequest],\n Union[resources.Machine, Awaitable[resources.Machine]],\n ]:\n raise NotImplementedError()\n\n @property\n def list_vpn_connections(\n self,\n ) -> Callable[\n [service.ListVpnConnectionsRequest],\n Union[\n service.ListVpnConnectionsResponse,\n Awaitable[service.ListVpnConnectionsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def get_vpn_connection(\n self,\n ) -> Callable[\n [service.GetVpnConnectionRequest],\n Union[resources.VpnConnection, Awaitable[resources.VpnConnection]],\n ]:\n raise NotImplementedError()\n\n @property\n def create_vpn_connection(\n self,\n ) -> Callable[\n [service.CreateVpnConnectionRequest],\n 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_vpn_connection(\n self,\n ) -> Callable[\n [service.DeleteVpnConnectionRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def list_operations(\n self,\n ) -> Callable[\n [operations_pb2.ListOperationsRequest],\n Union[\n operations_pb2.ListOperationsResponse,\n Awaitable[operations_pb2.ListOperationsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def get_operation(\n self,\n ) -> Callable[\n [operations_pb2.GetOperationRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:\n raise NotImplementedError()\n\n @property\n def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]:\n raise NotImplementedError()\n\n @property\n def get_location(\n self,\n ) -> Callable[\n [locations_pb2.GetLocationRequest],\n Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],\n ]:\n raise NotImplementedError()\n\n @property\n def list_locations(\n self,\n ) -> Callable[\n [locations_pb2.ListLocationsRequest],\n Union[\n locations_pb2.ListLocationsResponse,\n Awaitable[locations_pb2.ListLocationsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def kind(self) -> str:\n raise NotImplementedError()\n\n\n__all__ = (\"EdgeContainerTransport\",)\n","sub_path":"packages/google-cloud-edgecontainer/google/cloud/edgecontainer_v1/services/edge_container/transports/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":17460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"150595893","text":"from sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nimport MeCab\nmt = MeCab.Tagger('')\nmt.parse('')\n\ndef mean_reciprocal_rank_score(actual_value, predicted_values):\n pos = 0\n val = 0\n for i in predicted_values:\n if i == actual_value and pos == 0:\n val = 1\n break\n elif i == actual_value and pos == 1:\n val = 0.5\n break\n elif i == actual_value and pos == 2:\n val = 0.33\n break\n else:\n val = 0\n pos += 1\n\n return val\n\ndef crude_ranks(sorted_list, query, vector):\n X = vector.transform([str(query)])\n rank = []\n ids_list = []\n for ids, items in sorted_list:\n for sentences in items:\n Y = vector.transform([sentences])\n ids_list.append(ids)\n rank.append(cosine_similarity(X, Y))\n flat = [x for sublist in rank for x in sublist]\n ranks = sorted(zip(ids_list, flat), key=lambda l:l[1], reverse=True)[:3]\n return ranks\n\ndef delete_duplicate_ids(ranks):\n temp = 0\n ids_list = []\n filtered = []\n for page_id, score in ranks:\n if page_id == temp:\n continue\n else:\n ids_list.append(page_id)\n filtered.append([page_id, score[0]])\n temp = page_id\n return filtered, ids_list\n\ndef filtering_ranks(ranks, sorted_list, query, vector):\n extra_ranks = 0\n filtered_extra = None\n filtered, ids_list = delete_duplicate_ids(ranks)\n saved_list = sorted_list\n if len(ids_list) < 3:\n for ids in ids_list:\n index = 0\n for matched_id, items in sorted_list:\n if ids == matched_id:\n saved_list.pop(index)\n index += 1\n extra_ranks = crude_ranks(saved_list, query, vector)\n filtered_extra, ids_list_extra = delete_duplicate_ids(extra_ranks)\n sum_filtered = filtered + 
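Every retriable RPC in the transport above is wrapped with the same policy: start at a 1.0 s delay, multiply by 1.3 up to a 10.0 s cap, retry only on ServiceUnavailable, and give up after a 60 s deadline. A simplified sketch of that shape (google.api_core's real Retry also adds random jitter; retriable here is a stand-in exception tuple):

import time

def call_with_retry(rpc, initial=1.0, maximum=10.0, multiplier=1.3,
                    deadline=60.0, retriable=(ConnectionError,)):
    # Exponential backoff, capped at `maximum`, bounded by `deadline`.
    delay, elapsed = initial, 0.0
    while True:
        try:
            return rpc()
        except retriable:
            if elapsed + delay > deadline:
                raise
            time.sleep(delay)
            elapsed += delay
            delay = min(delay * multiplier, maximum)

print(call_with_retry(lambda: 'ok'))  # succeeds immediately, no retries needed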
filtered_extra\n \n return sorted(sum_filtered, key=lambda l:l[1], reverse=True)[:3]\n else:\n return filtered\n\ndef get_vector(text, gensim_model):\n sum_vec = np.zeros(200)\n word_count = 0\n node = mt.parseToNode(text)\n while node:\n fields = node.feature.split(\",\")\n if fields[0] == '名詞' or fields[0] == '動詞' or fields[0] == '形容詞':\n try: \n temp = gensim_model.wv[node.surface]\n except KeyError:\n temp = 0\n sum_vec += temp\n word_count += 1\n node = node.next\n return sum_vec / word_count\n\ndef cos_sim(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n\ndef word2vec_ranks(perpage_sequence_match, query, gensim_model):\n X = get_vector(query, gensim_model)\n rank = []\n ids_list = []\n for ids, items in perpage_sequence_match:\n for sentences in items:\n Y = get_vector(sentences, gensim_model)\n ids_list.append(ids)\n rank.append(cos_sim(X, Y))\n return sorted(zip(ids_list, rank), key=lambda l:l[1], reverse=True)[:3]","sub_path":"retrieval_Model/Page_Ranking_Experiment/pipelines/ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"584888257","text":"\"\"\"\nID: oranged1\nLANG: PYTHON2\nTASK: milk\n\"\"\"\nfin = open ('milk.in', 'r')\nfout = open ('milk.out', 'w')\n\nmmm = list(map(int, fin.readline().strip().split()))\nprice_list = []\nfor i in range(mmm[1]):\n mlk = list(map(int, fin.readline().strip().split()))\n price_list.append(mlk)\nprice_list = sorted(price_list)\nmilk_num = mmm[0]\nprice = 0\nfor i in price_list:\n if milk_num >= i[1]:\n price += i[1] * i[0]\n milk_num -= i[1]\n elif i[1] > milk_num:\n price += milk_num * i[0]\n milk_num -= milk_num\n if milk_num == 0:\n break\nfout.write(str(price) + \"\\n\")\nfout.close()","sub_path":"Veritas/CH1_4/milk.py","file_name":"milk.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"482735523","text":"import os\nimport logging\nfrom http import HTTPStatus\nfrom typing import List, Dict\n\nimport requests\nfrom requests.exceptions import HTTPError\n\nfrom checkov.common.models.consts import TFC_HOST_NAME\nfrom checkov.common.goget.registry.get_registry import RegistryGetter\nfrom checkov.terraform.module_loading.content import ModuleContent\nfrom checkov.terraform.module_loading.loader import ModuleLoader\nfrom checkov.terraform.module_loading.loaders.versions_parser import (\n order_versions_in_descending_order,\n get_version_constraints\n)\nfrom checkov.terraform.module_loading.module_params import ModuleParams\n\n\nclass RegistryLoader(ModuleLoader):\n modules_versions_cache: Dict[str, List[str]] = {} # noqa: CCE003 # public data\n\n def __init__(self) -> None:\n super().__init__()\n self.module_version_url = \"\"\n self.best_version = \"\"\n\n def discover(self, module_params):\n module_params.REGISTRY_URL_PREFIX = os.getenv(\"REGISTRY_URL_PREFIX\", \"https://registry.terraform.io/v1/modules\")\n module_params.token = os.getenv(\"TFC_TOKEN\", \"\")\n\n def _is_matching_loader(self, module_params: ModuleParams) -> bool:\n\n # Since the registry loader is the first one to be checked,\n # it shouldn't process any github modules\n if module_params.module_source.startswith((\"github.com\", \"bitbucket.org\", \"git::\")):\n return False\n\n self._process_inner_registry_module(module_params)\n if os.path.exists(module_params.dest_dir):\n return True\n\n if 
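mean_reciprocal_rank_score above hard-codes the reciprocal ranks 1, 0.5 and 0.33 for the first three positions. The general form is simply 1/rank of the first correct hit, which removes the if/elif ladder (mrr_at_k is an illustrative name):

def mrr_at_k(actual, predicted, k=3):
    # Reciprocal of the 1-based rank of the first match, 0.0 if absent from the top k.
    for pos, item in enumerate(predicted[:k], start=1):
        if item == actual:
            return 1.0 / pos
    return 0.0

print(mrr_at_k('p7', ['p3', 'p7', 'p9']))  # 0.5
print(mrr_at_k('p1', ['p3', 'p7', 'p9']))  # 0.0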
module_params.module_source.startswith(TFC_HOST_NAME):\n # indicates a private registry module\n module_params.REGISTRY_URL_PREFIX = f\"https://{TFC_HOST_NAME}/api/registry/v1/modules\"\n module_params.module_source = module_params.module_source.replace(f\"{TFC_HOST_NAME}/\", \"\")\n else:\n # url for the public registry\n module_params.REGISTRY_URL_PREFIX = \"https://registry.terraform.io/v1/modules\"\n\n if module_params.module_source.startswith(module_params.REGISTRY_URL_PREFIX):\n # TODO: implement registry url validation using remote service discovery\n # https://www.terraform.io/internals/remote-service-discovery#remote-service-discovery\n pass\n module_params.module_version_url = \"/\".join((module_params.REGISTRY_URL_PREFIX, module_params.module_source, \"versions\"))\n if not module_params.module_version_url.startswith(module_params.REGISTRY_URL_PREFIX):\n # Local paths don't get the prefix appended\n return False\n\n # If versions for a module are cached, determine the best version and return True.\n # If versions are not cached, get versions, then determine the best version and return True.\n # Best version needs to be determined here for setting most accurate dest_dir.\n if module_params.module_version_url in RegistryLoader.modules_versions_cache.keys():\n module_params.best_version = self._find_best_version(module_params)\n return True\n if not self._cache_available_versions(module_params):\n return False\n module_params.best_version = self._find_best_version(module_params)\n\n if not module_params.inner_module:\n module_params.dest_dir = os.path.join(module_params.root_dir, module_params.external_modules_folder_name,\n TFC_HOST_NAME, *module_params.module_source.split(\"/\"),\n module_params.best_version)\n if os.path.exists(module_params.dest_dir):\n return True\n # verify cache again after refresh\n if module_params.module_version_url in RegistryLoader.modules_versions_cache.keys():\n return True\n return False\n\n def _load_module(self, module_params: ModuleParams) -> ModuleContent:\n if os.path.exists(module_params.dest_dir):\n return ModuleContent(dir=module_params.dest_dir)\n\n best_version = module_params.best_version\n logging.debug(\n f\"Best version for {module_params.module_source} is {best_version} based on the version constraint {module_params.version}\")\n request_download_url = \"/\".join((module_params.REGISTRY_URL_PREFIX, module_params.module_source, best_version, \"download\"))\n try:\n response = requests.get(url=request_download_url, headers={\"Authorization\": f\"Bearer {module_params.token}\"})\n response.raise_for_status()\n except HTTPError as e:\n self.logger.warning(e)\n if response.status_code != HTTPStatus.OK and response.status_code != HTTPStatus.NO_CONTENT:\n return ModuleContent(dir=None)\n else:\n # https://www.terraform.io/registry/api-docs#download-source-code-for-a-specific-module-version\n module_download_url = response.headers.get('X-Terraform-Get', '')\n self.logger.debug(f\"Cloning module from: X-Terraform-Get: {module_download_url}\")\n if module_download_url.startswith(\"https://archivist.terraform.io/v1/object\"):\n try:\n registry_getter = RegistryGetter(module_download_url)\n registry_getter.temp_dir = module_params.dest_dir\n registry_getter.do_get()\n return_dir = module_params.dest_dir\n except Exception as e:\n str_e = str(e)\n if 'File exists' not in str_e and 'already exists and is not an empty directory' not in str_e:\n self.logger.error(f\"failed to get {module_params.module_source} because of {e}\")\n return 
ModuleContent(dir=None, failed_url=module_params.module_source)\n if module_params.inner_module:\n return_dir = os.path.join(module_params.dest_dir, module_params.inner_module)\n return ModuleContent(dir=return_dir)\n else:\n return ModuleContent(dir=None, next_url=response.headers.get(\"X-Terraform-Get\", \"\"))\n\n def _find_module_path(self, module_params: ModuleParams) -> str:\n # to determine the exact path here would be almost a duplicate of the git_loader functionality\n return \"\"\n\n def _find_best_version(self, module_params: ModuleParams) -> str:\n versions_by_size = RegistryLoader.modules_versions_cache.get(module_params.module_version_url, [])\n if module_params.version == \"latest\":\n module_params.version = versions_by_size[0]\n version_constraints = get_version_constraints(module_params.version)\n num_of_matches = 0\n for version in versions_by_size:\n for version_constraint in version_constraints:\n if not version_constraint.versions_matching(version):\n break\n else:\n num_of_matches += 1\n if num_of_matches == len(version_constraints):\n return version\n else:\n num_of_matches = 0\n return \"latest\"\n\n def _cache_available_versions(self, module_params: ModuleParams) -> bool:\n # Get all available versions for a module in the registry and cache them.\n # Returns False on failure.\n try:\n response = requests.get(url=module_params.module_version_url, headers={\"Authorization\": f\"Bearer {module_params.token}\"})\n response.raise_for_status()\n available_versions = [\n v.get(\"version\") for v in response.json().get(\"modules\", [{}])[0].get(\"versions\", {})\n ]\n RegistryLoader.modules_versions_cache[module_params.module_version_url] = order_versions_in_descending_order(\n available_versions)\n return True\n except HTTPError as e:\n self.logger.debug(e)\n return False\n\n def _process_inner_registry_module(self, module_params: ModuleParams) -> None:\n # Check if the source has '//' in it. If it does, it indicates a reference for an inner module.\n # Example: \"terraform-aws-modules/security-group/aws//modules/http-80\" =>\n # module_source = terraform-aws-modules/security-group/aws\n # dest_dir = modules/http-80\n module_source_components = module_params.module_source.split(\"//\")\n if len(module_source_components) > 1:\n module_params.module_source = module_source_components[0]\n module_params.dest_dir = module_params.dest_dir.split(\"//\")[0]\n module_params.inner_module = module_source_components[1]\n\n\nloader = RegistryLoader()\n","sub_path":"checkov/terraform/module_loading/loaders/registry_loader.py","file_name":"registry_loader.py","file_ext":"py","file_size_in_byte":8657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"125520492","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2014 Jordi Mas i Hernandez \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
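RegistryLoader._find_best_version above walks the cached versions newest-first and returns the first one that satisfies every parsed constraint. The selection logic on its own, with plain predicates standing in for checkov's version-constraint objects:

def find_best_version(versions_desc, constraints):
    # versions_desc is sorted newest first; return the first fully matching version.
    for version in versions_desc:
        if all(matches(version) for matches in constraints):
            return version
    return 'latest'

as_tuple = lambda v: tuple(int(p) for p in v.split('.'))
at_least_1_1 = lambda v: as_tuple(v) >= (1, 1, 0)
below_2 = lambda v: as_tuple(v) < (2, 0, 0)
print(find_best_version(['2.1.0', '1.4.2', '1.0.9'], [at_least_1_1, below_2]))  # 1.4.2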
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the\n# Free Software Foundation, Inc., 59 Temple Place - Suite 330,\n# Boston, MA 02111-1307, USA.\n\nimport logging\nimport yaml\n\n\nclass FeedSources(object):\n\n def __init__(self):\n self._urls = []\n\n @property\n def urls(self):\n return self._urls\n\n def read(self, filename):\n self._urls = []\n with open(filename, 'r') as f:\n self._read_str(f)\n\n def _read_str(self, string):\n doc = yaml.load(string)\n feeds = doc[\"feeds\"]\n for feed in feeds:\n url = feed[\"url\"]\n msg = 'Feed url {0}'.format(url)\n logging.debug(msg)\n self._urls.append(url)\n","sub_path":"qec/fetcher/feedsources.py","file_name":"feedsources.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"420060427","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /private/var/folders/q3/1b9f00755fngs2554s60x4_h0000gn/T/pycharm-packaging/web3/web3/utils/module_testing/version_module.py\n# Compiled at: 2018-05-28 04:44:24\n# Size of source mod 2**32: 395 bytes\nfrom eth_utils import is_string\n\nclass VersionModuleTest:\n\n def test_net_version(self, web3):\n version = web3.version.network\n if not is_string(version):\n raise AssertionError\n elif not version.isdigit():\n raise AssertionError\n\n def test_eth_protocolVersion(self, web3):\n protocol_version = web3.version.ethereum\n if not is_string(protocol_version):\n raise AssertionError\n elif not protocol_version.isdigit():\n raise AssertionError","sub_path":"pycfiles/ethfuncdecorator-0.3.0.tar/version_module.cpython-36.py","file_name":"version_module.cpython-36.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"413892711","text":"import pandas as pd\n\ndef get_nefin_risk_factors(out = 'df'):\n \"\"\"\n Downloads brazilian daily risk factors data from \n http://nefin.com.br/risk_factors.html and outputs it as a df or dict of dfs.\n\n Parameters\n ----------\n out = 'df' or 'dict'\n\n Returns\n -------\n Pandas Data Frame if out = 'df',\n Dictionary of pandas data frames if out = 'dict'\n \"\"\" \n \n if out not in {'df', 'dict'}:\n raise ValueError(\"out must be 'df' or 'dict'\")\n\n factors = ['Market_Factor', 'SMB_Factor', 'HML_Factor', 'WML_Factor',\n 'IML_factor', 'Risk_Free'] \n url = 'http://nefin.com.br/Risk%20Factors/{}.xls'\n dfs = {}\n\n for factor in factors:\n dfs[factor] = pd.read_excel(url.format(factor))\n dfs[factor]['Date'] = pd.to_datetime(dfs[factor][['year', 'month', 'day']])\n dfs[factor] = dfs[factor].drop(columns=['year', 'month', 'day'])\n dfs[factor] = dfs[factor].set_index('Date')\n dfs[factor] = dfs[factor].iloc[:,0]\n\n if out == 'df':\n return pd.DataFrame.from_dict(dfs)\n else:\n return dfs\n","sub_path":"get_nefin_risk_factors.py","file_name":"get_nefin_risk_factors.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"51083869","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nclass Session(models.Model):\n TYPE = (\n (1, 'Sign up'),\n (2, 
'Restore password'),\n )\n\n session_key = models.CharField(max_length=40)\n date = models.DateTimeField(auto_now=True)\n user = models.ForeignKey(User)\n type = models.IntegerField(choices=TYPE, default=None)\n\n\nclass Oauth(models.Model):\n SERVER = (\n (1, 'Google'),\n (2, 'Yandex'),\n (3, 'Mail.ru'),\n )\n\n user = models.ForeignKey(User)\n oauth_id = models.CharField(max_length=200)\n server = models.IntegerField(choices=SERVER)\n\n class Meta:\n unique_together = (('oauth_id', 'server'),)\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"361426310","text":"import gym\n\nfrom stable_baselines3.common.policies import MlpPolicy\n# from stable_baselines3.common.vec_env import DummyVecEnv\n# from stable_baselines3.common import make_vec_env\nfrom stable_baselines3 import PPO2\n# from stable_baselines3.common.env_checker import check_env\n# from env.mimo_env.py import LaserControllerEnv\n\nenv = gym.make('laser_cbc:mimocontrol-v0')\n\n# Optional: PPO2 requires a vectorized environment to run\n# the env is now wrapped automatically when passing it to the constructor\n# env = DummyVecEnv([lambda: env])\n\n# from stable_baselines3 import A2C\n\n# env = gym.make('laser_cbc:mimocontrol-v0')\n# model = A2C(MlpPolicy, env, verbose=1, tensorboard_log=\"./tensorboardoutputs/\")\n# model.learn(total_timesteps=100000)\n# model.save(\"a2c_laser\")\n# model = A2C.load(\"a2c_laser\")\n\n# from stable_baselines3 import DDPG\n\n# env = gym.make('laser_cbc:mimocontrol-v0')\n# model = DDPG(MlpPolicy, env, verbose=1, tensorboard_log=\"./tensorboardoutputs/\")\n# model.learn(total_timesteps=100000)\n# model.save(\"ddpg_laser\")\n# model = DDPG.load(\"ddpg_laser\")\n\n# # Evaluate the trained agent\n# eval_env = LaserControllerEnv()\n# mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=100)\n# print(f\"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}\")\n\nmodel = PPO2(MlpPolicy, env, verbose=1)\nmodel.learn(total_timesteps=10000)\n\nobs = env.reset()\nfor i in range(1000):\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n env.render()\n","sub_path":"gym/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"452789973","text":"#####################################################\n# Example GCC Build Script #\n#####################################################\n# (c) James S Renwick 2014 #\n#####################################################\n\nfrom __future__ import print_function\n\nHELP = \"\"\"\n\nUsage:\n build.py [opts...] [target]\n\n -verbose Lowers the verbosity level to 0,\n displaying all messages.\n\n --help Prints this help message.\n \n target The build target to execute (e.g.\n build, clean.) 
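The training script above imports PPO2 and an MlpPolicy class from stable_baselines3, which mixes the older stable-baselines (TF1) API into the SB3 one: stable_baselines3 ships PPO, not PPO2, and takes the policy as the string 'MlpPolicy'. A sketch of the same loop in working SB3 form, keeping the script's custom environment id:

import gym
from stable_baselines3 import PPO

env = gym.make('laser_cbc:mimocontrol-v0')
model = PPO('MlpPolicy', env, verbose=1)   # policy is passed by name in SB3
model.learn(total_timesteps=10000)

obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()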
Defaults to 'main'.\n\n\"\"\"\n\nimport os\nimport re\nimport sys\n\n# Import the build manager\nsys.path.append(\".build\")\nfrom gcc_build import *\n\n\n# The collated and categorised files\nFILES = { }\n\ndef perform_init():\n global FILES\n \n new_section(\"preparing\")\n\n # Search for files to build\n FILES = get_files(SOURCE_DIRS)\n\n fcount = sum(map(len, FILES.values())) \n log(\"Including %s files\"%fcount, LogLevel.vInfo)\n\n\ndef perform_build():\n global FILES\n \n new_section(\"compiling\")\n\n # === C COMPILE ===\n\n if FILES.has_key(\"c-files\"):\n compile_c(FILES[\"c-files\"], [\"-x\", \"c\"])\n else:\n log(\"Not compiling C source files\", LogLevel.vInfo)\n\n # === C++ COMPILE ===\n\n if FILES.has_key(\"cpp-files\"):\n compile_cpp(FILES[\"cpp-files\"], [\"-x\", \"c++\"])\n else:\n log(\"Not compiling C++ source files\", LogLevel.vInfo)\n\n # === Link ===\n new_section(\"linking\")\n\n # Refresh file list\n FILES = get_files(SOURCE_DIRS)\n\n # Now link the object files\n if FILES.has_key(\"object-files\"):\n \n args = []\n args.extend(LINKFLAGS)\n args.extend([\"-o\", OUTPUT_FILE])\n \n for file in FILES[\"object-files\"]:\n args.append(file)\n\n rc = invoke_gpp(args)\n log(\"Returned with code %s\"%rc, LogLevel.vInfo)\n else:\n log(\"Linking disabled, skipping\", LogLevel.vInfo)\n\n\ndef perform_clean():\n if not FILES.has_key(\"clean-files\"):\n log(\"No files to clean\", LogLevel.vInfo)\n else:\n for file in FILES[\"clean-files\"]:\n try:\n os.remove(file)\n log(\"Removed file '%s'\"%file, LogLevel.vInfo)\n except Exception as e:\n log(\"Error removing file '%s': %s\"%(file,e), LogLevel.Warn)\n\n\n# ==================================================\n\n# TARGET(NAME, FUNCTION)\n# TARGET(NAME, FUNCTION, [DEPENDENCIES])\n\nTARGET(\"main\", \"build\")\nTARGET(\"clean\", perform_clean, [perform_init])\nTARGET(\"build\", perform_build, [perform_init])\nTARGET(\"rebuild\", lambda:None, [\"clean\", perform_build])\n\n# ==================================================\nif (__name__ == \"__main__\"):\n \n for i in range(1, len(sys.argv)):\n arg = sys.argv[i]\n\n # Look for optional args\n if arg.startswith(\"-\") and arg == \"--help\":\n print(HELP)\n exit(0)\n\n # Execute the target\n run_target(get_start_target())\n\n \n","sub_path":"build-example.py","file_name":"build-example.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"489932900","text":"\"\"\"\r\n catch_mickey_lab.py\r\n\r\n Click on a moving Mickey Mouse to win this game.\r\n \"\"\"\r\n\r\nimport pygame\r\nimport random\r\npygame.init()\r\n\r\ndef title_screen(screen, background):\r\n # Display a title screen for the game.\r\n # Play begins when the mouse is clicked.\r\n\r\n disneyFont = pygame.font.Font(\"disney-comic.ttf\", 50)\r\n disney2Font = pygame.font.Font(\"disney-comic.ttf\", 30)\r\n title_label = disneyFont.render(\"Catch Mickey Mouse\", 1, (0,0,0), (255,255,0))\r\n instr1_label = disney2Font.render(\"Click on Mickey to win.\", 1, (0,0,0), (255,255,0))\r\n instr2_label = disney2Font.render(\"You only have ten seconds!\",1, (0,0,0), (255,255,0))\r\n\r\n myFont = pygame.font.Font(None, 30)\r\n my_label = myFont.render(\"Click to start\", 1, (0,0,0), (255, 255, 0))\r\n\r\n clock = pygame.time.Clock()\r\n keepGoing = True\r\n\r\n while keepGoing:\r\n \r\n clock.tick(30) # Frame rate 30 frames per second.\r\n\r\n for event in pygame.event.get(): \r\n if event.type == pygame.QUIT:\r\n keepGoing = False\r\n elif 
event.type == pygame.MOUSEBUTTONDOWN:\r\n keepGoing = False\r\n\r\n screen.blit(background, (0,0))\r\n screen.blit(title_label, (30,50))\r\n screen.blit(instr1_label, (30,200))\r\n screen.blit(instr2_label, (30,240))\r\n screen.blit(my_label, (500,420))\r\n\r\n pygame.display.flip()\r\n \r\n\f\r\ndef game(screen, background, games, games1, mouseClicks,sx,sy):\r\n # Play the Catch Mickey Game\r\n\r\n # Create a Mickey Mouse Surface.\r\n mick1 = pygame.image.load(\"Mickey2.jpg\") # Load image onto a Surface\r\n mick1 = mick1.convert() # Conert pixel format.\r\n aColor = mick1.get_at((1,1)) # Get color to make transparent.\r\n mick1.set_colorkey(aColor) # Now make it transparent.\r\n mick1 = pygame.transform.scale(mick1, (50,50)) # Scale the surface.\r\n (m1_x, m1_y) = (295,215) # Initial Mickey location.\r\n\r\n \r\n dx = random.randint(sx, sy) # Pick initial speed for Mickey.\r\n dy = random.randint(sx, sy)\r\n \r\n timerFont = pygame.font.Font(None, 40) # For displaying time remaining.\r\n \r\n clock = pygame.time.Clock() # A clock for setting a frame rate.\r\n keepGoing = True # Signals the game is over.\r\n seconds = 10 # Time left to play - initially 10 secs.\r\n frames = 0 # Frame count for keeping time.\r\n win = False # Flags game win or loss.\r\n\r\n while keepGoing:\r\n \r\n clock.tick(30) # Frame rate 30 frames per second.\r\n for event in pygame.event.get(): # Handle events\r\n if event.type == pygame.QUIT:\r\n keepGoing = False\r\n elif event.type == pygame.MOUSEBUTTONDOWN: # Check for a click\r\n (mx, my) = pygame.mouse.get_pos() # on Mickey.\r\n mouseClicks += 1\r\n pygame.draw.circle(background,(255,0,0),(mx,my),10,0)\r\n if mx >= m1_x and mx <= m1_x + 50 and \\\r\n my >= m1_y and my <= m1_y + 50:\r\n (win, keepGoing) = (True, False) # Caught Mickey!\r\n pygame.draw.circle(background,(0,255,0),(mx,my),5,0)\r\n \r\n\r\n\r\n frames += 1 # The frame count tracks time.\r\n if frames == 30: # Note: 30 frames per second\r\n (frames, seconds) = (0, seconds - 1)\r\n\r\n (m1_x, m1_y) = (m1_x + dx, m1_y + dy) # Move Mickey\r\n \r\n if m1_x < 0 or m1_x > 590: # Bounce Mickey off the screen boundaries.\r\n dx = -dx # Change horizontal direction.\r\n if m1_y < 0 or m1_y > 430: #\r\n dy = -dy # Change vertical direction.\r\n\r\n if seconds == 0: # When time runs out the game\r\n keepGoing = False # the game is over.\r\n\r\n timer_text = timerFont.render( \"Seconds: %d\" % seconds, 1, (0,0,0), (255,255,0))\r\n gamesLeft = timerFont.render( \"Game: %d of %d\" % ((games-games1+1),games), 1, (0,0,0), (255,255,0))\r\n mouseClicks1=timerFont.render(\"Mouse Clicks: %d\" %(mouseClicks), 1, (0,0,0), (255,255,0))\r\n \r\n \r\n screen.blit(background, (0,0))\r\n screen.blit(timer_text, (10,10))\r\n screen.blit(gamesLeft, (425,5))\r\n screen.blit(mouseClicks1, (0,450))\r\n screen.blit(mick1, (m1_x,m1_y))\r\n pygame.display.flip()\r\n \r\n if win: # Display a win or\r\n messageScreen(screen, background,\"You won!!\") # lose screen.\r\n else:\r\n messageScreen(screen, background, \"You lost!!\")\r\n\f \r\n\r\ndef messageScreen(screen, background, message):\r\n # Display a message on the screen for three seconds.\r\n\r\n disneyFont = pygame.font.Font(\"disney-comic.ttf\", 70)\r\n msg_label = disneyFont.render(message, 1, (0,0,0), (255,255,0))\r\n\r\n clock = pygame.time.Clock()\r\n keepGoing = True\r\n frames = 0\r\n\r\n while keepGoing:\r\n \r\n clock.tick(30) # Frame rate 30 frames per second.\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n keepGoing = False\r\n\r\n 
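        # (Editor's sketch, not part of the original lab: with the frame rate
        # pinned by clock.tick(30), a frame counter doubles as a timer, so the
        # 90-frame cutoff below is ~3 seconds. The same delay could be kept in
        # milliseconds with pygame's clock; `elapsed_ms` is an assumed name:
        #     elapsed_ms += clock.tick(30)   # tick() returns ms since last call
        #     if elapsed_ms >= 3000:
        #         keepGoing = False
        # )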
frames = frames + 1 # Count the number of frames displayed\r\n\r\n if frames == 90: # After 3 seconds terminate the message\r\n keepGoing = False # display.\r\n\r\n screen.blit(background, (0,0))\r\n screen.blit(msg_label, (170,180))\r\n\r\n pygame.display.flip()\r\n\r\n\r\ndef main():\r\n # Run the Catch Mickey Game. \r\n\r\n ### Add code here to prompt the user on the number of\r\n ### games they want to play\r\n\r\n games = int(input(\"how many: \"))\r\n\r\n \r\n\r\n ###\r\n ###\r\n\r\n screen = pygame.display.set_mode((640,480)) # Create a screen\r\n pygame.display.set_caption(\"Catch Mickey\") # and caption\r\n\r\n background = pygame.Surface(screen.get_size())\r\n background = background.convert()\r\n background.fill((255, 255, 0))\r\n\r\n\r\n title_screen(screen, background) # Display title and instructions.\r\n ###\r\n \r\n games1 = games\r\n mouseClicks = 0\r\n sx = 15\r\n sy = 25\r\n while games1 != 0:\r\n game(screen,background,games,games1,mouseClicks,sx,sy) # Play the game.\r\n games1 -= 1\r\n sx+=10\r\n sy+=10\r\n background.fill((255, 255, 0))\r\n \r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n ###\r\n ###\r\n \r\n messageScreen(screen, background, \"The End\") # Final \"The End\" Screen.\r\n\r\n\r\n# Call the main function\r\n\r\nmain()\r\npygame.quit() \r\n \r\n","sub_path":"computer-science-i/misc/catch_mickey_lab.py","file_name":"catch_mickey_lab.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"318411613","text":"class Solution:\n def twoSum(self, numbers, target):\n\n pair = {}\n for i in range(len(numbers)):\n if numbers[i] in pair.keys():\n return [pair.get(numbers[i]), i + 1]\n else:\n pair[target - numbers[i]] = i + 1\n return []\n","sub_path":"167/167.two-sum-ii-input-array-is-sorted.234403645.Accepted.leetcode.py","file_name":"167.two-sum-ii-input-array-is-sorted.234403645.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"193149872","text":"class Solution:\n def commonChars(self, strings: List[str]) -> List[str]:\n if 0 == len(strings):\n return []\n\n string = strings[0]\n common = self.countChars(string)\n\n for string in strings[1:]:\n tempCommon = self.countChars(string)\n self.getMinCount(common, tempCommon)\n\n result = []\n for key in common:\n for count in range(common[key]):\n result.append(key)\n\n return result\n\n def getMinCount(self, count1, count2):\n for k in count1:\n count1[k] = min(count1[k], count2.get(k, 0))\n\n def countChars(self, string):\n count = {}\n for char in string:\n count[char] = count.get(char, 0) + 1\n return count\n","sub_path":"leetcode/solved/001002. Find Common Characters.py","file_name":"001002. 
Find Common Characters.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"124705771","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404, render, redirect, HttpResponse\n# Create your views here.\n\n\ndef start(request):\n return render(request, template_name='calc/main.html')\n\ndef calc(request):\n num1 = request.GET['num1']\n num2 = request.GET['num2']\n runtype = request.GET['type']\n if runtype=='divide' and num2 == '0':\n ctx = {\n 'result':'division by zero'\n }\n elif runtype=='plus':\n ctx={\n 'result':int(num1)+int(num2)\n }\n elif runtype=='minus':\n ctx={\n 'result':int(num1)-int(num2)\n }\n elif runtype=='multiply':\n ctx={\n 'result':int(num1)*int(num2)\n }\n elif runtype=='divide':\n ctx={\n 'result':int(num1)/int(num2)\n }\n return render(request, 'calc/result.html', ctx)","sub_path":"calc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"181165546","text":"import tkinter\nimport random\n\n# list of possible colors\ncolors = ['Red', 'Blue', 'Green', 'Pink', 'Black',\n 'Yellow', 'Orange', 'Cyan', 'Purple', 'Brown', ]\n\nscore = 0\n\n#The game time left, initially 30 seconds\n\ntimeleft = 60\n\n#Function that will start the game\n\ndef startGame(event):\n\n if timeleft == 60:\n #start the countdown timer.\n countdown()\n\n #run the function to\n # choose the next color.\n nextColor()\n\n\"\"\"Function to choose and display the next color\"\"\"\n\ndef nextColor():\n # use the globally declared 'score'\n #and 'play' variables above.\n global score\n global timeleft\n\n #if a game is currently in play\n if timeleft > 0:\n\n\n # make the text entry box active\n e.focus_set()\n\n \"\"\"\n if the color typed is equal\n to the color of the text\n \"\"\"\n\n if e.get().lower() == colors[1].lower() :\n score += 1\n\n #clear the text entry box.\n e.delete(0, tkinter.END)\n\n random.shuffle(colors)\n\n #Change the color to type, by changing the\n #text _and_ the color to a random color value\n label.config(fg = str(colors[1]), text = str(colors[0]))\n\n #update the score\n scoreLabel.config(text = 'Score: {}'.format(str(score)))\n\n#Countdown timer function\n\ndef countdown():\n global timeleft\n\n #If a game is in play\n if timeleft > 0:\n\n #decrement the timer\n timeleft -= 1\n\n # update the time left label\n timeLabel.config(text = 'Time left {}'.format(str(timeleft)))\n\n #run the function again after one second\n timeLabel.after(1000, countdown)\n\n\n\n\n\n#Driver code\n#Create a GUI window\nroot = tkinter.Tk()\n\n#Set the title\nroot.title('Guess the color')\n\n# settin the size\nroot.geometry('375x200')\n\n# adding an instruction label\ninstructions = tkinter.Label(root, text = 'Type the color of the words, '\n 'and not the word text !'\n , font = ('Helvetica', 12))\n\ninstructions.pack()\n\n#add a score label\n\nscoreLabel = tkinter.Label(root, text = 'Press enter to start ',\n font = ('Helvetica', 12))\n\nscoreLabel.pack()\n\n# add a time left label\n\ntimeLabel = tkinter.Label(root, text = 'Time left{}'.format(str(timeleft)),\n font = ('Helvetica', 12))\n\ntimeLabel.pack()\n\n#add a label for displaying the color\n\nlabel = tkinter.Label(root, font = ('Helvetica', 60))\n\nlabel.pack()\n\n#add a text entry box for typing in the colors\ne = tkinter.Entry(root)\n\n\n#running the 'start Game' function when the enter 
key is pressed\nroot.bind('<Return>', startGame)\ne.pack()\n\n# set the focus_set()\ne.focus_set()\n\n#start the GUI\nroot.mainloop()\n","sub_path":"Python_Color_Game.py","file_name":"Python_Color_Game.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"275654072","text":"from django.contrib.auth.views import redirect_to_login\r\nfrom django.core.exceptions import PermissionDenied\r\nfrom django.shortcuts import get_object_or_404\r\n\r\nfrom projects.models import Project\r\n\r\n\r\ndef can_apply(fn):\r\n    \"\"\"\r\n    Test if a student can apply or retract; the system is in timephase 3, the user is a student and the proposal is nonprivate.\r\n\r\n    :param fn:\r\n    :return:\r\n    \"\"\"\r\n\r\n    def wrapper(*args, **kw):\r\n        request = args[0]\r\n\r\n        # user needs to be logged in (so no need for login_required on top of this)\r\n        if not request.user.is_authenticated:\r\n            page = args[0].path\r\n            return redirect_to_login(\r\n                next=page,\r\n                login_url='index:login',\r\n                redirect_field_name='next', )\r\n\r\n        if request.user.groups.exists():\r\n            raise PermissionDenied(\"Only students can apply to proposals\")\r\n        if 'pk' in kw:\r\n            pk = int(kw['pk'])\r\n            prop = get_object_or_404(Project, pk=pk)\r\n            if not prop.public_visible():\r\n                raise PermissionDenied(\"Error, project is not visible for students.\")\r\n            if prop.Progress == 2:\r\n                raise PermissionDenied(\"Cannot apply, project is already finished\")\r\n            if prop.Progress == 3:\r\n                raise PermissionDenied(\"Cannot apply, project is reserved\")\r\n            if prop.Apply == 'supervisor':\r\n                raise PermissionDenied(\"To apply to this project, please contact the supervisor.\")\r\n        return fn(*args, **kw)\r\n\r\n    return wrapper\r\n","sub_path":"students/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"448026491","text":"# Python 3.6.1\n\nname = '6nm'\nref_info = dict()\nwith open('{}.pdb'.format(name), 'r') as ref:\n    ref_info['HEAD'] = list()\n    for line in ref:\n        l = line.split()\n        if l[0] == 'ATOM':\n            ref_info[l[1]] = [line[0:27], line[57:]]\n        else:\n            ref_info['HEAD'].append(line)\n\nwith open('{}.frame'.format(name), 'r') as frame, open('{}.new.pdb'.format(name), 'w') as pdb:\n    for line in ref_info['HEAD'][0:-1]:\n        pdb.write(line)\n\n    record_trigger = False\n    for line in frame:\n        if record_trigger:\n            l = line.split()\n            i = l[0]\n            pdb.write('{head} {coords} {tail}'.format(head=ref_info[i][0], tail=ref_info[i][1], coords=' '.join(l[3:])))\n        if line.startswith('ITEM: ATOMS'):\n            record_trigger = True\n    pdb.write(ref_info['HEAD'][-1])\n","sub_path":"PrepareInput/zFrame2PDB.py","file_name":"zFrame2PDB.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"251727993","text":"# Copyright 2014 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport logging\nimport os\nimport subprocess\nimport time\nimport sys\n\nimport mopy.paths\n\nfrom shutil import copyfileobj, rmtree\nfrom signal import SIGTERM\nfrom tempfile import mkdtemp, TemporaryFile\n\n\nclass TimeoutError(Exception):\n \"\"\"Allows distinction between timeout failures and generic OSErrors.\"\"\"\n pass\n\n\ndef _poll_for_condition(\n condition, max_seconds=10, sleep_interval=0.1, desc='[unnamed condition]'):\n \"\"\"Poll until a condition becomes true.\n\n Arguments:\n condition: callable taking no args and returning bool.\n max_seconds: maximum number of seconds to wait.\n Might bail up to sleep_interval seconds early.\n sleep_interval: number of seconds to sleep between polls.\n desc: description put in TimeoutError.\n\n Returns:\n The true value that caused the poll loop to terminate.\n\n Raises:\n TimeoutError if condition doesn't become true before max_seconds is reached.\n \"\"\"\n start_time = time.time()\n while time.time() + sleep_interval - start_time <= max_seconds:\n value = condition()\n if value:\n return value\n time.sleep(sleep_interval)\n\n raise TimeoutError('Timed out waiting for condition: %s' % desc)\n\n\nclass _BackgroundShell(object):\n \"\"\"Manages a mojo_shell instance that listens for external applications.\"\"\"\n\n def __init__(self, mojo_shell_path, shell_args=None):\n \"\"\"In a background process, run a shell at mojo_shell_path listening\n for external apps on an instance-specific socket.\n\n Arguments:\n mojo_shell_path: path to the mojo_shell binary to run.\n shell_args: a list of arguments to pass to mojo_shell.\n\n Raises:\n a TimeoutError if the shell takes too long to create the socket.\n \"\"\"\n self._tempdir = mkdtemp(prefix='background_shell_')\n self._socket_path = os.path.join(self._tempdir, 'socket')\n self._output_file = TemporaryFile()\n\n shell_command = [mojo_shell_path,\n '--enable-external-applications=' + self._socket_path]\n if shell_args:\n shell_command += shell_args\n logging.getLogger().debug(shell_command)\n\n self._shell = subprocess.Popen(shell_command, stdout=self._output_file,\n stderr=subprocess.STDOUT)\n _poll_for_condition(lambda: os.path.exists(self._socket_path),\n desc=\"External app socket creation.\")\n\n\n def __del__(self):\n if self._shell:\n self._shell.terminate()\n self._shell.wait()\n if self._shell.returncode != -SIGTERM:\n copyfileobj(self._output_file, sys.stdout)\n rmtree(self._tempdir)\n\n\n @property\n def socket_path(self):\n \"\"\"The path of the socket where the shell is listening for external apps.\"\"\"\n return self._socket_path\n\n\nclass BackgroundAppGroup(object):\n \"\"\"Manages a group of mojo apps running in the background.\"\"\"\n\n def __init__(self, paths, app_urls, shell_args=None):\n \"\"\"In a background process, spins up mojo_shell with external\n applications enabled, passing an optional list of extra arguments.\n Then, launches apps indicated by app_urls in the background.\n The apps and shell are automatically torn down upon destruction.\n\n Arguments:\n paths: a mopy.paths.Paths object.\n app_urls: a list of URLs for apps to run via mojo_launcher.\n shell_args: a list of arguments to pass to mojo_shell.\n\n Raises:\n a TimeoutError if the shell takes too long to begin running.\n \"\"\"\n self._shell = _BackgroundShell(paths.mojo_shell_path, shell_args)\n\n # Run apps defined by app_urls in the background.\n self._apps = []\n for 
app_url in app_urls:\n launch_command = [\n paths.mojo_launcher_path,\n '--shell-path=' + self._shell.socket_path,\n '--app-url=' + app_url,\n '--app-path=' + paths.FileFromUrl(app_url),\n '--vmodule=*/mojo/shell/*=2']\n logging.getLogger().debug(launch_command)\n app_output_file = TemporaryFile()\n self._apps.append((app_output_file,\n subprocess.Popen(launch_command,\n stdout=app_output_file,\n stderr=subprocess.STDOUT)))\n\n\n def __del__(self):\n self._StopApps()\n\n\n def __enter__(self):\n return self\n\n\n def __exit__(self, ex_type, ex_value, traceback):\n self._StopApps()\n\n\n def _StopApps(self):\n \"\"\"Terminate all background apps.\"\"\"\n for output_file, app in self._apps:\n app.terminate()\n app.wait()\n if app.returncode != -SIGTERM:\n copyfileobj(output_file, sys.stdout)\n self._apps = []\n\n\n @property\n def socket_path(self):\n \"\"\"The path of the socket where the shell is listening for external apps.\"\"\"\n return self._shell._socket_path\n","sub_path":"mojo/tools/mopy/background_app_group.py","file_name":"background_app_group.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"345393372","text":"#! /usr/bin/env python\n\nimport os,sys,glob,shutil\nimport numpy as np\nimport argparse\nfrom skysat_stereo import asp_utils as asp\nfrom skysat_stereo import skysat\nfrom p_tqdm import p_map\nimport itertools\nfrom pygeotools.lib import iolib,warplib\n\ndef getparser():\n parser = argparse.ArgumentParser(description='Script to compute DEM mosaics from triplet output directory')\n parser.add_argument('-DEM_folder', help='Folder containing subdirectories of DEM', required=True)\n parser.add_argument('-out_folder', help='Where composite DEMs are to be saved, if none, creates a composite DEM directory in the input main directory', required=False)\n parser.add_argument('-identifier',help='if we want to mosaic individually aligned DEM which have been produced by skysat_coreg.py, place the identifiers here',required=False,default=None)\n mode_ch = ['video','triplet']\n parser.add_argument('-mode',default='triplet',choices=mode_ch,help=\"select if mosaicing video or triplet stereo output DEMs (default: %(default)s)\")\n binary_ch = [1,0]\n parser.add_argument('-filter_dem',choices=binary_ch,default=1,type=int,\n help=\"filter video DEM composites using max NMAD and min count combination (default: %(default)s)\")\n parser.add_argument('-min_video_count',type=float,default=2,\n help='minimum DEM count to use in filtering (default: %(default)s)')\n parser.add_argument('-max_video_nmad',type=float,default=5,\n help='maximum DEM NMAD variability to filter, if DEM count is also <= min_count (default: %(default)s)') \n return parser \n\ndef main():\n parser = getparser()\n args = parser.parse_args()\n dir = os.path.abspath(args.DEM_folder)\n if args.out_folder:\n out_folder = os.path.abspath(args.out_folder)\n else:\n out_folder = os.path.join(dir,'composite_dems')\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n if args.identifier:\n # for indi align DEMs\n identifier = args.identifier\n else:\n identifier = ''\n if args.mode == 'triplet':\n dir_list = sorted(glob.glob(os.path.join(dir,'20*/')))\n valid_for_nadir_dir = []\n valid_for_aft_dir = []\n valid_nadir_aft_dir = []\n for for_nadir_dir in sorted(glob.glob(os.path.join(dir_list[0],'*/'))):\n try:\n D_sub = iolib.fn_getma(os.path.join(for_nadir_dir,'run-D_sub.tif'),3)\n stats = 
[np.percentile(D_sub.compressed(),(2,98)),np.mean(D_sub.compressed())]\n DEM = glob.glob(os.path.join(for_nadir_dir,'run*{}*-DEM.tif'.format(identifier)))[0]\n valid_for_nadir_dir.append(for_nadir_dir)\n except:\n continue\n for for_aft_dir in sorted(glob.glob(os.path.join(dir_list[1],'*/'))):\n try:\n # see ASP issue for this dirty hack: https://github.com/NeoGeographyToolkit/StereoPipeline/issues/308\n D_sub = iolib.fn_getma(os.path.join(for_aft_dir,'run-D_sub.tif'),3)\n stats = [np.percentile(D_sub.compressed(),(2,98)),np.mean(D_sub.compressed())]\n DEM = glob.glob(os.path.join(for_aft_dir,'run*{}*-DEM.tif'.format(identifier)))[0]\n valid_for_aft_dir.append(for_aft_dir)\n except:\n continue\n for nadir_aft_dir in sorted(glob.glob(os.path.join(dir_list[2],'*/'))):\n try:\n D_sub = iolib.fn_getma(os.path.join(nadir_aft_dir,'run-D_sub.tif'),3)\n stats = [np.percentile(D_sub.compressed(),(2,98)),np.mean(D_sub.compressed())]\n DEM = glob.glob(os.path.join(nadir_aft_dir,'run*{}*-DEM.tif'.format(identifier)))[0]\n valid_nadir_aft_dir.append(nadir_aft_dir)\n except:\n continue\n for_nadir_list = [glob.glob(os.path.join(dir,'run*{}*-DEM.tif'.format(identifier)))[0] for dir in valid_for_nadir_dir]\n nadir_aft_list = [glob.glob(os.path.join(dir,'run*{}*-DEM.tif'.format(identifier)))[0] for dir in valid_nadir_aft_dir]\n for_aft_list = [glob.glob(os.path.join(dir,'run*{}*-DEM.tif'.format(identifier)))[0] for dir in valid_for_aft_dir]\n total_dem_list = for_nadir_list+for_aft_list+nadir_aft_list\n stats_list = ['nmad','count','median']\n print('total dems are {}'.format(len(total_dem_list)))\n out_fn_list = [os.path.join(out_folder,'triplet_{}_mos.tif'.format(stat)) for stat in stats_list]\n print(\"Mosaicing output total per-pixel nmad, count, nmad and 3 DEMs from 3 stereo combinations in parallel\")\n dem_mos_log = p_map(asp.dem_mosaic,[total_dem_list]*3+[for_aft_list,nadir_aft_list,for_nadir_list],out_fn_list+[os.path.join(out_folder,x) for x in ['for_aft_dem_median_mos.tif', 'nadir_aft_dem_median_mos.tif', 'for_nadir_dem_median_mos.tif']],['None']*6,[None]*6,stats_list+['median']*3,[None]*6,num_cpus=4)\n out_log_fn = os.path.join(out_folder,'skysat_triplet_dem_mos.log')\n print(\"Saving triplet DEM mosaic log at {}\".format(out_log_fn))\n with open(out_log_fn,'w') as f:\n for log in dem_mos_log:\n f.write(log) \n elif args.mode=='video':\n dir_list = sorted(glob.glob(os.path.join(dir,'1*/')))\n valid_video_dir = []\n for video_dir in dir_list:\n try:\n D_sub = iolib.fn_getma(os.path.join(video_dir,'run-D_sub.tif'),3)\n stats = [np.percentile(D_sub.compressed(),(2,98)),np.mean(D_sub.compressed())]\n DEM = glob.glob(os.path.join(video_dir,'run*{}*-DEM.tif'.format(identifier)))[0]\n valid_video_dir.append(video_dir)\n except:\n continue \n video_dem_list = [glob.glob(os.path.join(dir,f'run*{identifier}*-DEM.tif'))[0] for dir in valid_video_dir]\n stats_list = ['median','count','nmad']\n print('total dems are {}'.format(len(video_dem_list)))\n out_fn_list = [os.path.join(out_folder,'video_{}_mos.tif'.format(stat)) for stat in stats_list]\n dem_mos_log = p_map(asp.dem_mosaic,[video_dem_list]*3,out_fn_list,['None']*3,[None]*3,stats_list,[None]*3) \n out_log_fn = os.path.join(out_folder,'skysat_video_dem_mos.log')\n with open(out_log_fn,'w') as f:\n for log in dem_mos_log:\n f.write(log)\n if args.filter_dem == 1:\n print(\"Filtering DEM using NMAD and count metrics\")\n min_count = args.min_video_count\n max_nmad = args.max_video_nmad\n print(f\"Filter will use min count of {min_count} and max NMAD of 
{max_nmad}\")\n mos_ds_list = warplib.memwarp_multi_fn(out_fn_list)\n # Filtered array list contains dem_filtered,nmad_filtered, count_filtered in order\n filtered_array_list = skysat.filter_video_dem_by_nmad(mos_ds_list,min_count,max_nmad)\n trailing_str = f'_filt_max_nmad{max_nmad}_min_count{min_count}.tif'\n out_filter_fn_list = [os.path.splitext(fn)[0]+trailing_str for fn in out_fn_list]\n for idx,fn in enumerate(out_filter_fn_list):\n iolib.writeGTiff(filtered_array_list[idx],fn,mos_ds_list[idx])\n print(\"Script complete\")\n\nif __name__==\"__main__\":\n main()\n\n \n","sub_path":"scripts/skysat_dem_mos.py","file_name":"skysat_dem_mos.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"416759875","text":"from Vector import Vector\nfrom Weapon import Weapon\nfrom Wall import Wall\nfrom WeaponCollision import WeaponCollision\nfrom KeyboardClass import Keyboard\nfrom Button import Button\nfrom UserCar import UserCar\nfrom Tree import Tree\nfrom Sprite import Sprite\nfrom Interaction import Interaction\nfrom Interaction import Interaction\nimport random\n\ntry:\n import simplegui\nexcept ImportError:\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\n\n\nDISPLAYW = 1000\nDISPLAYH = 675\n\nuserCarImg = simplegui.load_image('http://personal.rhul.ac.uk/zeac/084/User_car_nitro.jpg')\n#image enemy cars\n\npapayaImg = simplegui.load_image('http://personal.rhul.ac.uk/zeac/084/Papaya_image.jpg')\nexplosionSheet = simplegui.load_image('http://www.cs.rhul.ac.uk/courses/CS1830/sprites/explosion-spritesheet.png')\n\n# x values should be updated and not y values by any +ve values\ntreeImg = simplegui.load_image('http://personal.rhul.ac.uk/zeac/084/Test_image.jpg')\ncar_crash = simplegui.load_image('http://personal.rhul.ac.uk/zeac/084/carcrash.png')\n\n\n\nexplosionSprite = Sprite(explosionSheet, 9, 9)\n\nuserCar = UserCar(userCarImg, Vector((0, DISPLAYH/2)), 5, 5)\ntree1 = Tree(treeImg, 0+treeImg.get_height()/2, DISPLAYW)\ntree2 = Tree(treeImg, DISPLAYH-treeImg.get_height()/2, DISPLAYW)\nw1 = Wall((0, 75), (DISPLAYW, 75), 12, 'Green', Vector((0, 1)))\nw2 = Wall((0, 600), (DISPLAYW, 600), 12, 'Green', Vector((0, -1)))\n\nkbd = Keyboard()\n\n#interaction = Interaction(userCar, kbd)\n\n\n\ndef click(pos):\n for x in range(0, len(arrayButton)):\n if arrayButton[x].contains(pos):\n print(\"button\")\n arrayButton[x].clickBtn()\n\ndef draw(canvas):\n image = simplegui.load_image(\"https://i.imgur.com/8tYYDrc.png\")\n canvas.draw_image(image, (image.get_width()/2, image.get_height()/2), (image.get_width(),image.get_height()), (DISPLAYW/2,DISPLAYH/2), (image.get_width(), image.get_height()))\n for x in range(0, len(arrayButton)):\n arrayButton[x].draw(canvas)\n\n\ndef enter_game():\n frame.set_draw_handler(drawGame)\n\n\ninteraction = Interaction(userCar, kbd, [tree1, tree2], w1, w2)\n\n#parameter passed in as canvas\ndef drawGame(canvas):\n global papaya_time, tree_speed\n # global levelImage\n # image = simplegui.load_image(levelImage)\n # canvas.draw_image(image, (image.get_width()/2, image.get_height()/2), (image.get_width(),image.get_height()), (display_width/2,display_height/2), (image.get_width(), image.get_height()))\n\n\n #obj_Int.CarsCollison()\n #obj_Int.TouchPapaya()\n #obj_Int.missileCollision()\n interaction.update()\n tree1.update()\n tree2.update()\n userCar.update()\n tree1.draw(canvas)\n tree2.draw(canvas)\n userCar.draw(canvas)\n w1.draw(canvas)\n w2.draw(canvas)\n 
interaction.weapColl.update()\n interaction.weapColl.draw(canvas)\n\n\n\n#PUT LOGIC SOMEWHERE FOR BOOSTING\n#if (papaya_time == True and obj_Int.touchPapaya == False):\n#draw_Papaya(canvas)\n\n # if (interaction.carCollision == True):\n # explosionSprite.draw(canvas, userCar.pos.x, userCar.pos.y)\n # explosionSprite.animate = True\n\n #Logic to contain all explosions on screen and hiding what isn't being exploded.\n\n\ndef timer_handler():\n print(\"Papaya Spawn Stuff\")\n\n\ndef draw_Papaya(canvas):\n pass\n #canvas.draw_image(papayaImg, (25, 25), (50, 50), (papaya_x, papaya_y), (50, 50))\n\ndef clickMainMenu(pos):\n for x in range(0, len(arrayButton)):\n if(arrayButton[x].contains(pos)):\n arrayButton[x].clicked = True\n arrayButton[x].clickBtn()\n\ndef clickLevelSelect(pos):\n for x in range(0, len(levelButton)):\n if(levelButton[x].contains(pos)):\n levelButton[x].clickBtn()\n\ndef enter_help():\n pass\n\ndef level_select(canvas):\n image = simplegui.load_image(\"https://i.imgur.com/8tYYDrc.png\")\n canvas.draw_image(image, (image.get_width()/2, image.get_height()/2), (image.get_width(),image.get_height()), (DISPLAYW/2,DISPLAYH/2), (image.get_width(), image.get_height()))\n level1 = Button(\"https://i.imgur.com/sZlcBI9.png\", (150, 450), enter_level1)\n level2 = Button(\"https://i.imgur.com/VWL7wfu.png\", (500, 450), enter_level2)\n level3 = Button(\"https://i.imgur.com/wVUdTVL.png\", (850, 450), enter_level3)\n global levelButton\n levelButton = [level1, level2, level3]\n for x in range(0, len(levelButton)):\n levelButton[x].draw(canvas)\n\ndef enter_level_select():\n frame.set_mouseclick_handler(clickLevelSelect)\n frame.set_draw_handler(level_select)\n\ndef quit():\n exit(0)\n\ndef enter_level1():\n # levelImage = \"\"\n frame.set_draw_handler(drawGame)\n\ndef enter_level2():\n # levelImage = \"\"\n frame.set_draw_handler(drawGame)\n\ndef enter_level3():\n # levelImage = \"\"\n frame.set_draw_handler(drawGame)\n\n\nstart = Button(\"https://i.imgur.com/xoZnCmL.png\", (120, 450), enter_level_select)\nhelp = Button(\"https://i.imgur.com/6OfeKop.png\", (470, 450), enter_help)\nquit = Button(\"https://i.imgur.com/zSSFt11.png\", (820,450), quit)\narrayButton = [start, help, quit]\nframe = simplegui.create_frame(\"Papaya Racers\", DISPLAYW, DISPLAYH)\nframe.set_mouseclick_handler(clickMainMenu)\nframe.set_canvas_background('white')\nframe.set_draw_handler(draw)#automatically passes on the canvas\nframe.set_keydown_handler(kbd.keyDown)\nframe.set_keyup_handler(kbd.keyUp)\ntimer = simplegui.create_timer(5000, timer_handler)\ntimer.start()\nframe.start()\n","sub_path":"Papaya.py","file_name":"Papaya.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"557576028","text":"\"\"\" Contains the non-database models for our app.\n\nPurpose: contains the models for unsaved data and read-only data in json format\nAuthor: Tom W. Hartung\nDate: Winter, 2017.\nCopyright: (c) 2017 Tom W. 
Hartung, Groja.com, and JooMoo Websites LLC.\nReference:\n (none, yet)\n\"\"\"\n\nimport json\nimport os\nfrom django.contrib import messages\n\nfrom .database import Questionnaire\n\nDJANGO_DEBUG = os.environ.get('DJANGO_DEBUG')\n\n\nclass Score:\n\n \"\"\" Class to calculate, contain, and display the score for the quiz \"\"\"\n\n def __init__(self):\n self.score_is_complete = False\n self.unanswered_question_count = -1\n self.e_score = 0\n self.i_score = 0\n self.n_score = 0\n self.s_score = 0\n self.f_score = 0\n self.t_score = 0\n self.j_score = 0\n self.p_score = 0\n self.opposite_type = {\n \"E\": \"I\", \"I\": \"E\",\n \"N\": \"S\", \"S\": \"N\",\n \"F\": \"T\", \"T\": \"F\",\n \"J\": \"P\", \"P\": \"J\",\n }\n self.e_pct = None\n self.i_pct = None\n self.n_pct = None\n self.s_pct = None\n self.f_pct = None\n self.t_pct = None\n self.j_pct = None\n self.p_pct = None\n\n def score_quiz(self, quiz_size_slug, cleaned_data):\n\n \"\"\" Process the data from the form and set the scores \"\"\"\n \"\"\" question_list is 0 based, the form questions are 1-based \"\"\"\n\n # self.print_cleaned_data(cleaned_data)\n questions = Questions()\n questions_in_form = Questionnaire.get_question_count_for_slug(quiz_size_slug)\n questions_answered = 0\n\n for form_question_str in sorted(cleaned_data):\n if not form_question_str.startswith(\"question_\"):\n continue\n question_int = int(form_question_str.replace(\"question_\", \"\"))\n answer_123_type = questions.get_answer_123_type(question_int)\n answer_str = cleaned_data[form_question_str]\n if len(answer_str) > 0:\n answer_int = int(answer_str)\n answer_weight_str = questions.get_answer_weight(question_int, answer_str)\n answer_weight_int = int(answer_weight_str)\n self.tally_answer(answer_123_type, answer_int, answer_weight_int)\n questions_answered += 1\n if DJANGO_DEBUG:\n answer_text = questions.get_answer_text(question_int, answer_str)\n question_text = questions.get_question_text(question_int)\n print('Score.score_quiz -',\n str(question_int) + ' (' + answer_123_type + ')', '/',\n str(answer_int) + ' (' + answer_weight_str + ')',\n question_text, '/',\n answer_text)\n\n print('Score - score_quiz: questions_answered/questions_in_form',\n str(questions_answered) + '/' + str(questions_in_form))\n self.unanswered_question_count = questions_in_form - questions_answered\n if self.unanswered_question_count == 0:\n self.score_is_complete = True\n return self\n\n def save_questionnaire(self, cleaned_data, quiz_size_slug):\n email = cleaned_data[\"email\"]\n if email == '':\n print( 'views.quiz: No email given, not saving quiz')\n else:\n print( 'views.quiz: saving quiz for \"' + email + '\"')\n quiz_db = Questionnaire()\n quiz_db.save_questionnaire(cleaned_data, quiz_size_slug)\n\n def print_cleaned_data(self, cleaned_data):\n \"\"\" print out the cleaned data, in order by question number \"\"\"\n print('Score.print_cleaned_data - cleaned_data:')\n\n for question_xx in sorted(cleaned_data):\n print('\\tanswer for ' + question_xx + ': ' + cleaned_data[question_xx])\n\n def tally_answer(self, answer_123_type, answer_int, answer_weight_int):\n\n \"\"\" Add the answer_weight to the appropriate score data member \"\"\"\n\n if answer_int <= 3:\n type_for_answer = answer_123_type\n else:\n type_for_answer = self.opposite_type[answer_123_type]\n\n if type_for_answer is \"E\":\n self.e_score += answer_weight_int\n elif type_for_answer is \"I\":\n self.i_score += answer_weight_int\n elif type_for_answer is \"N\":\n self.n_score += answer_weight_int\n elif 
type_for_answer is \"S\":\n self.s_score += answer_weight_int\n elif type_for_answer is \"F\":\n self.f_score += answer_weight_int\n elif type_for_answer is \"T\":\n self.t_score += answer_weight_int\n elif type_for_answer is \"J\":\n self.j_score += answer_weight_int\n elif type_for_answer is \"P\":\n self.p_score += answer_weight_int\n\n if DJANGO_DEBUG:\n print('Score.tally_answer - added',\n str(answer_weight_int) + ' to '+ type_for_answer + ': ',\n self.__str__())\n return True\n\n def is_complete(self):\n return self.score_is_complete\n\n def set_incomplete_message(self, request):\n if self.unanswered_question_count == 1:\n incomplete_msg = 'There is ' + \\\n str(self.unanswered_question_count) + ' unanswered question'\n else:\n incomplete_msg = 'There are ' + \\\n str(self.unanswered_question_count) + ' unanswered questions'\n messages.add_message(request, messages.ERROR, incomplete_msg)\n return True\n\n def set_quiz_results_messages(self, request):\n \"\"\" Set the messages we display on the results page \"\"\"\n four_letter_type = \"Type: \" + self.as_four_letter_type()\n pcts_and_counts_html = self.get_pcts_and_counts_html()\n messages.add_message(request, messages.INFO, four_letter_type)\n messages.add_message(request, messages.INFO, pcts_and_counts_html)\n return True\n\n def as_four_letter_type(self):\n \"\"\" Return a string containing the four letter type \"\"\"\n four_letter_type = ''\n\n if self.i_score < self.e_score:\n four_letter_type += 'E'\n elif self.i_score == self.e_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'I'\n\n if self.s_score < self.n_score:\n four_letter_type += 'N'\n elif self.s_score == self.n_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'S'\n\n if self.t_score < self.f_score:\n four_letter_type += 'F'\n elif self.t_score == self.f_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'T'\n\n if self.p_score < self.j_score:\n four_letter_type += 'J'\n elif self.p_score == self.j_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'P'\n\n return four_letter_type\n\n def get_pcts_and_counts_html(self):\n \"\"\" Return an html string containing the score's percents and counts \"\"\"\n score_list = self.as_list_of_pcts_and_counts()\n pcts_and_counts_html = '
    '\n        for score_pair in score_list:\n            pcts_and_counts_html += '  &bull; '\n            for single_score in score_pair:\n                pcts_and_counts_html += single_score + ' '\n            pcts_and_counts_html += '  &bull; '\n        pcts_and_counts_html += '
'\n return pcts_and_counts_html\n\n def calculate_percentages(self):\n \"\"\" Calculate the percentages \"\"\"\n total_ei_score = self.e_score + self.i_score\n total_ns_score = self.n_score + self.s_score\n total_ft_score = self.f_score + self.t_score\n total_jp_score = self.j_score + self.p_score\n\n if total_ei_score > 0:\n self.e_pct = round(100 * self.e_score / total_ei_score)\n self.i_pct = round(100 * self.i_score / total_ei_score)\n else:\n self.e_pct = 0\n self.i_pct = 0\n\n if total_ns_score > 0:\n self.n_pct = round(100 * self.n_score / total_ns_score)\n self.s_pct = round(100 * self.s_score / total_ns_score)\n else:\n self.n_pct = 0\n self.s_pct = 0\n\n if total_ft_score > 0:\n self.f_pct = round(100 * self.f_score / total_ft_score)\n self.t_pct = round(100 * self.t_score / total_ft_score)\n else:\n self.f_pct = 0\n self.t_pct = 0\n\n if total_jp_score > 0:\n self.j_pct = round(100 * self.j_score / total_jp_score)\n self.p_pct = round(100 * self.p_score / total_jp_score)\n else:\n self.j_pct = 0\n self.p_pct = 0\n\n def as_list_of_pcts_and_counts(self):\n \"\"\" Return a list containing both percentages and counts \"\"\"\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_list = [\n ['E: ' + str(self.e_pct) + '% (' + str(self.e_score) + ')',\n 'I: ' + str(self.i_pct) + '% (' + str(self.i_score) + ')'],\n ['N: ' + str(self.n_pct) + '% (' + str(self.n_score) + ')',\n 'S: ' + str(self.s_pct) + '% (' + str(self.s_score) + ')'],\n ['F: ' + str(self.f_pct) + '% (' + str(self.f_score) + ')',\n 'T: ' + str(self.t_pct) + '% (' + str(self.t_score) + ')'],\n ['J: ' + str(self.j_pct) + '% (' + str(self.j_score) + ')',\n 'P: ' + str(self.p_pct) + '% (' + str(self.p_score) + ')']\n ]\n return score_list\n\n def to_kv_pairs(self):\n \"\"\" Returns the current score as a list of key-value pairs \"\"\"\n score = {\n \"E\": self.e_score,\n \"I\": self.i_score,\n \"N\": self.n_score,\n \"S\": self.s_score,\n \"F\": self.f_score,\n \"T\": self.t_score,\n \"J\": self.j_score,\n \"P\": self.p_score,\n }\n return score\n\n #\n # Reference for purpose of __str__() and __repl__():\n # http://stackoverflow.com/questions/3691101/what-is-the-purpose-of-str-and-repr-in-python\n #\n\n def __repl__(self):\n return str(self.to_kv_pairs())\n\n def __str__(self):\n score_str = 'E/I: ' + str(self.e_score) + '/' + str(self.i_score) + '; '\n score_str += 'N/S: ' + str(self.n_score) + '/' + str(self.s_score) + '; '\n score_str += 'F/T: ' + str(self.f_score) + '/' + str(self.t_score) + '; '\n score_str += 'J/P: ' + str(self.j_score) + '/' + str(self.p_score)\n return score_str\n\n\nclass Questions:\n\n \"\"\" Read in and work with all the questions in the entire quiz \"\"\"\n\n\n def __init__(self):\n\n \"\"\" Populate the question_list with questions from the json file \"\"\"\n\n self.question_list = self.read_quiz_json()\n\n def read_quiz_json(self):\n\n \"\"\" Read the quiz questions and answers from the json file \"\"\"\n\n site_content_dir = os.path.abspath(os.path.dirname(__file__))\n QUIZ_FILE_DIR = site_content_dir + '/static/content/json/quiz/'\n QUIZ_FILE_NAME = 'seeourminds_quiz.json'\n\n quiz_file_path = QUIZ_FILE_DIR + QUIZ_FILE_NAME\n quiz_json_file = open(quiz_file_path)\n quiz_json_string = quiz_json_file.read()\n quiz_json_file.close()\n question_list = json.loads(quiz_json_string)\n return(question_list)\n\n def get_quiz_question(self, question_int):\n\n \"\"\" Return the entire quiz question (answers, weights, etc.)\"\"\"\n\n quiz_question = self.question_list[question_int]\n # 
print('Questions.get_quiz_question - question_int:', question_int)\n # print('Questions.get_quiz_question - quiz_question:', quiz_question)\n return quiz_question\n\n def get_question_text(self, question_int):\n\n \"\"\" Get and return the question_text for the question \"\"\"\n\n quiz_question = self.get_quiz_question(question_int)\n question_text = quiz_question['question_text']\n return question_text\n\n def get_choices(self, question_int):\n\n \"\"\" Return the answer choices for the given question \"\"\"\n\n quiz_question = self.get_quiz_question(question_int)\n choices = []\n\n if len(quiz_question['answer_1_text']) > 0 and \\\n int(quiz_question['answer_1_weight']) > 0:\n choice_1 = ['1', quiz_question['answer_1_text']]\n choices.append(choice_1)\n\n if len(quiz_question['answer_2_text']) > 0 and \\\n int(quiz_question['answer_2_weight']) > 0:\n choice_2 = ['2', quiz_question['answer_2_text']]\n choices.append(choice_2)\n\n if len(quiz_question['answer_3_text']) > 0 and \\\n int(quiz_question['answer_3_weight']) > 0:\n choice_3 = ['3', quiz_question['answer_3_text']]\n choices.append(choice_3)\n\n if len(quiz_question['answer_4_text']) > 0 and \\\n int(quiz_question['answer_4_weight']) > 0:\n choice_4 = ['4', quiz_question['answer_4_text']]\n choices.append(choice_4)\n\n if len(quiz_question['answer_5_text']) > 0 and \\\n int(quiz_question['answer_5_weight']) > 0:\n choice_5 = ['5', quiz_question['answer_5_text']]\n choices.append(choice_5)\n\n if len(quiz_question['answer_6_text']) > 0 and \\\n int(quiz_question['answer_6_weight']) > 0:\n choice_6 = ['6', quiz_question['answer_6_text']]\n choices.append(choice_6)\n\n answer_7_text = quiz_question.get('answer_7_text')\n # print(\"answer_7_text:\", answer_7_text)\n\n if answer_7_text is not None:\n choice_7 = ['7', answer_7_text]\n choices.append(choice_7)\n\n # print('Questions.get_choices - question_int:', question_int)\n # print('Questions.get_choices - len(choices):', len(choices))\n return choices\n\n def get_answer_123_type(self, question_int):\n\n \"\"\" Get and return the answer_123_type (e.g., \"E\") for the question \"\"\"\n\n quiz_question = self.get_quiz_question(question_int)\n answer_123_type = quiz_question['answer_123_type']\n return answer_123_type\n\n def get_answer_text(self, question_int, answer_str):\n\n \"\"\" Get and return the answer_X_text for the selected answer 'X' \"\"\"\n\n quiz_question = self.get_quiz_question(question_int)\n answer_text_key = \"answer_\" + answer_str + \"_text\"\n answer_text = quiz_question[answer_text_key]\n return answer_text\n\n def get_answer_weight(self, question_int, answer_str):\n\n \"\"\" Get and return the answer_X_weight for the selected answer 'X' \"\"\"\n\n quiz_question = self.get_quiz_question(question_int)\n answer_weight_key = \"answer_\" + answer_str + \"_weight\"\n answer_weight = quiz_question[answer_weight_key]\n return answer_weight\n","sub_path":"23-som_postgresql_exp/Site/content/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"223049710","text":"from appconf import AppConf\nfrom django.conf import settings\n\n\nclass SortableConf(AppConf):\n STATIC_URL = u'/static/'\n JQUERY_LIB = u\"{}{}\".format(\n getattr(settings, u'STATIC_URL', u'/static/'),\n u\"sortable/js/jquery-2.1.0.min.js\"\n )\n JQUERYUI_LIB = u\"{}{}\".format(\n getattr(settings, u'STATIC_URL', u'/static/'),\n u\"sortable/js/jquery-ui-1.10.4.custom.min.js\"\n )\n\n 
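    # (Editor's sketch, hedged: with django-appconf, importing this class once
    # registers the defaults above, and each configure_<name> hook below may
    # override the registered value. Illustrative usage only; the SORTABLE_
    # prefix follows the usual appconf naming convention:
    #     from sortable.conf import SortableConf  # noqa: registers settings
    #     from django.conf import settings
    #     settings.SORTABLE_JQUERY_LIB            # resolved via the hooks
    # )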
def configure_static_url(self, value):\n if not getattr(settings, 'STATIC_URL', None):\n self._meta.holder.STATIC_URL = value\n return value\n return getattr(settings, 'STATIC_URL')\n\n def configure_jquery_lib(self, value):\n if not getattr(settings, 'JQUERY_LIB', None):\n self._meta.holder.JQUERY_LIB = value\n return value\n return getattr(settings, 'JQUERY_LIB')\n\n def configure_jqueryui_lib(self, value):\n if not getattr(settings, 'JQUERYUI_LIB', None):\n self._meta.holder.JQUERYUI_LIB = value\n return value\n return getattr(settings, 'JQUERYUI_LIB')\n","sub_path":"sortable/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"25037335","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\nCopyright (c) 2016, Ahmed Şeref\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of mongo_pager nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nimport math\n\n__author__ = u'Ahmed Şeref GÜNEYSU'\n\n\nclass MongoPager(object):\n \"\"\"\n Usage:\n >>> from pymongo import MongoClient\n >>> import uuid\n >>> from pymongopager import MongoPager\n >>> db = MongoClient(\"mongodb://localhost/test\")[\"test\"]\n >>> contents_cursor = cursor=db.get_collection(\"contents\").find({})\n\n >>> _ = db.get_collection(\"contents\").remove({})\n >>> for i in range(10):\n ... _ = db.get_collection(\"contents\").insert_one({'_id': uuid.uuid1().__str__()})\n\n >>> print contents_cursor.clone().count()\n 10\n >>> pages = MongoPager(cursor=contents_cursor.clone(),\n ... limitpages=None,\n ... perpage=3)\n >>> for page in pages:\n ... print page.count(with_limit_and_skip=True)\n 3\n 3\n 3\n 1\n >>> _ = db.get_collection(\"contents\").remove({})\n >>> for i in range(100):\n ... _ = db.get_collection(\"contents\").insert_one({'_id': uuid.uuid1().__str__()})\n\n >>> print contents_cursor.clone().count()\n 100\n >>> pages = MongoPager(cursor=contents_cursor.clone(),\n ... limitpages=4,\n ... perpage=35)\n >>> for page in pages:\n ... 
print page.count(with_limit_and_skip=True)\n 35\n 35\n 30\n \"\"\"\n\n def __init__(self, cursor, page=0, perpage=25, limitpages=None):\n \"\"\"\n :param cursor: Cursor object that would be paged.\n :type cursor: pymongo.cursor.CursorType\n\n :param limitpages: If set to `0` or `None` iterates all pages until cursor contains no documents.\n :type limitpages: int\n\n :param perpage:\n :type perpage: int\n\n :param page:\n :type page: int\n \"\"\"\n self.page = page\n self.cursor = cursor\n self.perpage = perpage\n\n self.limitpages = limitpages or int(math.ceil(float(cursor.count()) / self.perpage))\n\n def __iter__(self):\n return self\n\n def next(self):\n if (self.limitpages != 0 or self.limitpages) and self.page == self.limitpages:\n raise StopIteration()\n self.page += 1\n cursor = self.cursor.clone().skip((self.page - 1) * self.perpage).limit(self.perpage)\n if cursor.count(with_limit_and_skip=True) == 0:\n raise StopIteration()\n else:\n return cursor\n","sub_path":"pymongopager/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"330727597","text":"import numpy as np\nfrom scipy.misc import logsumexp\n\n\ndef _calculate_log_prob(vishid, hidbiases, visbiases, logZ, testX):\n numcases = testX.shape[0]\n pd = np.matmul(testX, np.transpose(visbiases)) + \\\n np.sum(\n np.log(1.0+np.exp(\n np.matmul(np.ones(shape=(numcases, 1)), hidbiases) +\n np.matmul(testX, vishid))),\n 1, keepdims=True)\n logprob = np.sum(pd) / numcases - logZ\n return logprob\n\n\ndef _base_rate(imgTrain, num_runs):\n eps = 0.00001\n # 784, 50000 - 60000\n [numdims, numcases] = imgTrain.shape\n\n p_int = np.mean(imgTrain, 1, keepdims=True)\n p_int[p_int == 0] = eps\n p_int[p_int == 1] = 1.0 - eps\n\n log_base_rate = np.log(p_int) - np.log(1.0 - p_int)\n\n return np.transpose(log_base_rate)\n\n\ndef _estimate_log_z(vishid, hidbiases, visbiases, numruns, beta, imgTrain=None):\n \"\"\"\n :param vishid: a matrix of RBM weights[numvis, numhid]\n :param hidbiases: a row vector of hidden biases[1 numhid]\n :param visbiases: a row vector of visible biases[1 numvis]\n :param numruns: number of AIS runs\n :param beta: a row vector containing beta's\n :param imgTrain: the data that is divided into batches (numcases numdims numbatches)\n :return:\n \"\"\"\n\n [numdims, numhids] = vishid.shape\n\n if imgTrain is not None:\n visbiases_base = _base_rate(imgTrain, numruns)\n else:\n visbiases_base = 0.0 * visbiases\n\n\n numcases = numruns\n\n visbias_base = np.tile(visbiases_base, (numcases, 1))\n hidbias = np.tile(hidbiases, (numcases, 1))\n visbias = np.tile(visbiases, (numcases, 1))\n\n logww = np.zeros(shape=(numcases, 1))\n negdata = np.tile(1.0 / (1.0 + np.exp(-visbiases_base)), (numcases, 1))\n negdata = negdata > np.random.rand(numcases, numdims)\n # negdata = negdata > 0.5\n\n logww = logww - (np.matmul(negdata, np.transpose(visbiases_base)) + numhids * np.log(2.0))\n\n Wh = np.matmul(negdata, vishid) + hidbias\n Bv_base = np.matmul(negdata, np.transpose(visbiases_base))\n Bv = np.matmul(negdata, np.transpose(visbiases))\n\n for bb in beta[1:]:\n expWh = np.exp(bb * Wh)\n logww = logww + (1.0 - bb) * Bv_base + bb * Bv + np.sum(np.log(1.0 + expWh), 1, keepdims=True)\n\n poshidprobs = np.divide(expWh, (1.0 + expWh))\n poshidstates = poshidprobs > np.random.rand(numcases, numhids)\n # poshidstates = poshidprobs > 0.5\n\n negdata = np.divide(\n 1.0,\n 1.0 + np.exp(\n - (1.0 - bb) * visbias_base\n 
- bb * (np.matmul(poshidstates, np.transpose(vishid)) + visbias)))\n negdata = negdata > np.random.rand(numcases, numdims)\n # negdata = negdata > 0.5\n\n Wh = np.matmul(negdata, vishid) + hidbias\n Bv_base = np.matmul(negdata, np.transpose(visbiases_base))\n Bv = np.matmul(negdata, np.transpose(visbiases))\n\n expWh = np.exp(bb * Wh)\n logww = logww - ((1.0 - bb) * Bv_base + bb * Bv + np.sum(np.log(1.0 + expWh), 1, keepdims=True))\n\n expWh = np.exp(Wh)\n logww = logww + np.matmul(negdata, np.transpose(visbiases)) + np.sum(np.log(1.0+expWh), 1, keepdims=True)\n\n r_AIS = logsumexp(logww.flatten()) - np.log(numcases)\n\n logZZ_base = np.sum(np.log(1.0 + np.exp(visbiases_base))) + numhids * np.log(2.0)\n logZZ_est = r_AIS + logZZ_base\n\n return logZZ_est\n\n\ndef estimate_log_prob(b, c, w, x_train, x_val, x_test, big_betas):\n # from scipy.io import savemat\n # m_dict = {'b': b, 'c': c, 'w': w, 'x_train': x_train, 'x_val': x_val, 'x_test': x_test}\n # savemat('test_data.mat', m_dict)\n\n num_runs = 100\n\n if big_betas:\n beta = np.concatenate(\n [\n np.arange(0.0, 0.5, 1.0/1000.0, np.float64),\n np.arange(0.5, 0.9, 1.0/10000.0, np.float64),\n np.arange(0.9, 1.0, 1.0/100000.0, np.float64),\n ], axis=0)\n else:\n beta = np.concatenate(\n [\n np.arange(0.0, 0.5, 1.0/100.0, np.float64),\n np.arange(0.5, 0.9, 1.0/500.0, np.float64),\n np.arange(0.9, 1.0, 1.0/1000.0, np.float64),\n ], axis=0)\n\n log_z = _estimate_log_z(w, c, b, num_runs, beta, x_train)\n log_prob_train = _calculate_log_prob(w, c, b, log_z, np.transpose(x_train))\n log_prob_val = _calculate_log_prob(w, c, b, log_z, np.transpose(x_val))\n log_prob_test = _calculate_log_prob(w, c, b, log_z, np.transpose(x_test))\n return log_prob_train, log_prob_val, log_prob_test\n\n\n","sub_path":"ais.py","file_name":"ais.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"468898116","text":"import numpy as np\nfrom sklearn.decomposition import PCA\nimport pickle as pk\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom mpl_toolkits.mplot3d import Axes3D\n\nplt.rcParams.update({'font.size':16})\n\nvec_scale = pk.load(open('visualize_attrvec_scale.pkl', 'rb'))\n#vec_scale = pk.load(open('visualize_attrvec.pkl', 'rb'))\nname_list = pk.load(open('name_list.pkl', 'rb'))\nid_2_director = pk.load(open('id_2_director.pkl', 'rb'))\nid_2_cast = pk.load(open('id_2_cast.pkl', 'rb'))\nid_2_genre = pk.load(open('id_2_genre.pkl', 'rb'))\n\n#rescale\n\n#genre2\nvec_scale[2*6,1] += 0.05 \nvec_scale[2*6,2] -= 0.03\n\n#genre3\nvec_scale[4*6,1] += 0.05 \n\n#cast3\nvec_scale[7*6,1] -= 0.02 \nvec_scale[7*6,2] += 0.02 \n\n\nid_2_topic = {}\nfor i in range(50):\n\tid_2_topic[i] = 'topic'+str(i)\n\nname_dic = {'d':id_2_director, 'c': id_2_cast, 'g':id_2_genre, 't': id_2_topic}\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nx = vec_scale[::6,0]\ny = vec_scale[::6,1]\nz = vec_scale[::6,2]\nprint(len(x))\ncolors = []\nname_cat = ['Genre2', 'Director', 'Genre1', 'Cast2', 'Genre3', 'Cast1','Topic 7', 'Cast3']\n#name_list = []\nfor i in range(8):\n colors += [(i+1)*100 for j in range(1)]\n# name_list += [name_cat[i] for j in range(6)]\n \nax.scatter(x,y,z,c=colors, s=50) \nfor i in range(8):\n\tax.text(x[i],y[i],z[i], '%s' % (name_cat[i]), size=30, zorder=1, color='k')\n\tax.set_xlabel(\"Factor 1\", labelpad=10)\n\tax.set_ylabel(\"Factor 2\", labelpad=10)\n\tax.set_zlabel('Factor 3', labelpad=10)\n\t\"\"\"\n\tfor j in range(6):\n\t\tname = 
name_list[6*i+j]\n\t\tprint(name)\n\t\tax.text(x[6*i+j],y[6*i+j],z[6*i+j], '%s' % (name[1]+'_'+str(name[0])), size=10, zorder=1, color='k') \n\t\"\"\"\nplt.show()","sub_path":"data/case_study_3dim/visulize.py","file_name":"visulize.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"35810174","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport torch\nimport numpy as np\nimport math\nimport os\nimport random\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nimport copy\nimport pickle\nimport matplotlib.pyplot as plt\nimport timeit\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport replay_buffer\nfrom replay_buffer import PrioritizedReplayBuffer\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nsave_dir=\"./tmp\"\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\nUSE_CUDA = torch.cuda.is_available()\nVariable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)\n\nbinary_fields = ['gender','mechvent','re_admission']\nnorm_fields= ['age','Weight_kg','GCS','HR','SysBP','MeanBP','DiaBP','RR','Temp_C','FiO2_1',\n 'Potassium','Sodium','Chloride','Glucose','Magnesium','Calcium',\n 'Hb','WBC_count','Platelets_count','PTT','PT','Arterial_pH','paO2','paCO2',\n 'Arterial_BE','HCO3','Arterial_lactate','SOFA','SIRS','Shock_Index',\n 'PaO2_FiO2','cumulated_balance', 'elixhauser', 'Albumin', 'CO2_mEqL', 'Ionised_Ca','time']\nlog_fields = ['max_dose_vaso','SpO2','BUN','Creatinine','SGOT','SGPT','Total_bili','INR',\n 'input_total','input_1hourly','output_total','output_1hourly']\nfeature_fields=binary_fields+norm_fields+log_fields\n\ndf = pd.read_csv('./rl_train_set_1h.csv')\ntest_df=pd.read_csv('./rl_test_set_1h.csv')\n\naction_map = {}\ncount = 0\nfor iv in range(5):\n for vaso in range(5):\n action_map[(iv,vaso)] = count\n count += 1\n\nREWARD_THRESHOLD = 20\nnoise_std = 0.4\ndim=128\nac_dim=25\nlr=0.0001\nalpha=1\nbeta_start = 0.2\nper_epsilon = 1e-5\nbuffer_size=50000\nmax_iters = 300000\nsample = 2\nbatch_size = 32\ngamma = 0.99\n\nclass NoisyLinear(nn.Module):\n def __init__(self, in_features, out_features, use_cuda, std_init=noise_std):\n super(NoisyLinear, self).__init__()\n \n self.use_cuda = use_cuda\n self.in_features = in_features\n self.out_features = out_features\n self.std_init = std_init\n \n self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))\n \n self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))\n self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))\n self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))\n \n self.reset_parameters()\n self.reset_noise()\n \n def forward(self, x):\n if self.use_cuda:\n weight_epsilon = self.weight_epsilon.cuda()\n bias_epsilon = self.bias_epsilon.cuda()\n else:\n weight_epsilon = self.weight_epsilon\n bias_epsilon = self.bias_epsilon\n \n if self.training: \n weight = self.weight_mu + self.weight_sigma.mul(Variable(weight_epsilon))\n bias = self.bias_mu + self.bias_sigma.mul(Variable(bias_epsilon))\n else:\n weight = 
self.weight_mu\n bias = self.bias_mu\n \n return F.linear(x, weight, bias)\n \n def reset_parameters(self):\n mu_range = 1 / math.sqrt(self.weight_mu.size(1))\n \n self.weight_mu.data.uniform_(-mu_range, mu_range)\n self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))\n \n self.bias_mu.data.uniform_(-mu_range, mu_range)\n self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))\n \n def reset_noise(self):\n epsilon_in = self._scale_noise(self.in_features)\n epsilon_out = self._scale_noise(self.out_features)\n \n self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))\n self.bias_epsilon.copy_(self._scale_noise(self.out_features))\n \n def _scale_noise(self, size):\n x = torch.randn(size)\n x = x.sign().mul(x.abs().sqrt())\n return x\n\nclass DuelingDQNnoise(nn.Module):\n def __init__(self, num_inputs, num_outputs):\n super(DuelingDQNnoise, self).__init__()\n \n self.num_inputs = num_inputs\n self.num_outputs = num_outputs\n \n self.linear = nn.Linear(num_inputs, dim)\n \n self.noisy_value1 = NoisyLinear(dim, dim, use_cuda = USE_CUDA)\n self.noisy_value2 = NoisyLinear(dim, 1, use_cuda = USE_CUDA)\n \n self.noisy_advantage1 = NoisyLinear(dim, dim, use_cuda = USE_CUDA)\n self.noisy_advantage2 = NoisyLinear(dim, self.num_outputs, use_cuda = USE_CUDA)\n \n \n def forward(self, x):\n x = F.relu(self.linear(x))\n \n value = F.relu(self.noisy_value1(x))\n value = self.noisy_value2(value)\n \n advantage = F.relu(self.noisy_advantage1(x))\n advantage = self.noisy_advantage2(advantage)\n \n x = value + advantage - advantage.mean() \n return x\n \n def reset_noise(self):\n self.noisy_value1.reset_noise()\n self.noisy_value2.reset_noise()\n self.noisy_advantage1.reset_noise()\n self.noisy_advantage2.reset_noise()\n \n \n def act(self, state):\n with torch.no_grad():\n state = Variable(torch.FloatTensor(state).unsqueeze(0))\n q_value = self.forward(state)\n action = q_value.max(1)[1].data.item()\n return action\n\ndef intermediate_reward(state, next_state, c0=-0.025, c1=-0.125, c2=-2):\n mediate_reward = 0\n if abs(state[30] - next_state[30])<1e-6 and next_state[30] > 0:\n mediate_reward = mediate_reward + c0\n \n mediate_reward = mediate_reward - c1 * (state[30] - next_state[30]) - c2 * math.tanh(state[29] - next_state[29])\n \n return mediate_reward\n\ndef process_sample(sample_size=1, add_reward=True, train=True, eval_type = None):\n if not train:\n if eval_type is None:\n raise Exception('Provide eval_type to process_batch')\n elif eval_type == 'train':\n a = df.copy()\n elif eval_type == 'val':\n a = val_df.copy()\n elif eval_type == 'test':\n a = test_df.copy()\n else:\n raise Exception('Unknown eval_type')\n else:\n a = df.sample(n=sample_size)\n \n states = None\n actions = None\n rewards = None\n next_states = None\n done_flags = None\n for i in a.index:\n cur_state = a.loc[i,feature_fields]\n iv = int(a.loc[i, 'iv_input'])\n vaso = int(a.loc[i, 'vaso_input'])\n action = action_map[iv,vaso]\n reward = a.loc[i,'reward']\n reward_new = a.loc[i,'reward_new']\n\n if i != df.index[-1]:\n # if not terminal step in trajectory \n if df.loc[i+1,'bloc'].item() - df.loc[i,'bloc'].item() > 1:\n return process_sample(add_reward=True, train=True, eval_type = None)\n \n if df.loc[i, 'icustayid'] == df.loc[i+1, 'icustayid']:\n next_state = df.loc[i + 1, feature_fields]\n done_flag = 0\n else:\n # trajectory is finished\n next_state = np.zeros(len(cur_state))\n done_flag = 1\n else:\n # last entry in df is the final state of that trajectory\n next_state = 
np.zeros(len(cur_state))\n            done_flag = 1\n        \n        if states is None:\n            states = copy.deepcopy(cur_state)\n        else:\n            states = np.vstack((states,cur_state))\n\n        if actions is None:\n            actions = [action]\n        else:\n            actions = np.vstack((actions,action))\n        \n        if add_reward and done_flag == 0:\n            reward = reward + intermediate_reward(cur_state, next_state) # add intermediate reward\n        if rewards is None:\n            rewards = [reward]\n        else:\n            rewards = np.vstack((rewards,reward))\n\n        if next_states is None:\n            next_states = copy.deepcopy(next_state)\n        else:\n            next_states = np.vstack((next_states,next_state))\n\n        if done_flags is None:\n            done_flags = [done_flag]\n        else:\n            done_flags = np.vstack((done_flags,done_flag))\n    \n    return (states, np.squeeze(actions), np.squeeze(rewards), next_states, np.squeeze(done_flags), a)\n\ncurrent_model = DuelingDQNnoise(len(feature_fields), ac_dim).to(device)\ntarget_model = DuelingDQNnoise(len(feature_fields), ac_dim).to(device)\n \noptimizer = optim.Adam(current_model.parameters(), lr=lr)\nreplay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=alpha)\n\ndef update_target(current_model, target_model):\n    target_model.load_state_dict(current_model.state_dict())\n\nupdate_target(current_model, target_model)\n\ndef compute_td_loss(batch_size, beta):\n    state, action, reward, next_state, dones, weights, indices = replay_buffer.sample(batch_size, beta)\n    \n    state = Variable(torch.FloatTensor(np.float32(state)))\n    next_state = Variable(torch.FloatTensor(np.float32(next_state)))\n    action = Variable(torch.LongTensor(action))\n    reward = Variable(torch.FloatTensor(reward))\n    dones = Variable(torch.FloatTensor(np.float32(dones)))\n    weights = Variable(torch.FloatTensor(weights))\n    \n    q_values = current_model(state)\n    next_q_values = current_model(next_state)\n    next_q_state_values = target_model(next_state)\n    \n    q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1) \n    next_q_value = next_q_state_values.gather(1,torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)\n    next_q_value=next_q_value.clamp( -REWARD_THRESHOLD , REWARD_THRESHOLD )\n    # use the batch's `dones` mask (a bare `done` would silently pick up the global from the training loop)\n    expected_q_value = reward + gamma * next_q_value * (1 - dones)\n\n    \n    loss = (q_value - expected_q_value.detach()).pow(2) *weights\n    prios = loss + per_epsilon\n    loss = loss.mean()\n    \n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    \n    replay_buffer.update_priorities(indices, prios.data.cpu().numpy())\n    current_model.reset_noise()\n    target_model.reset_noise()\n    \n    return loss\n\nbeta_i= lambda i: min(1.0, beta_start + 1 * i * (1.0 - beta_start)/(max_iters * sample)) \nlosses = []\nall_losses = []\n\nstart=timeit.default_timer()\nfor i in range(max_iters * sample):\n    state, action, reward, next_state, done, sampled_df = process_sample(sample_size=1, add_reward=True)\n    replay_buffer.push(state, action, reward, next_state, done)\n    \n    if len(replay_buffer) > batch_size and i % sample == 0:\n        beta = beta_i(i)\n        loss = compute_td_loss(batch_size, beta)\n        losses.append(loss.data.item())\n        all_losses.append(loss.data.item())\n        if i%(100 * sample)==0:\n            update_target(current_model, target_model)\n        if i % (1000 * sample) == 0 and i>0:\n            end=timeit.default_timer()\n            av_loss = np.array(losses).mean()\n            print(\"iter:\",i)\n            print((end-start)/(1000 * sample),\"s/iter\")\n            print(\"Average loss is \", av_loss)\n            losses=[]\n            start=timeit.default_timer()\n","sub_path":"HDQN_noise.py","file_name":"HDQN_noise.py","file_ext":"py","file_size_in_byte":11283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"650984123","text":"import os\nimport re\nimport matplotlib\n\nfrom pmp.multigoal.visualize import draw_histogram\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pmp.multigoal import MultigoalExperiment\nfrom pmp.multigoal.helpers import get_distribution_name, read_scores\n\n\ndef plot(filename, x, mins, rules, title=\"\"):\n rule1_name = rules[0].__str__()\n rule2_name = rules[1].__str__()\n\n axes = plt.gca()\n axes.set_xlim([0, 1])\n plt.xlabel(rule2_name)\n axes.set_ylim([0, 1])\n plt.ylabel(rule1_name)\n plt.plot(np.array(x).astype('float')/100, mins)\n plt.title(title)\n plt.legend(['min'])\n\n plt.savefig(filename)\n plt.clf()\n\n\ndef get_multigoal_rules(multigoal_rule):\n return [threshold_rule.rule for threshold_rule in multigoal_rule().rules]\n\n\ndef get_repetition_from_filename(dir_name, filename):\n filename_pattern = '{}_ILP_(\\d+).score'.format(dir_name)\n rep_match = re.match(filename_pattern, filename)\n\n if not rep_match:\n return None\n else:\n return int(rep_match.group(1))\n\n\ndef count_already_generated(current_dir):\n max_rep = 0\n if os.path.isdir(current_dir):\n for dir_name in os.listdir(current_dir):\n dir_path = os.path.join(current_dir, dir_name)\n if not os.path.isdir(dir_path):\n continue\n\n for filename in os.listdir(os.path.join(current_dir, dir_name)):\n rep = get_repetition_from_filename(dir_name, filename)\n max_rep = max(max_rep, rep)\n return max_rep\n\n\ndef draw_pareto_chart_from_winner_files(current_dir, m, n, k, multigoal_rule, distribution, distribution_params=None):\n print(\"{}, k={}\".format(multigoal_rule.__name__, k))\n # We assume that there are \"repetitions\" files generated for each threshold.\n rule_name = multigoal_rule.__name__\n distribution_name = get_distribution_name(distribution)\n rules = get_multigoal_rules(multigoal_rule)\n if not rules:\n return\n\n xy = {}\n\n for dir_name in os.listdir(current_dir):\n dir_pattern = '{}_{}_(\\d+)_\\d+_k{}_n{}_m{}'.format(rule_name, distribution_name, k, n, m)\n r1_match = re.match(dir_pattern, dir_name)\n if r1_match is None:\n continue\n r1 = r1_match.group(1)\n\n for filename in os.listdir(os.path.join(current_dir, dir_name)):\n rep = get_repetition_from_filename(dir_name, filename)\n if not rep:\n continue\n\n win_filename = os.path.join(current_dir, dir_name, filename)\n best_filename = '{}_{}.best'.format(dir_name, rep)\n best_filename = os.path.join(current_dir, dir_name, best_filename)\n\n scores = read_scores(win_filename)\n best = read_scores(best_filename)\n\n approx = scores[1] / best[1]\n if r1 in xy:\n xy[r1].append(approx)\n else:\n xy[r1] = [approx]\n\n xy_list = list(xy.items())\n xy_list = sorted(xy_list, key=lambda e: int(e[0]))\n x = [int(x) for x, _ in xy_list]\n y_min = [np.min(ys) for _, ys in xy_list]\n\n if distribution_params is None:\n filename = '{}_{}_k{}_n{}_m{}'.format(rule_name, distribution_name, k, n, m)\n else:\n distribution_params_string = '_'.join([dpk + str(dpv) for dpk, dpv in distribution_params.items()])\n distribution_params_string = distribution_params_string.replace('.', '')\n filename = '{}_{}_{}_k{}_n{}_m{}'.format(rule_name, distribution_name, distribution_params_string, k, n, m)\n\n title = \"voters: {}, candidates: {}, committee size: {}\".format(n, m, k)\n plot(filename, x, y_min, rules, title=title)\n\n\ndef draw_transition_from_winner_files(current_dir, m, n, k, multigoal_rule, distribution, repetitions):\n print(\"{}, k={}\".format(multigoal_rule.__name__, k))\n # We assume that there are 
\"repetitions\" files generated for each threshold.\n rule_name = multigoal_rule.__name__\n distribution_name = get_distribution_name(distribution)\n rules = get_multigoal_rules(multigoal_rule)\n if not rules:\n return\n\n for dir_name in os.listdir(current_dir):\n dir_path = os.path.join(current_dir, dir_name)\n if not os.path.isdir(dir_path):\n continue\n\n dir_pattern = '{}_{}_(\\d+)_(\\d+)_k{}_n{}_m{}'.format(rule_name, distribution_name, k, n, m)\n r1_r2_match = re.match(dir_pattern, dir_name)\n if r1_r2_match is None:\n continue\n\n percentages = r1_r2_match.group(1), r1_r2_match.group(2)\n\n draw_histogram(dir_path, multigoal_rule, k, percentages, distribution, repetitions, n, m, 'ILP')\n\n\ndef generate_winner_files_for_pareto(dir_name, configs, multigoal_rule, k, start=70, step=2, save_win=False):\n n_start = count_already_generated(dir_name)\n\n rules = get_multigoal_rules(multigoal_rule)\n if not rules:\n return\n\n x = np.array([a for a in range(start, 101, step)])\n\n for repetition, config in enumerate(configs[n_start:]):\n experiment = MultigoalExperiment(config, dir_name=dir_name)\n\n for i, r1 in enumerate(x):\n experiment.set_multigoal_election(multigoal_rule, k, percent_thresholds=(r1, 0))\n\n experiment.run(n=1, n_start=n_start + repetition + 1, criterion='rule2',\n save_in=False, save_out=False, save_win=save_win, save_best=True, save_score=True)\n","sub_path":"pmp/multigoal/pareto.py","file_name":"pareto.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"51098852","text":"from django.shortcuts import render, redirect\nfrom .forms import MergeForm\nfrom .pdf4 import mergefunction, insertfunction, getnumpagesfunction, split1function, inimagesfunction, outimagesfunction, compressfunction, rotatefunction, inpdffunction, intextfunction\nfrom django.core.files.storage import FileSystemStorage\n\n\ndef home_view(request):\n\t\treturn render(request, 'index.html')\ndef faq_view(request):\n\t\treturn render(request, 'faq.html')\ndef about_view(request):\n\t\treturn render(request, 'about.html')\ndef rules_view(request):\n\t\treturn render(request, 'rules.html')\ndef policy_view(request):\n\t\treturn render(request, 'policy.html')\n\ndef merge_view(request):\n if request.method == 'POST' and request.FILES['file1'] and request.FILES['file2']:\n fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n file1 = request.FILES['file1']\n file2 = request.FILES['file2']\n if (file1.size > 25*1024*1024) or (file2.size > 25*1024*1024): #защита от отправки на сервер больших файлов\n raise Exception('Слишком большой файл!')\n return render(request, 'merge.html')\n file1name = fs.save(file1.name, file1) #сохраняем файлы из формы\n file2name = fs.save(file2.name, file2)\n resulturl = mergefunction('./pdf4/media/'+ file1name, './pdf4/media/'+ file2name) #вызываем функцию для объединения файлов из pdf4.py, на выходе из которой получаем url результата\n fs.delete(file1name) #удаляем исходные файлы\n fs.delete(file2name)\n return render(request, 'merge.html', {\n 'resulturl': resulturl\n })\n return render(request, 'merge.html')\n\n\n\n\n\ndef insert_view(request):\n if 'file1_cookie' not in request.session:\n request.session['file1_cookie'] = [] #имя первого файла\n if 'file2_cookie' not in request.session:\n request.session['file2_cookie'] = [] #имя второго файла\n if request.method == 'POST':\n if 'button_111' in request.POST:\n fs = FileSystemStorage() #создаем экземпляр 
джанго-класс для работы с файлами\n            file1 = request.FILES['file1']\n            file2 = request.FILES['file2']\n            if (file1.size > 25*1024*1024) or (file2.size > 25*1024*1024): #защита от отправки на сервер больших файлов\n                raise Exception('Слишком большой файл!')\n                return render(request, 'insert.html')\n            file1name = fs.save(file1.name, file1) #сохраняем файлы из формы\n            file2name = fs.save(file2.name, file2)\n            request.session['file1_cookie'] = file1name\n            request.session['file2_cookie'] = file2name\n            insert_position = 9999\n            return render(request, 'insert.html', {\n                'insert_position': insert_position\n            })\n        else:\n            file1name = ''.join(request.session['file1_cookie'])\n            file2name = ''.join(request.session['file2_cookie'])\n            resulturl = ''\n            insert_position = request.POST['select_position']\n            # если в строчке ниже убрать явное приведение к типу int то появится ошибка django slice indices must be integers or None or have an __index__ method\n            resulturl = insertfunction('./pdf4/media/'+ file1name, './pdf4/media/'+ file2name, int(insert_position)) #вызываем функцию для объединения файлов из pdf4.py, на выходе из которой получаем url результата\n            fs = FileSystemStorage()\n            fs.delete(file1name) #удаляем исходные файлы\n            fs.delete(file2name)\n            del request.session['file1_cookie']\n            del request.session['file2_cookie']\n            return render(request, 'insert.html', {\n                'resulturl': resulturl,\n                'insert_position': insert_position\n            })\n    else:\n        return render(request, 'insert.html')\n\n\ndef split_1_view(request):\n    if request.method == 'POST' and request.FILES['file1']:\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        file1 = request.FILES['file1']\n        if file1.size > 50*1024*1024: #защита от отправки на сервер больших файлов\n            raise Exception('Слишком большой файл!')\n            return render(request, 'split-1.html')\n        file1name = fs.save(file1.name, file1) #сохраняем файл из формы\n        resulturl = split1function('./pdf4/media/'+ file1name) #вызываем функцию для разделения файла на отдельные страницы из pdf4.py, на выходе из которой получаем url для скачивания архива со страницами\n        fs.delete(file1name) #удаляем исходный файлы\n        return render(request, 'split-1.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'split-1.html')\n\n\ndef in_images_view(request):\n    if request.method == 'POST' and request.FILES['file1']:\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        file1 = request.FILES['file1'] #передаем данные из формы в переменные\n        if file1.size > 50*1024*1024: #защита от отправки на сервер больших файлов\n            raise Exception('Слишком большой файл!')\n            return render(request, 'in-images.html')\n        format = request.POST['format1']\n        file1name = fs.save(file1.name, file1) #сохраняем файл из переменной\n        resulturl = inimagesfunction('./pdf4/media/'+ file1name, format) #вызываем функцию для преобразования файла в картинки из pdf4.py, на выходе из которой получаем url для скачивания архива со страницами\n        fs.delete(file1name) #удаляем исходный файлы\n        return render(request, 'in-images.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'in-images.html')\n\ndef out_images_view(request):\n    if request.method == 'POST':\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        filename = dict.fromkeys([1,2,3,4,5]) #создем словарь для имен файлов, ключи словаря 1,2,3,4,5 а значения у ключей None\n        if 'file1' in request.FILES: #если вместо этого написать if request.FILES['file1']: то будет появляться ошибка в случае если поле пустое 
(если пользователь не выбрал файл)\n            file1 = request.FILES['file1']\n            if file1.size > 25*1024*1024: #защита от отправки на сервер больших файлов\n                raise Exception('Слишком большой файл!')\n                return render(request, 'out-images.html')\n            filename[1] = fs.save(file1.name, file1) #сохраняем файлы из формы, имена файлов записываем в словарь\n        if 'file2' in request.FILES:\n            file2 = request.FILES['file2']\n            if file2.size > 25*1024*1024: #защита от отправки на сервер больших файлов\n                raise Exception('Слишком большой файл!')\n                return render(request, 'out-images.html')\n            filename[2] = fs.save(file2.name, file2) #сохраняем файлы из формы, имена файлов записываем в словарь\n        if 'file3' in request.FILES:\n            file3 = request.FILES['file3']\n            if file3.size > 25*1024*1024: #защита от отправки на сервер больших файлов\n                raise Exception('Слишком большой файл!')\n                return render(request, 'out-images.html')\n            filename[3] = fs.save(file3.name, file3) #сохраняем файлы из формы, имена файлов записываем в словарь\n        if 'file4' in request.FILES:\n            file4 = request.FILES['file4']\n            if file4.size > 25*1024*1024: #защита от отправки на сервер больших файлов\n                raise Exception('Слишком большой файл!')\n                return render(request, 'out-images.html')\n            filename[4] = fs.save(file4.name, file4) #сохраняем файлы из формы, имена файлов записываем в словарь\n        if 'file5' in request.FILES:\n            file5 = request.FILES['file5']\n            if file5.size > 25*1024*1024: #защита от отправки на сервер больших файлов\n                raise Exception('Слишком большой файл!')\n                return render(request, 'out-images.html')\n            filename[5] = fs.save(file5.name, file5) #сохраняем файлы из формы, имена файлов записываем в словарь\n\n        resulturl = outimagesfunction(filename[1], filename[2], filename[3], filename[4], filename[5]) #вызываем функцию для сбора PDF-файла из картинок, на выходе из которой получаем url результата\n        #В отличии от предыдущих функций, тут мы передаем значения словаря без добавления './pdf4/media/'\n        if filename[1] != None:\n            fs.delete(filename[1]) #удаляем исходный файлы\n        if filename[2] != None:\n            fs.delete(filename[2])\n        if filename[3] != None:\n            fs.delete(filename[3])\n        if filename[4] != None:\n            fs.delete(filename[4])\n        if filename[5] != None:\n            fs.delete(filename[5])\n\n        return render(request, 'out-images.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'out-images.html')\n\n\n\ndef compress_view(request):\n    if request.method == 'POST' and request.FILES['file1']:\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        file1 = request.FILES['file1'] #передаем данные из формы в переменные\n        if file1.size > 50*1024*1024: #защита от отправки на сервер больших файлов\n            raise Exception('Слишком большой файл!')\n            return render(request, 'compress.html')\n        level = request.POST['level1']\n        file1name = fs.save(file1.name, file1) #сохраняем файл из переменной\n        resulturl = compressfunction('./pdf4/media/'+ file1name, level) #вызываем функцию для сжатия файла из pdf4.py, на выходе из которой получаем url для скачивания результата\n        fs.delete(file1name) #удаляем исходный файлы\n        return render(request, 'compress.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'compress.html')\n\n\n\ndef rotate_view(request):\n    if request.method == 'POST' and request.FILES['file1']:\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        file1 = request.FILES['file1'] #передаем данные из формы в переменные\n        if file1.size > 50*1024*1024: #защита от отправки на сервер больших файлов\n            raise Exception('Слишком 
большой файл!')\n            return render(request, 'rotate.html')\n        grad1 = request.POST['grad1']\n        pages1 = request.POST['pages1']\n        file1name = fs.save(file1.name, file1) #сохраняем файл из переменной\n        resulturl = rotatefunction('./pdf4/media/'+ file1name, grad1, pages1) #вызываем функцию файла из pdf4.py, которая в файле file1name поворачивает страницы pages1 на градус из grad1, на выходе из которой получаем url для скачивания результата\n        fs.delete(file1name) #удаляем исходный файл\n        return render(request, 'rotate.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'rotate.html')\n\ndef in_pdf_view(request):\n    if request.method == 'POST' and request.FILES['file1']:\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        file1 = request.FILES['file1'] #передаем данные из формы в переменные\n        if file1.size > 50*1024*1024: #защита от отправки на сервер больших файлов\n            raise Exception('Слишком большой файл!')\n            return render(request, 'in-pdf.html')\n        file1name = fs.save(file1.name, file1) #сохраняем файл из переменной\n        resulturl = inpdffunction('./pdf4/media/'+ file1name) #вызываем функцию для преобразования файла в PDF\n        fs.delete(file1name) #удаляем исходный файлы\n        return render(request, 'in-pdf.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'in-pdf.html')\n\ndef in_text_view(request):\n    if request.method == 'POST' and request.FILES['file1']:\n        fs = FileSystemStorage() #создаем экземпляр джанго-класс для работы с файлами\n        file1 = request.FILES['file1'] #передаем данные из формы в переменные\n        if file1.size > 50*1024*1024: #защита от отправки на сервер больших файлов\n            raise Exception('Слишком большой файл!')\n            return render(request, 'in-text.html')\n        format = request.POST['format1']\n        file1name = fs.save(file1.name, file1) #сохраняем файл из переменной\n        resulturl = intextfunction('./pdf4/media/'+ file1name, format) #вызываем функцию для преобразования файла в текстовый формат из pdf4.py, на выходе из которой получаем url для скачивания текстового файла\n        fs.delete(file1name) #удаляем исходный файлы\n        return render(request, 'in-text.html', {\n            'resulturl': resulturl\n        })\n    return render(request, 'in-text.html')\n\n","sub_path":"pdf4/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"608026905","text":"from google.appengine.ext import ndb\n\nclass Achievement(ndb.Model):\n    # Achievement name\n    name = ndb.StringProperty()\n    \n    # Achievement text\n    text = ndb.StringProperty()\n    \n    # Achievement category (Ex: date, comment, project category, etc. \n    # (ex. 
comment for 10 comments achievement))\n    category = ndb.StringProperty()\n    \n    # Achievement goal (Depending on the category: for comment its a straight number)\n    # Date (certain date or daterange) etc.\n    goal = ndb.StringProperty()\n\n    # Value in points\n    value = ndb.IntegerProperty()\n    \n    # Achievement image\n    image = ndb.StringProperty()\n    \n    # Meta data\n    creation_date = ndb.DateTimeProperty(auto_now_add = True)\n    \n    # Returns this achievement as a dictionary\n    def to_dict(self):\n        return {\n            \"achievement_id\": self.key.urlsafe(),\n            \"name\": self.name,\n            \"text\": self.text,\n            \"category\": self.category,\n            \"goal\": self.goal,\n            \"image\": self.image,\n            \"value\": self.value,\n            \"creation_date\": str(self.creation_date)\n        }\n\n\nclass User_Achivement(ndb.Model):\n    # User_id\n    user_id = ndb.StringProperty()\n    \n    # Achievement id\n    achievement_id = ndb.StringProperty()\n    \n    # Achievement date\n    creation_date = ndb.DateTimeProperty(auto_now_add = True)\n    \n    # Returns this user achievement as a dictionary\n    def to_dict(self):\n        return {\n            \"user_id\": self.user_id,\n            \"achievement_id\": self.achievement_id,\n            \"creation_date\": str(self.creation_date)\n        }\n","sub_path":"model/achievement.py","file_name":"achievement.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"114478859","text":"from typing import Optional, Union\n\nfrom graphql import GraphQLError, parse\n\n\ndef convert_camel_case_to_snake(graphql_name: str) -> str:\n    python_name = \"\"\n    for i, c in enumerate(graphql_name.lower()):\n        if i and c != graphql_name[i]:\n            python_name += \"_\"\n        python_name += c\n    return python_name\n\n\ndef gql(value: str) -> str:\n    parse(value)\n    return value\n\n\ndef unwrap_graphql_error(\n    error: Union[GraphQLError, Optional[Exception]]\n) -> Optional[Exception]:\n    if isinstance(error, GraphQLError):\n        return unwrap_graphql_error(error.original_error)\n    return error\n","sub_path":"ariadne/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"113257195","text":"import math\nN, K = map(int, input().split())\n\nans = 0\nfor i in range(1, K):\n    if i > N:\n        break\n    x = math.ceil(math.log2(K/i))\n    p = (1/2)**x\n    ans += p\n\nans *= 1/N\n\nif N >= K:\n    ans += (N-K+1)/N\n\nprint(\"{:.10f}\".format(ans))\n","sub_path":"DiffUme/DiceAndCoin.py","file_name":"DiceAndCoin.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"346688468","text":"from collections import Counter, defaultdict, OrderedDict, deque\nfrom bisect import bisect_left, bisect_right\nfrom functools import reduce, lru_cache\nfrom typing import List\nimport itertools\nimport math\nimport heapq\nimport string\ntrue = True\nfalse = False\nMIN, MAX, MOD = -0x3f3f3f3f, 0x3f3f3f3f, 1000000007\n\n\n# https://leetcode.com/problems/add-bold-tag-in-string/description/\n# Medium\nclass Solution:\n    def addBoldTag(self, s: str, words: List[str]) -> str:\n        bold = [False] * len(s)\n        for w in words:\n            start = s.find(w)\n            while start != -1:\n                for i in range(start, start + len(w)):\n                    bold[i] = True\n                start = s.find(w, start + 1)\n\n        res = \"\"\n        i = 0\n        while i < len(s):\n            if bold[i] == False:\n                res += s[i]\n                i += 1\n            else:\n                res += \"<b>\"\n                while i < len(s) and bold[i] == True:\n                    res += s[i]\n                    i+=1\n                res += \"</b>\"\n        return 
res\n","sub_path":"python_solutions/616.add-bold-tag-in-string.py","file_name":"616.add-bold-tag-in-string.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"70812842","text":"# Take a number: 56789. Rotate left, you get 67895.\n\n# Keep the first digit in place and rotate left the other digits: 68957.\n\n# Keep the first two digits in place and rotate the other ones: 68579.\n\n# Keep the first three digits and rotate left the rest: 68597. \n# Now it is over since keeping the first four it remains only \n# one digit which rotated is itself.\n\n# You have the following sequence of numbers:\n\n# 56789 -> 67895 -> 68957 -> 68579 -> 68597\n\n# and you must return the greatest: 68957.\n\n# Calling this function max_rot\n\n# max_rot(56789) should return 68957\n\ndef max_rot(n):\n\t\n\tlist_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python = []\n\t\n\tnumber_array = [int(x) for x in str(n)]\n\t\n\tfirst_number = 0\n\tsuper_encrypted_XXXXxxxXXXX_String = ''.join(str(e) for e in number_array)\n\tlist_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python.append(super_encrypted_XXXXxxxXXXX_String)\n\t\n\t# our first number is swapped in place with the last number as is\n\tnumber_array.append(number_array[0])\n\tnumber_array.pop(0)\n\tfirst_number = number_array\n\tsuper_encrypted_XXXXxxxXXXX_String01 = ''.join(str(e) for e in first_number)\n\tlist_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python.append(super_encrypted_XXXXxxxXXXX_String01)\n\t\n\t# our second number freezes the first index and then performs the swap\n\tsecond_number = 0 \n\tnumber_array.append(number_array[1])\n\tnumber_array.pop(1)\n\tsecond_number = number_array\n\tsuper_encrypted_XXXXxxxXXXX_String02 = ''.join(str(e) for e in second_number)\n\tlist_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python.append(super_encrypted_XXXXxxxXXXX_String02)\n\n\t# our third number freezes the second index and then performs the swap\n\tthird_number = 0 \n\tnumber_array.append(number_array[2])\n\tnumber_array.pop(2)\n\tthird_number = number_array\n\tsuper_encrypted_XXXXxxxXXXX_String03 = ''.join(str(e) for e in third_number)\n\tlist_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python.append(super_encrypted_XXXXxxxXXXX_String03)\n\n\t# our fourth number freezes the second index and then performs the swap\n\tfourth_number = 0 \n\tnumber_array.append(number_array[3])\n\tnumber_array.pop(3)\n\t# print(number_array)\n\tfourth_number = number_array\n\tsuper_encrypted_XXXXxxxXXXX_String04 = ''.join(str(e) for e in fourth_number)\n\tlist_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python.append(super_encrypted_XXXXxxxXXXX_String04)\n\t\n\tfinal_answer_to_life_itself = max(list_of_all_my_matrix_buddies_living_in_max_rot_because_they_love_python)\n\t\n\treturn int(final_answer_to_life_itself)\n\t\n\t\n\t\n\t\n\t\n","sub_path":"warmups/w18/w18-w-rotate-for-a-max/prompt.py","file_name":"prompt.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"424100461","text":"import streamlit as st\nimport numpy as np \nimport pandas as pd \n\n#streamlit run [filename]\n\nst.title('StackOverflow 2019 Developer Survey')\nst.write('(This data only reflects 50 rows for performance purposes)')\nst.subheader('Use the sidebar to filter the data set by student status. 
The below charts will auto-generate based on your selections.')\nst.subheader(' ')\nst.subheader(' ')\n\n#Caching allows you to reuse the already load variable\n#instead of reloading/recomputing\n@st.cache\ndef get_data(nrows):\n return pd.read_csv('survey_results_public.csv', nrows=nrows)\n\n#data_load_state = st.text('Loading data...')\ndf = get_data(50)\ndf.rename(columns={'Student': 'Selection'}, inplace=True)\n#data_load_state.text(\"Done! (using st.cache)\")\n\n# inspect raw data\nif st.checkbox('Select to see raw dataset'):\n st.write(df)\n\n\n # Create a Check box to show few summary details.\nif st.checkbox('Select to see top 10 databases and frameworks used'):\n grp_data = df.copy()\n grp_data['Count'] = 1\n st.subheader('Top 10 Databases used')\n st.write(pd.DataFrame(grp_data.groupby(['DatabaseWorkedWith'], sort=False)['Count'].count().rename_axis([\"DatabaseWorkedWith\"]).nlargest(10)))\n st.subheader('Top 10 Frameworks used')\n st.write(pd.DataFrame(grp_data.groupby(['WebFrameWorkedWith'], sort=False)['Count'].count().rename_axis([\"WebFrameWorkedWith\"]).nlargest(10)))\n\nst.subheader(' ')\nst.subheader('Filtered data table - selected columns will be added to the end')\nstudent_status_selection = st.sidebar.multiselect(\"Filter by student status here:\", df['Selection'].unique())\n#st.write(\"Selected:\", student_selection)\n\nvariables = st.sidebar.multiselect(\"Select the columns you'd like to see:\", df.columns)\n#st.write(\"You selected these variables\", variables)\n\nselected_segmented_data = df[(df['Selection'].isin(student_status_selection))]\n\nall_status_data = selected_segmented_data[variables]\n#student_data_is_check = st.checkbox(\"Display the data of selected student status\")\n#if student_data_is_check:\n# st.write(all_status_data)\nst.write(all_status_data)\n\nst.subheader('Country breakdown')\ncountry = selected_segmented_data.groupby('Country')['Selection'].count()\nst.bar_chart(country)\n\nst.subheader('Hobbyist?')\nhobbyist = selected_segmented_data.groupby('Hobbyist')['Selection'].count()\nst.bar_chart(hobbyist)\n\nst.subheader('Gender breakdown')\ngender = selected_segmented_data.groupby('Gender')['Selection'].count()\nst.bar_chart(gender)\n\nst.subheader('Education level')\nedlevel = selected_segmented_data.groupby('EdLevel')['Selection'].count()\nst.bar_chart(edlevel)\n","sub_path":"first_app.py","file_name":"first_app.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"274916508","text":"#coding=utf-8\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nfrom common.Basepage_D import Test_login\nfrom common.Basepage_A import SellertoolsPublish\nfrom common.Basepage_A import Test_tips\nfrom selenium import webdriver\nfrom time import sleep\nimport unittest\n\n\nclass TestPublishShopGoods(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Firefox()\n cls.driver.get('https://tools.7881.com/')\n cls.driver.find_element_by_xpath('//*[@id=\"top-html\"]/div[1]/a').click()\n cls.driver.implicitly_wait(2)\n Test_login().login(cls.driver, \"zdh01\", \"abc12345\")\n\n @classmethod\n def tearDownClass(cls):\n Test_tips().logout(cls.driver)\n\n\n def setUp(self):\n self.driver.get('https://tools.7881.com/')\n\n def tearDown(self):\n pass\n\n #@unittest.skip(\"先跳过此用例\")\n def test_001(self):\n \"\"\"验证可以发布卖家工具商城DNF担保游戏币成功\"\"\"\n 
SellertoolsPublish().PublishShopIsk(self.driver,'E:/ziliao/shopupfile.exe',\"3\",\"15312345678\",\"1582022144\")\n success_page = self.driver.find_element_by_xpath('/html/body/div[2]/div/div[1]/div[1]/h4').text\n self.assertEqual(success_page,'恭喜您,商品成功发布!')\n\n def test_002(self):\n \"\"\"验证可以发布卖家工具列表DNF担保游戏币成功\"\"\"\n SellertoolsPublish().PublishListIsk(self.driver, 'E:/ziliao/listupfile.exe', \"3\", \"15312345678\", \"1582022144\")\n success_page = self.driver.find_element_by_xpath('/html/body/div[2]/div/div[1]/div[1]/h4').text\n self.assertEqual(success_page, '恭喜您,商品成功发布!')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"UIZDH_LB/testCase/testI_publishShopGoods.py","file_name":"testI_publishShopGoods.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"201623210","text":"#!/usr/bin/env python\n\nfrom flask import jsonify\nfrom flask import request\n\nfrom api import app\nfrom api import db\nfrom api.views.auth.authenticator import login_required\nfrom api.config import log\n\nfrom api.models.models import Incident\n\n@app.route('/veris/incidents', methods=['GET'])\n@login_required\ndef incidents():\n log.debug('[!] %s Request To: %s From: %s' % \\\n (request.method, request.path, request.remote_addr))\n\n incidents = db.verisbase.find({}, {'incident_id': 1, '_id': 0})\n return jsonify({'Response' : 'Success',\n 'Incident Ids': [str(i['incident_id']) for i in incidents]})\n\n\n@app.route('/veris/incident', methods=['POST'])\n@login_required\ndef by_incident():\n log.debug('[!] %s Request To: %s From: %s' % \\\n (request.method, request.path, request.remote_addr))\n\n req = request.form.get('incident')\n if req is not None:\n incidents = db.verisbase.find({'incident_id': req}, {'_id': 0})\n return jsonify({'Response' : 'Success',\n 'Incident' : req,\n 'Results' : [i for i in incidents]})\n else:\n return jsonify({'Response':'Error',\n 'Message':'Missing {incident} parameter. Not found'})\n\n\n@app.route('/veris/incident/industry', methods=['POST'])\n@login_required\ndef by_vertical():\n log.debug('[!] %s Request To: %s From: %s' % \\\n (request.method, request.path, request.remote_addr))\n\n req = request.form.get('industry')\n if req is not None:\n incidents = db.verisbase.find({'victim.industry': req}, {'_id': 0})\n\n all_incidents = [Incident(inc) for inc in incidents]\n\n incidents = {}\n for incident in all_incidents:\n\n if incidents.has_key(incident.industry):\n incidents[incident.industry].append(incident.company)\n else:\n incidents[incident.industry] = [incident.company]\n\n return jsonify({ 'Response' : 'Success', 'Results' : incidents })\n else:\n return jsonify({'Response':'Error',\n 'Message':'Missing \"industry\" parameter. 
Not found.'})\n","sub_path":"api/views/incident/incidents.py","file_name":"incidents.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"214323500","text":"# pyhon yelp data exploration - this test currently works\n\nimport json\nimport clust_mini\n\n# get a list of IDs that belong to restaurants\nrestaurant_ids = []\nwith open(\"./yelp_data/yelp_academic_dataset_business.json\") as biz_file:\n for line in biz_file:\n json_obj = json.loads(line)\n if 'Restaurants' in json_obj['categories']:\n restaurant_ids.append(json_obj['business_id'])\n\n# create a dictionary of review texts indexed by user ID\nusers_file = open(\"./yelp_restaurant_review.json\", \"w\")\nwith open(\"./yelp_data/yelp_academic_dataset_review.json\") as reviews:\n for line in reviews:\n json_obj = json.loads(line)\n biz_id = json_obj['business_id']\n if biz_id in restaurant_ids:\n \tusers_file.write(line)\n\nusers_file.close()\n\n\n","sub_path":"filter_reviews_rest_only.py","file_name":"filter_reviews_rest_only.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"283106171","text":"\"\"\"\n\nTemplate for Characters\n\nCopy this module up one level and name it as you like, then\nuse it as a template to create your own Character class.\n\nTo make new logins default to creating characters\nof your new type, change settings.BASE_CHARACTER_TYPECLASS to point to\nyour new class, e.g.\n\nsettings.BASE_CHARACTER_TYPECLASS = \"game.gamesrc.objects.mychar.MyChar\"\n\nNote that objects already created in the database will not notice\nthis change, you have to convert them manually e.g. with the\n@typeclass command.\n\n\"\"\"\n#from ev import Character as DefaultCharacter\nfrom game.gamesrc.objects.object import Object\nfrom django.conf import settings\n\nclass Character(Object):\n \"\"\"\n This is just like the Object except it implements its own\n version of the at_object_creation to set up the script\n that adds the default cmdset to the object.\n \"\"\"\n\n def basetype_setup(self):\n \"\"\"\n Setup character-specific security\n\n You should normally not need to overload this, but if you do, make\n sure to reproduce at least the two last commands in this method (unless\n you want to fundamentally change how a Character object works).\n\n \"\"\"\n super(Character, self).basetype_setup()\n self.locks.add(\";\".join([\"get:false()\", # noone can pick up the character\n \"call:false()\"])) # no commands can be called on character from outside\n # add the default cmdset\n self.cmdset.add_default(settings.CMDSET_CHARACTER, permanent=True)\n\n def at_object_creation(self):\n #self.db.strength = 100\n #self.db.speed = 100\n #self.db.precision = 100\n #self.db.stamina = 100\n \n #self.db.intelligence = 100\n #self.db.charisma = 100\n #self.db.willpower = 100\n #self.db.natural_armor = 0\n pass\n\n @property\n def name(self):\n return \"{Y\" + self.key + \"{n\"\n \n @property\n def name_upper(self):\n return \"{Y\" + self.key[0].upper() + self.key[1:] + \"{n\"\n \n #def at_after_move(self, source_location):\n # \"Default is to look around after a move.\"\n # self.execute_cmd('look')\n\n def at_pre_puppet(self, player, sessid=None):\n if self.tags.get(\"hide_on_unpuppet\"):\n \"\"\"\n This recovers the character again after having been \"stoved away\"\n at the unpuppet\n \"\"\"\n if self.db.prelogout_location:\n # try to recover\n self.location = 
self.db.prelogout_location\n if self.location is None:\n # make sure location is never None (home should always exist)\n self.location = self.home\n if self.location:\n # save location again to be sure\n self.db.prelogout_location = self.location\n self.location.msg_contents(\"%s has entered the game.\" % self.name, exclude=[self])\n self.location.at_object_receive(self, self.location)\n else:\n player.msg(\"{r%s has no location and no home is set.{n\" % self, sessid=sessid)\n\n def at_post_puppet(self):\n self.msg(\"\\nYou become %s.\\n\" % self.name)\n \n if self.tags.get(\"hide_on_unpuppet\"):\n \"\"\"\n Called just after puppeting has completed.\n \"\"\"\n self.execute_cmd(\"look\")\n if self.location:\n self.location.msg_contents(\"%s has entered the game.\" % self.name, exclude=[self])\n\n def at_post_unpuppet(self, player, sessid=None):\n if self.tags.get(\"hide_on_unpuppet\"):\n \"\"\"\n We stove away the character when the player goes ooc/logs off,\n otherwise the character object will remain in the room also after the\n player logged off (\"headless\", so to say).\n \"\"\"\n if self.location: # have to check, in case of multiple connections closing\n self.location.msg_contents(\"%s has left the game.\" % self.name, exclude=[self])\n self.db.prelogout_location = self.location\n self.location = None\n","sub_path":"game/gamesrc/objects/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"164041892","text":"import webapp2\r\nimport os\r\nimport jinja2\r\n\r\nfrom google.appengine.api import users\r\n\r\nfrom anagram import Anagram\r\nfrom service import Services\r\n# environment creation\r\nJINJA_ENVIRONMENT = jinja2.Environment(\r\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\r\n extensions=[\"jinja2.ext.autoescape\"],\r\n autoescape=True)\r\n\r\nclass NewWord(webapp2.RedirectHandler,Services):\r\n\r\n# grtting datat from the database\r\n def get(self):\r\n# getting the data of the newword.html\r\n self.response.headers[\"Content-Type\"] = \"text/html\"\r\n template_values = {\r\n }\r\n template = JINJA_ENVIRONMENT.get_template(\"newword.html\")\r\n self.response.write(template.render(template_values))\r\n# posting the data to the database\r\n def post(self):\r\n self.response.headers[\"Content-Type\"] = \"text/html\"\r\n# checking whether the key is add word\r\n if self.request.get(\"newdevice\") == \"Add Word\":\r\n inputed_word = self.request.get(\"word\").lower()\r\n# checking whether the inputed word is null or not and redirecting to the add word paeg\r\n if Services().get_current_user() == None or inputed_word == None or inputed_word == \"\" :\r\n self.redirect(\"/newdevice\")\r\n return\r\n current_user_id = Services().get_current_user_id()\r\n sorted_key = Services().sorted_key(word =inputed_word)\r\n list_word = Anagram.query()\r\n list_word = list_word.filter(Anagram.anagram_key == sorted_key,Anagram.user_id == current_user_id)\r\n list_word = list_word.fetch()\r\n valid_permutation = Services().validpermutations(text=sorted_key)\r\n# checking whether the inputed word has valid permutations or not\r\n if len(valid_permutation) == 0:\r\n self.redirect(\"/newdevice\")\r\n return\r\n# checking whether the there are any word in the list or redirecting to the add word page\r\n if len(list_word) > 0:\r\n anagram = list_word[0]\r\n if inputed_word in anagram.input_words:\r\n self.redirect(\"/newdevice\")\r\n return\r\n\r\n 
inputed_words = anagram.input_words\r\n inputed_words.append(inputed_word)\r\n anagram.input_words = inputed_words\r\n anagram.input_words_count = anagram.input_words_count + 1\r\n anagram.put()\r\n\r\n else:\r\n\r\n new_anagram = Anagram(anagram_key=sorted_key, anagram_words = Services().permutations(text=sorted_key),\r\n input_words = [inputed_word],\r\n input_words_count = 1,\r\n word_length = len(inputed_word),\r\n user_id = current_user_id)\r\n new_anagram.put()\r\n self.redirect(\"/\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"newdevice.py","file_name":"newdevice.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"222068097","text":"from pprint import pprint\n\ngwiazdy = {}\ngwiazdy[1] = {'imie': 'John', 'nazwisko': 'Turturo',\n 'stawka': 100, 'status': 'mała stara gwiazdka'}\n\ngwiazdy[2] = {'imie': 'John', 'nazwisko': 'Rambo',\n 'stawka': 600, 'status': 'bombowa gwiazda'}\n\ngwiazdy[3] = {'imie': 'John', 'nazwisko':'Travolta',\n 'stawka': 300, 'status':'pilot'}\n\nposzukiwany = 'John Rambo'\n\nfor pracownik in gwiazdy.values():\n if pracownik['imie'] + ' ' + pracownik['nazwisko'] == poszukiwany:\n print(f'{pracownik[\"imie\"]} ma stawkę {pracownik[\"stawka\"]}')\n\n\npprint(gwiazdy)\n","sub_path":"dzien4_5_6/dzien4/pracownicy_dict.py","file_name":"pracownicy_dict.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"409439453","text":"from gtts import gTTS\r\nimport speech_recognition as sr\r\nfrom pygame import mixer\r\n\r\n\r\ndef talkToMe(audio):\r\n print(audio)\r\n tts = gTTS(text=audio, lang='pt-br')\r\n tts.save('audio.mp3')\r\n mixer.init()\r\n mixer.music.load('./audio.mp3')\r\n mixer.music.play()\r\n\r\n\r\ndef myCommand():\r\n r = sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n r.pause_threshold = 1\r\n r.adjust_for_ambient_noise(source, duration=1)\r\n audio = r.listen(source)\r\n\r\n try:\r\n command = r.recognize_google(audio, language='pt')\r\n print(\"You said: \" + command + \"\\n\")\r\n\r\n except sr.UnknownValueError:\r\n return sr.UnknownValueError\r\n\r\n return command\r\n\r\n\r\ntalkToMe(\"Olá, estou pronta!\")\r\n","sub_path":"model/VoiceRec.py","file_name":"VoiceRec.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"83002393","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 22 16:06:21 2015\n\n@author: melissaferrari\n\"\"\"\n\ndef simData():\n# This function is called as the argument for the simPoints function. \n timestep = 1.0 # Time between steps\n y = 0.0 # initial x and t\n x = 0.0\n t = 0\n for t in range(0,1000):# < 100:\n# t = t + timestep\n spaceObj.moveAll(timestep)\n x = []\n y = []\n angles = []\n for particle in spaceObj.particles:\n x.append(particle.x)\n y.append(particle.y)\n angles.append(particle.angle)\n \n yield x, y, angles, t # returns a generator - a generator is an iterator that you can only iterate over once. 
\n","sub_path":"simData.py","file_name":"simData.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"250718081","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 21 12:33:49 2019\n\n@author: jpelleti\n\"\"\"\nimport os.path\nimport pandas as pd\n\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\nfrom tkinter import Tk\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\n\n# Add columns Buy and Stock\ndef CheckStock(datafr, stockfr):\n stock_list = []\n buy_list = []\n \n for i in range (datafr.size):\n buy = datafr['Total Quantity'][i]\n stock = 0\n \n # Check if current item is in stock, if so,\n # calculate buying quantities\n for stock_item in stockfr.index:\n\n # Item found in stock data\n if (datafr.index[i] == stockfr['DPN'][stock_item]):\n # get production and stock quantities\n qty = datafr['Total Quantity'][i]\n stock = stockfr['Quantity'][stock_item]\n\n # if quantity is negative, we ship back the items\n # to the suppliers :D \n buy = qty - stock\n if (buy < 0):\n buy = 0\n \n #print ('----')\n #print (stock_item, stockfr['DPN'][stock_item],qty,stock,buy)\n #print ('yes')\n\n stock_list.append(stock)\n buy_list.append(buy)\n #print (i)\n\n # append the list to the dataframe\n datafr['Stock'] = stock_list\n datafr['Buy'] = buy_list\n return datafr\n#------------------------------------------------------\n# we don't want a full GUI, so keep the root window from appearing\nTk().withdraw()\n\n# Get the production file specifying the BOM files and quantities\n# to be produced\nprod_file = askopenfilename(title='Select production file')\n\n# Get the directory where the production file is.\nprod_dir = os.path.dirname(prod_file)\nos.chdir(prod_dir)\n\n# Read the production file\nprod_data = pd.read_excel(prod_file)\n\n# Make a list of BOM files, with the corresponding production\n# quantity and other data\nfiles = prod_data.loc[:,'BOM files']\nQties = prod_data.loc[:,'Qty']\nRefs = prod_data.loc[:,'Cust Ref']\n\n# Create an empty dataframe\ndf = pd.DataFrame()\n\n# Append all the BOMs together\nfor i in range(files.size):\n print ('Processing file: %s\\n',files[i])\n \n # Open each BOM files\n data = pd.read_excel(files[i])\n\n # Add refs and total quantities\n data['Refs'] = Refs[i]\n data['Total Quantity'] = data['Quantity'] * Qties[i]\n df = df.append(data, ignore_index=True, sort = False)\n \n# Once all merged, separate by suppliers \n#print ('separate suppliers')\ndf_Digikey = df[df.Dist == 'Digi-Key']\ndf_Newark = df[df.Dist == 'Newark']\ndf_Others = df[(df.Dist != 'Digi-Key') & (df.Dist != 'Newark')]\n\n# Combine quantities by DPN and compute the total quantity for each part\n#print ('Combining quantities')\ndf_D = df_Digikey.pivot_table(index = 'DPN',values = 'Total Quantity',aggfunc='sum')\ndf_N = df_Newark.pivot_table(index = 'DPN',values = 'Total Quantity',aggfunc='sum')\n\n# For others, we include the distributors\ndf_O = df_Others.pivot_table(index = ['Dist','DPN'],values = 'Total Quantity',aggfunc='sum')\n \n# Check if we have some stock already\nprint ('Check stock')\nstock_file = askopenfilename(title='Select stock file')\nif (stock_file != None):\n stock_data = pd.read_excel(stock_file)\n\n df_D_Buy = CheckStock(df_D,stock_data)\n df_N_Buy = CheckStock(df_N,stock_data)\n df_O_Buy = CheckStock(df_O,stock_data)\n \n# Basket is ready, save it\nfileout = asksaveasfilename(title = \"Save basket file\",filetypes = ((\"Excel 
files\",\"*.xlsx\"),(\"all files\",\"*.*\")))\n\n# Write resulting file\nwriter = ExcelWriter(fileout, engine='xlsxwriter')\n\nif (stock_file != None):\n df_D_Buy.to_excel(writer, sheet_name='Digi-Key')\n df_N_Buy.to_excel(writer, sheet_name='Newark')\n df_O_Buy.to_excel(writer, sheet_name='Others')\nelse:\n df_D.to_excel(writer, sheet_name='Digi-Key')\n df_N.to_excel(writer, sheet_name='Newark')\n df_O.to_excel(writer, sheet_name='Others')\n \nwriter.save()\n\nprint('Done\\n')","sub_path":"bom2basket.py","file_name":"bom2basket.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"170667209","text":"from __future__ import print_function\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom facedet.utils.optim import AdamW\nimport torch.backends.cudnn as cudnn\nimport argparse\nfrom torch.autograd import Variable\nimport torch.utils.data as data\n\nfrom dataset import AnnotationTransform, LandmarkAnnotationTransform, CenterFaceDataset, \\\n detection_collate_centerface, detection_collate_centerface_with_ldmk_coord, detection_collate_centerface_with_ldmk_heatmap, preproc_centerface\nfrom losses.centerface_losses import *\n\nimport time\nimport math\nfrom facedet.utils.misc import add_flops_counting_methods, flops_to_string, get_model_parameters_number\nfrom facedet.utils.bbox.fcos_target import FCOSBoxConverter, FCOSBoxTargetConverter\nfrom tensorboardX import SummaryWriter\nimport numpy as np\n\n\nwriter = SummaryWriter('./log/')\n\nparser = argparse.ArgumentParser(description='CenterFace Training')\nparser.add_argument('--cfg_file', default='./configs/centerface_ldmk.py', type=str, help='model config file')\nparser.add_argument('--training_dataset', default='/home/gyt/dataset/WIDER_ldmk', help='Training dataset directory')\nparser.add_argument('-b', '--batch_size', default=32, type=int, help='Batch size for training')\nparser.add_argument('--num_workers', default=32, type=int, help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')\nparser.add_argument('--ngpu', default=2, type=int, help='gpus')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--resume_net', default=None, help='resume net for retraining')\nparser.add_argument('--resume_epoch', default=0, type=int, help='resume iter for retraining')\nparser.add_argument('-max', '--max_epoch', default=300, type=int, help='max epoch for retraining')\nparser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')\nparser.add_argument('--use_tensorboard', dest='use_tensorboard', action='store_true', default=False)\nparser.add_argument('--optimizer', type=str, default='AdamW', choices=['SGD', 'AdamW'])\nparser.add_argument('--save_folder', default='./weights/xface/',\n help='Location to save checkpoint models')\nargs = parser.parse_args()\n\nfrom mmcv import Config\nimport logging\ncfg = Config.fromfile(args.cfg_file)\nlogging.basicConfig(filename='./log/train_{}_{}.log'.format(cfg['net_cfg']['net_name'], args.optimizer), level=logging.DEBUG)\nargs.save_folder = os.path.join(cfg['train_cfg']['save_folder'], args.optimizer)\nif not 
os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\nimport models\n\nnet = models.__dict__[cfg['net_cfg']['net_name']](phase='train', cfg=cfg['net_cfg'])\n\nrgb_means = (104, 117, 123)\nimg_dim = cfg['train_cfg']['input_size']\n\nbatch_size = args.batch_size\nweight_decay = args.weight_decay\ngamma = args.gamma\nmomentum = args.momentum\n\n# print(\"Printing net...\")\n# print(net)\n\ninput_size = (1, 3, img_dim, img_dim)\nimg = torch.FloatTensor(input_size[0], input_size[1], input_size[2], input_size[3])\nnet = add_flops_counting_methods(net)\nnet.start_flops_count()\nfeat = net(img)\nfaceboxes_flops = net.compute_average_flops_cost()\nprint('Net Flops: {}'.format(flops_to_string(faceboxes_flops)))\nprint('Net Params: ' + get_model_parameters_number(net))\n\nif args.resume_net is not None:\n print('Loading resume network...')\n state_dict = torch.load(args.resume_net)\n # create new OrderedDict that does not contain `module.`\n from collections import OrderedDict\n\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n head = k[:7]\n if head == 'module.':\n name = k[7:] # remove `module.`\n else:\n name = k\n new_state_dict[name] = v\n net.load_state_dict(new_state_dict)\n\nif args.ngpu > 1:\n net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))\n\nif args.cuda:\n net.cuda()\n cudnn.benchmark = True\n\nif args.optimizer == 'SGD':\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\nelif args.optimizer == 'AdamW':\n optimizer = AdamW(net.parameters(),\n lr=args.lr,\n betas=(0.9, 0.995),\n eps=1e-9,\n weight_decay=1e-5,\n correct_bias=False)\nelse:\n raise NotImplementedError('Please use SGD or Adamw as optimizer')\n\n\ncls_criterion = FocalLoss()\nwh_criterion = LogRegLoss()\nctr_criterion = RegLoss()\nldmk_criterion = RegLoss()\n\ndef train():\n cfg['net_cfg']['use_ldmk']=True\n net.train()\n epoch = 0 + args.resume_epoch\n print('Loading Dataset...')\n if cfg['net_cfg']['use_ldmk']:\n dataset = CenterFaceDataset(args.training_dataset,\n preproc_centerface(img_dim,\n rgb_means,\n use_ldmk=cfg['net_cfg']['use_ldmk'],\n ldmk_reg_type=cfg['net_cfg']['ldmk_reg_type']),\n LandmarkAnnotationTransform(),\n aug_type='FaceBoxes',\n use_ldmk=cfg['net_cfg']['use_ldmk'],\n ldmk_reg_type=cfg['net_cfg']['ldmk_reg_type'])\n else:\n dataset = CenterFaceDataset(args.training_dataset,\n preproc_centerface(img_dim, rgb_means, use_ldmk=cfg['net_cfg']['use_ldmk']),\n AnnotationTransform(),\n aug_type='FaceBoxes',\n use_ldmk=False)\n\n epoch_size = math.ceil(len(dataset) / args.batch_size)\n max_iter = args.max_epoch * epoch_size\n\n stepvalues = (200 * epoch_size, 250 * epoch_size)\n step_index = 0\n\n if args.resume_epoch > 0:\n start_iter = args.resume_epoch * epoch_size\n else:\n start_iter = 0\n\n for iteration in range(start_iter, max_iter):\n if iteration % epoch_size == 0:\n # create batch iterator\n if cfg['net_cfg']['use_ldmk']:\n if cfg['net_cfg']['ldmk_reg_type']=='coord':\n batch_iterator = iter(data.DataLoader(dataset,\n batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n collate_fn=detection_collate_centerface_with_ldmk_coord))\n if cfg['net_cfg']['ldmk_reg_type']=='heatmap':\n batch_iterator = iter(data.DataLoader(dataset,\n batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n collate_fn=detection_collate_centerface_with_ldmk_heatmap))\n else:\n batch_iterator = iter(data.DataLoader(dataset,\n batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n 
collate_fn=detection_collate_centerface))\n\n if (epoch % 5 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):\n torch.save(net.state_dict(), os.path.join(args.save_folder, 'CenterFace_epoch_' + repr(epoch) + '.pth'))\n epoch += 1\n\n load_t0 = time.time()\n if iteration in stepvalues:\n step_index += 1\n lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)\n\n # load train data\n if cfg['net_cfg']['use_ldmk']:\n if cfg['net_cfg']['ldmk_reg_type'] == 'coord':\n images, cls_targets, box_targets, ctr_targets, box_mask_targets, ldmk_targets, ldmk_mask_targets = next(batch_iterator)\n if cfg['net_cfg']['ldmk_reg_type'] == 'heatmap':\n images, cls_targets, box_targets, ctr_targets, box_mask_targets, ldmk_targets = next(batch_iterator)\n # import pdb\n # pdb.set_trace()\n else:\n images, cls_targets, box_targets, ctr_targets, box_mask_targets = next(batch_iterator)\n\n # print(images.size())\n # print(cls_targets.size())\n # print(box_targets.size())\n # print(ctr_targets.size())\n # print(reg_mask_targets.size())\n # print(ldmk_targets)\n # import pdb\n # pdb.set_trace()\n # box_targets[box_mask_targets]\n # print(box_targets[ldmk_mask_targets])\n # print(ldmk_targets[ldmk_mask_targets])\n # box_mask_targets.sum()\n # ldmk_mask_targets.sum()\n # continue\n\n load_t1 = time.time()\n # 写入writer\n if args.batch_size == 1 and args.use_tensorboard:\n img = images.squeeze(0).cpu().numpy().transpose(1, 2, 0) + rgb_means\n img = img.astype(np.uint8).copy()\n img = img.transpose(2, 0, 1).clip(0, 255)\n # import pdb\n # pdb.set_trace()\n\n pos_box_targets = box_targets[(cls_targets == 1).expand_as(box_targets)].view(-1, 2)\n # writer.add_image_with_boxes('Image_box', img, pos_box_targets, global_step=iteration, dataformats='CHW')\n writer.add_image('Image', img, global_step=iteration, dataformats='CHW')\n writer.add_image('cls_s8', cls_targets[0][0:6400].view(80, 80), global_step=iteration, dataformats='HW')\n # writer.add_image('ctr_s8', ctr_targets[0][0:6400].view(80, 80), global_step=iteration, dataformats='HW')\n writer.add_image('box_mask_s8', box_mask_targets[0][0:6400].view(80, 80).float(), global_step=iteration, dataformats='HW')\n writer.add_image('ldmk_mask_s8', ldmk_mask_targets[0][0:6400].view(80, 80).float(), global_step=iteration, dataformats='HW')\n # writer.add_image('cls0', cls_targets[0][0:256].view(16, 16), global_step=iteration, dataformats='HW')\n # writer.add_image('ctr0', ctr_targets[0][0:256].view(16, 16), global_step=iteration, dataformats='HW')\n print('iteration:', iteration)\n continue\n\n if args.cuda:\n images = Variable(images.cuda())\n box_targets = Variable(box_targets.cuda())\n cls_targets = Variable(cls_targets.cuda())\n ctr_targets = Variable(ctr_targets.cuda())\n box_mask_targets = Variable(box_mask_targets.cuda())\n if cfg['net_cfg']['use_ldmk']:\n if cfg['net_cfg']['ldmk_reg_type'] == 'coord':\n ldmk_mask_targets = Variable(ldmk_mask_targets.cuda())\n ldmk_targets = Variable(ldmk_targets.cuda())\n else:\n images = Variable(images)\n box_targets = Variable(box_targets)\n cls_targets = Variable(cls_targets)\n ctr_targets = Variable(ctr_targets)\n box_mask_targets = Variable(box_mask_targets)\n if cfg['net_cfg']['use_ldmk']:\n if cfg['net_cfg']['ldmk_reg_type'] == 'coord':\n ldmk_mask_targets = Variable(ldmk_mask_targets)\n ldmk_targets = Variable(ldmk_targets)\n\n if cfg['net_cfg']['use_ldmk']:\n (box_preds, cls_preds, ctr_preds, ldmk_preds) = net(images)\n else:\n (box_preds, cls_preds, ctr_preds) = net(images)\n\n # 
backprop\n optimizer.zero_grad()\n\n loss_wh = wh_criterion(pred=box_preds, mask=box_mask_targets, target=box_targets)\n # loss_ctr = ctr_criterion(pred=ctr_preds, mask=box_mask_targets, target=ctr_targets)\n loss_cls = cls_criterion(cls_preds, cls_targets)\n if cfg['net_cfg']['use_ldmk']:\n # import pdb\n # pdb.set_trace()\n # torch.Size([1, 8400])\n # (ldmk_targets != -1).all(dim=1)\n # ldmk_preds[ldmk_mask.long()].size()\n\n\n # ldmk_mask = (ldmk_targets!=-1)[:, :, 0]\n\n # ldmk_mask_targets = (cls_targets==1)[:, :, 0]\n if cfg['net_cfg']['ldmk_reg_type'] == 'coord':\n loss_ldmk = ldmk_criterion(pred=ldmk_preds, mask=ldmk_mask_targets, target=ldmk_targets)\n if cfg['net_cfg']['ldmk_reg_type'] == 'heatmap':\n loss_ldmk = ldmk_criterion(pred=ldmk_preds, mask=None, target=ldmk_targets)\n\n\n loss = cfg['train_cfg']['wh_weight'] * loss_wh \\\n + cfg['train_cfg']['cls_weight'] * loss_cls \\\n + cfg['train_cfg']['ldmk_weight'] * loss_ldmk\n # + cfg['train_cfg']['ctr_weight'] * loss_ctr \\\n\n\n loss.backward()\n optimizer.step()\n load_t2 = time.time()\n if iteration % 1 == 0:\n if cfg['net_cfg']['use_ldmk']:\n print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size)\n + '/' + repr(epoch_size) \\\n + ' || LOC: %.3f CLS: %.3f LDMK: %.3f||' % (\n cfg['train_cfg']['wh_weight'] * loss_wh.item(),\n cfg['train_cfg']['cls_weight'] * loss_cls.item(),\n # cfg['train_cfg']['ctr_weight'] * loss_ctr.item(),\n cfg['train_cfg']['ldmk_weight'] * loss_ldmk.item())\n + 'Batch time: %.4f sec. ||' % (load_t1 - load_t0)\n + 'Process time: %.4f sec. ||' % (load_t2 - load_t1)\n + 'LR: %.8f' % (lr))\n logging.info('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size)\n + '/' + repr(epoch_size) \\\n + ' || LOC: %.3f CLS: %.3f LDMK: %.3f||' % (\n cfg['train_cfg']['wh_weight'] * loss_wh.item(),\n cfg['train_cfg']['cls_weight'] * loss_cls.item(),\n # cfg['train_cfg']['ctr_weight'] * loss_ctr.item(),\n cfg['train_cfg']['ldmk_weight'] * loss_ldmk.item())\n + 'Batch time: %.4f sec. ||' % (load_t1 - load_t0)\n + 'Process time: %.4f sec. ||' % (load_t2 - load_t1)\n + 'LR: %.8f' % (lr))\n else:\n print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size)\n + '/' + repr(epoch_size) \\\n + ' || LOC: %.3f CLS: %.3f ||' % (\n cfg['train_cfg']['wh_weight'] * loss_wh.item(),\n cfg['train_cfg']['cls_weight'] * loss_cls.item())\n + 'Batch time: %.4f sec. ||' % (load_t1 - load_t0)\n + 'Process time: %.4f sec. ||' % (load_t2 - load_t1)\n + 'LR: %.8f' % (lr))\n logging.info('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size)\n + '/' + repr(epoch_size) \\\n + ' || LOC: %.3f CLS: %.3f ||' % (\n cfg['train_cfg']['wh_weight'] * loss_wh.item(),\n cfg['train_cfg']['cls_weight'] * loss_cls.item())\n # cfg['train_cfg']['ctr_weight'] * loss_ctr.item())\n + 'Batch time: %.4f sec. ||' % (load_t1 - load_t0)\n + 'Process time: %.4f sec. 
||' % (load_t2 - load_t1)\n + 'LR: %.8f' % (lr))\n\n torch.save(net.state_dict(), os.path.join(args.save_folder, 'Final_CenterFace.pth'))\n\n\ndef adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n \"\"\"Sets the learning rate\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n if epoch < 0:\n lr = 1e-6 + (args.lr - 1e-6) * iteration / (epoch_size * 5)\n else:\n lr = args.lr * (gamma ** (step_index))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\nif __name__ == '__main__':\n train()\n","sub_path":"FlashNet/facedet/apis/trainers/deprecated/train_centerface_with_ldmk.py","file_name":"train_centerface_with_ldmk.py","file_ext":"py","file_size_in_byte":16607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"67238539","text":"from clustering.cluster_fascicles import *\n\nmain_folder = subj_folder\nshow_cluster = True\nfascicle = 'SLF_L_mct001rt20'\nmethods = ['agglomerative', 'kmeans']\nmethod = methods[1]\nfor fol, n in zip(all_subj_folders, all_subj_names):\n folder_name = main_folder + fol\n if not any(fi.startswith(f'rdti_fa') for fi in os.listdir(folder_name)):\n print('Moving on!')\n continue\n\n print(f'++++++Starting to compute models for {n}++++++')\n nii_file = load_dwi_files(folder_name)[5]\n file_list = os.listdir(folder_name + r'\\streamlines')\n for file in file_list:\n if fascicle in file and '.trk' in file:\n fascicle_file_name = pjoin(folder_name + r'\\streamlines', file)\n s_list = load_trk(fascicle_file_name, \"same\", bbox_valid_check=False)\n masked_streamlines = s_list.streamlines\n break\n streamlines, vec_vols = streamline_mean_fascicle_value_weighted(folder_name, n, nii_file, fascicle,\n masked_streamlines, weight_by='rdti_fa')\n dist_method = 'mam'\n tracts_num = streamlines.__len__()\n\n #X = clustering_input(dist_method, tracts_num, streamlines, vec_vols)\n\n # show_23456_groups(method,streamlines,folder_name,X,fascicle)\n\n g = [3]\n for i in g:\n # model = compute_clustering_model(method, X, i)\n #save_model(model, i, folder_name, method, fascicle)\n model = load_model(i,folder_name,method, fascicle)\n weighted_clusters(model,streamlines,vec_vols, folder_name, file_name = 'clustered_'+fascicle+'_'+str(n))\n","sub_path":"cluster_fa_script.py","file_name":"cluster_fa_script.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"106516757","text":"#coding=utf-8\n\nfrom basic import BasicCtrl\n\nclass VoiceCtrl(BasicCtrl):\n def post(self):\n if not self.human_valid():\n self.flash(0, {'msg': '验证码错误'})\n return\n\n post = self.model('posts').get_by_pid(self.dbase('posts'), self.input('p'))\n if not post:\n self.flash(0, {'msg': '文章不存在'})\n return\n\n rank = '0'\n usid = '0'\n if self.input('auth', False) and self.current_user:\n if self.model('admin').chk_user_is_live(self.current_user):\n rank = self.get_runtime_conf('posts_talks_min_rank')\n usid = self.current_user['user_id']\n name = self.current_user['user_name']\n mail = self.current_user['user_mail']\n else:\n name = self.input('name')\n mail = self.input('mail')\n\n text = self.input('text')\n time = self.stime()\n\n con_talks = self.dbase('talks')\n cur_talks = con_talks.cursor()\n cur_talks.execute('insert into talks (post_id, user_ip, user_id, user_name, user_mail, talk_text, talk_rank, talk_ctms, talk_utms) values (?, 
?, ?, ?, ?, ?, ?, ?, ?)', \\\n (post['post_id'], self.request.remote_ip, usid, name, mail, text, rank, time, time))\n con_talks.commit()\n cur_talks.close()\n\n if cur_talks.lastrowid:\n con_posts = self.dbase('posts')\n cur_posts = con_posts.cursor()\n cur_posts.execute('update posts set post_refc = post_refc + 1 where post_id = ?', (post['post_id'],))\n con_posts.commit()\n cur_posts.close()\n\n if float(rank) > 0:\n self.flash(1, {'msg': '评论发表成功'})\n else:\n self.flash(1, {'msg': '当前评论内容暂不公开'})\n return\n self.flash(0)\n","sub_path":"www.luokr.com/app/ctrls/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"42103803","text":"# -*- coding:utf-8 -*-\n\nimport requests\nimport pandas as pd\nimport json\n#import arrow\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\nclass GetTrainNums(object):\n \n def __init__(self,from_station,to_station,date):\n self.from_station = from_station\n self.to_station = to_station\n self.date = date\n\n\n def get_train_text(self):\n # para = para\n r = requests.get('https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT&queryDate='+self.date+\\\n '&from_station='+self.from_station +'&to_station=' + self.to_station,\n verify=False).text\n return r\n\n def get_train_no(self):\n js = self.get_train_text()\n js2 = json.loads(js)\n data = js2['data']['datas']\n df = pd.DataFrame(data)\n df_use = df[[u'train_no',\n u'station_train_code',\n u'from_station_name',\n u'to_station_name',\n u'start_time',\n u'swz_num',\n u'tz_num',\n u'zy_num',\n u'ze_num',\n u'wz_num',\n ]]\n df_use = df_use[df_use[u'station_train_code'].str.startswith('G') | \\\n df_use[u'station_train_code'].str.startswith('D')]\n df_use.replace('--',0,inplace=True)\n df_use.replace(u'无',0,inplace=True)\n return df_use[u'train_no']\n\n\nclass GetStationNames(object): \n def __init__(self,from_station,to_station,date,train_num,out_csv):\n self.from_station = from_station\n self.to_station = to_station\n self.date = date\n self.train_num = train_num\n self.out_csv = out_csv\n\n def get_stations_text(self):\n r = requests.get('https://kyfw.12306.cn/otn/czxx/queryByTrainNo?train_no='+self.train_num+\\\n '&from_station_telecode='+self.from_station+\\\n '&to_station_telecode='+self.from_station+'&depart_date='+self.date,\n verify=False).text\n return r\n\n def get_stations_names(self): \n js1 = self.get_stations_text()\n\n js2 = json.loads(js1)\n data = js2['data']['data']\n df = pd.DataFrame(data)\n try:\n df = df[[\"station_name\",]]\n df_use = pd.DataFrame({'train_no':[''],'station_name':['']})\n df_use['train_no'] = self.train_num\n df_use['station_name'] = df_use['station_name'].apply(lambda x: df[\"station_name\"].values)\n \n df_use.to_csv(self.out_csv, mode = 'a',index=False,header=False,encoding='utf-8')\n print(self.train_num)\n except:\n pass\n\n\n\nif __name__ == '__main__':\n from_station = 'SHH'\n to_station = 'NJH'\n date= '2016-12-29'\n out_csv = './data/'+from_station+'-'+to_station+'-train_names.csv'\n \n js = GetTrainNums(from_station,to_station,date)\n train_nums = js.get_train_no()\n \n for train_num in train_nums:\n stations = GetStationNames(from_station,to_station,date,train_num.strip(),out_csv)\n # js = stations.get_stations_names(train_num.strip())\n stations.get_stations_names()\n 
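# A hedged sketch (not part of the original file) of how the bare `except: pass` in
# get_stations_names() above could be narrowed so real failures are not silently
# swallowed; the exception types are assumptions about what json.loads and the
# pandas column selection may raise on a malformed 12306 response:
def parse_station_names_sketch(js1, train_num):
    import json
    import pandas as pd
    try:
        data = json.loads(js1)['data']['data']
        return pd.DataFrame(data)[['station_name']]
    except (KeyError, TypeError, ValueError) as exc:
        # Keep going, but say which train was skipped and why.
        print('skipping train {}: {}'.format(train_num, exc))
        return None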
print('finished')","sub_path":"A0-ch06-综合研究—基于高铁余票的客流行为特征分析/py/002get_stations.py","file_name":"002get_stations.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"197991130","text":"#! /usr/bin/env python3\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom sklearn.datasets import load_svmlight_file\n\ntrain_x, train_y = load_svmlight_file('data.txt')\n\n# Returns a short sequential model\ndef create_model():\n model = tf.keras.models.Sequential([\n keras.layers.Dense(8, activation='relu', input_shape=(2,)),\n # keras.layers.Dense(128, activation='relu'),\n # keras.layers.Dropout(0.2),\n keras.layers.Dense(2, activation='softmax')\n ])\n\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\n# include the epoch in the file name. (uses `str.format`)\ncheckpoint_path = \"training/cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n checkpoint_path, \n verbose=0, \n save_weights_only=True, \n period=1)\n\nmodel = create_model()\nmodel.save_weights(checkpoint_path.format(epoch=0))\nmodel.fit(\n train_x,\n train_y, \n epochs = 500, \n callbacks = [cp_callback]\n)\n\nmodel.save('nn.h5')","sub_path":"nn-visualization-tests/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"571996244","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'basic_app'\n\nurlpatterns = [\n url(r'^$',views.ColegioLista.as_view(),name='list'),\n # url(r'^(?P[-\\w]+)/$',views.ColegioDetalle.as_view(),name='details'),\n url(r'^(?P\\d+)/$',views.ColegioDetalle.as_view(),name='details'),\n url(r'^create/$',views.ColegioCreateView.as_view(),name='create'),\n url(r'^update/(?P\\d+)/$',views.ColegioUpdateView.as_view(),name='update'),\n url(r'^delete/(?P\\d+)/$',views.ColegioDeleteView.as_view(),name='delete '),\n]\n","sub_path":"Django_Avanzado/CBV/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"306190434","text":"#058: Edit Distance Alignment\n#http://rosalind.info/problems/edta/\n\n#Given: Two protein strings s and t in FASTA format (with each string having length at most 1000 aa).\n\ns = 'PRETTY'\nt = 'PRTTEIN'\n\n#If parsing from file:\nimport bio\nf = open('rosalind_edta.txt', 'r')\ncontents = f.read()\ntitles, sequences = bio.fastaParse(contents)\ns,t = sequences\n\n#Return: The edit distance dE(s,t) followed by two augmented strings s' and t' representing an optimal alignment of s and t.\n\n\ndef editDistance(s,t):\n\td = [[0 for j in range(len(s)+1)] for i in range(len(t)+1)]\n\n\t# initialising:\n\tfor j in range(len(d[0])):\n\t\td[0][j] = j\n\t\n\tfor i in range(len(d)):\n\t\td[i][0] = i\n\n\tfor i in range(1,len(d)):\n\t\tfor j in range(1, len(d[0])):\n\t\t\tif s[j-1] == t[i-1]:\n\t\t\t\td[i][j] = d[i-1][j-1]\n\t\t\telse:\n\t\t\t\td[i][j] = min([\n\t\t\t\t\td[i-1][j] + 1, # deletion\n\t\t\t\t\td[i][j-1] + 1, # insertion\n\t\t\t\t\td[i-1][j-1] + 1 # substitution\n\t\t\t\t\t])\n\treturn d\n\ned = editDistance(s,t)\nedit_distance = ed[len(t)][len(s)]\n\n\ndef traceback(ed, s, t):\n\ts_prime = s[:]\n\tt_prime = t[:]\n\ti = len(ed) - 1\n\tj = len(ed[0]) - 1\n\n\twhile not(i == 0 and j == 0):\n\t\tcurrent_value = ed[i][j]\n\t\tif ed[i][j-1] == (current_value - 1):\n\t\t\tj -= 1\n\t\t\tt_prime = t_prime[:i] + '-' + t_prime[i:]\n\t\telif ed[i-1][j] == (current_value - 1):\n\t\t\ti -= 1\n\t\t\ts_prime = s_prime[:j] + '-' + s_prime[j:]\n\t\telse:\n\t\t\tj -= 1\n\t\t\ti -= 1\n\t\n\treturn s_prime, t_prime\n\n\ns_prime, t_prime = traceback(ed, s, t)\n\n#print s_prime\n#print t_prime\n\n#If writing to file:\nw = open('rosalind_edta_output.txt', 'w')\nw.write(str(edit_distance))\nw.write('\\n')\nw.write(s_prime)\nw.write('\\n')\nw.write(t_prime)\nw.close()\n","sub_path":"bioinformatics/058_edit_distance_alignment.py","file_name":"058_edit_distance_alignment.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"837864","text":"from django.db.models import Q\nfrom django import http\nfrom django.utils import html\nfrom dal import autocomplete\nfrom peeringdb_server.models import (InternetExchange, Facility,\n NetworkFacility, InternetExchangeFacility,\n Organization, IXLan, CommandLineTool)\n\nfrom peeringdb_server.admin_commandline_tools import TOOL_MAP\n\n\nclass AutocompleteHTMLResponse(autocomplete.Select2QuerySetView):\n def has_add_permissions(self, request):\n return False\n\n def render_to_response(self, context):\n q = self.request.GET.get('q', None)\n return http.HttpResponse(\"\".join(\n [i.get(\"text\") for i in self.get_results(context)]),\n content_type=\"text/html\")\n\n\nclass ExchangeAutocompleteJSON(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = 
InternetExchange.objects.filter(status=\"ok\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n qs = qs.order_by('name')\n return qs\n\n\nclass ExchangeAutocomplete(AutocompleteHTMLResponse):\n def get_queryset(self):\n qs = InternetExchange.objects.filter(status=\"ok\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n qs = qs.order_by('name')\n return qs\n\n def get_result_label(self, item):\n return u'
<span data-value=\"%d\"><div class=\"main\">%s</div></span>
' % (\n item.pk, html.escape(item.name))\n\n\nclass FacilityAutocompleteJSON(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = Facility.objects.filter(status=\"ok\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n qs = qs.order_by('name')\n return qs\n\n\nclass FacilityAutocomplete(AutocompleteHTMLResponse):\n def get_queryset(self):\n qs = Facility.objects.filter(status=\"ok\")\n if self.q:\n qs = qs.filter(\n Q(name__icontains=self.q) | Q(address1__icontains=self.q))\n qs = qs.order_by('name')\n return qs\n\n def get_result_label(self, item):\n return u'
<span data-value=\"%d\"><div class=\"main\">%s</div><div class=\"sub\">%s</div></span>
' % (\n item.pk, html.escape(item.name), html.escape(item.address1))\n\n\nclass FacilityAutocompleteForNetwork(FacilityAutocomplete):\n def get_queryset(self):\n qs = super(FacilityAutocompleteForNetwork, self).get_queryset()\n net_id = self.request.resolver_match.kwargs.get(\"net_id\")\n fac_ids = [\n nf.facility_id\n for nf in NetworkFacility.objects.filter(status=\"ok\",\n network_id=net_id)\n ]\n qs = qs.exclude(id__in=fac_ids)\n return qs\n\n\nclass FacilityAutocompleteForExchange(FacilityAutocomplete):\n def get_queryset(self):\n qs = super(FacilityAutocompleteForExchange, self).get_queryset()\n ix_id = self.request.resolver_match.kwargs.get(\"ix_id\")\n fac_ids = [\n nf.facility_id\n for nf in InternetExchangeFacility.objects.filter(\n status=\"ok\", ix_id=ix_id)\n ]\n qs = qs.exclude(id__in=fac_ids)\n return qs\n\n\nclass OrganizationAutocomplete(AutocompleteHTMLResponse):\n def get_queryset(self):\n qs = Organization.objects.filter(status=\"ok\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n qs = qs.order_by('name')\n return qs\n\n def get_result_label(self, item):\n return u'
<span data-value=\"%d\"><div class=\"main\">%s</div></span>
' % (\n item.pk, html.escape(item.name))\n\n\nclass IXLanAutocomplete(AutocompleteHTMLResponse):\n def get_queryset(self):\n qs = IXLan.objects.filter(status=\"ok\").select_related(\"ix\")\n if self.q:\n qs = qs.filter(\n Q(ix__name__icontains=self.q)\n | Q(ix__name_long__icontains=self.q))\n qs = qs.order_by('ix__name')\n return qs\n\n def get_result_label(self, item):\n return u'
<span data-value=\"%d\"><div class=\"main\">%s <div class=\"tiny suffix\">%s</div></div><div class=\"sub\">%s</div><div class=\"sub\">%s</div></span>
' % (\n item.pk, html.escape(item.ix.name),\n html.escape(item.ix.country.code), html.escape(item.ix.name_long),\n html.escape(item.name))\n\n\nclass CommandLineToolHistoryAutocomplete(autocomplete.Select2QuerySetView):\n \"\"\"\n Autocomplete for command line tools that were ran via the admin ui\n \"\"\"\n tool = \"\"\n\n def get_queryset(self):\n # Only staff needs to be able to see these\n if not self.request.user.is_staff:\n return []\n qs = CommandLineTool.objects.filter(\n tool=self.tool).order_by(\"-created\")\n if self.q:\n qs = qs.filter(description__icontains=self.q)\n return qs\n\n def get_result_label(self, item):\n return (item.description or self.tool)\n\n\nclt_history = {}\n# class for each command line tool wrapper that we will map to an auto-complete\n# url in urls.py\nfor tool_id, tool in TOOL_MAP.items():\n\n class ToolHistory(CommandLineToolHistoryAutocomplete):\n tool = tool_id\n\n ToolHistory.__name__ = \"CLT_{}_Autocomplete\".format(tool_id)\n clt_history[tool_id] = ToolHistory\n","sub_path":"peeringdb_server/autocomplete_views.py","file_name":"autocomplete_views.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"196678130","text":"import rospy\nfrom std_msgs.msg import Empty\nfrom geometry_msgs.msg import Twist\nfrom drone_status import DroneStatus\nfrom ardrone_autonomy.msg import Navdata\n\nclass Drone_movements():\n\tdef __init__(self):\n\t\tself.pub_takeoff = rospy.Publisher(\"ardrone/takeoff\", Empty, queue_size=10)\n\t\tself.pub_land = rospy.Publisher(\"ardrone/land\", Empty, queue_size=10)\n\t\tself.pub_twist = rospy.Publisher(\"cmd_vel\", Twist, queue_size=10)\n\t\tself.subNavdata = rospy.Subscriber('/ardrone/navdata',Navdata,self.ReceiveNavdata)\n\t\tself.Pitch = 0\n\t\tself.Roll = 0\n\t\tself.Yaw_velocity = 0 \n\t\tself.Z_velocity = 0\n\t\tself.rate = rospy.Rate(20)\n\t\tself.command = Twist()\n\t\tself.status = -1\n\tdef ReceiveNavdata(self,navdata):\n\t\tself.status = navdata.state\n\tdef SetCommand(self,roll,pitch,yaw_velocity,z_velocity):\n\t\tself.command.linear.x = pitch\n\t\tself.command.linear.y = roll\n\t\tself.command.linear.z = z_velocity\n\t\tself.command.angular.z = yaw_velocity\n\t\treturn\n\t\n\tdef move_left(self):\n\t\t#self.Yaw_velocity = -1\n\t\tself.Roll = -0.5\n\t\tself.SetCommand(self.Roll, self.Pitch, self.Yaw_velocity, self.Z_velocity)\n\t\tself.pub_twist.publish(self.command)\n\t\treturn\n\tdef move_right(self):\n\t\t#self.Yaw_velocity = 1\n\t\tself.Roll = 0.5\n\t\tself.SetCommand(self.Roll, self.Pitch, self.Yaw_velocity, self.Z_velocity)\n\t\tself.pub_twist.publish(self.command)\n\t\treturn\n\tdef hower(self):\n\t\tself.SetCommand(0,0,0,0)\n\t\tself.pub_twist.publish(self.command)\n\t\treturn\n\tdef leftright(self):\n\t\tLcount = 0\n\t\tRcount = 0\n\t\tBegin = rospy.get_time()\n\t\tstart_time = rospy.get_time()\n\t\twhile not rospy.is_shutdown():\n\t\t\tif(self.status == DroneStatus.Landed):\n\t\t\t\tprint(\"Hit\")\n\t\t\t\tself.pub_takeoff.publish(Empty())\n\t\t\t\tfor i in range(1,50):\n\t\t\t\t\tprint(\"Ok\")\n\t\t\telif rospy.get_time() <= start_time+1.2:\n\t\t\t\tself.move_right()\n\t\t\t\tLcount += 1\n\t\t\telif rospy.get_time() > start_time+1.2 and rospy.get_time() < Begin+10:\t\n\t\t\t\tself.hower()\n\t\t\t\tself.move_left()\n\t\t\t\tRcount += 1\n\t\t\t\tif rospy.get_time() >= start_time+2.0:\n\t\t\t\t\tstart_time = rospy.get_time()\n\t\t\tif rospy.get_time() > Begin+13:\n\t\t\t\tprint(rospy.get_time()) 
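# Time is up: send the hover command twice, publish the land message, then report the left/right command counts.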
\n\t\t\t\tprint(Begin+10)\n\t\t\t\tself.hower()\n\t\t\t\tself.hower()\n\t\t\t\tself.pub_land.publish(Empty())\n\t\t\t\tprint(\"Lcount \",Lcount)\n\t\t\t\tprint(\"Rcount \",Rcount)\n\t\t\t\tbreak\n\t\t\tself.rate.sleep()\n\t\t\t\t\nif __name__=='__main__':\n\timport sys\n\trospy.init_node('ardrone', anonymous=True)\n\tdrone = Drone_movements()\n\tdrone.leftright()\n\trospy.spin()\n\tsys.exit()\n","sub_path":"water_wave.py","file_name":"water_wave.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"386505830","text":"import webapp2\nimport json_utils\nimport jinja2\nimport os\n\nimport gae_data\n\ntemplate_path = os.path.join(\n os.path.dirname(__file__),\n \"templates\"\n)\n\njinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(template_path)\n)\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n template = jinja_environment.get_template('chart.html')\n self.response.out.write(template.render())\n\nclass GetWeights(webapp2.RequestHandler):\n def get(self):\n\n weight_data = gae_data.get_weights()\n\n self.response.headers.add_header('Content-Type', 'application/json')\n self.response.headers.add_header(\"Access-Control-Allow-Origin\", \"*\")\n self.response.write(json_utils.get_weight_table(weight_data))\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/weight/', GetWeights)\n], debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"609222511","text":"\ndef write_log(message, status, file_path):\n import datetime\n log_time = datetime.datetime.now()\n log_time = log_time.strftime('%Y-%m-%d %H:%M:%S')\n if status == 'e' or status == 'error' or status == 'ERROR':\n s = '[ERROR]'\n mess_str = s + '[' + log_time + '] ' + message + '\\n'\n with open(file_path, 'a') as f:\n f.write(mess_str)\n\n if status == 'i' or status == 'info' or status == 'INFO':\n s = '[INFO]'\n mess_str = s + '[' + log_time + '] ' + message + '\\n'\n with open(file_path, 'a') as f:\n f.write(mess_str)","sub_path":"mylogs/modscript.py","file_name":"modscript.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"148806161","text":"# -*- coding: utf-8 -*- import sys reload(sys) sys.setdefaultencoding('utf-8')\n\n__author__ = \"anton\"\n__date__ = \"$18.08.2014 22:51:24$\"\n\nfrom initdata import tom\nimport shelve\n\ndb = shelve.open( 'people-shelve' )\nsue = db['sue'] # извлекает объект sue\nsue['pay'] *= 1.50\ndb['sue'] = sue # изменяет объект sue\ndb['tom'] = tom # добавляет новую запись\ndb.close()","sub_path":"db_shelve/update_db_shelve.py","file_name":"update_db_shelve.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"209226626","text":"from django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport sys\n\ncaps = DesiredCapabilities().FIREFOX\ncaps[\"marionette\"] = False\n\n\nclass FunctionalTest(StaticLiveServerTestCase):\n\n @classmethod\n def setUpClass(cls):\n for arg in sys.argv:\n if 'liveserver' in arg:\n cls.server_url = 'http://' + arg.split('=')[1]\n cls.live_server_url = ''\n return\n super().setUpClass()\n 
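# No --liveserver argument was supplied: fall back to the transient server that Django's StaticLiveServerTestCase starts.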
cls.server_url = cls.live_server_url\n\n @classmethod\n def tearDownClass(cls):\n if cls.server_url == cls.live_server_url:\n super().tearDownClass()\n\t\n def setUp(self):\n #C:\\Users\\imanv\\AppData\\Local\\Programs\\Python\\Python36-32\\Scripts\n #self.browser = webdriver.Firefox(capabilities=caps, executable_path='/mnt/c/users/imanv/appdata/local/programs/python/\"Python36-32\"/scripts/geckodriver.exe')\n self.browser = webdriver.Firefox(capabilities=caps, executable_path=\"C:\\\\Users\\\\imanv\\\\AppData\\\\Local\\\\Programs\\\\Python36-32\\\\Scripts\\\\geckodriver.exe\")\n self.browser.implicitly_wait(3)\n\n def tearDown(self):\n self.browser.quit()\n\n def check_for_row_in_list_table(self, row_text):\n table = self.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertIn(row_text, [row.text for row in rows])\n\n def get_item_input_box(self):\n return self.browser.find_element_by_id('id_text')\n","sub_path":"functional_tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"270736643","text":"#!/usr/bin/env python\n#coding:utf8\n\nimport queue\nimport threading\nclass Worker(threading.Thread):\n\tdef __init__(self,work_queue,word,number):\n\t\tsuper().__init__()\n\t\tself.work_queue=work_queue\n\t\tself.word=word\n\t\tself.number=number\n\tdef run(self):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tfilename=self.work_queue.get()\n\t\t\t\tself.process(filename)\n\t\t\tfinally:\n\t\t\t\tself.work_queue.task_done()\ndef main():\n\topts,word,args=parse_options()\n\tfilelist=get_files(args,opts.recurse)\n\twork_queue=queue.Queue()\n\tfor i in range(opts.count):\n\t\tnumber=\"{0}: \".format(i+1) if opts.debug else \"\"\n\t\tworker=Worker(work_queue,word,number)\n\t\tworker.daemon=True\n\t\tworker.start()\n\n\tfor filename in filelist:\n\t\twork_queue.put(filename)\n\twork_queue.join()\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"src/grepword-t.py","file_name":"grepword-t.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"239591086","text":"from . 
import views\nfrom django.conf.urls import include, url\n# Create your views here.\n\napp_name='app'\n\nurlpatterns = [\n url(r'^lista_clientes$', views.lista_clientes, name = \"lista_clientes\"),\n url(r'^$', views.menu_principal, name = \"menu\"),\n url(r'^registrar_tecnico',include('accounts.urls')),\n url(r'^registrar_cliente',views.registrar_cliente, name=\"registrar_cliente\"),\n url(r'^asignar_tecnico',views.asignar_tecnico, name=\"asignar_tecnico\"),\n url(r'^(?P[\\w]+)/$', views.ordenes_cliente, name=\"datos_cliente\"),\n url(r'^(?P[\\w]+)/(?P[\\w]+)$', views.detalle_orden, name=\"detalle_orden\"),\n url(r'^(?P[\\w]+)/(?P[\\w]+)$', views.detalle_orden, name=\"detalle_orden\"),\n url(r'^registrar_orden', views.registrar_orden, name=\"registrar_orden\")\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"191915082","text":"import numpy as np\nimport sympy as sp\nimport scipy.optimize as opt\nfrom scipy.linalg import block_diag\nfrom mechanics import dynamics, kinematics\nimport matplotlib.pyplot as plt\n\n# Model Predictive Control : Currently, the prediction and control horizon are set the same with N\n# (x_N-x_r)^T P (x_N-x_r) + \\sum_{k=0}^{N-1} (x_k-x_r)^T Q (x_k-x_r) + u_k^T R u_k \\\\\n# subject to & x_{k+1} = A x_k + B u_k \\\\\n# x_{min} <= x_k <= x_{max} \\\\\n# u_{min} <= u_k <= u_{max} \\\\\n# x_0 = x_bar\n# This code is valid for Linear Systems\n\n# TODO:\n# 1. Code for adding control horizon\n# 2. Add jacobian in optimization to speed up\n\n\nclass mpc_opt():\n\n def __init__(self, A=None, B=None, C=None, Q=None, R=None, P=None,\n xl=None, xh=None, ul=None, uh=None, N=4, x0=None, time=None, ref_traj=None):\n\n if not isinstance(A, (list, tuple, np.ndarray)):\n self.A = np.array([[-0.79, -0.3, -0.1], [0.5, 0.82, 1.23], [0.52, -0.3, -0.5]])\n self.B = np.array([[-2.04, -0.21], [-1.28, 2.75], [0.29, -1.41]])\n self.C = np.array([[0, 1, 0]])\n\n self.P = 4 # terminal state penalty\n self.Q = 5\n if isinstance(P, (list, tuple, np.ndarray)):\n self.P = self.C.transpose() @ self.P @ self.C\n self.P_dd = self.C.transpose() @ self.P\n self.Q = self.C.transpose() @ self.Q @ self.C\n self.Q_dd = self.C.transpose() @ self.Q\n else:\n self.P = self.P * self.C.transpose() @ self.C\n self.P_dd = self.P * self.C.transpose()\n self.Q = self.Q * self.C.transpose() @ self.C\n self.Q_dd = self.Q * self.C.transpose()\n self.R = np.eye(2)\n\n self.t = np.linspace(0, 1, 50)\n\n self.ul, self.uh, self.xl, self.xh = np.array([[-2.], [-3.]]), np.array([[2.], [3.]]), np.array([[-10.], [-9.], [-8.]]), np.array([[10.], [9.], [8.]])\n self.x0 = np.array([[0.0], [0.0], [0.0]])\n self.N = 3 # # Prediction horizon\n self.y_ref = np.array([[0], [2], [0]])\n\n else:\n # x_dot = A x + B u\n self.A = A # linear system dynamics matrix\n self.B = B # input matrix\n self.C = C # output matrix\n\n if isinstance(P, (list, tuple, np.ndarray)):\n self.P = C.transpose() @ P @ C\n self.P_dd = C.transpose() @ P\n self.Q = C.transpose() @ Q @ C\n self.Q_dd = C.transpose() @ Q\n else:\n self.P = P * C.transpose() @ C\n self.P_dd = P * C.transpose()\n self.Q = Q * C.transpose() @ C\n self.Q_dd = Q * C.transpose()\n self.R = R\n\n # if self.C.shape[0] != self.P.shape[0]:\n # raise ValueError(' DIMENSION MISMATCH: Both C and P should have same number of rows')\n\n self.t = time\n\n self.ul, self.uh, self.xl, self.xh = ul, uh, xl, xh\n self.x0 = x0\n self.N = N # # Prediction 
horizon\n self.y_ref = ref_traj\n\n self.optCurve, self.costs = [], []\n self.omega = 0.5\n self.dyn = dynamics()\n self.kin = kinematics()\n\n def plant_model(self, x, u):\n x_dot = self.A * x + self.B * u\n return x_dot\n\n def transfer_matrices(self,):\n N = self.N\n nx, nu = self.B.shape\n Su = np.zeros((N * nx, N * nu))\n Sx = self.A\n An = self.A @ self.A\n for i in range(N-1):\n Sx = np.concatenate((Sx, An), axis=0)\n An = An @ self.A\n B = self.B\n for i in range(N):\n Su = Su + np.kron(np.eye(N, k=-i), B)\n B = self.A @ B\n return Sx, Su\n\n def plant_prediction(self, Sx, Su, x0, u):\n x = Sx @ x0 + Su @ u\n return x\n\n def penalties(self, ):\n N = self.N\n if N == 1:\n Qs = self.Q # Q stacked as diagonal\n Qs_d = self.Q_dd\n else:\n Qs = np.kron(np.eye(N - 1), self.Q) # Q stacked as diagonal\n Qs_d = np.kron(np.eye(N - 1), self.Q_dd)\n return Qs, Qs_d, np.kron(np.eye(N), self.R), np.kron(np.eye(N), self.C)\n\n def block_diag(self, Q, P):\n N = self.N\n rQ, cQ = Q.shape\n if isinstance(P, (list, tuple, np.ndarray)):\n rP, cP = P.shape\n else:\n rP, cP = 1, 1\n t1 = np.zeros((rP+rQ, cQ+cP))\n if N != 1:\n t1[0:rQ, 0:cQ] = Q\n else:\n t1 = Q\n t2 = np.zeros((N, N))\n t2[-1, -1] = 1\n t3 = np.kron(t2, P)\n return t1 + t3 # diag([Q, Q, ....., P])\n\n def cost_function(self, u, *args):\n x0, t, = args\n N = self.N\n u = u.reshape(len(u), -1)\n Sx, Su = self.transfer_matrices()\n Qs, Qs_d, H, Cs = self.penalties()\n F = self.block_diag(Qs, self.P)\n G = self.block_diag(Qs_d, self.P_dd)\n\n W = 2 * (Su.transpose() @ F @ Su + H)\n K = 2 * Su.transpose() @ F @ Sx\n L = 2 * Su.transpose() @ G\n\n if self.y_ref.shape[1] > 1:\n y_ref = self.y_ref[:, t].reshape(len(self.y_ref[:, t]), -1)\n else:\n y_ref = self.y_ref\n y_ref_lifted = np.tile(y_ref, (N, 1))\n cost = 0.5 * u.transpose() @ W @ u + u.transpose() @ K @ x0 - u.transpose() @ L @ y_ref_lifted\n return cost\n\n # @staticmethod\n def ref_trajectory(self, i):\n return self.y_ref[:, i]\n\n def cost_gradient(self, u, *args):\n x0, t = args\n N = self.N\n u = u.reshape(len(u), -1)\n Sx, Su = self.transfer_matrices()\n Qs, Qs_d, Rs, Cs = self.penalties()\n Q_blk = self.block_diag(Qs, self.P)\n G = 2 * (Rs + Su.transpose() @ Q_blk @ Su)\n F = 2 * (Su.transpose() @ Q_blk @ Sx)\n K = 2 * Su.transpose() @ Q_blk\n # y_ref = self.ref_trajectory(t)\n y_ref_lifted = np.tile(self.y_ref, (N, 1))\n cost_gradient = np.vstack((0.5 * G @ u, F @ x0, -K @ y_ref_lifted, np.zeros((18, 1))))\n return np.squeeze(cost_gradient)\n\n def constraints(self, u, *args):\n x0, t = args\n N = self.N\n nx, nu = self.B.shape\n Ix, Iu = np.eye(nx), np.eye(nu)\n zx, zu = np.zeros((2 * nu, nx)), np.zeros((2 * nx, nu))\n Mi = np.vstack((zx, -Ix, Ix))\n Ei = np.vstack((-Iu, Iu, zu))\n bi = np.vstack((-self.ul, self.uh, -self.xl, self.xh))\n MN = np.vstack((-Ix, Ix))\n bN = np.vstack((-self.xl, self.xh))\n c = np.vstack((np.tile(bi, (N, 1)), bN))\n tp = c.shape[0]\n D = np.vstack((Mi, np.zeros((tp - Mi.shape[0], Mi.shape[1]))))\n if N == 1:\n b = Mi\n else:\n a = np.kron(np.eye(N - 1), Mi)\n b = block_diag(a, MN)\n tt = c.shape[0] - b.shape[0]\n q = np.zeros((tt, nx * N))\n M = np.vstack((q, b))\n aa = np.kron(np.eye(N), Ei)\n tt1 = c.shape[0] - aa.shape[0]\n qq = np.zeros((tt1, nu * N))\n Eps = np.vstack((aa, qq))\n Sx, Su = self.transfer_matrices()\n L = M @ Su + Eps\n W = -D - M @ Sx\n u = u.reshape(len(u), -1)\n con_ieq = c + W @ x0 - L @ u\n return np.squeeze(con_ieq)\n\n def optimise(self, u0, x0, t):\n con_ineq = {'type': 'ineq',\n 'fun': self.constraints,\n 'args': (x0, 
t)}\n # con_eq = {'type': 'eq',\n # 'fun': self.con_eq,\n # 'args': (x0, t)}\n\n # U = opt.minimize(self.cost_function, u0, args=(x0, t), method='SLSQP',\n # options={'maxiter': 200, 'disp': True}, jac=self.cost_gradient, constraints=con_ineq)\n\n U = opt.minimize(self.cost_function, u0, args=(x0, t), method='SLSQP',\n options={'maxiter': 200, 'disp': True},)# constraints=con_ineq)\n U = U.x\n return U\n\n def get_state_and_input(self, u0, x0):\n X, U = np.zeros((len(x0), len(self.t))), np.zeros((len(u0), len(self.t)))\n u0 = np.tile(u0, (self.N, 1))\n nx, nu = self.B.shape\n for i in range(len(self.t)):\n print('i = :', i)\n U[:, i], X[:, i] = u0[0:nu].transpose(), x0.transpose()\n u = self.optimise(u0, x0, i, )\n u0 = u\n u = u[0:nu].reshape(nu, -1)\n x0 = x0.reshape(len(x0), -1)\n x0 = self.A @ x0 + self.B @ u\n return X, U\n\n\nif __name__ == '__main__':\n mpc = mpc_opt()\n pos = sp.zeros(3, len(mpc.t))\n x0, u0, = np.array([[0.0], [0.0], [0.0]]), np.array([[0.4], [0.2]])\n\n cg = mpc.cost_gradient(np.tile(u0, (mpc.N, 1)), x0, 1)\n cons = mpc.constraints(np.tile(u0, (mpc.N, 1)), x0, 1)\n\n X, U = mpc.get_state_and_input(u0, x0)\n\n plt.figure(1)\n plt.plot(X[0, :], '--r')\n plt.plot(X[1, :], '--b')\n plt.plot(X[2, :], '--g')\n # plt.ylim(-2, 2)\n plt.xlabel('time')\n plt.title('states')\n\n plt.figure(2)\n plt.plot(U[0, :], '--r')\n plt.plot(U[1, :], '--b')\n # plt.ylim(-1, 1)\n plt.title('Input')\n plt.xlabel('time')\n\n plt.show()\n\n print('hi')","sub_path":"src/mpc_optimizer.py","file_name":"mpc_optimizer.py","file_ext":"py","file_size_in_byte":9105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"589829881","text":"\nfrom direct.distributed.PyDatagram import *\n\nfrom otp.ai.TimeManagerAI import TimeManagerAI\nfrom otp.distributed.OtpDoGlobals import *\nfrom otp.ai.MagicWordManagerAI import MagicWordManagerAI\nfrom otp.ai.BanManagerAI import BanManagerAI\n\nfrom pirates.distributed.PiratesInternalRepository import PiratesInternalRepository\nfrom pirates.instance.DistributedMainWorldAI import DistributedMainWorldAI\nfrom pirates.distributed.DistrictManagerAI import DistrictManagerAI\nfrom pirates.ai.NewsManagerAI import NewsManagerAI\n\nfrom pirates.world.WorldCreatorAI import WorldCreatorAI\nfrom pirates.piratesbase.DistributedTimeOfDayManagerAI import DistributedTimeOfDayManagerAI\nfrom pirates.piratesbase.DistributedGameStatManagerAI import DistributedGameStatManagerAI\nfrom pirates.instance.DistributedTeleportMgrAI import DistributedTeleportMgrAI\nfrom pirates.distributed.TargetManagerAI import TargetManagerAI\nfrom pirates.battle.BattleManagerAI import BattleManagerAI\n\nclass PiratesAIRepository(PiratesInternalRepository):\n def __init__(self, baseChannel, stateServerChannel, districtName):\n PiratesInternalRepository.__init__(\n self, baseChannel, stateServerChannel, dcSuffix='AI')\n\n self.districtName = districtName\n self.districtManager = None\n self.uid2do = {}\n\n self.notify.setInfo(True)\n\n def createManagers(self):\n self.timeManager = TimeManagerAI(self)\n self.timeManager.generateWithRequired(2)\n\n self.newsManager = NewsManagerAI(self)\n self.newsManager.generateWithRequired(2)\n\n self.todManager = DistributedTimeOfDayManagerAI(self)\n self.todManager.generateWithRequired(2)\n\n self.tpMgr = DistributedTeleportMgrAI(self)\n self.tpMgr.generateWithRequired(2)\n\n self.targetMgr = TargetManagerAI(self)\n self.targetMgr.generateWithRequired(2)\n\n self.gameStatManager = DistributedGameStatManagerAI(self)\n 
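# Generated in zone 2, the same global zone used by the managers above.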
self.gameStatManager.generateWithRequired(2)\n\n self.magicWordManager = MagicWordManagerAI(self)\n self.magicWordManager.generateWithRequired(2)\n\n self.banMgr = BanManagerAI(self)\n self.battleMgr = BattleManagerAI(self)\n\n def createMainWorld(self):\n self.worldCreator = WorldCreatorAI(self)\n\n self.mainWorld = DistributedMainWorldAI(self)\n self.mainWorld.generateWithRequired(2)\n\n self.worldCreator.makeMainWorld(self.districtManager.district.mainWorld)\n\n def handleConnected(self):\n PiratesInternalRepository.handleConnected(self)\n\n self.districtId = self.allocateChannel()\n self.notify.info('Creating PiratesDistrictAI(%d)...' % self.districtId)\n\n self.districtManager = DistrictManagerAI(self)\n self.districtManager.generateDistrict()\n\n self.notify.info('Claiming ownership of channel ID: %d...' % self.districtId)\n self.claimOwnership(self.districtId)\n\n self.notify.info('Creating managers...')\n self.createManagers()\n\n self.notify.info('Creating the main world...')\n self.createMainWorld()\n\n self.notify.info('Making district available...')\n self.districtManager.openDistrict()\n self.notify.info('Done.')\n messenger.send('startShardActivity')\n\n from pirates.battle.DistributedEnemySpawnerAI import DistributedEnemySpawnerAI\n DistributedEnemySpawnerAI.printMissingTypes()\n self.accept('pirate-inventory-activate', self.__inventoryActivate)\n \n def __inventoryActivate(self, ownerId, invId):\n owner = self.doId2do.get(ownerId)\n \n if owner:\n owner.b_setInventoryId(invId)\n else:\n self.acceptOnce('generate-%d' % ownerId, lambda av: av.b_setInventoryId(invId))\n\n def getTrackClsends(self):\n return False\n\n def incrementPopulation(self):\n self.districtManager.district.b_setAvatarCount(self.districtManager.district.getAvatarCount() + 1)\n\n def decrementPopulation(self):\n self.districtManager.district.b_setAvatarCount(self.districtManager.district.getAvatarCount() - 1)\n","sub_path":"PORMain/pirates/ai/PiratesAIRepository.py","file_name":"PiratesAIRepository.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"265597682","text":"menu = [\n [\"egg\", \"spam\", \"bacon\"],\n [\"egg\", \"sausage\", \"bacon\"],\n [\"egg\", \"spam\"],\n [\"egg\", \"bacon\", \"spam\"],\n [\"egg\", \"bacon\", \"sausage\", \"spam\"],\n [\"spam\", \"bacon\", \"sausage\", \"spam\"],\n [\"spam\", \"egg\", \"spam\", \"spam\", \"bacon\", \"spam\"],\n [\"spam\", \"egg\", \"sausage\", \"spam\"],\n [\"chicken\", \"chips\"]\n]\n\nmeals = []\nfor meal in menu:\n if \"spam\" not in meal:\n meals.append(meal)\n else:\n meals.append(\"a meal was skipped\")\nprint(meals)\n\n# meals = [meal for meal in menu if \"spam\" not in meal if \"chicken\" not in meal]\nmeals = [meal for meal in menu if \"spam\" not in meal and \"chicken\" not in meal]\nprint(meals)\n\nfussy_meals = [meal for meal in menu if \"spam\" in meal or \"eggs\" in meal if not\n(\"bacon\" in meal and \"sausage\" in meal)]\nprint(fussy_meals)\n\nfussy_meals = [meal for meal in menu if\n (\"spam\" in meal or \"eggs\" in meal) and not (\"bacon\" in meal and \"sausage\" in meal)]\nprint(fussy_meals)\n","sub_path":"Complete Python Masterclass/Source_Codes/lesson_198_condcomp.py","file_name":"lesson_198_condcomp.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"382363895","text":"# -*- coding: utf-8 -*-\nimport threading\nimport time\nimport os\nimport 
toml\nfrom influxdb import InfluxDBClient\nfrom logbook import Logger\n\n\ndef ping_check(ip):\n ans = os.system('ping -n 1 -w 1 %s' %ip)\n # os.system() returns the ping exit status; 0 means the host answered.\n return 1 if ans == 0 else 0\n\n\ndef time_check(datatime):\n structed_time = time.strptime(datatime[0:19], '%Y-%m-%dT%X')\n time_stamp = time.mktime(structed_time)\n localtime = time.time()\n # The record is \"fresh\" if its timestamp is within 10 minutes of local time.\n if abs(time_stamp - localtime) < 600.0:\n ans = 1\n else:\n ans = 0\n return ans\n\ndef pack_data(eqpt,ping_ans,data_ans,time):\n fields = {\n \"ping\":ping_ans,\n \"data\":data_ans\n }\n json_body = [\n {\n \"measurement\": \"test\",\n \"tags\": {\n \"eqpt_no\": eqpt\n },\n \"time\":time,\n \"fields\": fields\n }\n\n ]\n return json_body\n\ndef Send(perl_measurement,perl_eqpt,perl_ip):\n client = InfluxDBClient('localhost', 8086, 'root', '', 'mts')\n query = \"select * from {0} where eqpt_no='{1}' order by desc limit 1\".format(perl_measurement,perl_eqpt)\n print(query)\n data = client.query(query)\n for i in data:\n for j in i:\n time = j['time']\n print(time)\n data_ans = time_check(time)\n ping_ans = ping_check(perl_ip)\n json_body = pack_data(perl_eqpt,ping_ans,data_ans,time)\n client.write_points(json_body)\n return 0\n\n\n\n\ndef main():\n with open('test.toml') as conf_file:\n config = toml.loads(conf_file.read())\n print(config)\n threads = []\n for perl_measurement in config:\n for perl_eqpt in config[perl_measurement]:\n perl_ip = config[perl_measurement][perl_eqpt]\n print(perl_ip,perl_eqpt,perl_measurement)\n # m = Send(perl_measurement,perl_eqpt,perl_ip)\n # print(m)\n t = threading.Thread(target=Send,args=(perl_measurement,perl_eqpt,perl_ip))\n t.setDaemon(True)\n t.start()\n threads.append(t)\n # Join after the loop so the per-device checks actually run in parallel.\n for t in threads:\n t.join()\n print('done')\nif __name__ == '__main__':\n main()\n","sub_path":"old_monitor/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"27873223","text":"import numpy as np\nimport openmdao.api as om\n\nclass Resistor(om.ExplicitComponent):\n \"\"\"\n Computes current across a resistor using Ohm's law.\n \"\"\"\n \n def initialize(self):\n self.options.declare('R', default=1., desc='Resistance in Ohms')\n \n def setup(self):\n self.add_input('V_in', units='V')\n self.add_input('V_out', units='V')\n self.add_output('I', units='A')\n \n self.declare_partials('I', 'V_in', method='fd')\n self.declare_partials('I', 'V_out', method='fd')\n \n def compute(self, inputs, outputs):\n deltaV = inputs['V_in'] - inputs['V_out']\n outputs['I'] = deltaV / self.options['R']\n \nclass Diode(om.ExplicitComponent):\n \"\"\"\n Computes current across a diode using the Shockley diode equation.\n \"\"\"\n \n def initialize(self):\n self.options.declare('Is', default=1e-15, desc='Saturation current in Amps')\n self.options.declare('Vt', default=0.025875, desc='Thermal voltage in Volts')\n \n def setup(self):\n self.add_input('V_in', units='V')\n self.add_input('V_out', units='V')\n self.add_output('I', units='A')\n \n self.declare_partials('I', 'V_in', method='fd')\n self.declare_partials('I', 'V_out', method='fd')\n \n def compute(self, inputs, outputs):\n deltaV = inputs['V_in'] - inputs['V_out']\n Is = self.options['Is']\n Vt = self.options['Vt']\n outputs['I'] = Is * (np.exp(deltaV / Vt) - 1)\n\nclass Node(om.ImplicitComponent):\n \"\"\"\n Computes voltage residual across a node based on incoming and outgoing current.\n \"\"\"\n \n def initialize(self):\n self.options.declare('n_in', default=1, types=int, desc='Number of connections with + assumed in')\n 
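# The outgoing side is declared symmetrically; setup() below adds one I_out:<i> input per branch.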
self.options.declare('n_out', default=1, types=int, desc='Number of current connections + assumed out')\n \n def setup(self):\n self.add_output('V', val=5., units='V')\n \n for i in range(self.options['n_in']):\n i_name = f'I_in:{i}'\n self.add_input(i_name, units='A')\n \n for i in range(self.options['n_out']):\n i_name = f'I_out:{i}'\n self.add_input(i_name, units='A')\n \n # Note: We don't declare any partials wrt `V` here, because the residual doesn't directly depend on it\n self.declare_partials('V', 'I*', method='fd')\n \n def apply_nonlinear(self, inputs, outputs, residuals):\n # Kirchhoff's current law: incoming currents add, outgoing currents subtract,\n # so the residual is zero when the node's net current is zero.\n residuals['V'] = 0.\n for i_conn in range(self.options['n_in']):\n residuals['V'] += inputs[f'I_in:{i_conn}']\n for i_conn in range(self.options['n_out']):\n residuals['V'] -= inputs[f'I_out:{i_conn}']\n\nclass Circuit(om.Group):\n \n def setup(self):\n self.add_subsystem('n1', Node(n_in=1, n_out=2), promotes_inputs=[('I_in:0', 'I_in')])\n self.add_subsystem('n2', Node()) # Leaving defaults\n \n self.add_subsystem('R1', Resistor(R=100.), promotes_inputs=[('V_out', 'Vg')])\n self.add_subsystem('R2', Resistor(R=10000.))\n self.add_subsystem('D1', Diode(), promotes_inputs=[('V_out', 'Vg')])\n \n # USE PROMOTES\n self.connect('n1.V', ['R1.V_in', 'R2.V_in'])\n self.connect('R1.I', 'n1.I_out:0')\n self.connect('R2.I', 'n1.I_out:1')\n \n self.connect('n2.V', ['R2.V_out', 'D1.V_in'])\n self.connect('R2.I', 'n2.I_in:0')\n self.connect('D1.I', 'n2.I_out:0')\n \n self.nonlinear_solver = om.NewtonSolver()\n self.nonlinear_solver.options['iprint'] = 2\n self.nonlinear_solver.options['maxiter'] = 20\n self.linear_solver = om.DirectSolver()\n \nprob = om.Problem()\nmodel = prob.model\n\nmodel.add_subsystem('ground', om.IndepVarComp('V', 0., units='V'))\nmodel.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A'))\nmodel.add_subsystem('circuit', Circuit())\n\nmodel.connect('source.I', 'circuit.I_in')\nmodel.connect('ground.V', 'circuit.Vg')\n\nprob.setup()\n\n# Initial values\nprob['circuit.n1.V'] = 10.\nprob['circuit.n2.V'] = 1.\n\nprob.run_model()","sub_path":"circuit.py","file_name":"circuit.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"376685558","text":"# -*- coding: utf-8 -*-\nimport time\nimport random\n\nclass EMG_Variables(object):\n def __init__(self):\n self.initTime = time.time()\n\n def load_variables(self):\n #Creating the variables needed for the analysis\n self.answer = []\n self.emg0 = []\n self.calibrationSignal = []\n self.RightGluteus = [] #IMPORTANT INITIALIZE IT TO 0\n self.RightQuadriceps = []\n self.RightTriceps = []\n self.RightHamstrings = []\n self.LeftGluteus = []\n self.LeftQuadriceps = []\n self.LeftTriceps = []\n self.LeftHamstrings = []\n self.emg9 = []\n self.Index = []\n self.IndexLeft = []\n self.EMGlength = 2500\n self.CPWalkerIndex = 0 #from cpwalker\n self.j = 0 \n self.counterEMG1 = [0]\n self.counterEMG2 = [0]\n self.counterEMG3 = [0]\n self.counterEMG4 = [0]\n self.cyclestowait = 4\n self.phase = 0\n \nif __name__ == '__main__':\n s = EMG_Variables()\n s.load_variables()","sub_path":"lib/resources/variablesEMG.py","file_name":"variablesEMG.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"104764376","text":"#\n# [821] Bricks Falling When Hit\n#\n# https://leetcode.com/problems/bricks-falling-when-hit/description/\n#\n# algorithms\n# Hard (20.02%)\n# Total Accepted: 1.1K\n# 
Total Submissions: 5.2K\n# Testcase Example: '[[1,0,0,0],[1,1,1,0]]\\n[[1,0]]'\n#\n# We have a grid of 1s and 0s; the 1s in a cell represent bricks.  A brick will\n# not drop if and only if it is directly connected to the top of the grid, or\n# at least one of its (4-way) adjacent bricks will not drop.\n#\n# We will do some erasures sequentially. Each time we want to do the erasure at\n# the location (i, j), the brick (if it exists) on that location will\n# disappear, and then some other bricks may drop because of that erasure.\n#\n# Return an array representing the number of bricks that will drop after each\n# erasure in sequence.\n#\n#\n# Example 1:\n# Input:\n# grid = [[1,0,0,0],[1,1,1,0]]\n# hits = [[1,0]]\n# Output: [2]\n# Explanation:\n# If we erase the brick at (1, 0), the brick at (1, 1) and (1, 2) will drop. So\n# we should return 2.\n#\n#\n# Example 2:\n# Input:\n# grid = [[1,0,0,0],[1,1,0,0]]\n# hits = [[1,1],[1,0]]\n# Output: [0,0]\n# Explanation:\n# When we erase the brick at (1, 0), the brick at (1, 1) has already\n# disappeared due to the last move. So each erasure will cause no bricks\n# dropping. Note that the erased brick (1, 0) will not be counted as a dropped\n# brick.\n#\n#\n#\n# Note:\n#\n#\n# The number of rows and columns in the grid will be in the range [1, 200].\n# The number of erasures will not exceed the area of the grid.\n# It is guaranteed that each erasure will be different from any other erasure,\n# and located inside the grid.\n# An erasure may refer to a location with no brick - if it does, no bricks\n# drop.\n\n# REVIEW:\n# step 1: remove all hits\n# step 2: defs\n# step 3: check connect and add back dfs\n\n\nclass Solution:\n def hitBricks(self, grid, hits):\n \"\"\"\n :type grid: List[List[int]]\n :type hits: List[List[int]]\n :rtype: List[int]\n \"\"\"\n ret = [0] * len(hits)\n\n for i, j in hits:\n grid[i][j] -= 1\n\n def dfs(i, j, grid):\n if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]) or grid[i][j] != 1:\n return 0\n\n count = 1\n grid[i][j] = 2\n for d1, d2 in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n count += dfs(i + d1, j + d2, grid)\n return count\n\n for i in range(len(grid[0])):\n dfs(0, i, grid)\n\n for k in reversed(range(len(hits))):\n i, j = hits[k]\n grid[i][j] += 1\n\n if grid[i][j] == 1 and (i == 0 or any([0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x][y] == 2 for x, y in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]])):\n ret[k] = dfs(i, j, grid) - 1\n\n return ret\n","sub_path":"src/803.bricks-falling-when-hit.python3.py","file_name":"803.bricks-falling-when-hit.python3.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"433097239","text":"import librosa.display\nimport matplotlib.pyplot as plt\nfrom dtw import dtw\nfrom matplotlib import cm\nfrom numpy.linalg import norm\n\ny1, sr1 = librosa.load('speech_dataset/yes/0a2b400e_nohash_0.wav')\ny2, sr2 = librosa.load('speech_dataset/yes/5e3b7a84_nohash_0.wav')\n\n\ndef manhattan_distance(x, y):\n # print(f'x:{x}')\n # print(f'y:{y}')\n d = norm(x - y, ord=1)\n # print(f'd:{d}')\n return d\n\n\nax1 = plt.subplot(1, 3, 1)\nmfcc1 = librosa.feature.mfcc(y1, sr1).T\nprint(mfcc1.shape)\nax1.imshow(mfcc1, interpolation='nearest', cmap=cm.coolwarm, origin='lower')\n\nax2 = plt.subplot(1, 3, 2)\nmfcc2 = librosa.feature.mfcc(y2, sr2).T\nax2.imshow(mfcc2, interpolation='nearest', cmap=cm.coolwarm, origin='lower')\n\n# mfcc1 = mfcc1.T[:, 1:]\n# mfcc2 = mfcc2.T[:, 1:]\ndist, cost, acc_cost, 
path = dtw(mfcc1, mfcc2, dist=manhattan_distance)\nprint('Normalized distance between the two sounds:', dist)\n# print(f'cost:{cost}')\n# print(f'acc_cost:{acc_cost}')\n# print(f'path:{path}')\nax3 = plt.subplot(1, 3, 3)\nax3.imshow(cost.T, origin='lower', cmap=cm.gray, interpolation='nearest')\nplt.plot(path[0], path[1], 'w')\nplt.xlim((-0.5, cost.shape[0] - 0.5))\nplt.ylim((-0.5, cost.shape[1] - 0.5))\nplt.show()\n","sub_path":"try/dtw2.py","file_name":"dtw2.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"468451860","text":"import numpy as np\r\nimport os\r\nimport matplotlib.pylab as plt\r\nimport math\r\nimport cv2\r\nfrom PIL import Image\r\nimport imageio\r\n\r\nlarge_image = plt.imread(\"images/large/large.jpg\")\r\n\r\nlarge_image = cv2.resize(large_image, (3240, 4800))\r\n(full_y, full_x, full_depth) = large_image.shape\r\nsmall_images = np.load(\"image_comp.npy\")\r\n(num_images, height, width, depth) = small_images.shape\r\n\r\naverage_rgbs = []\r\nres = math.floor(full_y / height)\r\n\r\npixels = 0\r\nscale = 1\r\n\r\nfor i in range(num_images):\r\n r = 0\r\n g = 0\r\n b = 0\r\n for j in range(height):\r\n for l in range(width):\r\n pixel = small_images[i][j][l]\r\n r = r + pixel[0]\r\n g = g + pixel[1]\r\n b = b + pixel[2]\r\n\r\n average_rgbs.append([\r\n math.floor(r/(height*width)), \r\n math.floor(g/(height*width)), \r\n math.floor(b/(height*width))\r\n ])\r\n\r\naverage_rgbs = np.asarray(average_rgbs)\r\n\r\nbroken_down_large = []\r\n\r\nstartX = 0\r\nstartY = 0\r\n\r\ncurrentImage = 0\r\nnumImages = int(height * width / res) # round( * (res / scale))\r\n\r\nfor im in range(numImages): #stores blocks of the large image individually in an array\r\n col = [] #initialize new col to store rows\r\n for i in range(math.floor(height * scale)):\r\n row = [] #initialize new row to store individual pixels (r, g, b)\r\n for j in range(math.floor(width * scale)):\r\n row.append(large_image[i + startY][j + startX]) #append pixel rgb to the row\r\n col.append(row) #append row of pixels to the column\r\n broken_down_large.append(col) #append col of rows to the broken down large image\r\n if (im + 1) % math.floor(res / scale) == 0: #if the end of the row is reached, move down and back to the beginning\r\n startX = 0\r\n startY = math.floor(startY + (height * scale))\r\n else: #otherwise move to the right\r\n startX = math.floor(startX + (width * scale))\r\n\r\n\r\n\r\nfor im in broken_down_large:\r\n im = np.asarray(im)\r\n im = cv2.resize(im, (width, height))\r\n\r\n\r\nbroken_down_large_average_rgbs = []\r\n\r\nfor im in broken_down_large:\r\n r = 0\r\n g = 0\r\n b = 0\r\n for j in range(round(height * scale)):\r\n for l in range(round(width * scale)):\r\n pixel = im[j][l]\r\n r = r + pixel[0]\r\n g = g + pixel[1]\r\n b = b + pixel[2]\r\n\r\n broken_down_large_average_rgbs.append([\r\n math.floor(r/(height*width)), \r\n math.floor(g/(height*width)), \r\n math.floor(b/(height*width))\r\n ])\r\n\r\nbroken_down_large_average_rgbs = np.asarray(broken_down_large_average_rgbs)\r\n\r\nclosest_match = []\r\n\r\nfor i in range(len(broken_down_large_average_rgbs)):\r\n minimum = 255 * 3\r\n minimumLocation = 0\r\n for j in range(len(average_rgbs)):\r\n lr = broken_down_large_average_rgbs[i][0]\r\n lg = broken_down_large_average_rgbs[i][1]\r\n lb = broken_down_large_average_rgbs[i][2]\r\n\r\n sr = average_rgbs[j][0]\r\n sg = average_rgbs[j][1]\r\n sb = average_rgbs[j][2]\r\n\r\n difference = 
(((lr-sr)/255.0)**2 + ((lg-sg)/255.0)**2 + ((lb-sb)/255.0)**2) * 255\r\n if difference < minimum:\r\n minimumLocation = j\r\n minimum = difference\r\n closest_match.append(minimumLocation)\r\nprint(\"Starting assembly of large image ----------------------------------------------\")\r\nfinal_image = np.zeros((full_y, full_x, full_depth), dtype=int)\r\n\r\nlargeImageStartX = 0\r\nlargeImageStartY = 0\r\n\r\niteration = 1\r\nfor image in closest_match: #stores the index of the closest picture to the original\r\n print(largeImageStartX, \",\", largeImageStartY)\r\n image = cv2.resize(small_images[image], (round(width * scale), round(height * scale)))\r\n for i in range(round(height * scale)):\r\n for j in range(round(width * scale)):\r\n final_image[largeImageStartY + i][largeImageStartX + j] = image[i][j]\r\n if largeImageStartX == full_x - round((width * scale)):\r\n largeImageStartX = 0\r\n largeImageStartY = round(largeImageStartY + (height * scale))\r\n else:\r\n largeImageStartX = round(largeImageStartX + (width * scale))\r\n\r\noutput_image = Image.new('RGB', (full_x, full_y))\r\n\r\nx = 0\r\ny = 0\r\nfor row in final_image:\r\n for col in row:\r\n color = (col[0], col[1], col[2])\r\n output_image.putpixel((x, y), color)\r\n x = x + 1\r\n\r\n x = 0\r\n y = y + 1\r\n\r\noutput_image.save('mosaic.jpg')\r\noutput_image.close()\r\n# imageio.imwrite('mosaic.jpg', final_image)\r\n","sub_path":"CreateMosaic.py","file_name":"CreateMosaic.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"350972167","text":"\"\"\"validate inputs\"\"\"\nimport re\nimport sys\nimport json\nfrom secretctl.tuples import Secret\n\nDELIM = '/'\n\n# validate user path input\ndef validate_path(path):\n \"\"\"validate secret path name: characters, delimeter, length\"\"\"\n path = path[:-1] if path[-1] == '/' else path\n if not re.match(r\"^(?=.{2,256}$)([a-zA-Z0-9_@.-]+\\/)*([a-zA-Z0-9_@.-])*[a-zA-Z0-9_@.-]$\", path):\n print(\"secretctl: invalid path/key name. Allowable characters include [A-z0-9_@.-], \" \\\n \"/ for path delimiter, 3 to 256 chars long\")\n sys.exit(1)\n return path\n\n# validate user recovery days input\ndef validate_recovery(recovery):\n \"\"\"validate recovery days: integer, length\"\"\"\n if int(recovery) in range(7, 30):\n return recovery\n print('secretctl: invalid recovery days. Must be integer between 7 and 30')\n sys.exit(1)\n\n# convert cli provided tags into key:value json\ndef tags_to_json(tags, novalue=False):\n \"\"\"validate supplied tags and convert to secretsmanager tags json\"\"\"\n options = {True: tags_without_values, False: tags_with_values}\n return options[novalue](tags)\n\ndef tags_with_values(tags):\n \"\"\"validate supplied tags-with-values and convert to secretsmanager tags json\"\"\"\n tag_list = []\n for tag in tags.split(\",\"):\n if not re.match(r\"^(([a-zA-Z0-9\\/\\+\\:_@.-]{3,127})\\s*[=]\\s*([a-zA-Z0-9\\/\\+\\:_@.-]{1,255}))$\", tag.strip()):\n print(\"secretctl: invalid tags list. Supply tags as \\\"tag1=value1, tag2=value2, ...\\\"\")\n sys.exit(1)\n tag_list.append({\"Key\": tag.split(\"=\")[0].strip(), \"Value\": tag.split(\"=\")[1].strip()})\n return tag_list\n\ndef tags_without_values(tags):\n \"\"\"validate supplied tags-without-values and convert to secretsmanager tags json\"\"\"\n tag_list = []\n for tag in tags.split(\",\"):\n if not re.match(r\"^([a-zA-Z0-9\\/\\+\\:_@.-]{3,127})$\", tag.strip()):\n print(\"secretctl: invalid remove tags list. 
Supply tags as \\\"tag1, tag2, ...\\\"\")\n sys.exit(1)\n tag_list.append(tag.strip())\n return tag_list\n\n# convert json Tags to simple cli formatted\ndef json_to_tags(tags):\n \"\"\"format json Tags as simple tag list\"\"\"\n tag_list = \"\"\n for index, _ in enumerate(tags):\n if index == 0:\n tag_list += (tags[index]['Key'] + '=' + tags[index]['Value'])\n else:\n tag_list += (', ' + tags[index]['Key'] + '=' + tags[index]['Value'])\n return tag_list\n\ndef read_value(path, value, isjson=False):\n \"\"\"parse supplied value or response from sys.stdin.read (pipe)\"\"\"\n resp = sys.stdin.read() if value == '-' else value\n if isjson:\n try:\n json.loads(resp)\n except ValueError as e:\n print('secretctl: invalid json %s' % e)\n sys.exit(1)\n else:\n resp = json.dumps({path.split(DELIM)[-1]: value})\n return resp\n\ndef set_secret(secret):\n \"\"\"return Secret from list_secret element\"\"\"\n secret_kwargs = {}\n secret_kwargs['path'] = secret['Name']\n if 'Description' in secret:\n secret_kwargs['description'] = secret['Description']\n if 'Tags' in secret:\n secret_kwargs['tags'] = secret['Tags']\n return Secret(**secret_kwargs)\n","sub_path":"secretctl/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"181261063","text":"from flask import Flask,render_template,request\r\napp = Flask(__name__)\r\n#Bind route to view function\r\nitems=[\r\n {\"name\":\"com rang\",\r\n \"price\":25000,\r\n },\r\n {\"name\":\"pho bo\",\r\n \"price\":30000,\r\n },\r\n {\"name\":\"xoi xeo\",\r\n \"price\":10000,\r\n }\r\n]\r\n@app.route(\"/\")\r\ndef menu():\r\n return render_template(\"menu.html\",item_list=items,user=\"sơn\")\r\n@app.route(\"/food/\")\r\ndef food(i):\r\n f=items[i]\r\n return render_template(\"food_detail.html\",item=f)\r\n@app.route(\"/add_food\",methods=[\"GET\",\"POST\"])\r\ndef add_food():\r\n if request.method==\"GET\":\r\n return render_template(\"food_form.html\")\r\n elif request.method==\"POST\":\r\n form=request.form\r\n n=form[\"name\"]\r\n p=form[\"price\"]\r\n new_item={\r\n \"name\":n,\r\n \"price\":p,\r\n }\r\n print(type(p))\r\n items.append(new_item)\r\n return p\r\nif __name__==\"__main__\":\r\n app.run(debug=True, port=6969)\r\n\r\n\r\n","sub_path":"web2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"167511324","text":"import numpy as np\r\n\r\nfrom typing import TypeVar, List\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.model_selection import StratifiedKFold\r\n\r\nfrom ..woe import WoE\r\n\r\nDataFrame = TypeVar(\"DataFrame\")\r\nSeries = TypeVar(\"Series\")\r\n\r\n\r\nclass QTransform:\r\n \"\"\"\r\n Качество хуже, чем у деревьев + С генетикой будет лучше, но работать будет намного дольше.\r\n В генетике нет доказательства сходимости\r\n \"\"\"\r\n\r\n def __init__(self, x: Series, y: Series, cv_splits: int):\r\n \"\"\"\r\n Бининг по квантилям\r\n\r\n Parameters\r\n ----------\r\n x\r\n y\r\n cv_splits\r\n \"\"\"\r\n self.x = x\r\n self.y = y\r\n self.cv_splits = cv_splits\r\n self.tr_coeff = None\r\n\r\n def __call__(self, q_splits, n_iter: int):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n q_splits\r\n n_iter\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n split = self._init_split(q_splits)\r\n score_ = [np.mean(self.cv_transform(split))]\r\n\r\n for _ in range(n_iter):\r\n # 
print(_)\r\n new_split = self.__bins_unite(split)\r\n new_score = np.mean(self.cv_transform(new_split))\r\n if new_score > score_[-1]:\r\n score_.append(new_score)\r\n split = new_split.copy()\r\n\r\n # woe = WoE.fit(split, self.x, self.y)\r\n # x_tr = woe.transform(self.x)\r\n\r\n # TODO: Добавить критерий остановки !!!!!\r\n\r\n return score_, split\r\n\r\n def cv_transform(self, split=None):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n split\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n score_ = []\r\n\r\n if split is None:\r\n split = self._init_split(q_splits=20)\r\n\r\n cv = StratifiedKFold(n_splits=self.cv_splits, random_state=323, shuffle=True)\r\n for train_index, test_index in cv.split(self.x, self.y):\r\n x_train, y_train = self.x.iloc[train_index], self.y.iloc[train_index]\r\n x_test, y_test = self.x.iloc[test_index], self.y.iloc[test_index]\r\n\r\n woe = self.__woe_transform(split, x_train, y_train)\r\n x_train, x_test = woe.transform(x_train), woe.transform(x_test)\r\n\r\n score_.append(self.__fp_clf(x_test[\"woe\"], y_test))\r\n\r\n return score_\r\n\r\n @staticmethod\r\n def __fp_clf(x_test, y_test):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n x_test\r\n y_test\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n return roc_auc_score(y_true=y_test, y_score=-x_test.values)\r\n\r\n def _init_split(self, q_splits=20):\r\n \"\"\"\r\n Дробление по бинам с помощью квантилей\r\n\r\n Parameters\r\n ----------\r\n q_splits: int\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n range_ = np.arange(100 / q_splits, 99.99, 100 / q_splits)\r\n range_ = np.append(range_, 100)\r\n split = np.array([np.percentile(self.x, q) for q in range_])\r\n # split[0] = -np.inf\r\n return np.unique(split)\r\n\r\n @staticmethod\r\n def __bins_unite(split: List):\r\n \"\"\"\r\n В дальнейшем можно переписать так, чтобы не вызывать self.WoE каждый раз\r\n\r\n Parameters\r\n ----------\r\n split\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n ind = np.random.choice(np.arange(1, len(split) - 1), size=1, replace=False)\r\n\r\n # if 0 in ind or len(split-1) in ind:\r\n # raise IndexError(f\"can not drop left border {0} or right border {len(split-1)}\")\r\n\r\n split = np.delete(split, ind)\r\n return split\r\n\r\n @staticmethod\r\n def __woe_transform(split, x, y):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n split\r\n x\r\n y\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n woe = WoE(bins=split)\r\n woe.fit(x, y)\r\n return woe\r\n","sub_path":"ds_template/dspl/autowoe/lib/pipelines/pipeline_quantile.py","file_name":"pipeline_quantile.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"427603258","text":"\nfrom geometry.dayligthgrid import daylight_mesh_grid\nfrom externalcommands.radiancecommands import run_rfluxmtx_day_vmx, \\\n run_rfluxmtx_day_dmx, run_gendaymtx, run_dctimestep_day, run_rmtxop_day\n\nfrom postprocessing.daypostprocess import calc_da\n\n\ndef daylightanalysis_2(info, engine):\n \n\n #Run rfluxmtx once for each level to create vmx matrices\n for i in range(len(info.approved_rooms)):\n run_rfluxmtx_day_vmx(info, i, engine)\n \n #Create matrices (for each window)\n for i in range(len(info.approved_rooms)):\n run_rfluxmtx_day_dmx(info, i, engine)\n \n #Create smx\n run_gendaymtx(info,\n\t\t\t\t spektrum = \"visible spektrum\", \n\t\t\t\t sky_resolution = 1)\n \n #run dcitimestep (for each room)\n for i in range(len(info.approved_rooms)):\n run_dctimestep_day(info, i)\n \n #run rmtxop (for each 
room)\n for i in range(len(info.approved_rooms)):\n run_rmtxop_day(info, i)\n \n #annual metrics (for each room)\n calc_da(info)","sub_path":"recipes/dayligthanalysis_2.py","file_name":"dayligthanalysis_2.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"329982499","text":"import numpy\ndef action(obs,info):\n if info[1]:\n #print(obs[:,:,4].nonzero())\n neutral_y, neutral_x = obs[:,:,4].nonzero()\n player_y, player_x = obs[:,:,3].nonzero()\n\n player = [int(player_x.mean()), int(player_y.mean())]\n closest, min_dist = None, None\n for p in zip(neutral_x, neutral_y):\n dist = numpy.linalg.norm(numpy.array(player) - numpy.array(p))\n if not min_dist or dist < min_dist:\n closest, min_dist = p, dist\n if closest != None:\n return 2,closest[0],closest[1]\n else:\n return 0,0,0\n else:\n return 0,0,0\n","sub_path":"supervise/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"317792918","text":"#!/usr/bin/env python\n\n\"cuda library\"\nimport pycuda.autoinit\nimport pycuda.driver as cuda\nfrom pycuda.compiler import SourceModule\n\n\"math library\"\nimport numpy as np\nfrom math import sqrt\n\n\"normal library\"\nimport json\nimport sys\nimport os\n\ndef genSVD():\n dir = '../../jsons/tf_idf/book/'\n list = os.listdir(dir)\n\n origin = []\n\n for idx, e in enumerate(list):\n src = open(dir+e, 'r')\n tf_idf = json.loads( src.read() )\n src.close()\n\n origin.append(tf_idf)\n\n origin = np.transpose(origin)\n\n (T, sigma, D) = np.linalg.svd(origin, False)\n\n return (T, sigma, D)\n\ndef main():\n mod = SourceModule(\"\"\"\n __global__ void diff(double *dest, double *a, double *b, int limit)\n {\n int jump_size = blockDim.x * gridDim.x;\n int offset = threadIdx.x + blockDim.x * blockIdx.x;\n\n int index;\n for (index = offset; index < limit; index += jump_size) {\n dest[index] = a[index] - b[index];\n }\n }\n\n __global__ void power2(double *dest, double *a, int limit)\n {\n int jump_size = blockDim.x * gridDim.x;\n int offset = threadIdx.x + blockDim.x * blockIdx.x;\n\n int index = offset;\n for (index = offset; index < limit; index += jump_size) {\n dest[index] = a[index] * a[index];\n }\n }\n \"\"\")\n diff = mod.get_function('diff')\n pow2 = mod.get_function('power2')\n\n \"SVD\"\n (T, sigma, D) = genSVD()\n\n \"normalize\"\n D = np.absolute(D)\n row_sums = D.sum(axis=1, keepdims=True)\n D = D / row_sums\n D = np.nan_to_num(D)\n\n answer = {}\n for i, e1 in enumerate(D):\n answer[i] = []\n for j, e2 in enumerate(D):\n R = genDistanceGPU(e1, e2, diff, pow2)\n answer[i].append(R)\n\n return D, row_sums, answer\n\ndef genDistanceGPU(np_a, np_b, diff, power):\n \"init variable\"\n \"device\"\n a_gpu = cuda.mem_alloc(np_a.nbytes)\n b_gpu = cuda.mem_alloc(np_b.nbytes)\n diff_res_gpu = cuda.mem_alloc(np_a.nbytes)\n \"host\"\n diff_res = np.zeros_like(np_a)\n\n \"copy to device\"\n cuda.memcpy_htod(a_gpu, np_a)\n cuda.memcpy_htod(b_gpu, np_b)\n\n \"execute cuda\"\n \"diff\"\n diff(diff_res_gpu, a_gpu, b_gpu, np.int32(np_a.size), block = (1024, 1, 1), grid = (128, 1, 1))\n\n \"pow2\"\n power(diff_res_gpu, diff_res_gpu, np.int32(np_a.size), block = (1024, 1, 1), grid = (128, 1, 1))\n\n \"callback\"\n cuda.memcpy_dtoh(diff_res, diff_res_gpu)\n\n return 
sqrt(diff_res.sum())\n\n","sub_path":"src/testing/svd.py","file_name":"svd.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"220458396","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pumml/learners.py\n# Compiled at: 2019-07-15 17:22:22\n# Size of source mod 2**32: 19758 bytes\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture, BayesianGaussianMixture\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.utils import resample\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom monty.serialization import dumpfn\nimport pandas as pd, seaborn as sns, os, pickle, numpy as np, matplotlib.pyplot as plt\n__author__ = 'Nathan C. Frey, Jin Wang'\n__copyright__ = 'MIT License'\n__version__ = '0.0.1'\n__maintainer__ = 'Nathan C. Frey'\n__email__ = 'n.frey@seas.upenn.edu'\n__status__ = 'Development'\n__date__ = 'Aug 2017'\n\nclass PULearner:\n\n def __init__(self):\n \"\"\"A machine learning model that predicts material synthesizability.\n\n Positive samples are experimentally synthesized materials. Unlabled samples are not-yet synthesized materials that are part of the same material family.\n\n Features for training data might be generated by first principles (density functional theory) calculations.\n\n Hyperparameters are initialized with sensible defaults, but any newly trained model should have hyperparams carefully converged.\n\n Args:\n\n Attributes:\n pu_stats (dict): Outputs of cv_baggingDT\n df_U (DataFrame): Unlabeled data.\n df_P (DataFrame): Positive data.\n\n synth_scores (list): Synthesizability scores (between 0 and 1) of unlabeled samples.\n labels (list): Likely synthesizable (1) or not (0)\n feat_importances (DataFrame): Feature importances from trained\n decision tree classifiers. Index corresponds to feature index in original data. \n\n \"\"\"\n pass\n\n def cv_baggingDT(self, pu_data, splits=3, repeats=100, bags=100, filename=''):\n \"\"\"\n Train bagged decision tree base classifiers and do repeated \n k-fold CV.\n\n Synthesizability scores (0 = not synthesizable, 1 = already\n synthesized) are generated for an unlabeled sample by averaging\n the scores from the ensemble of decision tree classifiers that\n have not been trained on that sample. \n\n Args:\n pu_data (json): A file of numeric features describing materials. 
There MUST be a column called \"PU_label\" where a 1 value indicates a synthesized (positive) compound and a 0 value indicates an unlabeled compound.\n\n splits (int): Number of splits in k-fold CV.\n repeats (int): Number of repeated k-fold CV.\n bags (int): Number of bags in bootstrap aggregation.\n filename (string): Save model training results to file with\n filename ending in .json or .pkl.\n\n Returns:\n pu_stats (dict): Metrics and outputs of PU learning model\n training.\n\n \"\"\"\n print('Start PU Learning.')\n df = pd.read_json(pu_data)\n df_P, df_U, X_P, X_U = self._process_pu_data(df)\n self.df_P = df_P\n self.df_U = df_U\n kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=42)\n scores = []\n tprs = []\n prob_P = np.ones(shape=(X_P.shape[0], splits * repeats))\n prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats))\n feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats))\n idsp = 0\n for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)):\n N_ptrain = X_P[ptrain].shape[0]\n N_utrain = X_U[utrain].shape[0]\n d = X_P.shape[1]\n K = N_ptrain\n train_label = np.zeros(shape=(N_ptrain + K,))\n train_label[:N_ptrain] = 1.0\n n_oob = np.zeros(shape=(N_utrain,))\n f_oob = np.zeros(shape=(N_utrain, 2))\n f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2))\n f_utest = np.zeros(shape=(X_U[utest].shape[0], 2))\n for i in range(bags):\n bootstrap_sample = np.random.choice((np.arange(N_utrain)), replace=True, size=K)\n data_bootstrap = np.concatenate((X_P[ptrain], X_U[bootstrap_sample, :]), axis=0)\n model = DecisionTreeClassifier(max_depth=None, max_features=None, criterion='gini', class_weight='balanced')\n model.fit(data_bootstrap, train_label)\n idx_oob = sorted(set(range(N_utrain)) - set(np.unique(bootstrap_sample)))\n f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob])\n n_oob[idx_oob] += 1\n f_ptest += model.predict_proba(X_P[ptest])\n f_utest += model.predict_proba(X_U[utest])\n feat_rank[:, idsp] = model.feature_importances_\n\n predict_utrain = f_oob[:, 1] / n_oob\n predict_ptest = f_ptest[:, 1] / bags\n predict_utest = f_utest[:, 1] / bags\n true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0]\n u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0]\n N_ptest = X_P[ptest].shape[0]\n N_utest = X_U[utest].shape[0]\n p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001\n recall = true_pos / N_ptest\n score = recall ** 2 / p_pred_pos\n scores.append(score)\n tprs.append(recall)\n prob_P[(ptest, idsp)] = predict_ptest\n prob_U[(utrain, idsp)] = predict_utrain\n prob_U[(utest, idsp)] = predict_utest\n idsp += 1\n if (idsp + 1) % splits == 0:\n tpr_tmp = np.asarray(tprs[-splits - 1:-1])\n print('Performed Repeated ' + str(splits) + '-fold: ' + str(idsp // splits + 1) + ' out of ' + str(repeats))\n print('True Positive Rate: %0.2f (+/- %0.2f)' % (tpr_tmp.mean(), tpr_tmp.std() * 2))\n\n label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int)\n label_U[:, :splits * repeats][np.where(prob_U > 0.5)] = 1\n label_U[:, splits * repeats] = np.sum((label_U[:, :splits * repeats + 1]), axis=1)\n tprs = np.asarray(tprs)\n scores = np.asarray(scores)\n label_U_rp = np.zeros(shape=(X_U.shape[0], repeats), dtype=int)\n prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats))\n feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats))\n tpr_rp = np.zeros(shape=(repeats,))\n scores_rp = np.zeros(shape=(repeats,))\n labels = np.zeros(shape=(X_U.shape[0],))\n for i in range(repeats):\n prob_U_rp[:, i] = 
prob_U[:, i * splits:(i + 1) * splits].mean(axis=1)\n feat_rank_rp[:, i] = feat_rank[:, i * splits:(i + 1) * splits].mean(axis=1)\n tpr_rp[i] = tprs[i * splits:(i + 1) * splits].mean()\n scores_rp[i] = scores[i * splits:(i + 1) * splits].mean()\n\n label_U_rp[np.where(prob_U_rp > 0.5)] = 1\n prob = prob_U_rp.mean(axis=1)\n labels[np.where(prob > 0.5)] = 1\n tpr_low, tpr_up = self.bootstrapCI(tpr_rp)\n scores_low, scores_up = self.bootstrapCI(scores_rp)\n metrics = np.asarray([tpr_rp.mean(), tpr_low, tpr_up,\n scores_rp.mean(), scores_low, scores_up])\n print('Accuracy: %0.2f' % tpr_rp.mean())\n print('95%% confidence interval: [%0.2f, %0.2f]' % (tpr_low, tpr_up))\n pu_stats = {'prob':prob, \n 'labels':labels, 'metrics':metrics, 'prob_rp':prob_U_rp, \n 'label_rp':label_U_rp, 'tpr_rp':tpr_rp, \n 'scores_rp':scores_rp, 'feat_rank_rp':feat_rank_rp}\n if filename:\n if filename.endswith('.json'):\n dumpfn(pu_stats, filename)\n if filename.endswith('.pkl'):\n with open(filename, 'wb') as (file):\n pickle.dump(pu_stats, file, protocol=(pickle.HIGHEST_PROTOCOL))\n self.pu_stats = pu_stats\n return pu_stats\n\n def bootstrapCI(self, data, ci=95, ns=10000):\n \"\"\"Compute confidence interval of the TPR.\n\n Args:\n data (array): Array of TPRs for each kfold.\n ci (int): Confidence interval.\n ns (int): Number of bootstrap resamplings.\n\n Returns:\n lower (float): Lower endpoint of CI.\n upper (float): Upper endpoint of CI.\n \n \"\"\"\n bs_rsample = []\n for _ in range(ns):\n rsample = resample(data, n_samples=(len(data)))\n bs_rsample.append(np.mean(rsample))\n\n bs_rsample = np.asarray(bs_rsample)\n lower = np.percentile(bs_rsample, (100 - ci) / 2)\n upper = np.percentile(bs_rsample, ci + (100 - ci) / 2)\n return (\n lower, upper)\n\n def corr_heatmap(self, pu_stats, num_feats=10, fname=''):\n \"\"\"Plot correlation matrix between synthesizability and features.\n\n cv_baggingDT must be run first.\n\n Args:\n pu_stats (dict): Output from cv_baggingDT.\n num_feats (int): How many features to consider.\n fname (str): Filename if correlation plot should be saved.\n\n Returns:\n None (generates plots)\n\n \"\"\"\n df_U = self.df_U\n df_U_copy = df_U.drop(columns=['PU_label'])\n synth_scores = self.pu_stats['prob']\n df_U_copy['synth_score'] = synth_scores\n corrmat = df_U_copy.corr()\n cols = corrmat.nlargest(num_feats, 'synth_score')['synth_score'].index\n cm = np.corrcoef(df_U_copy[cols].values.T)\n fig, ax = plt.subplots(1, 1)\n hm = sns.heatmap(cm, ax=ax, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 7}, yticklabels=(cols.values), xticklabels=(cols.values))\n if fname:\n self.save_plot(fname + '.png', fig, ax)\n\n def get_feat_importances(self, pu_stats, plot_format=''):\n \"\"\"Process output from PU learning k-fold cross validation.\n\n cv_baggingDT must be run first.\n\n If plot_format is specified, a feature importance plot will\n be saved.\n\n Args:\n pu_stats (dict): Output from PULearner.cv_baggingDT\n plot_format (str): svg, png, or pdf file format for saving simple visualizations of feature importance and correlation. 
\n\n \"\"\"\n feat_rank_rp = pu_stats['feat_rank_rp']\n feat_importances = np.sum(feat_rank_rp, axis=1)\n df_U = self.df_U\n df_U = df_U._get_numeric_data()\n df_U_copy = df_U.drop(columns=['PU_label'])\n feat_names = df_U_copy.columns\n df_feat = pd.DataFrame(columns=['feature', 'importance'])\n df_feat['feature'] = feat_names\n df_feat['importance'] = feat_importances\n df_feat_sort = df_feat.sort_values(by='importance', ascending=False)\n max_value = df_feat['importance'].max()\n df_feat_sort['importance'] = df_feat_sort['importance'] / max_value\n self.feat_importances = df_feat\n if plot_format in ('svg', 'pdf', 'png'):\n fig, ax = plt.subplots(figsize=(10, 4))\n with sns.axes_style(style='ticks'):\n sns.barplot(x='feature', y='importance', data=df_feat_sort)\n ax.set_xticklabels((ax.get_xticklabels()), rotation=45, ha='right', fontsize=7)\n filename = 'feat_importance.' + plot_format\n self.save_plot(filename, fig, ax)\n\n @staticmethod\n def _process_pu_data(data):\n \"\"\"Utility method for processing input data.\n\n Args:\n data (DataFrame): Data with positive and unlabeled samples.\n\n Returns:\n X_P (array): Positive sample set.\n X_U (array): Unlabeled sample set.\n\n \"\"\"\n df_P = data.query('PU_label == 1')\n df_U = data.query('PU_label == 0')\n X_P = np.asarray(df_P._get_numeric_data())[:, :-1]\n X_U = np.asarray(df_U._get_numeric_data())[:, :-1]\n return (\n df_P, df_U, X_P, X_U)\n\n @staticmethod\n def save_plot(filename, fig, ax):\n \"\"\"Utility method for saving simple visualizations.\n\n Args:\n filename (str): Name ending in .svg, .png, or .pdf\n fig, ax (objects): Matplotlib objects.\n\n Returns:\n None\n\n \"\"\"\n sns.set_style('ticks')\n fig.tight_layout()\n fig.savefig(filename)\n\n\nclass PUInteract:\n\n def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()):\n \"\"\"Consider parent and child phase PU learning scores.\n\n This class looks at PU learning scores for parent bulk\n compounds (e.g. layered h-BN) and scores of the child phases\n along with descriptors like exfoliation energy and changes\n in structural/electronic properties to predict (parent, child)\n pairs that can be synthesized.\n\n Parent and child must be linked by a column that allows the dataframes to be merged. There should also be additional features that characterize the structural and chemical differences between parents and children, e.g. changes in bond lengths, etc.\n\n Unsupervised clustering models are used to identify synthesizable (parent/child) pairs.\n\n Args:\n df_parent (str): Parent data filename.\n pu_parent (dict): Output from PULearner.cv_baggingDT.\n df_child (str): Child data filename.\n pu_child (dict): Output from PULearner.cv_baggingDT.\n merge_on (tuple): Column name(s) on which to merge.\n feats (tuple): Column names to use as features. If empty, use all possible columns. 
\n\n Attributes:\n merged_df (DataFrame): (Parent, child) pair data.\n X (array): Array representation of merged_df.\n\n Returns:\n None\n\n \"\"\"\n df_parent = pd.read_json(df_parent)\n df_child = pd.read_json(df_child)\n df_parent['synth_score'] = 1\n df_child['synth_score'] = 1\n df_parent.loc[(df_parent.eval('PU_label == 0'), 'synth_score')] = pu_parent['prob']\n df_child.loc[(df_child.eval('PU_label == 0'), 'synth_score')] = pu_child['prob']\n merge_on = list(merge_on)\n df = pd.merge(df_parent, df_child, on=merge_on, how='outer', suffixes=['_p', '_c'])\n df.drop(columns=['PU_label_p', 'PU_label_c'], inplace=True, axis=1)\n if feats:\n feat_names = [f + '_p' for f in feats] + [f + '_c' for f in feats]\n df = df[feat_names]\n self.merged_df = df\n self.X = np.array(df)\n\n def do_kmeans(self, n_clusters=2, seed=42):\n \"\"\"Do k-means clustering on (parent, child) pairs.\n\n Args:\n n_clusters (int): Number of clusters.\n seed (int): Fix random seed for kmeans reproducibility.\n\n Returns:\n kmeans_output (dict): kmeans cluster centers, cluster labels for each (parent, child)\n\n \"\"\"\n np.random.seed(seed)\n km = KMeans(n_clusters=n_clusters, random_state=seed)\n km.fit(self.X)\n kmeans_output = {'cluster_centers':km.cluster_centers_, 'cluster_labels':km.labels_}\n return kmeans_output\n\n def do_gmixture(self, n_components=2, seed=42):\n \"\"\"\n Estimate parameters of a Gaussian mixture distribution of (parent, child) data.\n\n Args:\n n_components (int): Number of components in GMM.\n seed (int): Random seed.\n\n Returns:\n gmm_output (dict): Predicted labels of (parent, child) pairs and predicted posterior probabilities of each component.\n\n \"\"\"\n np.random.seed(seed)\n gmm = GaussianMixture(n_components=n_components, random_state=seed, covariance_type='full')\n gmm.fit(self.X)\n gmm_labels = gmm.predict(self.X)\n gmm_prob = gmm.predict_proba(self.X)[:, 0]\n gmm_output = {'gmm_labels':gmm_labels, 'gmm_prob':gmm_prob}\n return gmm_output\n\n def do_bgm(self, n_components=6, seed=42):\n \"\"\"Bayesian Gaussian Mixture.\n\n Infer the effective number of components in a Gaussian Mixture Model via variational Bayesian estimation.\n\n n_effective_componenents < n_components if the model sets some weights close to 0.\n\n Args:\n n_components (int): Number of components in GMM.\n seed (int): Random seed.\n\n Returns:\n bgm_output (dict): Labels and probabilities.\n\n \"\"\"\n np.random.seed(seed)\n bgm = BayesianGaussianMixture(n_components=n_components, covariance_type='full', weight_concentration_prior=0.01, weight_concentration_prior_type='dirichlet_process', mean_precision_prior=0.01, init_params='random', max_iter=100, random_state=seed)\n bgm.fit(self.X)\n bgm_labels = bgm.predict(self.X)\n bgm_prob = bgm.predict_proba(self.X)[:, 0]\n bgm_output = {'bgm_labels':bgm_labels, \n 'bgm_prob':bgm_prob}\n return bgm_output","sub_path":"pycfiles/pumml-0.0.1-py3.6/learners.cpython-36.py","file_name":"learners.cpython-36.py","file_ext":"py","file_size_in_byte":17105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"474567717","text":"from rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\nfrom django.conf import settings\n\nfrom .models import Order, OrderDetail\nfrom couriers.models import Courier\nimport core.validators as validator\n\n\nclass OrderCreateSerializer(serializers.ModelSerializer):\n\t\"\"\"Добавление заказа\"\"\"\n\n\torder_id = 
serializers.IntegerField(\n\t\tlabel='Идентификатор заказа',\n\t\tmin_value=0,\n\t\tmax_value=2147483647,\n\t\tvalidators=[UniqueValidator(queryset=Order.objects.all())]\n\t)\n\tweight = serializers.FloatField(\n\t\tlabel='Вес заказа',\n\t\tmin_value=0.01,\n\t\tmax_value=50\n\t)\n\tregion = serializers.IntegerField(\n\t\tlabel='Район доставки заказа',\n\t\tmin_value=0,\n\t\tmax_value=2147483647\n\t)\n\tdelivery_hours = serializers.ListField(\n\t\tlabel='Промежутки приёма заказов',\n\t\tallow_empty=False,\n\t\tchild=serializers.CharField(label='Промежуток приёма заказа', max_length=11),\n\t\tvalidators=[validator.ValidationTimeFormat()]\n\t)\n\n\tclass Meta:\n\t\tmodel = Order\n\t\texclude = ('status', )\n\t\tvalidators = [validator.ValidationFields()]\n\n\nclass OrderAssignSerializer(serializers.ModelSerializer):\n\t\"\"\"Формирование группы заказов для курьера\"\"\"\n\n\tcourier_id = serializers.IntegerField(\n\t\tlabel='Идентификатор курьера',\n\t\tmin_value=0,\n\t\tmax_value=2147483647,\n\t\tvalidators=[validator.ValidationCourierID(Courier)]\n\t)\n\n\tclass Meta:\n\t\tmodel = Courier\n\t\tfields = ('courier_id', )\n\t\tvalidators = [validator.ValidationEmpty(), validator.ValidationFields()]\n\n\nclass OrderCompleteSerializer(serializers.Serializer):\n\t\"\"\"Завершение заказа\"\"\"\n\n\tcourier_id = serializers.IntegerField(\n\t\tlabel='Идентификатор курьера',\n\t\tmin_value=0,\n\t\tmax_value=2147483647,\n\t\tvalidators=[validator.ValidationCourierID(Courier)]\n\t)\n\torder_id = serializers.IntegerField(\n\t\tlabel='Идентификатор заказа',\n\t\tmin_value=0,\n\t\tmax_value=2147483647,\n\t\tvalidators=[validator.ValidationOrderID(Order)]\n\t)\n\tcomplete_time = serializers.DateTimeField(\n\t\tformat=settings.TIME_FORMAT\n\t)\n\n\tclass Meta:\n\t\tfields = ('courier_id', 'order_id', 'complete_time')\n\t\tvalidators = [\n\t\t\tvalidator.ValidationEmpty(),\n\t\t\tvalidator.ValidationFields(),\n\t\t\tvalidator.ValidationOrderBelongsCourier(Courier, Order, OrderDetail)\n\t\t]\n","sub_path":"orders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"423909145","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimg_file = \"D://UDEMY//Video analytics using OpenCV//Image thresholding//sample.jpg\"\r\n\r\nimg = cv2.imread(img_file,0)\r\n\r\n#global thresholding\r\nret1,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\r\n\r\n#otsu's thresholding\r\nret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\r\n#otsu's thresholding after gaussian filtering\r\nblur = cv2.GaussianBlur(img,(5,5),0)\r\nret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\r\n#plot all the images and their histograms\r\nimages = [img,0,th1,\r\n\timg,0,th2,\r\n\tblur,0,th3]\r\n\r\ntitles = ['orig noisy img','histogram','global thresholding(v=127)','orig noisy img','histogram',\"otsu's thresholding\",'gaussian filtered img','histogram',\"otsu's thresholding\"]\r\n\r\nfor i in range(3):\r\n\tplt.subplot(3,3,i*3+1)\r\n\tplt.imshow(images[i*3],'gray')\r\n\tplt.title(titles[i*3])\r\n\tplt.xticks([]),plt.yticks([])\r\n\r\n\tplt.subplot(3,3,i*3+2)\r\n\tplt.hist(images[i*3].ravel(),256)\r\n\tplt.title(titles[i*3+1])\r\n\tplt.xticks([]),plt.yticks([])\r\n\r\n\tplt.subplot(3,3,i*3+3)\r\n\tplt.imshow(images[i*3+2],'gray')\r\n\tplt.title(titles[i*3+2])\r\n\tplt.xticks([]),plt.yticks([])\r\nplt.show()","sub_path":"Image 
Thresholding/Otsu's binarization.py","file_name":"Otsu's binarization.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"324584658","text":"def sum_int(n):\n assert n >= 0 and int(n) == n, \"The entered input is not as expected\"\n if n in [0, 1]:\n return n\n else:\n return n + sum_int(n-1)\n\ndef sum_of_digits(n):\n \"\"\"f(n) = n%10 + f(n/10)\"\"\"\n assert n >= 0 and int(n) == n, \"The entered input is not as expected\"\n if n == 0:\n return 0\n else:\n return int(n%10)+sumOfDigits(int(n/10))\n\nprint(sum_int(10))\nprint(sum_of_digits(10))","sub_path":"Recursion/sumIntegers.py","file_name":"sumIntegers.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"636638328","text":"import telebot, time, random\ntoken = 'Your token'\nbot = telebot.TeleBot(token)\nlastboot = time.asctime()\n\nmon_lessons = ['''1.История\n2.ОБЖ\n3.Геометрия\n4.Геометрия\n5.Физика\n6.Технопредпринимательство/Мехатроника. \n[3D прототипирование(пара)]''']\n\ntue_lessons = ['''1.Алгебра\n2.Алгебра\n3.Химия\n4.География\n5.География\n6.Русский язык\n''']\nwed_lessons = ['''1.Физ-ра\n2.Инженерная графика\n3.Англ.язык\n4.Рус.яз\n5.История\n6.Лит-ра\n7.Биология\n\nИЗ по инженерной графике\n''']\nthu_lessons = ['''1.География\n2.Физика\n3.Обществознание\n4.Рус.яз\n5.Литература\n6.Англ.яз\n7.Алгебра\n''']\n\nfri_lessons = ['''1.Алгебра\n2.Алгебра\n3.Биология\n4.Физ-ра\n5.ИЗО\n6.Технопредпринимательство(2 группа)/Робототехника\n\nИЗ по инженерной графике\n''']\n\nsat_lessons = ['''1.Физика(ол.задачи)\n2.Физика\n3.Геометрия\n4.Физ-ра\n5.Химия\n''']\nprint(\"initialised\")\n@bot.message_handler(commands=['start'])\ndef cmd_start(message):\n\tbot.send_message(message.chat.id, \"Hello\")\n@bot.message_handler(commands=['sendsticker'])\ndef cmd_sendsticker(message):\n\tstickers = [\"BQADAgADpQkAApI2owt3UnXTSkF3uwI\", \"BQADAgADswsAApI2owtg1HAfAvgjIgI\", \"BQADAgADTQADyJsDAAG6DcSDcxpKBAI\", \"BQADAgAD-QEAAtT-vgjZ7A86uU0H9gI\", \"BQADAgADZQADyJsDAAEoe0wLx8pmqAI\", \"BQADAgADaQADyJsDAAG3NoF1cpmzhQI\", \"BQADAgADOAADyIsGAAE7re09I3hMQwI\"]\n\tbot.send_sticker(message.chat.id, stickers[random.randint(0, 6)])\n@bot.message_handler(commands=['help'])\ndef cmd_help(message):\n\tbot.send_message(message.chat.id, \"/time - Local time. /lastboot - last boot time. 
/sendsticker - send random sticker, /mondaylessons, /tuesdaylessons, /wednesdaylessons, /thursdaylessons, /fridaylessons, /saturdaylessons\")\n@bot.message_handler(commands=['lastboot'])\t\ndef cmd_lastboot(message):\n\tbot.send_message(message.chat.id, lastboot)\n@bot.message_handler(commands=['time'])\ndef cmd_time(message):\n\tbot.send_message(message.chat.id, time.asctime())\n@bot.message_handler(commands=['mondaylessons'])\ndef cmd_monlessons(message):\n\tbot.send_message(message.chat.id, mon_lessons)\n@bot.message_handler(commands=['tuesdaylessons'])\ndef cmd_tuelessons(message):\n\tbot.send_message(message.chat.id, tue_lessons)\n@bot.message_handler(commands=['wednesdaylessons'])\ndef cmd_wedlessons(message):\n\tbot.send_message(message.chat.id, wed_lessons)\n@bot.message_handler(commands=['thursdaylessons'])\ndef cmd_thulessons(message):\n\tbot.send_message(message.chat.id, thu_lessons)\n@bot.message_handler(commands=['frilessons'])\ndef cmd_frilessons(message):\n\tbot.send_message(message.chat.id, fri_lessons)\n@bot.message_handler(commands=['satlessons'])\ndef cmd_satlessons(message):\n\tbot.send_message(message.chat.id, sat_lessons)\nif __name__ == '__main__':\n\tbot.polling(none_stop=True)\n","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"438566971","text":"import os, sys\n\nPROJECT_DIR = '/var/www/Parser/Parser'\nsys.path.insert(0, '/var/www/Parser/Parser')\n\ndef execfile(filename):\n globals = dict(__file__ = filename)\n exec( open(filename).read(), globals )\n\nactivate_this = os.path.join( '/home/ubuntu-0898795/', 'venv3/bin', 'activate_this.py' )\nexecfile( activate_this )\n\nfrom routes import APP as application\n","sub_path":"parser.wsgi","file_name":"parser.wsgi","file_ext":"wsgi","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"357371876","text":"import numpy\n\ntry:\n import matplotlib.pyplot as pypl\n plotting = True\nexcept:\n plotting = False\n\nimport os,time\nthis_dir = os.path.dirname(os.path.realpath(__file__))\n\nimport condor\n\nimport logging\nlogger = logging.getLogger(\"condor\")\n#logger.setLevel(\"DEBUG\")\nlogger.setLevel(\"WARNING\")\n#logger.setLevel(\"INFO\")\n\nN = 1\nrotation_formalism=\"random\"\nrotation_values = None\n\n# Source\nsrc = condor.Source(wavelength=1E-10, pulse_energy=1E-3, focus_diameter=1001E-9)\n# Detector\ndet = condor.Detector(distance=0.2, pixel_size=800E-6, nx=250, ny=250)\n# Map\n#print(\"Simulating map\")\npar = condor.ParticleAtoms(pdb_filename=\"%s/../../DNA.pdb\" % this_dir,\n rotation_formalism=rotation_formalism, rotation_values=rotation_values)\ns = \"particle_atoms\"\nE = condor.Experiment(src, {s : par}, det)\n\nW = condor.utils.cxiwriter.CXIWriter(\"./condor.cxi\")\nfor i in range(N):\n t = time.time()\n res = E.propagate()\n #print(time.time()-t)\n if plotting:\n real_space = numpy.fft.fftshift(numpy.fft.ifftn(res[\"entry_1\"][\"data_1\"][\"data_fourier\"]))\n pypl.imsave(this_dir + \"/%i.png\" % (i), numpy.log10(res[\"entry_1\"][\"data_1\"][\"data\"]))\n pypl.imsave(this_dir + \"/%i_rs.png\" % (i), abs(real_space))\n W.write(res)\nW.close()\n\n","sub_path":"examples/scripts/pdb/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"585176730","text":"__author__ = 'Marvin 
Smith'\n\n\n# Python Libraries\nimport logging, curses\n\n\n# Project Libraries\nfrom .CLI_Window_Base import CLI_Window_Base\nfrom .DatabaseManagerWindow import DatabaseManagerWindow\n\n\n#-------------------------------------------#\n#- Command-Line-Interface -#\n#-------------------------------------------#\nclass CLI_Interface(CLI_Window_Base):\n\n\n #-------------------------#\n #- Constructor -#\n #-------------------------#\n def __init__(self,options):\n\n # Construct Parent\n CLI_Window_Base.__init__(self, window_title='Cost-Tracker')\n\n\n #--------------------------#\n #- Render the CLI -#\n #--------------------------#\n def Render(self):\n\n # clear the screen\n self.screen.clear()\n\n # Print the main menu\n self.screen.addstr(0, 0, self.window_title)\n self.screen.addstr(1, 0, '----------------------')\n self.screen.addstr(2, 0, 'q. Quit ' + self.window_title)\n self.screen.addstr(3, 0, 'i. Import CSV File.')\n self.screen.addstr(4, 0, 's. Database manager.')\n self.screen.addstr(5, 0, 'option:')\n self.screen.refresh()\n\n\n #--------------------------------------#\n #- Process Keyboard Input -#\n #--------------------------------------#\n def Process_Keyboard_Input(self, input):\n\n # Check if we need to quit\n if input == ord('q'):\n self.is_running = False\n\n # Check if we need to show the database manager\n elif input == ord('s'):\n self.database_manager = DatabaseManagerWindow().Run(stdscr=self.screen, database_manager=self.database_manager)\n\n\n#---------------------------------------------#\n#- Start the Command-Line Interface -#\n#---------------------------------------------#\ndef Start_CLI(options, database_manager):\n\n # Command-Line Initialization\n logging.debug('Starting CLI')\n\n # Create CLI\n cli = CLI_Interface(options)\n\n # Run\n curses.wrapper(cli.Run, database_manager, init_curses=True)\n\n","sub_path":"cost_tracker/cli/cli_main.py","file_name":"cli_main.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"305807093","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport time\n\nORDER_DIR = 'static/orders'\n\n\ndef create_date_dir(date_str):\n \"\"\"\n => datestr dmY\n <= dirstring\n checks if desired dir exists, creates it if it doesn't \n \"\"\"\n target_path = os.path.join(ORDER_DIR, date_str)\n if not os.path.exists(target_path): os.makedirs(target_path)\n return target_path\n\n\ndef clean_ORDER_DIR(cur_date_str):\n \"\"\"\n => datestr dmY\n calls dir-creation for files of each date encountered in the order-ORDER_DIR,\n moves files to the respective dirs\n \"\"\"\n for order in os.listdir(ORDER_DIR):\n if 'order' in order:\n order_date = order[0:8]\n source_file = os.path.join(ORDER_DIR, order)\n target_path = create_date_dir(order_date)\n target_file = os.path.join(target_path, order)\n os.rename(source_file, target_file)\n \n\ndef get_orders(target_date=None):\n \"\"\"\n => optional: datestr dmY (defaults to current date)\n <= list of order-jsons\n looks for order-dir of the current date,\n loads jsons from determined dir,\n \"\"\"\n order_content, d = [], {}\n cur_date_str = time.strftime('%d%m%Y')\n clean_ORDER_DIR(cur_date_str)\n\n if not target_date: source_dir = os.path.join(ORDER_DIR, cur_date_str)\n else: source_dir = os.path.join(ORDER_DIR, target_date)\n\n if os.path.exists(source_dir): \n for order in sorted(os.listdir(source_dir)):\n if 'order' in order:\n source_file = os.path.join(source_dir, order)\n 
order_content.append(json.load(open(source_file,'r')))\n    else: return False\n\n    return(order_content)\n\n\ndef build_orders(date_query=None):\n    \"\"\"\n    => optional: datestr dmY (defaults to current date)\n    <= orders of the day as list of html-formatted lines\n    determines desired orders, requests them (get_orders),\n    builds list of orderpage-htmllines: title, parsed order-jsons and total sum\n    \"\"\"\n    if not os.path.exists(ORDER_DIR): os.makedirs(ORDER_DIR)\n    if date_query:\n        orders = get_orders(date_query)\n        if not orders: return False\n        order_date = date_query[0:2]+'.'+date_query[2:4]+'.'+date_query[4:]\n    else:\n        orders = get_orders()\n        order_date = time.strftime('%d.%m.%Y')\n\n    title = 'Bestellung vom ' + order_date + ':'\n    content = [title, '']\n    price = 0\n\n    if not orders: return False\n    print(orders)\n\n    for d in orders:\n        content.append(''+d['name']+': '+d['store']+', total: '+d['price']+'')\n        d['product'] = d['product'].replace(')', ') ')\n        content.append(d['product'])\n        content.append(d['comment'])\n        content.append('')\n        price += float(d['price'][:-1])\n\n    total_sum = str(price)+'0€'\n    content.append('Preis insgesamt: '+total_sum+'
')\n\n for x in content: x = x.encode('utf-8')\n\n return(content)\n","sub_path":"app/speisekarte_orders.py","file_name":"speisekarte_orders.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"88265898","text":"try:\n from chronostar._likelihood import lnprob_func_gradient_descent\nexcept ImportError:\n print(\"C IMPLEMENTATION OF lnprob_func_gradient_descent NOT IMPORTED\")\n USE_C_IMPLEMENTATION = False\n TODO = True # NOW WHAT?\n\nfrom chronostar import likelihood\nfrom chronostar import traceorbit\n\nimport numpy as np\nimport time\nimport scipy.optimize\n\nimport pickle\nwith open('data_for_testing/lnprob_func_gradient_descent_10.pkl', 'rb') as h:\n d = pickle.load(h)\n\ndata=d[0]\nmemb_probs=d[1]\ninit_pos=d[2]\n\n# data['means']: [nstars, 6]\n# data['covs']: [nstars, 6, 6]\n# data['bg_lnols']: [nstars, 1]\n# memb_probs: [nstars, 1]\n\n# Construct an array that contains all the data needed for lnprob\n# THIS IS SOMETHING THAT NEEDS TO BE DONE ONCE AT THE BEGINNING, + add memb_probs at the end each time\na = []\nmemb_threshold=1e-5\nnearby_star_mask = np.where(memb_probs > memb_threshold)\n#~ for i in range(len(memb_probs)):\nfor i in nearby_star_mask[0]:\n tmp = np.hstack((data['means'][i], data['covs'][i].flatten(), memb_probs[i]))\n a.append(tmp)\na=np.array(a)\n\nstart = time.time()\nresultC = lnprob_func_gradient_descent(init_pos, a)\ndurationC = time.time()-start\nprint('result C', resultC)\n#~ print('Duration C:', durationC)\n\nstart = time.time()\ntrace_orbit_func=traceorbit.trace_epicyclic_orbit\noptimisation_method='Nelder-Mead'\nfrom chronostar.component import SphereComponent\nComponent = SphereComponent\n#~ args = [data, memb_probs, trace_orbit_func, optimisation_method] # likelihood2\n#~ resultP = likelihood.lnprob_func_gradient_descent(init_pos, args, \n #~ memb_probs=memb_probs, \n #~ trace_orbit_func=traceorbit.trace_epicyclic_orbit)\nargsP = [data, memb_probs, trace_orbit_func, Component]\nresultP = likelihood.lnprob_func_gradient_descent(init_pos, argsP)\ndurationP = time.time()-start\nprint('result P', resultP)\n#~ print('Duration P:', durationP)\n\nprint('likelihood evaluation: DurationP / DurationC', durationP/durationC)\n\n# I don't really need to test minimisation but just the function evaluation.\nconvergence_tol=1\noptimisation_method = 'Nelder-Mead'\n\n# C\nstart = time.time()\nresult = scipy.optimize.minimize(lnprob_func_gradient_descent, init_pos, \n args=a, tol=convergence_tol, method=optimisation_method)\ndurationC = time.time()-start\nprint('Result C')\nprint(result.x)\n\n\n# Python\nstart = time.time()\nresult2 = scipy.optimize.minimize(\n likelihood.lnprob_func_gradient_descent, init_pos, \n args=argsP, tol=convergence_tol, method=optimisation_method)\ndurationP = time.time()-start\nprint('Result P')\nprint(result2.x)\n\nprint('MINIMIZE: DurationP / DurationC', durationP/durationC)\n","sub_path":"fastfit/test_likelihood.py","file_name":"test_likelihood.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"192968847","text":"fname = '/data3/XJ_SAC/header/station.dat'\nfout = 'xj.sta'\nf=open(fname); lines=f.readlines(); f.close()\nout=open(fout,'w')\nchn='HHZ'\nfor line in lines:\n net, sta, lon, lat, ele = line.split('\\t')\n lon = float(lon)\n lat = float(lat)\n ele = int(ele)\n lat_int = int(lat)\n lat_flt = 60*(lat-int(lat))\n lon_int = int(lon)\n lon_flt = 
60*(lon-int(lon))\n out.write(\"{:<5} {} {} {} {:7.4f}N{} {:7.4f}E{:4}0.2 0.00 0.00 0.00 0.00 3 0.00--HHZ \\n\"\\\n .format(sta, net[-2:], chn, lat_int, lat_flt, lon_int, lon_flt, ele))\nout.close()\n","sub_path":"hypoinverse/mk_sta.py","file_name":"mk_sta.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"227807276","text":"from datetime import date\r\nimport datetime\r\n\r\nclass Registro:\r\n def __init__(self):\r\n self.fecha = \"\"\r\n self.imc = \"\"\r\n self.peso = \"\"\r\n self.altura = \"\"\r\n\r\nclass Persona:\r\n\r\n def __init__(self):\r\n self.rut = \"\"\r\n self.nombre = \"\"\r\n self.apellido = \"\"\r\n self.fecha_nacimiento = \"\"\r\n self.sexo = \"\"\r\n self.actividad = \"\"\r\n lista_registros = []\r\n self.registros = lista_registros\r\n\r\n def Calcular(self, fecha_registro, peso, altura):\r\n # calculo para el imc\r\n try:\r\n imc = peso / (altura * altura)\r\n registro = Registro()\r\n registro.imc = imc\r\n registro.fecha = fecha_registro\r\n self.registros.append(registro)\r\n print(\"El imc calculado fue:\", imc)\r\n except:\r\n print(\"Dato invalido para realizar el calculo...\")\r\n\r\n def listar_imc_por_sexo(self):\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha,\" \")\r\n if self.sexo == \"M\":\r\n if registro.imc < 20:\r\n print(\"Bajo Peso\\n\")\r\n elif 20 <= registro.imc < 25:\r\n print(\"Normal\\n\")\r\n elif 25 <= registro.imc < 30:\r\n print(\"Obesidad Leve\\n\")\r\n elif 30 <= registro.imc < 41:\r\n print(\"Obesidad Severa\\n\")\r\n elif 41 <= registro.imc:\r\n print(\"Obesidad Muy Severa\\n\")\r\n elif self.sexo == \"F\":\r\n if registro.imc < 20:\r\n print(\"Bajo Peso\\n\")\r\n elif 20 <= registro.imc < 24:\r\n print(\"Normal\\n\")\r\n elif 24 <= registro.imc < 29:\r\n print(\"Obesidad Leve\\n\")\r\n elif 29 <= registro.imc < 38:\r\n print(\"Obesidad Severa\\n\")\r\n elif 38 <= registro.imc:\r\n print(\"Obesidad Severa\\n\")\r\n\r\n def listar_imc_por_edad_sexo(self):\r\n edad = self.calculateAge()\r\n\r\n if 19 <= edad <= 24:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 18.9 <= registro.imc <= 22.1:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 25:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 29.6:\r\n print(\"SobrePeso\\n\")\r\n elif 29.6 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 10.8 <= registro.imc <= 14.9:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 19:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 23.3:\r\n print(\"SobrePeso\\n\")\r\n elif 23.3 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 25 <= edad <= 29:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 18.9 <= registro.imc <= 22:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 25.4:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 29.8:\r\n print(\"SobrePeso\\n\")\r\n elif 29.8 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 12.8 <= registro.imc <= 16.5:\r\n print(\"Óptimo\\n\")\r\n elif 
registro.imc <= 20.3:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 24.4:\r\n print(\"SobrePeso\\n\")\r\n elif 24.4 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 30 <= edad <= 34:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 19.7 <= registro.imc <= 22.7:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 26.4:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 30.5:\r\n print(\"SobrePeso\\n\")\r\n elif 30.5 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 14.5 <= registro.imc <= 18:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 21.5:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 25.2:\r\n print(\"SobrePeso\\n\")\r\n elif 25.2 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 35 <= edad <= 39:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 21 <= registro.imc <= 22.7:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 27.7:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 31.5:\r\n print(\"SobrePeso\\n\")\r\n elif 31.5 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 16.1 <= registro.imc <= 19.4:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 22.6:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 26.1:\r\n print(\"SobrePeso\\n\")\r\n elif 26.1 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 40 <= edad <= 44:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 22.6 <= registro.imc <= 25.6:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 29.3:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 32.8:\r\n print(\"SobrePeso\\n\")\r\n elif 32.8 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 17.5 <= registro.imc <= 20.5:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 23.6:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 26.9:\r\n print(\"SobrePeso\\n\")\r\n elif 26.9 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 45 <= edad <= 49:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 24.3 <= registro.imc <= 27.3:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 30.9:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 34.1:\r\n print(\"SobrePeso\\n\")\r\n elif 34.1 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 18.6 <= registro.imc <= 21.5:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 24.5:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 27.6:\r\n print(\"SobrePeso\\n\")\r\n elif 27.6 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 50 <= edad <= 54:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 26.6 <= registro.imc <= 
29.7:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 33.1:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 36.2:\r\n print(\"SobrePeso\\n\")\r\n elif 36.2 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 19.8 <= registro.imc <= 22.7:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 25.6:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 28.7:\r\n print(\"SobrePeso\\n\")\r\n elif 28.7 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n\r\n elif 55 <= edad <= 59:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 27.4 <= registro.imc <= 30.7:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 34:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 37.3:\r\n print(\"SobrePeso\\n\")\r\n elif 33.7 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 20.2 <= registro.imc <= 23.2:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 26.2:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 29.3:\r\n print(\"SobrePeso\\n\")\r\n elif 29.3 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif 56 <= edad:\r\n if self.sexo == \"F\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 27.4 <= registro.imc <= 30.7:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 34:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 37.3:\r\n print(\"SobrePeso\\n\")\r\n elif 33.7 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n elif self.sexo == \"M\":\r\n for registro in self.registros:\r\n print(\"Para el IMC\", registro.imc, \"Con fecha\", registro.fecha, \" \")\r\n if 20.3 <= registro.imc <= 23.5:\r\n print(\"Óptimo\\n\")\r\n elif registro.imc <= 26.7:\r\n print(\"Bueno\\n\")\r\n elif registro.imc <= 29.8:\r\n print(\"SobrePeso\\n\")\r\n elif 29.8 < registro.imc :\r\n print(\"Malo (Obesidad)\\n\")\r\n return\r\n\r\n def Mostrar(self):\r\n print(\"\\n\")\r\n print(\"Los datos Ingresados son:\")\r\n print(\"nombre:\", self.nombre)\r\n print(\"apellido:\", self.apellido)\r\n print(\"sexo:\", self.sexo)\r\n print(\"es atleta/normal:\", self.actividad)\r\n\r\n def calculateAge(self):\r\n date_str = self.fecha_nacimiento # The date - 29 Dec 2017\r\n format_str = '%d/%m/%Y' # The format\r\n datetime_obj = datetime.datetime.strptime(date_str, format_str)\r\n today = date.today()\r\n age = today.year - datetime_obj.year - ((today.month, today.day) < (datetime_obj.month, datetime_obj.day))\r\n return age\r\n\r\nlista_personas = []\r\n# se define el menu\r\ndef Menu():\r\n while True:\r\n print(\"\\n\")\r\n print(\"\\tMenu\")\r\n print(\"[1] Ingresar a nueva Persona...\")\r\n print(\"[2] Ingresar nuevo registro de IMC...\")\r\n print(\"[3] Mostrar registros...\")\r\n print(\"[4] Salir...\")\r\n\r\n opcion = int(input(\"Ingrese una opcion: \"))\r\n\r\n if opcion == 1:\r\n try:\r\n persona = Persona()\r\n persona.rut = input(\"Rut: \")\r\n persona.nombre = input(\"nombre: \")\r\n persona.apellido = input(\"apellido: \")\r\n persona.fecha_nacimiento = input(\"fecha nacimiento: \")\r\n persona.sexo = input(\"sexo M o F : \")\r\n persona.actividad = input(\"es usted atleta/normal: \")\r\n\r\n lista_personas.append(persona)\r\n except:\r\n print(\"Dato 
invalido.....\")\r\n print(\"Regresando al menu principal...\")\r\n\r\n elif opcion == 2:\r\n rut_persona = input(\"Ingrese rut persona a ingresar registros: \")\r\n for persona in lista_personas:\r\n print(persona.rut)\r\n if persona.rut == rut_persona:\r\n persona.Mostrar()\r\n fecha = input(\"Ingrese fecha en el formato dd/mm/yyyy que se peso\")\r\n peso = float(input(\"Ingrese su peso en kg, ej '0.0': \"))\r\n altura = float(input(\"Ingrese su altura en m: \"))\r\n persona.Calcular(fecha, peso, altura)\r\n break\r\n\r\n elif opcion == 3:\r\n print(\"adios...\")\r\n rut_persona = input(\"Ingrese rut persona a mostrar registros: \")\r\n for persona in lista_personas:\r\n if persona.rut == rut_persona:\r\n persona.Mostrar()\r\n persona.listar_imc_por_sexo()\r\n persona.listar_imc_por_edad_sexo()\r\n break\r\n\r\n elif opcion == 4:\r\n break\r\n\r\n else:\r\n print(\"Opcion Invalida...\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # se llama al menu para desplegarlo\r\n Menu()","sub_path":"testing/IMC_POO.py","file_name":"IMC_POO.py","file_ext":"py","file_size_in_byte":15190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"43250812","text":"from app import app\nimport urllib.request,json\nfrom .models import news\n\nNews = news.News\n# Getting api key\napi_key = app.config['NEWS_API_KEY']\nbase_url = app.config[\"NEWS_API_BASE_URL\"]\n\ndef get_news():\n '''\n Function that gets the json response to our url request\n '''\n get_news_url = base_url.format(api_key)\n\n with urllib.request.urlopen(get_news_url) as url:\n get_news_data = url.read()\n get_news_response = json.loads(get_news_data)\n\n news_results = None\n\n if get_news_response['articles']:\n news_results_list = get_news_response['articles']\n news_results = process_results(news_results_list)\n\n\n return news_results\ndef process_results(news_list):\n '''\n Function that processes the movie result and transform them to a list of Objects\n\n Args:\n movie_list: A list of dictionaries that contain movie details\n\n Returns :\n movie_results: A list of movie objects\n '''\n news_results = []\n for news_item in news_list:\n id = news_item.get('id')\n title = news_item.get('title')\n description = news_item.get('description')\n url = news_item.get('url')\n urlToImage = news_item.get('urlToImage')\n publishedAt = news_item.get('publishedAt')\n content = news_item.get('content')\n\n \n news_object = News(id,title,description,url,urlToImage,publishedAt,content)\n news_results.append(news_object)\n\n return news_results","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"321394642","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals, division\nimport six\nimport operator\nfrom functools import partial\nfrom six.moves import map, reduce\nfrom armet.exceptions import ImproperlyConfigured\nfrom armet.query import parser, Query, QuerySegment, constants\nfrom armet import utils\n\n\nclass ModelResourceOptions(object):\n\n def __init__(self, meta, name, bases):\n #! 
SQLAlchemy session used to perform operations on the models.\n        self.Session = meta.get('Session')\n        if not self.Session:\n            raise ImproperlyConfigured(\n                'A session factory (via sessionmaker) is required by '\n                'the SQLAlchemy model connector.')\n\n\ndef iequal_helper(x, y):\n    # String values should use ILIKE queries.\n    if isinstance(y, six.string_types):\n        return x.ilike(y)\n    else:\n        return operator.eq(x, y)\n\n\n# Build an operator map to use for sqlalchemy.\nOPERATOR_MAP = {\n    constants.OPERATOR_EQUAL[0]: operator.eq,\n    constants.OPERATOR_IEQUAL[0]: iequal_helper,\n    constants.OPERATOR_LT[0]: operator.lt,\n    constants.OPERATOR_GT[0]: operator.gt,\n    constants.OPERATOR_LTE[0]: operator.le,\n    constants.OPERATOR_GTE[0]: operator.ge,\n}\n\n\ndef build_segment(model, segment, attr):\n    # Get the associated column for the initial path.\n    path = segment.path.pop(0)\n    col = model.__dict__[path]\n\n    # Resolve the inner-most path segment.\n    if segment.path:\n        if col.impl.accepts_scalar_loader:\n            return col.has(build_segment(\n                col.property.mapper.class_, segment, attr))\n\n        else:\n            return col.any(build_segment(\n                col.property.mapper.class_, segment, attr))\n\n    # Determine the operator.\n    op = OPERATOR_MAP[segment.operator]\n\n    # Apply the operator to the values and return the expression\n    return reduce(operator.or_,\n                  map(partial(op, col),\n                      map(attr.try_clean, segment.values)))\n\n\ndef build_clause(query, attributes, model):\n    # Iterate through each query segment.\n    clause = None\n    last = None\n    for seg in query.segments:\n        # Get the attribute in question.\n        attribute = attributes[seg.path[0]]\n\n        # Replace the initial path segment with the expanded\n        # attribute path.\n        seg.path[0:1] = attribute.path.split('.')\n\n        # Construct the clause from the segment.\n        q = build_segment(model, seg, attribute)\n\n        # Combine the segment with the last.\n        clause = last.combinator(clause, q) if last is not None else q\n        last = seg\n\n    # Return the constructed clause.\n    return clause\n\n\nclass ModelResource(object):\n    \"\"\"Specializes the RESTful model resource protocol for SQLAlchemy.\n\n    @note\n        This is not what you derive from to create resources. Import\n        ModelResource from `armet.resources` and derive from that.\n    \"\"\"\n\n    def route(self, *args, **kwargs):\n        # Establish a session.\n        self.session = session = self.meta.Session()\n\n        try:\n            # Continue on with the cycle.\n            result = utils.super(ModelResource, self).route(*args, **kwargs)\n\n            # Commit the session.\n            session.commit()\n\n            # Return the result.\n            return result\n\n        except:\n            # Something occurred; rollback the session.\n            session.rollback()\n\n            # Re-raise the exception.\n            raise\n\n        finally:\n            # Close the session.\n            session.close()\n\n    def filter(self, clause, queryset):\n        # Filter the queryset by the passed clause.\n        return queryset.filter(clause).distinct()\n\n    def count(self, queryset):\n        # Return the count of the queryset.\n        return queryset.count()\n\n    def read(self):\n        # Initialize the query to the model.\n        queryset = self.session.query(self.meta.model)\n\n        query = None\n        if self.slug is not None:\n            # This is an item-access (eg. 
GET //:slug); ignore the\n # query string and generate a query-object based on the slug.\n query = Query(segments=[QuerySegment(\n path=self.meta.slug.path.split('.'),\n operator=constants.OPERATOR_EQUAL[0],\n values=[self.slug])])\n\n elif self.request.query:\n # This is a list-access; use the query string and construct\n # a query object from it.\n query = parser.parse(self.request.query)\n\n # Determine if we need to filter the queryset in some way; and if so,\n # filter it.\n clause = None\n if query is not None:\n clause = build_clause(query, self.attributes, self.meta.model)\n queryset = self.filter(clause, queryset)\n\n # Filter the queryset by asserting authorization.\n queryset = self.meta.authorization.filter(\n self.request.user, 'read', self, queryset)\n\n # Return the queryset.\n return queryset if self.slug is None else queryset.first()\n\n def create(self, data):\n # Instantiate a new target.\n target = self.meta.model()\n\n # Iterate through all attributes and set each one.\n for name, attribute in six.iteritems(self.attributes):\n # Set each one on the target.\n value = data.get(name)\n if value is not None:\n attribute.set(target, value)\n\n # Add the target to the session.\n self.session.add(target)\n self.session.flush()\n\n # Refresh the target object to avoid inconsistencies with storage.\n self.session.expire(target)\n\n # Ensure the user is authorized to perform this action.\n authz = self.meta.authorization\n if not authz.is_authorized(self.request.user, 'create', self, target):\n authz.unauthorized()\n\n # Return the target.\n return target\n\n def update(self, target, data):\n # Iterate through all attributes and set each one.\n for name, attribute in six.iteritems(self.attributes):\n # Set each one on the target.\n attribute.set(target, data.get(name))\n\n # Flush the target and expire attributes.\n self.session.flush()\n\n # Refresh the target object to avoid inconsistencies with storage.\n self.session.expire(target)\n\n # Ensure the user is authorized to perform this action.\n authz = self.meta.authorization\n if not authz.is_authorized(self.request.user, 'update', self, target):\n authz.unauthorized()\n\n def destroy(self):\n # Grab the existing target.\n target = self.read()\n\n # Ensure the user is authorized to perform this action.\n authz = self.meta.authorization\n if not authz.is_authorized(self.request.user, 'destroy', self, target):\n authz.unauthorized()\n\n # Remove the object from the session.\n self.session.delete(target)\n","sub_path":"armet/connectors/sqlalchemy/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"569595637","text":"from time import sleep\nfrom itertools import zip_longest\nfrom string import ascii_letters, digits\nfrom json import dumps\n\nimport logging\nfrom loguru import logger\nfrom telebot import logger as bot_logger\nfrom notifiers.logging import NotificationHandler\nfrom nanoid import generate\nfrom telebot import TeleBot, util\nfrom telebot.apihelper import ApiTelegramException\nfrom telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, Update, ForceReply\n\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_GET\n\nfrom HookaDooBot.settings import LOG_IS_SET\nfrom .models import User, Search, Tobacco\nfrom .services.cfg import WEBHOOK_URL, BOT_TOKEN, NOTIFY_BOT_TOKEN, 
NOTIFY_CHAT_ID\nfrom .services.finder import start as finder_start\n\nbot = TeleBot(token=BOT_TOKEN)\n\n\nclass InterceptHandler(logging.Handler):\n def emit(self, record):\n # Get corresponding Loguru level if it exists\n try:\n level = logger.level(record.levelname).name\n except ValueError:\n level = record.levelno\n\n # Find caller from where originated the logged message\n frame, depth = logging.currentframe(), 2\n while frame.f_code.co_filename == logging.__file__:\n frame = frame.f_back\n depth += 1\n\n logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())\n\n\nif not LOG_IS_SET:\n handler = NotificationHandler(\n \"telegram\",\n defaults={\n \"token\": NOTIFY_BOT_TOKEN,\n \"chat_id\": NOTIFY_CHAT_ID\n }\n )\n logger.add(handler, level=logging.WARNING)\n\n bot_logger.setLevel(logging.WARNING)\n bot_logger.removeHandler(bot_logger.handlers[0])\n bot_logger.addHandler(InterceptHandler())\n\n logger.add('./logs/{time:YYYY-MM-DD_HH-mm-ss}.log',\n encoding='UTF-8',\n backtrace=True,\n diagnose=True,\n rotation='10 MB',\n compression='zip',\n )\n\n LOG_IS_SET = True\n\n\n@require_GET\ndef robots_txt(request):\n lines = [\n \"User-Agent: *\",\n \"Disallow: /\",\n ]\n return HttpResponse(\"\\n\".join(lines), content_type=\"text/plain\")\n\n\nclass BotUpdate(View):\n\n @staticmethod\n def get(request):\n logger.warning('Запрос на установку webhook-а.')\n logger.info('Получаем информацию о webhook-е со стороны Telegram.')\n webhook_prev_url = bot.get_webhook_info().url\n webhook_url = WEBHOOK_URL.format(\n domain=request.headers['HOST']\n if not request.headers.get('X-Original-Host') else request.headers['X-Original-Host']\n )\n sleep(1)\n logger.info('Сверяем url нашего webhook-а c url полученным от Telegram.')\n if webhook_url != webhook_prev_url:\n if webhook_prev_url:\n logger.info('Удаляем старый webhook на Telegram')\n bot.remove_webhook()\n sleep(1)\n logger.info('Устанавливаем свой webhook на Telegram.')\n res = bot.set_webhook(\n url=webhook_url,\n max_connections=100,\n )\n if res:\n logger.warning('Webhook установлен.')\n text = 'Webhook установлен. Бот запущен.'\n code = 200\n else:\n logger.warning('Webhook не удалось установить.')\n text = 'Webhook не удалось установить. Ошибка.'\n code = 500\n else:\n logger.warning('Webhook не требует обновления.')\n text = 'Webhook не требует обновления. 
Бот запущен.'\n code = 200\n return JsonResponse({\n 'code': code,\n 'message': text,\n 'webhook_url': webhook_url\n })\n\n @staticmethod\n def post(request):\n bot.process_new_updates(\n [\n Update.de_json(\n request.body.decode('UTF-8')\n )\n ]\n )\n return JsonResponse({'code': 200})\n\n\nCALLBACKS = {\n 'start_search': 'start_search',\n 'tabak': 'tabak',\n 'charcoal': 'charcoal',\n 'extra': 'extra',\n 'choose_flavor': 'choose_flavor',\n 'agree_search': 'agree_search',\n 'result': 'result',\n}\nSEARCH_ID_TEXT = '`ID поиска: {id}`\\n\\n'\n\n\ndef json_to_str(obj):\n return dumps(obj, ensure_ascii=False)\n\n\ndef split_long_text(text):\n split_text = util.split_string(text, 3000)\n res = []\n for i, n in enumerate(split_text):\n if not n.endswith('\\n'):\n n += split_text[i + 1][:split_text[i + 1].find('\\n') + 1]\n split_text[i + 1] = split_text[i + 1][split_text[i + 1].find('\\n') + 1:]\n res.append(n)\n return res\n\n\ndef delete_message(m):\n logger.info(f'[{m.chat.id}]|Удаляем сообщение.')\n bot.delete_message(\n chat_id=m.chat.id,\n message_id=m.message_id,\n )\n\n\ndef check_reply_to_message(m):\n if m.reply_to_message:\n res = True if ('ID' in m.reply_to_message.text and\n m.reply_to_message.from_user.id == 1157687780) else False\n else:\n res = False\n return res\n\n\ndef get_product_title(call):\n data = call.data.split('|')[2]\n row, col = data.split('-')\n return Tobacco.objects.get(\n title=call.message.reply_markup.keyboard[int(row)][int(col)].text\n )\n\n\ndef get_product_extra(call):\n index = int(call.data.split('|')[2])\n extra = call.message.reply_markup.keyboard[index][0].text.split()[0]\n return extra\n\n\ndef btn_start_search():\n markup = InlineKeyboardMarkup()\n markup.row_width = 1\n markup.add(\n InlineKeyboardButton('Начать поиск', callback_data=CALLBACKS['start_search']),\n )\n return markup\n\n\ndef btns_choose_product(search):\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(\n InlineKeyboardButton('Табак', callback_data=f'{CALLBACKS[\"tabak\"]}|{search.search_id}'),\n InlineKeyboardButton('Уголь', callback_data=f'{CALLBACKS[\"charcoal\"]}|{search.search_id}'),\n )\n return markup\n\n\ndef btns_choose_company(search):\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n if search.product == 'tabak':\n products = Tobacco.objects.all().order_by('title')\n elif search.product == 'charcoal':\n products = []\n else:\n products = []\n products_half_left = products[:len(products) // 2]\n products_half_right = products[len(products) // 2:]\n for i, v in enumerate(zip_longest(products_half_left, products_half_right)):\n if v[0] is not None:\n markup.add(\n InlineKeyboardButton(\n text=f'{v[0].title}',\n callback_data=f'{CALLBACKS[\"extra\"]}|{search.search_id}|{i}-0'\n ),\n InlineKeyboardButton(\n text=f'{v[1].title}',\n callback_data=f'{CALLBACKS[\"extra\"]}|{search.search_id}|{i}-1'\n ),\n )\n else:\n markup.add(\n InlineKeyboardButton(\n text=f'{v[1].title}',\n callback_data=f'{CALLBACKS[\"extra\"]}|{search.search_id}|{i}-0'\n ),\n )\n return markup\n\n\ndef btns_choose_extra(search):\n markup = InlineKeyboardMarkup()\n markup.row_width = 1\n if search.product == 'tabak':\n extras = [int(e) for e in search.company.weight.split(',') if e.isdigit()]\n elif search.product == 'charcoal':\n extras = []\n else:\n extras = []\n for i, extra in enumerate(extras):\n markup.add(\n InlineKeyboardButton(\n text=f'{extra}' + (' грамм' if extra > 10 else ' кг'),\n callback_data=f'{CALLBACKS[\"choose_flavor\"]}|{search.search_id}|{i}'\n ),\n )\n return 
markup\n\n\ndef btn_agree_search(search):\n markup = InlineKeyboardMarkup()\n markup.row_width = 1\n markup.add(\n InlineKeyboardButton(\n text=f'Поиск',\n callback_data=f'{CALLBACKS[\"result\"]}|{search.search_id}'),\n )\n return markup\n\n\n@bot.callback_query_handler(func=lambda call: CALLBACKS['start_search'] == call.data.split('|')[0])\ndef cb_start_search(call):\n logger.info(f'[{call.message.chat.id}]|\"Начать поиск\". Процесс выбора продукта.')\n search = Search.objects.create(\n search_id=generate(ascii_letters[:] + digits[:], 10),\n user=User.objects.get(uid=call.message.chat.id),\n step='product',\n )\n search.save()\n text = f'{SEARCH_ID_TEXT.format(id=search.search_id)}' \\\n f'Какой продукт ищем?'\n logger.info(f'[{call.message.chat.id}]|Отправляем сообщение с выбором продукта.')\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=text,\n reply_markup=btns_choose_product(search),\n parse_mode='MarkdownV2',\n )\n\n\n@bot.callback_query_handler(func=lambda call: CALLBACKS['tabak'] == call.data.split('|')[0])\ndef cb_tabak(call):\n logger.info(f'[{call.message.chat.id}]|Процесс выбора марки табака.')\n search = Search.objects.get(\n search_id=call.data.split('|')[1],\n )\n search.product = 'tabak'\n search.step = 'company'\n search.save()\n text = f'{SEARCH_ID_TEXT.format(id=search.search_id)}' \\\n f'Какой табак желаете найти?'\n logger.info(f'[{call.message.chat.id}]|Отправляем сообщение с выбором марки табака.')\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=text,\n reply_markup=btns_choose_company(search),\n parse_mode='MarkdownV2',\n )\n\n\n@bot.callback_query_handler(func=lambda call: CALLBACKS['charcoal'] == call.data.split('|')[0])\ndef cb_charcoal(call):\n logger.info(f'[{call.message.chat.id}]|Процесс выбора угля.')\n text = 'Данный функционал еще в разработке'\n logger.info(f'[{call.message.chat.id}]|Отправляем уведомление, что уголь еще в разработке.')\n bot.answer_callback_query(\n callback_query_id=call.id,\n text=text,\n show_alert=True,\n )\n\n\n@bot.callback_query_handler(func=lambda call: CALLBACKS['extra'] == call.data.split('|')[0])\ndef cb_extra(call):\n logger.info(f'[{call.message.chat.id}]|Процесс выбора экстры.')\n search = Search.objects.get(\n search_id=call.data.split('|')[1],\n )\n search.company = get_product_title(call)\n search.step = 'extra'\n search.save()\n if search.product == 'tabak':\n text = f'{SEARCH_ID_TEXT.format(id=search.search_id)}' \\\n f'Какой вес табака?'\n else:\n text = ''\n logger.info(f'[{call.message.chat.id}]|Отправляем сообщение с выбором экстры.')\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=text,\n reply_markup=btns_choose_extra(search),\n parse_mode='MarkdownV2',\n )\n\n\n@bot.callback_query_handler(func=lambda call: CALLBACKS['choose_flavor'] == call.data.split('|')[0])\ndef cb_choose_flavor(call):\n logger.info(f'[{call.message.chat.id}]|Процесс выбора вкуса табака.')\n search = Search.objects.get(\n search_id=call.data.split('|')[1],\n )\n search.extra = get_product_extra(call)\n search.step = 'flavor'\n search.save()\n markup_force_reply = ForceReply(selective=False)\n text = f'{SEARCH_ID_TEXT.format(id=search.search_id)}' \\\n f'{search.company} {search.extra}\\n\\n' \\\n f'Какой вкус табака найти?\\n\\n' \\\n f'_Чтобы найти все вкусы напиши \\- _*все*'\n delete_message(call.message)\n logger.info(f'[{call.message.chat.id}]|Отправляем сообщение с просьбой 
написать искомый вкус.')\n bot.send_message(\n chat_id=call.message.chat.id,\n text=text,\n parse_mode='MarkdownV2',\n reply_markup=markup_force_reply,\n )\n\n\n@bot.callback_query_handler(func=lambda call: CALLBACKS['result'] == call.data.split('|')[0])\ndef cb_result(call):\n logger.info(f'[{call.message.chat.id}]|Процесс старта поиска табака в базе сайтов.')\n text_q = \"Производим поиск по базе сайтов.\"\n logger.info(f'[{call.message.chat.id}]|Отправляем уведомление, что поиск начат.')\n try:\n bot.answer_callback_query(\n callback_query_id=call.id,\n text=text_q,\n cache_time=60,\n )\n except ApiTelegramException as e:\n logger.info(f'[{call.message.chat.id}]|Не удалось отправить уведомлениео начале поиска. Ошибка: {e}')\n finally:\n search = Search.objects.get(\n search_id=call.data.split('|')[1],\n )\n header_text = f'{SEARCH_ID_TEXT.format(id=search.search_id)}' \\\n f'Ваш запрос: {search.company} {search.flavor} {search.extra}\\n\\n'\n\n # Тупая заглушка на ошибку в процессе поиска\n # Переделай, когда будет время\n try:\n find_result_text = finder_start(\n company=search.company, flavor=search.flavor, extra=search.extra, uid=call.message.chat.id\n )\n find_error = False\n except Exception as e:\n find_result_text = 'Ошибка'\n find_error = e\n\n if find_result_text == '':\n find_result_text = '\\nПо вашему запросу табака в наличии нет'\n\n if find_error:\n logger.error(f'[{call.message.chat.id}]|Ошибка во время поиска. Ошибка: {find_error}')\n text = header_text + 'во время поиска произошла ошибка. ' \\\n 'Обратитесь, пожалуйста, к ажминистратору @vladrunk с указанием ID поиска.'\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=text,\n parse_mode='Markdown',\n disable_web_page_preview=True,\n )\n else:\n search.result = find_result_text\n search.save()\n if len(find_result_text) > 4500:\n logger.info(f'[{call.message.chat.id}]|Результат поиска длинее 4500 символов. Делим результат на части.')\n text = split_long_text(header_text + find_result_text)\n else:\n text = header_text + find_result_text\n if isinstance(text, str):\n logger.info(f'[{call.message.chat.id}]|Отправляем результат поиска.')\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=text,\n parse_mode='Markdown',\n disable_web_page_preview=True,\n )\n else:\n logger.info(f'[{call.message.chat.id}]|Отправляем результат поиска #1.')\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=text[0],\n parse_mode='Markdown',\n disable_web_page_preview=True,\n )\n for i, t in enumerate(text):\n if i == 0:\n continue\n if len(t) > 1:\n logger.info(f'[{call.message.chat.id}]|Отправляем результат поиска #{i + 1}.')\n bot.send_message(\n chat_id=call.message.chat.id,\n text=t,\n parse_mode='Markdown',\n disable_web_page_preview=True,\n )\n\n cmd_start(call.message)\n\n\n@bot.message_handler(commands=['start'])\ndef cmd_start(m):\n logger.info(f'[{m.chat.id}]|Получили команду /start.')\n _, is_new = User.objects.get_or_create(\n uid=m.chat.id,\n defaults={\n 'fname': m.chat.first_name,\n 'lname': m.chat.last_name,\n 'username': m.chat.username,\n }\n )\n\n if is_new:\n logger.warning(f'[{m.chat.id}]|Новый юзер')\n text = f'Добро пожаловать, {m.chat.first_name} \\N{grinning face}'\n else:\n logger.info(f'[{m.chat.id}]|Юзер уже есть в БД')\n text = f'{m.chat.first_name}, ещё по табачку ? 
\\N{drooling face}'\n logger.info(f'[{m.chat.id}]|Отправляем ответ на /start.')\n bot.send_message(\n chat_id=m.chat.id,\n text=text,\n reply_markup=btn_start_search(),\n )\n\n\n@bot.message_handler(func=check_reply_to_message)\ndef msg_agree_search(m):\n logger.info(f'[{m.chat.id}]|Процесс подтверждения поиска табака.')\n m_edit = m.reply_to_message\n search_id = m_edit.text.splitlines()[0].split(': ')[1]\n search = Search.objects.get(search_id=search_id)\n if search.step != 'flavor':\n delete_message(m)\n return\n search.flavor = m.text\n search.step = 'result'\n search.save()\n text = f'{SEARCH_ID_TEXT.format(id=search.search_id)}' \\\n f'Ваш запрос: {search.company} {search.flavor} {search.extra}'\n delete_message(m_edit)\n delete_message(m)\n logger.info(f'[{m.chat.id}]|Отправляем сообщение для подтверждения.')\n bot.send_message(\n chat_id=m_edit.chat.id,\n text=text,\n reply_markup=btn_agree_search(search),\n parse_mode='MarkdownV2',\n )\n\n\n@bot.message_handler(func=lambda m: True)\ndef any_msg(m):\n logger.info(f'[{m.chat.id}]|Пришло сообщение которое не обрабатывается ботом.')\n delete_message(m)\n","sub_path":"bot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"73677974","text":"# -*- coding: utf-8 -*-\n#\n# ramstk.dao.RAMSTKControl.py is part of The RAMSTK Project\n#\n# All rights reserved.\n# Copyright 2007 - 2017 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"RAMSTKControl Table Module.\"\"\"\n\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.orm import relationship\n\n# Import other RAMSTK modules.\nfrom ramstk.Utilities import none_to_default\nfrom ramstk.dao.RAMSTKCommonDB import RAMSTK_BASE\n\n\nclass RAMSTKControl(RAMSTK_BASE):\n \"\"\"\n Class to represent the table ramstk_control in the RAMSTK Program database.\n\n This table shares a Many-to-One relationship with ramstk_cause.\n \"\"\"\n\n __tablename__ = 'ramstk_control'\n __table_args__ = {'extend_existing': True}\n\n cause_id = Column(\n 'fld_cause_id',\n Integer,\n ForeignKey('ramstk_cause.fld_cause_id'),\n nullable=False)\n control_id = Column(\n 'fld_control_id',\n Integer,\n primary_key=True,\n autoincrement=True,\n nullable=False)\n\n description = Column('fld_description', String(512), default='')\n type_id = Column('fld_type_id', String(512), default='')\n\n # Define the relationships to other tables in the RAMSTK Program database.\n cause = relationship('RAMSTKCause', back_populates='control')\n\n is_mode = False\n is_mechanism = False\n is_cause = False\n is_control = True\n is_action = False\n\n def get_attributes(self):\n \"\"\"\n Retrieve the current values of the RAMSTKControl data model attributes.\n\n :return: {cause_id, control_id, description, type_id} pairs.\n :rtype: dict\n \"\"\"\n _attributes = {\n 'cause_id': self.cause_id,\n 'control_id': self.control_id,\n 'description': self.description,\n 'type_id': self.type_id\n }\n\n return _attributes\n\n def set_attributes(self, attributes):\n \"\"\"\n Set the current values of the RAMSTKControl data model attributes.\n\n :param dict attributes: values to assign to instance attributes.\n :return: (_code, _msg); the error code and error message.\n :rtype: tuple\n \"\"\"\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKControl {0:d} attributes.\". 
\\\n format(self.control_id)\n\n try:\n self.description = str(\n none_to_default(attributes['description'], ''))\n self.type_id = str(none_to_default(attributes['type_id'], ''))\n except KeyError as _err:\n _error_code = 40\n _msg = \"RAMSTK ERROR: Missing attribute {0:s} in attribute \" \\\n \"dictionary passed to \" \\\n \"RAMSTKControl.set_attributes().\".format(_err)\n\n return _error_code, _msg\n","sub_path":"src/ramstk/dao/programdb/RAMSTKControl.py","file_name":"RAMSTKControl.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"342468001","text":"import sqlite3\nfrom datetime import datetime\nfrom distutils.version import LooseVersion\n\n\nclass Database():\n def __init__(self, dbname='ukrstatdb', table_name='Inflation'):\n self.conn = None\n self.cursor = None\n self.dbname = dbname\n self.table_name = table_name\n self.__open(dbname)\n self.__create_new()\n self.current_date = datetime.now()\n\n def __open(self, dbname):\n try:\n self.conn = sqlite3.connect(dbname)\n self.cursor = self.conn.cursor()\n except sqlite3.Error:\n print(\"Local Database connection Error!\")\n\n def close(self):\n if self.conn:\n self.conn.commit()\n self.cursor.close()\n self.conn.close()\n\n def __create_new(self):\n \"\"\"Create new table if it's not exists\"\"\"\n query = 'CREATE TABLE IF NOT EXISTS {0} \\\n (year INTEGER PRIMARY KEY, month INTEGER, inflation REAL);'.format(self.table_name)\n self.cursor.execute(query)\n self.conn.commit()\n\n def get_all_entries(self, columns='*'):\n \"\"\"Extract all data columns from Database Table\"\"\"\n query = 'SELECT {1} FROM {0};'.format(self.table_name, columns)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n return rows\n\n @property\n def last_entry(self):\n \"\"\"Get last data entry from Database Table\"\"\"\n return self.get_all_entries()[-1]\n\n @property\n def current_month_entry(self):\n \"\"\"Get current month data entry from Database Table. None if not exists\"\"\"\n return self.get_entry(self.current_date.month, self.current_date.year)\n\n def get_entry(self, month, year):\n \"\"\"Get data entry from Database Table\"\"\"\n query = 'SELECT * FROM {0} where year={1} and month={2};'.format(self.table_name, year, month)\n self.cursor.execute(query)\n rows = self.cursor.fetchone()\n return rows\n\n def period_entries(self, start=(2007, 1), end=None):\n if not end: end = (self.current_date.year, self.current_date.month)\n lv_str = lambda t: '.'.join((str(s) for s in t))\n entries = []\n for entry in self.get_all_entries():\n if (LooseVersion(lv_str(start)) <= LooseVersion(lv_str(entry[:2])) and\n LooseVersion(lv_str(end)) >= LooseVersion(lv_str(entry[:2]))):\n entries.append(entry)\n return entries\n\n def write_entry(self, year, month, data):\n \"\"\"Insert data into Table\"\"\"\n query = 'INSERT INTO {0} VALUES ( {1}, {2}, {3});'.format(self.table_name, year, month, data)\n self.cursor.execute(query)\n self.conn.commit()\n\n def __query(self, sql):\n \"\"\"Execution method for random query\"\"\"\n self.cursor.execute(sql)\n self.conn.commit()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Close connection to DataBase\"\"\"\n self.close()\n\n\nif __name__ == \"__main__\":\n db = Database()\n print(db.get_entry(10, 2017))\n print(db.current_month_entry)\n print(db.last_entry)\n","sub_path":"T3. 
Code Testing/Pytest/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"322378206","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\n\n\nclass FCNeuralNet(tf.Model):\n \"\"\"A simple fully-connected network for multi-classification.\n\n Args:\n num_classes: The number of classes involved in the classification.\n dropout: Drop-out rate in the fully-connected layer.\n \"\"\"\n\n def __init__(self, num_classes: int = 2, dropout: float = 0):\n super(FCNeuralNet, self).__init__()\n self.flatten = Flatten()\n self.fc = keras.Sequential(\n [\n Dense(512, activation=\"relu\"),\n Dropout(dropout),\n Dense(64, activation=\"relu\"),\n Dropout(dropout),\n Dense(num_classes),\n ]\n )\n\n def call(self, x):\n out = self.flatten(x)\n out = self.fc(out)\n return out\n","sub_path":"privacy_evaluator/models/tf/fc_neural_net.py","file_name":"fc_neural_net.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"227270499","text":"from torch.utils.data import Dataset\nimport torch\nfrom dataset.HO_Data.data_util import *\nimport os\nimport numpy as np\nfrom src.path import OBJ_MODEL_PATH\n\n\nclass Ho3DDataset(Dataset):\n def __init__(self, root, pathFile, augmentation, dtype, isValid=False,preVis=False):\n self.folder = os.path.join(root, 'train')\n self.fileNames = os.path.join(root, pathFile)\n self.dtype = dtype\n self.transform = V2VVoxelization(cubic_size=200, augmentation=augmentation)\n self.isValid = isValid\n self.preVis = preVis\n self._load()\n\n def __getitem__(self, index):\n record = self.filePaths[index]\n #print('record:', record)\n subfolder, file = tuple(record.rstrip().split('/'))\n depthpath = os.path.join(self.folder, subfolder, 'depth', file + '.png')\n annotpath = os.path.join(self.folder, subfolder, 'meta', file + '.pkl')\n\n depth = read_depth_img(depthpath)\n annot = np.load(annotpath, allow_pickle=True)\n camMat = annot['camMat']\n fx = camMat[0, 0]\n fy = camMat[1, 1]\n ux = camMat[0, 2]\n uy = camMat[1, 2]\n\n ##################### load object model and annotations #######################\n objMesh = read_obj(\n os.path.join(OBJ_MODEL_PATH, annot['objName'], 'textured_2358.obj'))\n objMesh.v = np.matmul(objMesh.v, cv2.Rodrigues(annot['objRot'])[0].T) + annot['objTrans']\n\n handJoints = annot['handJoints3D']\n handJoints = handJoints[jointsMapManoToSimple]\n objCorners = annot['objCorners3D']\n _, handMesh = forwardKinematics(annot['handPose'], annot['handTrans'], annot['handBeta'])\n\n ################# project given annotations in UVD ###################\n handJoints_uvd = project_3D_points(camMat, handJoints)\n obj_uvd = project_3D_points(camMat, objCorners)\n handMesh_uvd = project_3D_points(camMat, handMesh.r)\n objmesh_uvd = project_3D_points(camMat, objMesh.v)\n ################ get the common center point of hand and object ###########\n objcenter = np.mean(obj_uvd, axis=0)\n com = np.mean(np.array([handJoints_uvd[0], objcenter]), axis=0)\n # print('com:', com)\n\n if(not self.isValid):\n ###################### calculate voxel of depthmap and heatmaps of joints and object corners (V2V approach) ############\n pointcloud = Main_depthmap2points(depth, ux, uy, fx, fy)\n pointcloud = pointcloud.reshape(-1, 3)\n refpoint = Main_pixelToworld(com.reshape(1, -1), ux, uy, fx, fy)\n 
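# wrap the reference point and project all annotations from pixel (UVD) to world coordinates before voxelization\n            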
refpoint = np.array(refpoint)\n joints_world = Main_pixelToworld(handJoints_uvd.copy(), ux, uy, fx, fy)\n bbox_world = Main_pixelToworld(obj_uvd.copy(), ux, uy, fx, fy)\n handmesh_world = Main_pixelToworld(handMesh_uvd.copy(), ux, uy, fx, fy)\n objmesh_world = Main_pixelToworld(objmesh_uvd.copy(), ux, uy, fx, fy)\n\n sample = {\n 'points': pointcloud,\n 'joints': joints_world,\n 'bbox': bbox_world,\n 'handmesh': handmesh_world,\n 'objmesh': objmesh_world,\n 'refpoint': refpoint,\n }\n #voxel88,heatmap_joints,heatmap_bbox,voxel44,mesh_voxel,norm_handmesh,norm_objmesh = self.transform.train_transform(sample)\n voxel88, voxel44, norm_handmesh, norm_objmesh = self.transform.train_transform(\n sample)\n ################ for testing purpose in visualization ###############\n # if (self.preVis):\n # self.testVis(voxel88,heatmap_joints,heatmap_bbox,mesh_voxel,norm_handmesh,norm_objmesh)\n\n voxel88 = torch.from_numpy(voxel88.reshape((1, *voxel88.shape))).to(self.dtype)\n voxel44 = torch.from_numpy(voxel44.reshape((1, *voxel44.shape))).to(self.dtype)\n # mesh_voxel = torch.from_numpy(mesh_voxel.reshape((1, *mesh_voxel.shape))).to(self.dtype)\n # heatmap_joints = torch.from_numpy(heatmap_joints).to(self.dtype)\n # heatmap_bbox = torch.from_numpy(heatmap_bbox).to(self.dtype)\n norm_handmesh = torch.from_numpy(norm_handmesh).to(self.dtype)\n norm_objmesh = torch.from_numpy(norm_objmesh).to(self.dtype)\n\n #return (voxel88,heatmap_joints,heatmap_bbox,voxel44,mesh_voxel,norm_handmesh,norm_objmesh)\n return (voxel88, voxel44, norm_handmesh, norm_objmesh)\n else:\n pointcloud = Main_depthmap2points(depth, ux, uy, fx, fy)\n pointcloud = pointcloud.reshape(-1, 3)\n refpoint = Main_pixelToworld(com.reshape(1, -1), ux, uy, fx, fy)\n refpoint = np.array(refpoint)\n sample = {\n 'points': pointcloud,\n 'refpoint': refpoint\n }\n voxel88,voxel44 = self.transform.val_transform(sample)\n voxel88 = torch.from_numpy(voxel88.reshape((1, *voxel88.shape))).to(self.dtype)\n voxel44 = torch.from_numpy(voxel44.reshape((1, *voxel44.shape))).to(self.dtype)\n\n GT = {\n 'handpose' : annot['handJoints3D'].astype(np.float64),\n 'objpose' : annot['objCorners3D'].astype(np.float64),\n 'handverts': handMesh.r.astype(np.float64),\n 'objverts': objMesh.v.astype(np.float64),\n 'camMat' :annot['camMat'].astype(np.float64),\n 'refpoint': refpoint.astype(np.float64)\n }\n return (voxel88,voxel44,GT)\n\n def __len__(self):\n return len(self.filePaths)\n\n def _load(self):\n self.filePaths = []\n with open(self.fileNames) as f:\n for record in f:\n self.filePaths.append(record)\n\n def standardize(self,val,mean,std):\n norm_val = (val-mean)/std\n return norm_val\n\n def testVis(self, voxel88,heatmap_joints,heatmap_bbox,mesh_voxel,norm_handmesh,norm_objmesh):\n import matplotlib.pyplot as plt\n\n joints = self.transform.extract_coord_from_output(heatmap_joints)\n objCorners = self.transform.extract_coord_from_output(heatmap_bbox)\n coord_x = np.argwhere(voxel88)[:, 0]\n coord_y = np.argwhere(voxel88)[:, 1]\n coord_z = np.argwhere(voxel88)[:, 2]\n # print(len(coord_x))\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n # ax.scatter(coord_x, coord_y, coord_z, c='r', s=10)\n coord_x = np.argwhere(mesh_voxel)[:, 0]\n coord_y = np.argwhere(mesh_voxel)[:, 1]\n coord_z = np.argwhere(mesh_voxel)[:, 2]\n ax.scatter(coord_x, coord_y, coord_z, c='r', s=10)\n ax.scatter(norm_handmesh[:, 0], norm_handmesh[:, 1], norm_handmesh[:, 2], c='b', s=10)\n 
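# overlay the normalized object mesh vertices in the same axes for comparison\n        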
ax.scatter(norm_objmesh[:, 0], norm_objmesh[:, 1], norm_objmesh[:, 2], c='b', s=10)\n\n\n ############## bone joints indexe as pair of points order by thumb, index, middle, ring, pinky fingers\n bones_3d = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10],\n [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]\n edges_3d = [[0, 1], [1, 3], [3, 2], [2, 0], [4, 5], [5, 7], [7, 6], [6, 4], [0, 4], [1, 5], [2, 6], [3, 7]]\n\n ############ Display in 3D\n\n for b in bones_3d:\n ax.plot(joints[b, 0], joints[b, 1], joints[b, 2], linewidth=1.0, c='g')\n ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], c=\"b\", edgecolors='b')\n\n for e in edges_3d:\n ax.plot(objCorners[e, 0], objCorners[e, 1], objCorners[e, 2], linewidth=1.0, c='m')\n ax.scatter(objCorners[:, 0], objCorners[:, 1], objCorners[:, 2], c=\"c\", edgecolors='c')\n\n plt.show()\n\nclass V2VVoxelization(object):\n def __init__(self, cubic_size, augmentation=False):\n self.cubic_size = cubic_size\n self.cropped_size1, self.original_size1 = 88,96\n self.cropped_size2, self.original_size2 = 44,48\n self.sizes1 = (self.cubic_size, self.cropped_size1, self.original_size1)\n self.sizes2 = (self.cubic_size, self.cropped_size2, self.original_size2)\n self.pool_factor = 2\n self.std = 1.7\n self.augmentation = augmentation\n self.extract_coord_from_output = extract_coord_from_output\n output_size = int(self.cropped_size1 / self.pool_factor)\n # Note, range(size) and indexing = 'ij'\n self.d3outputs = np.meshgrid(np.arange(output_size), np.arange(output_size), np.arange(output_size),\n indexing='ij')\n\n def train_transform(self, sample):\n points, joints, bbox, handmesh, objmesh, refpoint = sample['points'], sample['joints'], \\\n sample['bbox'], sample['handmesh'], sample['objmesh'], \\\n sample['refpoint']\n if not self.augmentation:\n new_size = 100\n angle = 0\n self.angle = angle\n trans1 = self.original_size1 / 2 - self.cropped_size1 / 2\n trans2 = self.original_size2 / 2 - self.cropped_size2 / 2\n else:\n ## Augmentations\n # Resize\n new_size = np.random.rand() * 40 + 80\n\n # Rotation\n angle = np.random.rand() * 80 / 180 * np.pi - 40 / 180 * np.pi\n self.angle = angle\n # Translation\n trans1 = np.random.rand(3) * (self.original_size2 - self.cropped_size2)\n trans2 = np.random.rand(3) * (self.original_size2 - self.cropped_size2)\n\n ######################## processing input & output for posenet #################\n voxel88 = generate_cubic_input(points, refpoint, new_size, angle, trans1, self.sizes1)\n # heatmap_joints = generate_heatmap_gt(joints, refpoint, new_size, angle, trans1, self.sizes1, self.d3outputs,\n # self.pool_factor, self.std)\n # heatmap_bbox = generate_heatmap_gt(bbox, refpoint, new_size, angle, trans1, self.sizes1, self.d3outputs,\n # self.pool_factor, self.std)\n ######################## processing input & output for shapenet #################\n voxel44 = generate_cubic_input(points, refpoint, new_size, angle, trans2, self.sizes2)\n fullmesh = np.concatenate([handmesh, objmesh], axis=0)\n #mesh_voxel = generate_cubic_input(fullmesh, refpoint, new_size, angle, trans2, self.sizes2)\n norm_handmesh = generate_coord(handmesh, refpoint, new_size, angle, trans2, self.sizes2)\n norm_objmesh = generate_coord(objmesh, refpoint, new_size, angle, trans2, self.sizes2)\n\n #return voxel88,heatmap_joints,heatmap_bbox,voxel44,mesh_voxel,norm_handmesh,norm_objmesh\n\n return voxel88,voxel44, norm_handmesh, norm_objmesh\n\n def val_transform(self, 
sample):\n points, refpoint = sample['points'], sample['refpoint']\n if not self.augmentation:\n new_size = 100\n angle = 0\n self.angle = angle\n trans1 = self.original_size1 / 2 - self.cropped_size1 / 2\n trans2 = self.original_size2 / 2 - self.cropped_size2 / 2\n else:\n ## Augmentations\n # Resize\n new_size = np.random.rand() * 40 + 80\n\n # Rotation\n angle = np.random.rand() * 80 / 180 * np.pi - 40 / 180 * np.pi\n self.angle = angle\n # Translation\n trans1 = np.random.rand(3) * (self.original_size2 - self.cropped_size2)\n trans2 = np.random.rand(3) * (self.original_size2 - self.cropped_size2)\n\n ######################## processing input & output for posenet #################\n voxel88 = generate_cubic_input(points, refpoint, new_size, angle, trans1, self.sizes1)\n\n ######################## processing input & output for shapenet #################\n voxel44 = generate_cubic_input(points, refpoint, new_size, angle, trans2, self.sizes2)\n\n return voxel88,voxel44\n","sub_path":"dataset/HO_Data/HO_PVSNet.py","file_name":"HO_PVSNet.py","file_ext":"py","file_size_in_byte":12167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"259040179","text":"import time\nfrom sensorThread import sensorThread\nfrom driveUnit import driveUnit\nfrom regulator import Regulator\n\nshared_stuff = {\"lineSensor\" : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n \"distance\" : [0, 0],\n \"armPosition\" : [0, 0, 255, 4, 5, 5],\n \"errorCodes\" : [\"YngveProgrammedMeWrong\"],\n \"motorSpeed\" : [50, 50],\n \"latestCalibration\" : \"0000-00-00-15:00\",\n \"autoMotor\" : True,\n \"autoArm\" : False,\n \"regulator\" : [0, 0]}\n\ndrive_unit = driveUnit()\n\nsensor_thread = sensorThread(shared_stuff)\nsensor_thread.daemon=True\nsensor_thread.start()\n\nregulator = Regulator(shared_stuff)\nregulator.daemon = True\nregulator.start()\n\nl, r = 0, 0\n\nwhile True:\n time.sleep(0.01)\n\n l, r = shared_stuff[\"regulator\"]\n drive_unit.setMotorLeft(l)\n time.sleep(0.001)\n drive_unit.setMotorRight(r)\n time.sleep(0.001)\n drive_unit.sendAllMotor()\n\n #print \"I didnt crash!\"\n","sub_path":"src/main_unit/regulator_test.py","file_name":"regulator_test.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"205981535","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nfrom app import app, BASEDIR\n\ndf = pd.read_csv(BASEDIR + \"/mldata/BigMac.csv\", sep = '\\t')\n\ndf = df.loc[:10]\n\nfig = plt.figure(constrained_layout=False, figsize=(24, 16))\nspec = fig.add_gridspec(2, 2)\nfig_ax1 = fig.add_subplot(spec[0:1,:])\nfig_ax2 = fig.add_subplot(spec[1:2,:])\n\nindex = np.arange(df.shape[0])\ncountry = df['Country']\nprice = df['Price']\nwage = df['Wage']\nbar_width = 0.35\n\nopacity = 0.4\nerror_config = {'ecolor': '0.3'}\n\nrects1 = fig_ax1.bar(x=index, height=price, width=bar_width,\n alpha=opacity, color='b',\n label='Price')\nrects2 = fig_ax1.bar(x=index + bar_width, height=wage, width=bar_width,\n alpha=opacity, color='r',\n label='Wage')\n\nfig_ax1.set_xticks(index)\nfig_ax1.set_xticklabels(country)\nfig_ax1.set_title(\"Price and Wage: Selected Countries\")\nfig_ax1.legend()\n\n\nfig_ax2.scatter(price, 
wage)\nplt.show()\n\n","sub_path":"app/mlvisuals/bigmacvisuals.py","file_name":"bigmacvisuals.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"297714988","text":"import tkinter as tk\r\n\r\nclass GuiJogo():\r\n \"\"\"Classe que define a interface gráfica da aplicação\r\n \"\"\"\r\n x_pad = 5\r\n y_pad = 3\r\n width_entry = 30\r\n\r\n # Criando a janela...\r\n window = tk.Tk()\r\n window.wm_title(\"Poker - Jogo\")\r\n\r\n # Criando variáveis que armazenarão o texto inserido pelo usuário...\r\n txtJogo = tk.StringVar()\r\n txtJogador = tk.StringVar()\r\n txtBuyIn = tk.StringVar()\r\n txtCashOut = tk.StringVar()\r\n txtSaldo = tk.StringVar()\r\n txtPontos = tk.StringVar()\r\n txtRank = tk.StringVar()\r\n\r\n # Criando os objetos que estarão na janela...\r\n lblLocal = tk.Label(window, text = \"Evento/Local\")\r\n lblJogador = tk.Label(window, text = \"Jogador\")\r\n lblBuyin = tk.Label(window, text = \"BuyIn\")\r\n lblCashOut = tk.Label(window, text = \"CashOut\")\r\n lblSaldo = tk.Label(window, text = \"Saldo\")\r\n lblPontos = tk.Label(window, text = \"Pontos\")\r\n lblRank = tk.Label(window, text = \"Rank\")\r\n entJogo = tk.Entry(window, textvariable = txtJogo, width = width_entry)\r\n entJogador = tk.Entry(window, textvariable = txtJogador, width = width_entry)\r\n entBuyIn = tk.Entry(window, textvariable = txtBuyIn, width = width_entry)\r\n entCashOut = tk.Entry(window, textvariable = txtCashOut, width = width_entry)\r\n entSaldo = tk.Entry(window, textvariable = txtSaldo, width = width_entry)\r\n entPontos = tk.Entry(window, textvariable = txtPontos, width = width_entry)\r\n entRank = tk.Entry(window, textvariable = txtRank, width = width_entry)\r\n listJogo = tk.Listbox(window, width = 100)\r\n scrollJogo = tk.Scrollbar(window)\r\n btnViewAll = tk.Button(window, text = \"Ver todos\")\r\n btnBuscar = tk.Button(window, text = \"Buscar\")\r\n btnInserir = tk.Button(window, text = \"Inserir\")\r\n btnUpdate = tk.Button(window, text = \"Atualizar Selecionados\")\r\n btnDel = tk.Button(window, text = \"Deletar Selecionados\")\r\n btnClose = tk.Button(window, text = \"Fechar\")\r\n\r\n # Associando os objetos a grid da janela...\r\n lblLocal.grid(row = 0,column = 0)\r\n lblJogador.grid(row = 1,column = 0)\r\n lblBuyin.grid(row = 2,column = 0)\r\n lblCashOut.grid(row = 3, column = 0)\r\n lblSaldo.grid(row = 4, column = 0)\r\n lblPontos.grid(row = 5, column = 0)\r\n lblRank.grid(row = 6, column = 0)\r\n entJogo.grid(row=0, column=1, padx = 50, pady = 50)\r\n entJogador.grid(row = 1, column = 1)\r\n entBuyIn.grid(row = 2, column = 1)\r\n entCashOut.grid(row = 3, column = 1)\r\n entSaldo.grid(row = 4, column = 1)\r\n entPontos.grid(row = 5, column = 1)\r\n entRank.grid(row = 6, column = 1)\r\n listJogo.grid(row = 0, column = 2, rowspan = 10)\r\n scrollJogo.grid(row = 0, column = 6, rowspan = 10)\r\n btnViewAll.grid(row = 7, column = 0, columnspan = 2)\r\n btnBuscar.grid(row = 8, column = 0, columnspan = 2)\r\n btnInserir.grid(row = 9, column = 0, columnspan = 2)\r\n btnUpdate.grid(row = 10, column = 0, columnspan = 2)\r\n btnDel.grid(row = 11, column = 0, columnspan = 2)\r\n btnClose.grid(row = 12, column = 0, columnspan = 2)\r\n\r\n # buy_in REAL, cash_out REAL, pts REAL, rank INTEGER\r\n\r\n # Associando a Scrollbar com a Listbox...\r\n listJogo.configure(yscrollcommand=scrollJogo.set)\r\n scrollJogo.configure(command=listJogo.yview)\r\n\r\n\r\n # Adicionando um pouco de SWAG a interface...\r\n 
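# apply grid padding and sticky placement according to each widget's class\r\n    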
for child in window.winfo_children():\r\n widget_class = child.__class__.__name__\r\n if widget_class == \"Button\":\r\n child.grid_configure(sticky = 'WE', padx = x_pad, pady = y_pad)\r\n elif widget_class == \"Listbox\":\r\n child.grid_configure(padx = 0, pady = 0, sticky = 'NS')\r\n elif widget_class == \"Scrollbar\":\r\n child.grid_configure(padx = 0, pady = 0, sticky = 'NS')\r\n else:\r\n child.grid_configure(padx = x_pad, pady = y_pad, sticky = 'N')\r\n\r\n\r\n def run(self):\r\n GuiJogo.window.mainloop()","sub_path":"GuiJogo.py","file_name":"GuiJogo.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"6281262","text":"\"\"\"\nCustom errors for task-planner module\n\"\"\"\n\nclass NoDataError(Exception):\n \"\"\"\n To be raised when an attempt to return a file finds no data,\n typically found when a file is empty.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if args:\n try:\n filepath = args[0]\n except IndexError:\n filepath = None\n try:\n msg = args[1]\n except IndexError:\n msg = None\n elif kwargs:\n try:\n filepath = kwargs['filepath']\n except KeyError:\n filepath = None\n try:\n msg = kwargs['message']\n except KeyError:\n msg = None\n else:\n filepath = None\n msg = None\n\n if msg is None:\n if filepath is None:\n msg = 'The selected file was found, but returned no data'\n else:\n msg = ('The selected file (' + str(filepath) +\n ') was found, but returned no data.')\n\n super().__init__(msg)\n","sub_path":"errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"292179675","text":"#!/usr/bin/python\n\n#author:clearluo\n#create date: 2017-05-27\n#modify date:2017-05-27\n\nimport os\nimport sys\nimport datetime\nimport time\n\nos.environ[\"GOPATH\"] = \"/home/didong/go\"\nos.environ[\"GOLOG\"] = os.environ[\"GOPATH\"] + \"/logs\"\nos.environ[\"GOSRC\"] = os.environ[\"GOPATH\"] + \"/src\"\ngobin = os.environ[\"GOPATH\"] + \"/bin\"\nos.environ[\"GOBIN\"] = gobin\nexebin = \"didong-backend\"\nos.environ[\"BIN\"]=exebin\nos.environ[\"MYPROJECT\"] = \"didong\"\nos.environ[\"CURTIME\"] = datetime.datetime.now().strftime('%Y%m%d%H%M')\n\nif os.system('cd $GOSRC; rm -rf $MYPROJECT*'):\n\texit()\n\nif os.system('cd $GOSRC; rz'):\n\texit()\n\nif os.system('cd $GOSRC; unzip $MYPROJECT.zip'):\n\texit()\n\nif os.system('cd $GOSRC/$MYPROJECT; go build -ldflags \\'-w -s\\' -o ./$BIN'):\n\tprint('go build err')\n\texit()\n\nif os.system('cd $GOSRC/$MYPROJECT; upx ./$BIN'):\n\tprint('pux exe err')\n\texit()\n\nif os.system('./runtool.py stop'):\n\texit()\n\nif os.path.exists(gobin + \"/\" + exebin):\n\tif os.system('mv $GOBIN/$BIN $GOBIN/$BIN.$CURTIME.bak'):\n\t\texit()\n\nif os.system('cp $GOSRC/$MYPROJECT/$BIN $GOBIN/$BIN'):\n\texit()\n\nif os.system('./runtool.py start'):\n\texit()\n\n\n","sub_path":"recompile.goversion.py","file_name":"recompile.goversion.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"3538824","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nHerman Sanghera\r\nNovember 4, 2019\r\nCodingBat Solutions (Python)\r\n\r\nThis is a python file containing my own solutions to the\r\ndifferent Python Logic-2 exercises on codingbat.com\r\n\"\"\"\r\n\r\n\"\"\" \r\nLogic-2 > make_bricks:\r\nWe want to make a row of bricks that is goal inches long. 
\r\nWe have a number of small bricks (1 inch each) and big \r\nbricks (5 inches each). Return True if it is possible to \r\nmake the goal by choosing from the given bricks. This is \r\na little harder than it looks and can be done without any loops.\r\n\"\"\"\r\ndef make_bricks(small, big, goal):\r\n smallNeeded = goal - 5 * big\r\n if(smallNeeded < 0):\r\n smallNeeded = goal%5\r\n return(smallNeeded <= small)\r\n \r\n\"\"\"\r\nLogic-2 > lone_sum:\r\nGiven 3 int values, a b c, return their sum. However, if \r\none of the values is the same as another of the values, it \r\ndoes not count towards the sum.\r\n\"\"\"\r\ndef lone_sum(a, b, c):\r\n if(a == b and b == c): return 0\r\n elif(a == b): return c\r\n elif(b == c): return a\r\n elif(a == c): return b\r\n return a + b + c\r\n\r\n\"\"\"\r\nLogic-2 > lucky_sum:\r\nGiven 3 int values, a b c, return their sum. However, if one \r\nof the values is 13 then it does not count towards the sum and \r\nvalues to its right do not count. So for example, if b is 13, \r\nthen both b and c do not count.\r\n\"\"\"\r\ndef lucky_sum(a, b, c):\r\n arr = [a, b, c]\r\n sum = 0\r\n for element in arr:\r\n if(element == 13):\r\n break\r\n sum += element\r\n return sum\r\n\r\n\"\"\"\r\nLogic-2 > no_teen_sum:\r\nGiven 3 int values, a b c, return their sum. However, if any \r\nof the values is a teen -- in the range 13..19 inclusive -- \r\nthen that value counts as 0, except 15 and 16 do not count \r\nas a teens. Write a separate helper \"def fix_teen(n):\"that \r\ntakes in an int value and returns that value fixed for the \r\nteen rule. In this way, you avoid repeating the teen code 3 \r\ntimes (i.e. \"decomposition\"). Define the helper below and at \r\nthe same indent level as the main no_teen_sum().\r\n\"\"\"\r\ndef no_teen_sum(a, b, c):\r\n arr = [a, b, c]\r\n sum = 0\r\n \r\n for element in arr:\r\n sum += fix_teen(element)\r\n return sum\r\n\r\ndef fix_teen(n):\r\n if(n <= 12 or n == 15 or n == 16 or n >= 20):\r\n return n\r\n return 0\r\n\r\n\"\"\"\r\nLogic-2 > round_sum:\r\nFor this problem, we'll round an int value up to the next \r\nmultiple of 10 if its rightmost digit is 5 or more, so 15 \r\nrounds up to 20. Alternately, round down to the previous \r\nmultiple of 10 if its rightmost digit is less than 5, so 12 \r\nrounds down to 10. Given 3 ints, a b c, return the sum of \r\ntheir rounded values. To avoid code repetition, write a \r\nseparate helper \"def round10(num):\" and call it 3 times. \r\nWrite the helper entirely below and at the same indent level \r\nas round_sum().\r\n\"\"\"\r\ndef round_sum(a, b, c):\r\n return(round10(a)+round10(b)+round10(c))\r\n\r\ndef round10(num):\r\n if(num%10 < 5):\r\n return(num - num%10)\r\n return(num - num%10 + 10)\r\n \r\n\"\"\"\r\nLogic-2 > close_far:\r\nGiven three ints, a b c, return True if one of b or c is \r\n\"close\" (differing from a by at most 1), while the other\r\nis \"far\", differing from both other values by 2 or more. \r\nNote: abs(num) computes the absolute value of a number.\r\n\"\"\"\r\ndef close_far(a, b, c):\r\n if(abs(c-a) <= 1 or abs(b-a) <=1):\r\n if(abs(c-a) >= 2 and abs(c-b) >= 2):\r\n return True\r\n if(abs(b-a) >= 2 and abs(b-c) >= 2):\r\n return True\r\n return False\r\n\r\n\"\"\"\r\nLogic-2 > make_chocolate:\r\nWe want make a package of goal kilos of chocolate. We have \r\nsmall bars (1 kilo each) and big bars (5 kilos each). Return \r\nthe number of small bars to use, assuming we always use big \r\nbars before small bars. 
Return -1 if it can't be done.\r\n\"\"\"\r\ndef make_chocolate(small, big, goal):\r\n needed = goal - 5*big\r\n if(needed < 0):\r\n needed = goal%5\r\n if(small >= needed):\r\n return needed\r\n return -1\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n ","sub_path":"Logic-2.py","file_name":"Logic-2.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"161094262","text":"import pytest\nimport glob\nimport os\nimport pathlib\n\nimport pytomlpp\n\nVALID_EXCLUDE_FILE = []\nINVALID_EXCLUDE_FILE = [\n 'array-mixed-types-arrays-and-ints',\n 'array-mixed-types-ints-and-floats',\n 'array-mixed-types-strings-and-ints',\n 'float-leading-zero',\n 'float-leading-zero-neg',\n 'float-leading-zero-pos',\n 'float-no-leading-zero',\n 'float-no-trailing-digits',\n 'key-single-open-bracket',\n]\n\n@pytest.fixture\ndef valid_toml_files():\n current_path = os.path.dirname(__file__)\n toml_files = glob.glob(current_path + \"/../toml-test/tests/valid/*.toml\")\n return [pathlib.Path(p) for p in toml_files]\n\n@pytest.fixture\ndef invalid_toml_files():\n current_path = os.path.dirname(__file__)\n toml_files = glob.glob(current_path + \"/../toml-test/tests/invalid/*.toml\")\n return [pathlib.Path(p) for p in toml_files]\n \n\ndef test_keys():\n toml_string = \"a = 3\"\n table = pytomlpp.loads(toml_string)\n keys = table.keys()\n assert len(keys) == 1\n assert list(keys)[0] == \"a\"\n\ndef test_valid_toml_files(valid_toml_files):\n for t in valid_toml_files:\n if t.stem in VALID_EXCLUDE_FILE:\n continue\n print(f\"parsing {t}\")\n table = pytomlpp.load(str(t))\n assert type(table) == dict\n\ndef test_invalid_toml_files(invalid_toml_files):\n for t in invalid_toml_files:\n if t.stem in INVALID_EXCLUDE_FILE:\n print(f\"skiping {t.stem}\")\n continue\n print(f\"parsing {t}\")\n with pytest.raises(RuntimeError):\n pytomlpp.load(str(t))","sub_path":"tests/python-tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"237296346","text":"# Course: CS261 - Data Structures\n# Assignment: 5 - Part 1\n# Student: Ryan Jensen\n# Description: An implementation of a hash map with methods to clear, get, resize, put, remove, get keys, check if keys\n# are in the hash table, check the number of empty buckets, and get the load value of the table.\n\nfrom a5_include import *\n\n\ndef hash_function_1(key: str) -> int:\n \"\"\"\n Sample Hash function #1 to be used with A5 HashMap implementation\n \"\"\"\n\n hash = 0\n for letter in key:\n hash += ord(letter)\n return hash\n\n\ndef hash_function_2(key: str) -> int:\n \"\"\"\n Sample Hash function #2 to be used with A5 HashMap implementation\n \"\"\"\n hash, index = 0, 0\n index = 0\n for letter in key:\n hash += (index + 1) * ord(letter)\n index += 1\n return hash\n\n\nclass HashMap:\n def __init__(self, capacity: int, function) -> None:\n \"\"\"Init new HashMap based on DA with SLL for collision resolution\"\"\"\n\n self.buckets = DynamicArray()\n for _ in range(capacity):\n self.buckets.append(LinkedList())\n self.capacity = capacity\n self.hash_function = function\n self.size = 0\n\n def __str__(self) -> str:\n \"\"\"Return content of hash map t in human-readable form\"\"\"\n\n out = ''\n for i in range(self.buckets.length()):\n list = self.buckets.get_at_index(i)\n out += str(i) + ': ' + str(list) + '\\n'\n return out\n\n def clear(self) -> None:\n 
\"\"\"Clears the content of the hash map. Does not alter underlying capacity.\"\"\"\n\n # Find all keys and store in dynamic array object\n key_array = self.get_keys()\n\n # Increment through keys, removing key/value pairs from hash map until dynamic array is empty\n while key_array.length() != 0:\n key_to_remove = key_array.pop()\n self.remove(key_to_remove)\n\n def get(self, key: str) -> object:\n \"\"\"Returns the value associated with the given key, if key is not in hash map, returns None.\"\"\"\n\n # Hash the input value to get index\n hashed_key = self.hash_function(key)\n index = hashed_key % self.buckets.length()\n\n bucket = self.buckets.get_at_index(index)\n\n # Search bucket for matching key, return value if found, None if not found\n if bucket.contains(key):\n for node in bucket:\n if node.key == key:\n return node.value\n else:\n return None\n\n def put(self, key: str, value: object) -> None:\n \"\"\"Updates the key/value pair in the hash map. If given key already exists, it's associated value should be\n replaced with the new value. Otherwise, a key/value pair should be added.\n \"\"\"\n\n # Hash the input value to get index\n hashed_key = self.hash_function(key)\n index = hashed_key % self.buckets.length()\n\n bucket = self.buckets.get_at_index(index)\n\n # Check for key already in use, in which case update value\n if bucket.contains(key):\n bucket.remove(key)\n bucket.insert(key, value)\n # Key not already in use, create new key/value pair\n else:\n bucket.insert(key, value)\n self.size += 1\n\n def remove(self, key: str) -> None:\n \"\"\"Removes the given key and it's associated value from the hash map. If given not found, does nothing.\"\"\"\n\n if not self.contains_key(key):\n return\n else:\n # Hash the input value to get index\n hashed_key = self.hash_function(key)\n index = hashed_key % self.buckets.length()\n\n bucket = self.buckets.get_at_index(index)\n\n # Search bucket for matching key, remove key/value pair, decrement hash map size\n for node in bucket:\n if node.key == key:\n bucket.remove(key)\n self.size -= 1\n\n def contains_key(self, key: str) -> bool:\n \"\"\"Returns True if the given key is in the hash map, otherwise returns False.\"\"\"\n\n # Hash the input value to get index\n hashed_key = self.hash_function(key)\n index = hashed_key % self.buckets.length()\n\n bucket = self.buckets.get_at_index(index)\n\n # Search bucket for matching key, return True if found\n for node in bucket:\n if node.key == key:\n return True\n return False\n\n def empty_buckets(self) -> int:\n \"\"\"Returns the number of empty buckets in the hash table.\"\"\"\n\n empty_buckets_count = 0\n\n # Check for empty linked list in buckets\n for z in range(self.capacity):\n bucket = self.buckets.get_at_index(z)\n if bucket.length() == 0:\n empty_buckets_count += 1\n\n return empty_buckets_count\n\n def table_load(self) -> float:\n \"\"\"Returns current hash table load factor. (Average number of elements in each bucket.)\"\"\"\n\n load = float(self.size/self.buckets.length())\n\n return load\n\n def resize_table(self, new_capacity: int) -> None:\n \"\"\"Changes the capacity of the hash table. All key/value pairs must remain in the new hash map and links must\n be rehashed. 
If new capacity is less than one, this method does nothing."""\n\n # Do nothing for invalid capacities (check before building the temporary map)\n if new_capacity < 1:\n return\n\n # Create new hash map with desired capacity and same hash function\n temp_hash_map = HashMap(new_capacity, self.hash_function)\n\n # Store all the keys\n key_array = self.get_keys()\n key_array_copy = self.get_keys()\n\n # Iterate through keys list, rehashing and adding key/value pairs to temporary hash map\n while key_array.length() != 0:\n old_key = key_array.pop()\n value = self.get(old_key)\n temp_hash_map.put(old_key, value)\n\n # Clear hash map contents\n self.clear()\n\n # Add buckets and update capacity\n capacity_difference = new_capacity - self.capacity\n # Growing capacity\n if capacity_difference > 0:\n for i in range(capacity_difference):\n self.buckets.append(LinkedList())\n # Shrinking capacity\n elif capacity_difference < 0:\n self.buckets = DynamicArray()\n for i in range(new_capacity):\n self.buckets.append(LinkedList())\n\n self.capacity = new_capacity\n\n # Iterate through keys list, rehashing and adding key/value pairs to new sized hash map\n while key_array_copy.length() != 0:\n old_key = key_array_copy.pop()\n value = temp_hash_map.get(old_key)\n self.put(old_key, value)\n\n def get_keys(self) -> DynamicArray:\n """Returns a DynamicArray that contains all keys stored in the hash map."""\n\n key_array = DynamicArray()\n\n # Iterate through keys in each bucket, storing those keys in a dynamic array\n for z in range(self.capacity):\n bucket = self.buckets.get_at_index(z)\n for x in bucket:\n key_array.append(x.key)\n\n return key_array\n","sub_path":"hash_map.py","file_name":"hash_map.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"405361858","text":"import random\nimport math\nfrom sa_settings import SA_settings\nimport matplotlib.pyplot as plt\nimport copy\nclass SA():\n\n def __init__(self,sa_settings):\n self.settings = sa_settings\n self.x = list(range(self.settings.numCity)) # initialize the solution\n self.x = self.settings.standardize(self.x)\n self.x_best = [self.x] # record the best solutions found so far\n self.obj_best = [float('inf')] # record the best objective values found so far\n self.obj = [float('inf')] # record the objective value of the last solution of each Markov chain\n\n def main(self):\n iterNum = 0 # iteration counter\n while 1:\n for i in range(self.settings.chainLength[iterNum]):\n y = self.get_neighbor(self.x) # randomly generate a new solution in the neighborhood of the current one\n if self.settings.objective(y) < self.settings.objective(self.x): # new solution is better, accept it directly\n self.x = y\n elif math.exp((self.settings.objective(self.x) - self.settings.objective(y)) / self.settings.temperature[iterNum]) > random.uniform(0,1): # old solution is better, accept the new one with a probability\n self.x = y\n self.x = self.settings.standardize(self.x)\n if self.settings.objective(self.x) < self.obj_best[-1]: # if the new solution beats the historical best, record it\n self.obj_best.append(self.settings.objective(self.x))\n self.x_best.append(self.x)\n self.obj.append(self.settings.objective(self.x))\n iterNum = iterNum + 1 # increment the iteration counter\n if iterNum >= self.settings.maxIteration:\n self.settings.showSolution(self.x)\n break\n\n def get_neighbor(self,i):\n neighbor = copy.deepcopy(i)\n if self.settings.neighborMode == 1:\n index = random.randint(1,len(i) - 2)\n neighbor[index],neighbor[index + 1] = neighbor[index + 1],neighbor[index] # swap two cities adjacent in the visiting order\n elif self.settings.neighborMode == 2:\n index = random.sample(list(range(1,self.settings.numCity)),2) # swap the visiting order of two arbitrary cities\n neighbor[index[0]],neighbor[index[1]] = neighbor[index[1]],neighbor[index[0]]\n return neighbor\n\n def output(self):\n print('-'*40,'SA','-'*40)\n print(self.x)\n 
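# Annotation (added): main() above uses the Metropolis acceptance rule -- a worse neighbor y is accepted with probability exp((f(x) - f(y)) / T), which shrinks as the temperature T cools. A minimal sketch with hypothetical values f(x)=10, f(y)=12, T=5:\n # p_accept = math.exp((10.0 - 12.0) / 5.0) # ~0.67\n # accepted = p_accept > random.uniform(0, 1)\n 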
print(self.obj_best[-1])\n ax1 = plt.subplot(121)\n plt.title('SA')\n plt.plot(self.obj_best,'^-r')\n plt.xlabel('iteration')\n plt.ylabel('objective')\n ax2 = plt.subplot(122)\n plt.title('SA')\n plt.plot(self.obj,'^-r')\n plt.xlabel('iteration')\n plt.ylabel('objective')\n plt.show()\n\n def __repr__(self):\n return f'{self.__class__.__name__}'\n\nif __name__ == \"__main__\":\n sa_settings = SA_settings()\n sa = SA(sa_settings)\n sa.main()\n sa.output()\n","sub_path":"sa.py","file_name":"sa.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"131829421","text":"from PySide2.QtCore import Qt\nfrom isis.dialog import Dialog\nfrom isis.push_button import Push_Button\nfrom isis.h_box_layout import H_Box_Layout\nfrom isis.v_box_layout import V_Box_Layout\nfrom isis.grid_layout import Grid_Layout\nfrom isis.line_edit import Line_Edit\nfrom isis.label import Label\nfrom isis.decimal_edit import Decimal_Edit\nfrom isis.message_box import Message_Box\nfrom isis.itzamara.search_item import Search_Item\nfrom sarah.acp_bson import Client\nfrom isis.valentine.widget_viewer_storage import Widget_Viewer_Storage\n\n\nclass Set_Inventory(Dialog):\n def __init__(self, parent=None):\n Dialog.__init__(self, parent)\n self.setWindowTitle('Set_Inventory')\n\n self.viewer_storage = Widget_Viewer_Storage(self)\n self.viewer_storage.with_button_change = True\n\n lbl_item = Label('item', self)\n btn_change_item = Push_Button('change_&item', self)\n lbl_item_sku = Label('sku: ', self)\n lbl_item_description = Label('description: ', self)\n\n self.lbl_item_sku = Label(self)\n self.lbl_item_description = Label(self)\n\n lbl_quanty = Label('quanty: ', self)\n self.spin_quanty = Decimal_Edit(self)\n\n btn_accept = Push_Button('accept', self)\n btn_close = Push_Button('&close', self)\n btn_accept.setDefault(True)\n\n lbl_item_sku.fix_size_based_on_font()\n lbl_item_description.fix_size_based_on_font()\n lbl_quanty.fix_size_based_on_font()\n\n mainlayout = Grid_Layout(self)\n mainlayout.addWidget(self.viewer_storage, 0, 0, 1, -1)\n mainlayout.addWidget(lbl_item, 1, 0)\n mainlayout.addWidget(btn_change_item, 1, 1, Qt.AlignRight)\n mainlayout.addWidget(lbl_item_sku, 2, 0)\n mainlayout.addWidget(self.lbl_item_sku, 2, 1)\n mainlayout.addWidget(lbl_item_description, 3, 0)\n mainlayout.addWidget(self.lbl_item_description, 3, 1)\n mainlayout.addWidget(lbl_quanty, 4, 0)\n mainlayout.addWidget(self.spin_quanty, 4, 1)\n\n layoutbuttons = H_Box_Layout()\n layoutbuttons.addStretch()\n layoutbuttons.addWidget(btn_accept)\n layoutbuttons.addWidget(btn_close)\n\n mainlayout.addItem(layoutbuttons)\n self.setLayout(mainlayout)\n\n self.storage = None\n self.item = None\n self.agent_valentine = None\n\n btn_change_item.clicked.connect(self.handle_btn_change_item_clicked)\n btn_close.clicked.connect(self.close)\n btn_accept.clicked.connect(self.handle_btn_accept_clicked)\n self.spin_quanty.setMaximum(10000)\n self.spin_quanty.setFocus()\n\n @property\n def storage(self):\n return self.viewer_storage.storage\n\n @storage.setter\n def storage(self, storage):\n self.viewer_storage.storage = storage\n\n def handle_btn_change_item_clicked(self):\n searcher = Search_Item(self)\n searcher.exec_()\n if searcher.selected is not None:\n self.item = searcher.selected\n self.update_item_ui()\n self.spin_quanty.setFocus()\n\n def handle_btn_accept_clicked(self):\n if self.item is not None and self.storage is not None:\n if self.agent_valentine is None:\n 
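# Annotation (added): lazy initialization -- the Client is built on first use and cached on the instance, so later clicks reuse the same connection instead of reconnecting. General shape of the pattern (sketch):\n # if cached_client is None: cached_client = Client(...) # connect once\n # cached_client.send_msg(msg) # reuse afterwards\n 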
self.agent_valentine = Client('isis.valentine.set_inventory', 'valentine')\n from decimal import Decimal\n msg = {'type_message': 'action', 'action': 'valentine/set_inventory', 'item': self.item,\n 'storage': self.storage}\n self.item['inventory'] = round(Decimal(self.spin_quanty.value), 3)\n self.agent_valentine.send_msg(msg)\n Message_Box.information(self, 'changed', 'inventory set')\n self.item = None\n self.update_item_ui()\n self.spin_quanty.value = 0\n searcher = Search_Item(self)\n searcher.exec_()\n if searcher.selected is not None:\n self.item = searcher.selected\n self.update_item_ui()\n self.spin_quanty.setFocus()\n\n def update_storage_ui(self):\n if self.storage is not None:\n if 'id' in self.storage:\n self.lbl_storage_id.setText(self.storage['id'])\n else:\n self.lbl_storage_id.setText('')\n if 'name' in self.storage:\n self.lbl_storage_name.setText(self.storage['name'])\n else:\n self.lbl_storage_name.setText('')\n else:\n self.lbl_storage_id.setText('')\n self.lbl_storage_name.setText('')\n\n def update_item_ui(self):\n if self.item is not None:\n if 'sku' in self.item:\n self.lbl_item_sku.setText(self.item['sku'])\n else:\n self.lbl_item_sku.setText('')\n if 'description' in self.item:\n self.lbl_item_description.setText(self.item['description'])\n else:\n self.lbl_item_description.setText('')\n else:\n self.lbl_item_sku.setText('')\n self.lbl_item_description.setText('')\n\n\nif __name__ == '__main__':\n import sys\n from isis.application import Application\n app = Application(sys.argv)\n vv = Set_Inventory()\n vv.show()\n sys.exit(app.exec_())\n","sub_path":"isis/valentine/set_inventory.pyw","file_name":"set_inventory.pyw","file_ext":"pyw","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"594429533","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport pytz\nimport inspect\n\nfrom .parser import Parser\nfrom .._compat import decode\n\n\nclass Loader(object):\n\n path = os.path.join(os.path.dirname(inspect.getfile(pytz)), 'zoneinfo')\n\n @classmethod\n def load(cls, name):\n name = decode(name)\n\n name_parts = name.lstrip('/').split('/')\n\n for part in name_parts:\n if part == os.path.pardir or os.path.sep in part:\n raise ValueError('Bad path segment: %r' % part)\n\n filepath = os.path.join(cls.path, *name_parts)\n\n if not os.path.exists(filepath):\n raise ValueError('Unknown timezone [{}]'.format(name))\n\n return Parser.parse(filepath)\n","sub_path":"pendulum/tz/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"66714302","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/Labtools/HMC5883L.py\n# Compiled at: 2015-05-09 06:16:36\nimport Labtools.interface as interface, time\nfrom numpy import int16\n\nclass HMC5883L:\n\n def __init__(self, ADDRESS=30):\n self.ADDRESS = ADDRESS\n self.I = interface.Interface()\n\n def connect(self):\n self.I.I2C.start(self.ADDRESS, 0)\n self.I.I2C.send(1)\n self.I.I2C.send(0)\n self.I.I2C.stop()\n self.I.I2C.start(self.ADDRESS, 0)\n self.I.I2C.send(2)\n self.I.I2C.send(0)\n self.I.I2C.stop()\n\n def __getVals__(self, addr, bytes):\n self.I.I2C.start(self.ADDRESS, 0)\n self.I.I2C.send(addr)\n self.I.I2C.restart(self.ADDRESS, 1)\n vals = self.I.I2C.read(bytes)\n self.I.I2C.stop()\n 
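# Annotation (added): vals now holds the six raw data bytes (three MSB/LSB axis pairs); read() below folds each pair into a signed 16-bit value. On the older NumPy this Python 2.7 code targets, int16 wraps out-of-range ints (newer NumPy raises instead), e.g. int16((0xFF << 8) | 0xFE) == -2.\n 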
return vals\n\n def read(self):\n vals = self.__getVals__(3, 6)\n x = int16(vals[0] << 8 | vals[1])\n y = int16(vals[2] << 8 | vals[3])\n z = int16(vals[4] << 8 | vals[5])\n return (x, y, z)","sub_path":"pycfiles/LabtoolSuite-0.1.6.10-py2.7/HMC5883L.py","file_name":"HMC5883L.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"233230444","text":"#!/usr/bin/env python\nfrom lib.swap import *\n\ndef comb_sort(L=[], factor=1.2):\n ''' Implementation of comb sort.\n :param L: list of sortable elements.\n '''\n if len(L) < 2: return L\n\n gap = len(L)\n while gap > 1:\n\n # calculate gap\n gap = max(1, int(gap / factor))\n\n # comb swapping\n for i in range(len(L) - gap):\n j = i + gap\n if L[i] > L[j]:\n swap(L, i, j)\n\n return L","sub_path":"algorithms/sorting/comb_sort.py","file_name":"comb_sort.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"130345543","text":"import sqlite3\r\nfrom hstest import * # instructions on how to add the library\r\nimport os\r\n\r\n\r\nclass SQLite3Test:\r\n\r\n \"\"\"It's recommended to keep the sequence:\r\n 1. Create object SQLite3Test\r\n 2. Check whether the file exists\r\n 3. Establish connection\r\n 4. Check whether the table exists\r\n 5. Check whether the columns exist\r\n 6. Do the rest of the tests on tables: is column primary key, not null\r\n\r\n Still to do: tests for is_unique and is_foreign_key\"\"\"\r\n\r\n cursor_message = f\"There is no cursor for the connection.\" # Is this the proper message?\r\n no_table_message = f\"There is no table you are looking for.\"\r\n\r\n def __init__(self, file_name): # file_name -> string\r\n self.file_name = file_name\r\n self.conn = None\r\n self.cursor = None\r\n\r\n def is_file_exist(self):\r\n if not os.path.exists(self.file_name):\r\n return f\"The file '{self.file_name}' does not exist or is outside of the script directory.\"\r\n return False\r\n\r\n def connect(self):\r\n ans = self.is_file_exist()\r\n if ans:\r\n return ans\r\n try:\r\n self.conn = sqlite3.connect(self.file_name)\r\n self.cursor = self.conn.cursor()\r\n except sqlite3.OperationalError as err:\r\n raise WrongAnswer(f\"Database {self.file_name} may be locked. 
An error was returned when trying to connect: {err}.\")\r\n\r\n def close(self):\r\n try:\r\n self.conn.close()\r\n except AttributeError:\r\n raise WrongAnswer(self.cursor_message)\r\n\r\n def run_query(self, query):\r\n try:\r\n lines = self.cursor.execute(f\"{query}\")\r\n except AttributeError:\r\n raise WrongAnswer(self.cursor_message)\r\n except sqlite3.OperationalError as err:\r\n self.close()\r\n raise WrongAnswer(f\"Error '{err}' occurred while trying to read from database '{self.file_name}'.\")\r\n except sqlite3.DatabaseError as err:\r\n self.close()\r\n raise WrongAnswer(f\"Error '{err}' occurred while trying to read from database '{self.file_name}'.\")\r\n return lines\r\n\r\n def is_table_exist(self, name): # table name -> string\r\n lines = self.run_query(f\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{name}';\").fetchall()\r\n if lines[0][0] == 0:\r\n self.close()\r\n raise WrongAnswer(f\"There is no table named '{name}' in database {self.file_name}\")\r\n\r\n def number_of_records(self, name, expected_lines): # table name -> string, expected_lines -> integer\r\n lines = self.run_query(f\"SELECT COUNT(*) FROM {name}\").fetchone()[0]\r\n if lines != expected_lines:\r\n self.close()\r\n raise WrongAnswer(f\"Wrong number of records in table {name}. Expected {expected_lines}, found {lines}\")\r\n\r\n def is_column_exist(self, name, names): # table name -> string, column names -> list of strings for all columns, or list with one string for one column\r\n lines = self.run_query(f'select * from {name}').description\r\n if len(names) != 1:\r\n if sorted(names) != sorted([line[0] for line in lines]):\r\n self.close()\r\n raise WrongAnswer(f\"There is something wrong in table {name}. Found column names: {[line[0] for line in lines]}. Expected {names}'\")\r\n else:\r\n if not any([names[0] == c_name for c_name in [line[0] for line in lines]]):\r\n self.close()\r\n raise WrongAnswer(f\"There is something wrong in table {name}. Found column names: {[line[0] for line in lines]}. 
Expected to find '{names[0]}'\")\r\n\r\n def table_info(self, name, column, attribute): # table name -> string, column name -> string, attr (\"PK\" Primary Key; \"NN\" Not null)\r\n lines = self.run_query(f\"PRAGMA table_info({name})\").fetchall()\r\n if column not in [line[1] for line in lines]:\r\n raise WrongAnswer(f\"There is no column {column}.\")\r\n for line in lines:\r\n if attribute == \"PK\":\r\n if line[1] == column and line[5] != 1:\r\n self.close()\r\n raise WrongAnswer(f\"There is no PRIMARY KEY parameter in {name} on column {column}.\")\r\n elif attribute == \"NN\":\r\n if line[1] == column and line[3] != 1:\r\n return CheckResult.wrong(f\"There is no NOT NULL parameter in {name} on column {column}.\")\r\n\r\n def is_unique(self, name, column): # table name -> string, column name -> string\r\n lines = self.run_query(f\"SELECT inf.name FROM pragma_index_list('{name}') as lst, pragma_index_info(lst.name) as inf WHERE lst.[unique] = 1;\").fetchall()\r\n if not any([column in line for line in lines]):\r\n raise WrongAnswer(f\"There is no UNIQUE parameter in {name} on column {column}.\")\r\n return True\r\n\r\n def is_foreign_key(self, name, column): # table name -> string, column name -> string\r\n lines = self.run_query(f\"SELECT * FROM pragma_foreign_key_list('{name}');\").fetchall()\r\n if not any([column in line for line in lines]):\r\n raise WrongAnswer(f\"There is no FOREIGN KEY parameter in {name} on column {column}.\")\r\n return True\r\n","sub_path":"Tests_for_SQLite3_Hyperskill.py","file_name":"Tests_for_SQLite3_Hyperskill.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"647500182","text":"import pandas as pd\nimport numpy as np\n\n# rank_mpg\n#\n#This function goes through and cleans up the mpg dataset\ndef rank_mpg():\n #column names\n headers = [\n 'mpg',\n 'cylinders',\n 'displacement',\n 'horsepower',\n 'weight',\n 'acceleration',\n 'year',\n 'origin',\n 'name'\n ];\n\n df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data',\n header=None, names=headers, delim_whitespace=True)\n\n # the last column, names, is not required because it doesn't affect the mpgs. it's more of a convenience.\n df_names = df.pop('name')\n\n #replace all ? 
with NaN and deletes the rows\n df.replace('?', np.NaN, inplace=True)\n df.dropna(inplace=True)\n\n #https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.as_matrix.html\n #convert the dataframe into numpy arrays.\n #the first return is for data while the second return is the targets.\n #first column is actually the target\n return df.as_matrix(columns=headers[1:8]).astype(float), df.as_matrix(columns=headers[0:1]).astype(float)\n","sub_path":"450 Test/CS450-master/CS450-master/W06/mpg.py","file_name":"mpg.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"426511805","text":"#import os\n#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nclass esu():\n pass\nf=open('stock_dataset.csv')\ndf=pd.read_csv(f) # read stocks\ndata=np.array(df['Price']) #from panda to numpy\ndata=data[::-1]\n#plot raw\nplt.figure()\nplt.plot(data)\nplt.show()\nnormalize_data=(data-np.mean(data))/np.std(data) #nomralisation\nnormalize_data=normalize_data[:,np.newaxis]\n\n\n# Training set\n\ntime_step=20 #time step\nrnn_unit=10 #hidden layer units\nbatch_size=60\ninput_size=1 #dimension of input\noutput_size=1 #dimension of output\nlr=0.0006 #learning rate\ntrain_x,train_y=[],[] #name and set the trainingset\nfor i in range(len(normalize_data)-time_step-1):\n x=normalize_data[i:i+time_step]\n y=normalize_data[i+1:i+time_step+1]\n train_x.append(x.tolist())\n train_y.append(y.tolist())\n\n\n\n\"\"\"\nDefine training neural network parameters\n\"\"\"\nX=tf.placeholder(tf.float32, [None,time_step,input_size])\nY=tf.placeholder(tf.float32, [None,time_step,output_size])\n\nweights={\n 'in':tf.Variable(tf.random_normal([input_size,rnn_unit])),\n 'out':tf.Variable(tf.random_normal([rnn_unit,1]))\n }\nbiases={\n 'in':tf.Variable(tf.constant(0.1,shape=[rnn_unit,])),\n 'out':tf.Variable(tf.constant(0.1,shape=[1,]))\n }\n\n\n\n\ndef lstm(batch):\n w_in=weights['in']\n b_in=biases['in']\n input=tf.reshape(X,[-1,input_size])\n input_rnn=tf.matmul(input,w_in)+b_in\n input_rnn=tf.reshape(input_rnn,[-1,time_step,rnn_unit])\n cell=tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)\n init_state=cell.zero_state(batch,dtype=tf.float32)\n output_rnn,final_states=tf.nn.dynamic_rnn(cell, input_rnn,initial_state=init_state, dtype=tf.float32)\n output=tf.reshape(output_rnn,[-1,rnn_unit])\n w_out=weights['out']\n b_out=biases['out']\n pred=tf.matmul(output,w_out)+b_out\n return pred,final_states\n\n\n\n\ndef train_lstm():\n global batch_size\n pred,_=lstm(batch_size)\n loss=tf.reduce_mean(tf.square(tf.reshape(pred,[-1])-tf.reshape(Y, [-1])))\n train_op=tf.train.AdamOptimizer(lr).minimize(loss)\n saver=tf.train.Saver(tf.global_variables())\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(10000):\n step=0\n start=0\n end=start+batch_size\n while(end '9':\n parts[2] = '0'\n parts[1] = chr(ord(parts[1]) + 1)\n if parts[1] > 'z':\n parts[1] = 'a'\n parts[0] = chr(ord(parts[0]) + 1)\n if parts[0] > 'Z':\n parts[0] = 'A'\n return pattern\n\n# From: https://github.com/ickerwx/pattern/blob/master/pattern\n\n\ndef pattern_offset(value, length=8192):\n try:\n if not isinstance(value, (int)) and value.startswith('0x'):\n value = struct.pack('] Req: \" + str(payload))\n if channel_type == CHANNEL_TYPE_SOCKET:\n send_socket(payload)\n elif channel_type == CHANNEL_TYPE_HTTP:\n send_http(payload)\n elif channel_type == 
CHANNEL_TYPE_ARG:\n send_args(payload)\n else:\n print(\"[!] Unknown channel_type\")\n exit()\n print(\"[*] Done\")\n return STATUS_SUCCESS\n except socket.timeout:\n print(\"[!] Timeout\")\n return STATUS_TIMEOUT\n except Exception as e:\n print(\"[!] Error \", str(e))\n traceback.print_exc()\n return STATUS_ERROR\n\n#####################\n# Exploit Functions #\n#####################\n\n\ndef interactive():\n while True:\n print(\"\")\n user_input = input(\"INPUT ('py:' for python expr, 'exit:' to stop) > \")\n if (user_input.startswith(\"exit:\")):\n break\n if (user_input.startswith(\"py:\")):\n user_input = eval(user_input[3:])\n send_payload(user_input.encode(\"utf-8\"))\n\n\ndef detect_timeout():\n for i in range(EXPLOIT_TYPE_DETECT_TIMEOUT_MIN_BUFFER_SIZE, EXPLOIT_TYPE_DETECT_TIMEOUT_MAX_BUFFER_SIZE):\n value = i * 100\n print(\"Trying: \", value)\n status = send_payload((\"A\" * value).encode(\"utf-8\"))\n if (status == STATUS_TIMEOUT or status == STATUS_ERROR):\n print(\"Stopped at: \", value)\n break\n\ndef exploit():\n # Compare badcharacter file with ESP: !mona compare -a esp -f c:\\badchar_test.bin\n # Find pattern within registers: !mona findmsp\n # Payload generation: \n # msfvenom -p windows/shell_reverse_tcp LHOST=10.11.0.4 LPORT=443 EXITFUNC=thread -f python –e x 86/shikata_ga_nai -b \"\\x00\\x0a\"\n # NASM shell: msf-nasm_shell metasm_shell.rb\n # Look for loaded DLLs: !mona modules\n # Find witing a module: \n # JMP ESP: !mona find -s \"\\xff\\xe4\" -m \"wcapwsp.dll\"\n # PUSH ESP, RETN: \n # Find instruction: !mona jmp -r esp -cpb \"\\x00\\x0A\\0D\"\n # Find offset of instruction: objdump -D -M intel user32.dll | grep 'jmp.*esp' | head\n # objdump -D validate | grep call| grep eax\n # Instructions: Debug - \\xCC | Nop - \\x90 | SUB ESP \\x10 - \\x83\\xEC\\x10\n # Exit functions: EXITFUNC=none / EXITFUNC=thread / EXITFUNC=process \n # Usual bad-characters: 00 0a\n # Nops or adjustment required due to - GetPC routine\n\n\n # ASCII strings (e.g. \"ABCD\") are stored front-to-back: \"\\x41\\x42\\x43\\x44\\x00\"\n # Code (e.g. \"NOP # NOP # NOP # RET\") is stored front-to-back: \"\\x90\\x90\\x90\\xC3\"\n # Numbers (e.g. 0x1337) are stored back-to-front: \"\\x37\\x13\\x00\\x00\"\n # Memory addresses or \"pointers\" (e.g. 
0xDEADBEEF) are stored back-to front: \"\\xEF\\xBE\\xAD\\xDE\n # >>> struct.pack(\">> struct.pack(\" \")\n print_header = True\n if (option == \"1\"):\n interactive()\n elif (option == \"2\"):\n detect_timeout()\n elif (option.startswith(\"3\")):\n try:\n print(pattern_create(option.split(\" \")[1]))\n except IndexError:\n print(\"Provide length along with the option (Example: '3 1024')\")\n print_header = False\n elif (option.startswith(\"4\")):\n try:\n value = option.split(\" \")[2]\n if (len(value) > 4):\n value = bytearray.fromhex(value.strip()).decode()[::-1]\n print(\"Finding: \" + value)\n offset = pattern_offset(value, option.split(\" \")[1])\n print(offset)\n except IndexError:\n print(\n \"Provide length and value to search along with the option (Example: '4 1024 Aa0A')\")\n print_header = False\n elif (option.startswith(\"5\")):\n try:\n send_payload(pattern_create(option.split(\" \")[1]).encode())\n except IndexError:\n print(\"Provide length along with the option (Example: '5 1024')\")\n print_header = False\n elif (option.startswith(\"6\")):\n payload = build_filler_payload(option.split(\" \")[1:])\n send_payload(payload)\n elif (option.startswith(\"7\")):\n section_splits = option.split(\" - \")\n filler_splits = section_splits[0].split(\" \")[1:]\n badchar_test = b\"\"\n try:\n badchar_splits = []\n if len(section_splits) > 1:\n badchar_splits = section_splits[1].split(\" \")\n\n altered_badchars = badchars.copy()\n \n for badchar_split in badchar_splits:\n altered_badchars.append(int(badchar_split, 16))\n\n # generate the string\n for i in range(0x00, 0xFF+1): # range(0x00, 0xFF) only returns up to 0xFE\n if i not in altered_badchars: # skip the badchars\n badchar_test += bytes([i]) # append each non-badchar char to the byte string\n\n # open a file for writing (\"w\") the byte string as binary (\"b\") data\n with open(\"badchar_test.bin\", \"wb\") as f:\n f.write(badchar_test)\n\n except IndexError:\n print(\"No removals\")\n if len(filler_splits) >= 1 and filler_splits[0].startswith(\"*\"):\n filler = b\"A\" * (int(filler_splits[0].split(\"*\")[1]) - len(badchar_test))\n payload = filler + badchar_test\n else:\n payload = build_filler_payload(filler_splits) + badchar_test\n send_payload(payload)\n elif (option.startswith(\"9\")):\n exploit()\n else:\n print(\"[!] 
Unknown option\")\n\n\nmain()\n","sub_path":"_references/bof.py","file_name":"bof.py","file_ext":"py","file_size_in_byte":11371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"218598012","text":"from nipype.pipeline.engine import Node, Workflow\nimport nipype.interfaces.utility as util\nimport nipype.interfaces.io as nio\nimport nipype.interfaces.freesurfer as fs\nimport nipype.interfaces.fsl as fsl\n\n\n'''\nWorkflow to extract relevant output from freesurfer directory\n'''\n\ndef create_mgzconvert_pipeline(name='mgzconvert'):\n \n # workflow\n mgzconvert = Workflow(name='mgzconvert')\n\n #inputnode \n inputnode=Node(util.IdentityInterface(fields=['fs_subjects_dir',\n 'fs_subject_id'\n ]),\n name='inputnode')\n \n #outputnode\n outputnode=Node(util.IdentityInterface(fields=['anat_head',\n 'anat_brain',\n 'brain_mask',\n 'wmseg',\n 'wmedge']),\n name='outputnode')\n \n\n # import files from freesurfer\n fs_import = Node(interface=nio.FreeSurferSource(),\n name = 'fs_import')\n \n \n # convert Freesurfer T1 file to nifti\n head_convert=Node(fs.MRIConvert(out_type='niigz',\n out_file='T1.nii.gz'),\n name='head_convert')\n \n # create brainmask from aparc+aseg with single dilation\n def get_aparc_aseg(files):\n for name in files:\n if 'aparc+aseg' in name:\n return name\n\n brainmask = Node(fs.Binarize(min=0.5,\n dilate=1,\n out_type='nii.gz'),\n name='brainmask')\n\n\n # fill holes in mask, smooth, rebinarize\n fillholes = Node(fsl.maths.MathsCommand(args='-fillh -s 3 -thr 0.1 -bin',\n out_file='T1_brain_mask.nii.gz'),\n name='fillholes')\n \n \n # mask T1 with the mask\n brain = Node(fsl.ApplyMask(out_file='T1_brain.nii.gz'),\n name='brain')\n\n # cortical and cerebellar white matter volumes to construct wm edge\n # [lh cerebral wm, lh cerebellar wm, rh cerebral wm, rh cerebellar wm, brain stem]\n wmseg = Node(fs.Binarize(out_type='nii.gz',\n match = [2, 7, 41, 46, 16],\n binary_file='T1_brain_wmseg.nii.gz'), \n name='wmseg')\n \n # make edge from wmseg to visualize coregistration quality\n edge = Node(fsl.ApplyMask(args='-edge -bin',\n out_file='T1_brain_wmedge.nii.gz'),\n name='edge')\n\n # connections\n mgzconvert.connect([(inputnode, fs_import, [('fs_subjects_dir','subjects_dir'),\n ('fs_subject_id', 'subject_id')]),\n (fs_import, brainmask, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),\n (fs_import, head_convert, [('T1', 'in_file')]),\n (fs_import, wmseg, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),\n (brainmask, fillholes, [('binary_file', 'in_file')]),\n (fillholes, brain, [('out_file', 'mask_file')]),\n (head_convert, brain, [('out_file', 'in_file')]),\n (wmseg, edge, [('binary_file', 'in_file'),\n ('binary_file', 'mask_file')]),\n (head_convert, outputnode, [('out_file', 'anat_head')]),\n (fillholes, outputnode, [('out_file', 'brain_mask')]),\n (brain, outputnode, [('out_file', 'anat_brain')]),\n (wmseg, outputnode, [('binary_file', 'wmseg')]),\n (edge, outputnode, [('out_file', 'wmedge')])\n ])\n \n return mgzconvert","sub_path":"src/lsd_lemon/struct_preproc/mgzconvert.py","file_name":"mgzconvert.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"387840047","text":"import string\r\nfrom collections import Counter\r\nimport matplotlib.pyplot as plt\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\n\r\ntext = open('test.csv', 
encoding='utf-8').read()\r\nlower_case = text.lower()\r\ncleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))\r\ntokenized_word = word_tokenize(cleaned_text, \"english\")\r\n\r\nfinal_word = []\r\nfor words in tokenized_word:\r\n if words not in stopwords.words(\"english\"):\r\n final_word.append(words)\r\n\r\nemotion_list = []\r\nwith open('emotions.txt', 'r') as file:\r\n for line in file:\r\n clear_line = line.replace('\\n', '').replace(',', '').replace(\"'\", '').strip()\r\n word, emotion = clear_line.split(\":\")\r\n if word in final_word:\r\n emotion_list.append(emotion)\r\nprint(emotion_list)\r\nw = Counter(emotion_list)\r\nprint(w)\r\n\r\n\r\ndef sentimental_analyse(sentiment_text):\r\n score = SentimentIntensityAnalyzer().polarity_scores(sentiment_text)\r\n neg = score['neg']\r\n pos = score['pos']\r\n if neg > pos:\r\n print(\"negative sentiment\")\r\n elif pos > neg:\r\n print(\"positive sentiment\")\r\n else:\r\n print(\"neutral sentiment\")\r\n\r\n\r\nsentimental_analyse(cleaned_text)\r\n\r\nfig, axl = plt.subplots()\r\naxl.bar(w.keys(), w.values())\r\nfig.autofmt_xdate()\r\nplt.savefig('graph.png')\r\nplt.show()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"127126767","text":"# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for exported, composite transformations.\"\"\"\n\nfrom absl.testing import absltest\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import placements\nfrom tensorflow_federated.python.core.impl import transformations\nfrom tensorflow_federated.python.core.impl.compiler import building_block_factory\nfrom tensorflow_federated.python.core.impl.compiler import building_blocks\nfrom tensorflow_federated.python.core.impl.compiler import transformation_utils\nfrom tensorflow_federated.python.core.impl.compiler import transformations as compiler_transformations\n\n\nclass RemoveLambdasAndBlocksTest(absltest.TestCase):\n\n def assertNoLambdasOrBlocks(self, comp):\n\n def _transform(comp):\n if (isinstance(comp, building_blocks.Call) and\n isinstance(comp.function, building_blocks.Lambda)) or isinstance(\n comp, building_blocks.Block):\n raise AssertionError('Encountered disallowed computation: {}'.format(\n comp.compact_representation()))\n return comp, True\n\n transformation_utils.transform_postorder(comp, _transform)\n\n def test_with_simple_called_lambda(self):\n identity_lam = building_blocks.Lambda(\n 'x', tf.int32, building_blocks.Reference('x', tf.int32))\n called_lambda = building_blocks.Call(identity_lam,\n building_blocks.Data('a', tf.int32))\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n called_lambda)\n self.assertTrue(modified)\n 
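# Annotation (added): removing the called identity lambda is a beta reduction, (lambda x: x)(a) ==> a, which the compact-representation assertion below checks.\n 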
self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n self.assertEqual(lambdas_and_blocks_removed.compact_representation(), 'a')\n\n def test_with_simple_block(self):\n data = building_blocks.Data('a', tf.int32)\n simple_block = building_blocks.Block([('x', data)],\n building_blocks.Reference(\n 'x', tf.int32))\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n simple_block)\n self.assertTrue(modified)\n self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n self.assertEqual(lambdas_and_blocks_removed.compact_representation(), 'a')\n\n def test_with_structure_replacing_federated_map(self):\n function_type = computation_types.FunctionType(tf.int32, tf.int32)\n tuple_ref = building_blocks.Reference('arg', [\n function_type,\n tf.int32,\n ])\n fn = building_blocks.Selection(tuple_ref, index=0)\n arg = building_blocks.Selection(tuple_ref, index=1)\n called_fn = building_blocks.Call(fn, arg)\n concrete_fn = building_blocks.Lambda(\n 'x', tf.int32, building_blocks.Reference('x', tf.int32))\n concrete_arg = building_blocks.Data('a', tf.int32)\n arg_tuple = building_blocks.Tuple([concrete_fn, concrete_arg])\n generated_structure = building_blocks.Block([('arg', arg_tuple)], called_fn)\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n generated_structure)\n self.assertTrue(modified)\n self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n\n def test_with_structure_replacing_federated_zip(self):\n fed_tuple = building_blocks.Reference(\n 'tup',\n computation_types.FederatedType([tf.int32] * 3, placements.CLIENTS))\n unzipped = building_block_factory.create_federated_unzip(fed_tuple)\n zipped = building_block_factory.create_federated_zip(unzipped)\n placement_unwrapped, _ = transformations.unwrap_placement(zipped)\n placement_gone = placement_unwrapped.argument\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n placement_gone)\n self.assertTrue(modified)\n self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n\n def test_with_nested_called_lambdas(self):\n identity_lam = building_blocks.Lambda(\n 'x', tf.int32, building_blocks.Reference('x', tf.int32))\n ref_to_fn = building_blocks.Reference('fn', identity_lam.type_signature)\n data = building_blocks.Data('a', tf.int32)\n called_inner_lambda = building_blocks.Call(ref_to_fn, data)\n higher_level_lambda = building_blocks.Lambda('fn',\n identity_lam.type_signature,\n called_inner_lambda)\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n higher_level_lambda)\n self.assertTrue(modified)\n self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n\n def test_with_multiple_reference_indirection(self):\n identity_lam = building_blocks.Lambda(\n 'x', tf.int32, building_blocks.Reference('x', tf.int32))\n tuple_wrapping_ref = building_blocks.Tuple(\n [building_blocks.Reference('a', identity_lam.type_signature)])\n selection_from_ref = building_blocks.Selection(\n building_blocks.Reference('b', tuple_wrapping_ref.type_signature),\n index=0)\n data = building_blocks.Data('a', tf.int32)\n called_lambda_with_indirection = building_blocks.Call(\n building_blocks.Reference('c', selection_from_ref.type_signature), data)\n blk = building_blocks.Block([\n ('a', identity_lam),\n ('b', tuple_wrapping_ref),\n ('c', selection_from_ref),\n ], called_lambda_with_indirection)\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n blk)\n 
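# Annotation (added): the block binds a reference chain a -> b -> c ahead of the call, so removal must inline through all three layers, roughly (let a=id, b=(a,), c=b[0] in c(data)) ==> data.\n 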
self.assertTrue(modified)\n self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n\n def test_with_higher_level_lambdas(self):\n self.skipTest('b/146904968')\n data = building_blocks.Data('a', tf.int32)\n dummy = building_blocks.Reference('z', tf.int32)\n lowest_lambda = building_blocks.Lambda(\n 'z', tf.int32,\n building_blocks.Tuple([dummy,\n building_blocks.Reference('x', tf.int32)]))\n middle_lambda = building_blocks.Lambda('x', tf.int32, lowest_lambda)\n lam_arg = building_blocks.Reference('x', middle_lambda.type_signature)\n rez = building_blocks.Call(lam_arg, data)\n left_lambda = building_blocks.Lambda('x', middle_lambda.type_signature, rez)\n higher_call = building_blocks.Call(left_lambda, middle_lambda)\n high_call = building_blocks.Call(higher_call, data)\n lambdas_and_blocks_removed, modified = compiler_transformations.remove_lambdas_and_blocks(\n high_call)\n self.assertTrue(modified)\n self.assertNoLambdasOrBlocks(lambdas_and_blocks_removed)\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"tensorflow_federated/python/core/impl/compiler/transformations_test.py","file_name":"transformations_test.py","file_ext":"py","file_size_in_byte":7227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"507614167","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom content.management.commands.create_sitemap import sitemaps\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url( r'^', include( 'master.urls' )),\n url( r'^', include( 'content.urls' )),\n url( r'^captcha/', include( 'captcha.urls' ) ),\n url( r'^user/', include( 'userapp.urls' ) ),\n url( r'^accounts/', include( 'registration.backends.default.urls' ) ),\n url( r'comments/', include( 'django.contrib.comments.urls' ) ),\n url( r'qna/', include( 'answers.urls' ) ),\n\n url( r'^admin/', include(admin.site.urls)),\n url( r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT} ),\n url( r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps} ),\n url( r'^sitemap-(?P
.+)\\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps} ),\n)\n","sub_path":"fghqlnoebnq/urls_local.py","file_name":"urls_local.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"403195396","text":"#\tCopyright (C) 2017 Battelle Memorial Institute\nimport json\nimport sys\nimport warnings\nimport csv\nimport fncs\nfrom ppcasefile import ppcasefile\nimport numpy as np\nimport pypower.api as pp\nimport math\nimport re\n\ndef summarize_opf(res):\n\tbus = res['bus']\n\tgen = res['gen']\n\n\tPload = bus[:,2].sum()\n\tPgen = gen[:,1].sum()\n\tPctLoss = 100.0 * (Pgen - Pload) / Pgen\n\n\tprint('success =', res['success'], 'in', res['et'], 'seconds')\n\tprint('Total Gen =', Pgen, ' Load =', Pload, ' Loss =', PctLoss, '%')\n\n\tprint('bus #, Pd, Qd, Vm, LMP_P, LMP_Q, MU_VMAX, MU_VMIN')\n\tfor row in bus:\n\t\tprint(int(row[0]),row[2],row[3],row[7],row[13],row[14],row[15],row[16])\n\n\tprint('gen #, bus, Pg, Qg, MU_PMAX, MU_PMIN, MU_PMAX, MU_PMIN')\n\tidx = 1\n\tfor row in gen:\n\t\tprint(idx,int(row[0]),row[1],row[2],row[21],row[22],row[23],row[24])\n\t\t++idx\n\ndef make_dictionary(ppc, rootname):\n\tfncsBuses = {}\n\tgenerators = {}\n\tunitsout = []\n\tbranchesout = []\n\tbus = ppc['bus']\n\tgen = ppc['gen']\n\tcost = ppc['gencost']\n\tfncsBus = ppc['FNCS']\n\tunits = ppc['UnitsOut']\n\tbranches = ppc['BranchesOut']\n\n\tfor i in range (gen.shape[0]):\n\t\tbusnum = gen[i,0]\n\t\tbustype = bus[busnum-1,1]\n\t\tif bustype == 1:\n\t\t\tbustypename = 'pq'\n\t\telif bustype == 2:\n\t\t\tbustypename = 'pv'\n\t\telif bustype == 3:\n\t\t\tbustypename = 'swing'\n\t\telse:\n\t\t\tbustypename = 'unknown'\n\t\tgenerators[str(i+1)] = {'bus':int(busnum),'bustype':bustypename,'Pnom':float(gen[i,1]),'Pmax':float(gen[i,8]),'genfuel':'tbd','gentype':'tbd',\n\t\t\t'StartupCost':float(cost[i,1]),'ShutdownCost':float(cost[i,2]), 'c2':float(cost[i,4]), 'c1':float(cost[i,5]), 'c0':float(cost[i,6])}\n\n\tfor i in range (fncsBus.shape[0]):\n\t\tbusnum = int(fncsBus[i,0])\n\t\tbusidx = busnum - 1\n\t\tfncsBuses[str(busnum)] = {'Pnom':float(bus[busidx,2]),'Qnom':float(bus[busidx,3]),'area':int(bus[busidx,6]),'zone':int(bus[busidx,10]),\n\t\t\t'ampFactor':float(fncsBus[i,2]),'GLDsubstations':[fncsBus[i,1]]}\n\n\tfor i in range (units.shape[0]):\n\t\tunitsout.append ({'unit':int(units[i,0]),'tout':int(units[i,1]),'tin':int(units[i,2])})\n\n\tfor i in range (branches.shape[0]):\n\t\tbranchesout.append ({'branch':int(branches[i,0]),'tout':int(branches[i,1]),'tin':int(branches[i,2])})\n\n\tdp = open (rootname + \"_m_dict.json\", \"w\")\n\tppdict = {'baseMVA':ppc['baseMVA'],'fncsBuses':fncsBuses,'generators':generators,'UnitsOut':unitsout,'BranchesOut':branchesout}\n\tprint (json.dumps(ppdict), file=dp, flush=True)\n\tdp.close()\n\ndef parse_mva(arg):\n\ttok = arg.strip('+-; MWVAKdrij')\n\tvals = re.split(r'[\\+-]+', tok)\n\tif len(vals) < 2: # only a real part provided\n\t\tvals.append('0')\n\tvals = [float(v) for v in vals]\n\n\tif '-' in tok:\n\t\tvals[1] *= -1.0\n\tif arg.startswith('-'):\n\t\tvals[0] *= -1.0\n\n\tif 'd' in arg:\n\t\tvals[1] *= (math.pi / 180.0)\n\t\tp = vals[0] * math.cos(vals[1])\n\t\tq = vals[0] * math.sin(vals[1])\n\telif 'r' in arg:\n\t\tp = vals[0] * math.cos(vals[1])\n\t\tq = vals[0] * math.sin(vals[1])\n\telse:\n\t\tp = vals[0]\n\t\tq = vals[1]\n\n\tif 'KVA' in arg:\n\t\tp /= 1000.0\n\t\tq /= 1000.0\n\telif 'MVA' in arg:\n\t\tp *= 1.0\n\t\tq *= 1.0\n\telse: # VA\n\t\tp /= 
1000000.0\n\t\tq /= 1000000.0\n\n\treturn p, q\n\nwith warnings.catch_warnings():\n\twarnings.simplefilter(\"ignore\") # TODO - pypower is using NumPy doubles for integer indices\n\t#warnings.filterwarnings(\"ignore\",category=DeprecationWarning)\n\n\tif len(sys.argv) == 5:\n\t\trootname = sys.argv[1]\n\t\tStartTime = sys.argv[2]\n\t\ttmax = int(sys.argv[3])\n\t\tdt = int(sys.argv[4])\n\telif len(sys.argv) == 1:\n\t\trootname = 'ppcase'\n\t\tStartTime = \"2013-07-01 00:00:00\"\n\t\tdt = 3600\n\t\ttmax = 2 * 24 * 3600\n\telse:\n\t\tprint ('usage: python fncsPYPOWER.py [rootname StartTime tmax dt]')\n\t\tsys.exit()\n\n\tppc = ppcasefile()\n\tmake_dictionary(ppc, rootname)\n\n\tbus_mp = open (\"bus_\" + rootname + \"_metrics.json\", \"w\")\n\tgen_mp = open (\"gen_\" + rootname + \"_metrics.json\", \"w\")\n\tsys_mp = open (\"sys_\" + rootname + \"_metrics.json\", \"w\")\n\tbus_meta = {'LMP_P':{'units':'USD/kwh','index':0},'LMP_Q':{'units':'USD/kvarh','index':1},\n\t\t'PD':{'units':'MW','index':2},'QD':{'units':'MVAR','index':3},'Vang':{'units':'deg','index':4},\n\t\t'Vmag':{'units':'pu','index':5},'Vmax':{'units':'pu','index':6},'Vmin':{'units':'pu','index':7}}\n\tgen_meta = {'Pgen':{'units':'MW','index':0},'Qgen':{'units':'MVAR','index':1},'LMP_P':{'units':'USD/kwh','index':2}}\n\tsys_meta = {'Ploss':{'units':'MW','index':0},'Converged':{'units':'true/false','index':1}}\n\tbus_metrics = {'Metadata':bus_meta,'StartTime':StartTime}\n\tgen_metrics = {'Metadata':gen_meta,'StartTime':StartTime}\n\tsys_metrics = {'Metadata':sys_meta,'StartTime':StartTime}\n\n\tgencost = ppc['gencost']\n\tfncsBus = ppc['FNCS']\n\tppopt = pp.ppoption(VERBOSE=0, OUT_ALL=0) # TODO - the PF_DC option doesn't seem to work\n\tloads = np.loadtxt('NonGLDLoad.txt', delimiter=',')\n\n\toutage = ppc['UnitsOut'][0]\n\tprint ('unit', outage[0], 'off from', outage[1], 'to', outage[2], flush=True)\n\n\tnloads = loads.shape[0]\n\tts = 0\n\ttnext = 0\n\n\top = open (rootname + '.csv', 'w')\n\tprint ('t[s],Converged,Pload,P7,V7,LMP_P7,LMP_Q7,Pgen1,Pgen2,Pgen3,Pgen4', file=op, flush=True)\n\tfncs.initialize()\n\n#\tts = -dt\n#\twhile ts <= tmax:\n#\t\tts += dt\n\n\twhile ts <= tmax:\n\t\tprint (\"looping\", ts, tnext, tmax, flush=True)\n\t\tif ts >= tnext:\n\t\t\tidx = int (ts / 300) % nloads\n\t\t\tbus = ppc['bus']\n\t\t\tgen = ppc['gen']\n\t\t\tbus[6,2] = loads[idx,0]\n\t\t\tbus[4,2] = loads[idx,1]\n\t\t\tbus[8,2] = loads[idx,2]\n\t\t\tif ts >= outage[1] and ts <= outage[2]:\n\t\t\t\tgen[outage[0],7] = 0\n\t\t\telse:\n\t\t\t\tgen[outage[0],7] = 1\n\t\t\tfor row in ppc['FNCS']:\n\t\t\t\tnewload = float(row[2]) * float(row[3])\n\t\t\t\tnewidx = int(row[0]) - 1\n\t\t\t\tprint (' GLD load', newload, 'at', newidx)\n\t\t\t\tbus[newidx,2] += newload\n\t\t\tres = pp.runopf(ppc, ppopt)\n\t\t\tbus = res['bus']\n\t\t\tgen = res['gen']\n\t\t\tPload = bus[:,2].sum()\n\t\t\tPgen = gen[:,1].sum()\n\t\t\tPloss = Pgen - Pload\n\t\t\tprint (' ', res['success'], bus[:,2].sum(), flush=True)\n\t\t\tprint (ts, res['success'], bus[:,2].sum(), bus[6,2], bus[6,7], bus[6,13], bus[6,14], gen[0,1], gen[1,1], gen[2,1], gen[3,1], sep=',', file=op, flush=True)\n\t\t\tfncs.publish('LMP_B7', 0.001 * bus[6,13])\n\t\t\tfncs.publish('three_phase_voltage_B7', 1000.0 * bus[6,7] * bus[6,9])\n\t\t\tprint(' publishing LMP=', 0.001 * bus[6,13], 'vpos=', 1000.0 * bus[6,7] * bus[6,9], flush=True)\n\t\t\t# update the metrics\n\t\t\tsys_metrics[str(ts)] = {rootname:[Ploss,res['success']]}\n\t\t\tbus_metrics[str(ts)] = {}\n\t\t\tfor i in range (fncsBus.shape[0]):\n\t\t\t\tbusnum 
= int(fncsBus[i,0])\n\t\t\t\tbusidx = busnum - 1\n\t\t\t\trow = bus[busidx].tolist()\n\t\t\t\tbus_metrics[str(ts)][str(busnum)] = [row[13]*0.001,row[14]*0.001,row[2],row[3],row[8],row[7],row[11],row[12]]\n\t\t\tgen_metrics[str(ts)] = {}\n\t\t\tfor i in range (gen.shape[0]):\n\t\t\t\trow = gen[i].tolist()\n\t\t\t\tbusidx = int(row[0] - 1)\n\t\t\t\tgen_metrics[str(ts)][str(i+1)] = [row[1],row[2],float(bus[busidx,13])*0.001]\n\t\t\ttnext += dt\n\t\t\tif tnext > tmax:\n\t\t\t\tprint ('breaking out at',tnext,flush=True)\n\t\t\t\tbreak\n\t\tts = fncs.time_request(tnext)\n\t\tevents = fncs.get_events()\n\t\tfor key in events:\n\t\t\tsubstation = key.decode()\n\t\t\tGLDload = parse_mva (fncs.get_value(key).decode())\n#\t\t\tprint (' **', ts, substation, GLDload)\n\t\t\tfor row in fncsBus:\n\t\t\t\tif substation == row[1]:\n#\t\t\t\t\tprint(' assigning',substation,GLDload)\n\t\t\t\t\trow[3] = GLDload[0]\n\n#\tsummarize_opf(res)\n\tprint ('writing metrics', flush=True)\n\tprint (json.dumps(bus_metrics), file=bus_mp, flush=True)\n\tprint (json.dumps(gen_metrics), file=gen_mp, flush=True)\n\tprint (json.dumps(sys_metrics), file=sys_mp, flush=True)\n\tprint ('closing files', flush=True)\n\tbus_mp.close()\n\tgen_mp.close()\n\tsys_mp.close()\n\top.close()\n\tprint ('finalizing FNCS', flush=True)\n\tfncs.finalize()\n\n","sub_path":"Version 0/Setpoint 72/Very Good Insultation/fncsPYPOWER.py","file_name":"fncsPYPOWER.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"245865716","text":"#!usr/bin/env python\n\n\"\"\"\nConverting fastaID_to_KEGG.py output to contig KEGG orthology table\nAuthor: Keigo IDE\nmail: ide_0109@outlook.jp\ndate: 4/2462017\n\"\"\"\n\nfrom collections import defaultdict\nimport argparse\nimport sys\n\ndef main():\n parser = argparse.ArgumentParser(description='Create otu table from uc file')\n parser.add_argument('-i', '--fasta_gene_list', required=True, type=str, help='Input fastaID_to_KEGG output')\n parser.add_argument('-g', '--gene_list', required=True, type=str, help='input gene_list file')\n parser.add_argument('-o', '--output', required=True, type=str, help='Output file')\n args = parser.parse_args()\n\n fasta_gene = args.fasta_gene_list\n genes = args.gene_list\n output_file = args.output\n\n contig_kegg_orthology(fasta_gene, genes, output_file)\n\ndef contig_kegg_orthology(input_file, gene_list, out):\n '''\n keyword argunemt:\n input_file: fastaID_to_KEGG results files were produced by fastaID_to_KEGG.py\n (ex. k141_1\tpga:PGA1_c15910\n k141_2\tcao:Celal_3213\n k141_3\tsrm:SRM_00787\n k141_6\tpsl:Psta_2313)\n\n return:\n fasta name, kegg Orthology table\n (ex. 
k141_1 k0002\n k141_2\tk0003\n k141_3\tk0332\n k141_6\tk3821)\n '''\n input_files = open(input_file, 'r')\n kegg = input_files.readline()\n\n gene_kegg = open(gene_list, 'r')\n gene_kegg_line = gene_kegg.readline()\n\n gene_kegg_dict = defaultdict(dict)\n\n while gene_kegg_line:\n kegg_list = gene_kegg_line.split()\n kegg_o = kegg_list[0]\n gene = kegg_list[1]\n\n gene_kegg_dict[gene] = kegg_o\n gene_kegg_line = gene_kegg.readline()\n\n\n while kegg:\n file_list = kegg.split()\n Kegg_orthology = gene_kegg_dict[file_list[1]]\n print(len(Kegg_orthology))\n \n if len(Kegg_orthology) == 0:\n Kegg_orthology = \"Unknown\"\n\n with open(out,'a') as file:\n output = file_list[0]+\"\\t\"+Kegg_orthology+\"\\n\"\n file.write(output)\n\n kegg = input_files.readline()\n\nif __name__ == '__main__':\n main()\n","sub_path":"gene_to_KEGG.py","file_name":"gene_to_KEGG.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"164065110","text":"from pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nimport statsmodels.api as sm\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nimport lightgbm as lgb\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\n\n###############################################################################\ndir = Path(__file__).resolve().parents[2]\ninput_path = dir / \"input\"\nmodel_path = dir /\"src/estimation/models/\"\n###############################################################################\n# Read in and transfrom fertility data\ndef scale_fertility():\n \n df_fertility = pd.read_csv(input_path / \"fertility.csv\")\n df_fertility.rename(columns={\"Age\": \"age\",\n \"1968\": \"prob\"},\n inplace=True)\n \n df_fertility[\"prob\"] = df_fertility[\"prob\"]/1000\n return df_fertility\n\n# Features for estimating birth, includes age specific fertility and cumulative sum of kids \ndef birth_features(dataf):\n \n df_fert = scale_fertility()\n \n df_male = dataf[dataf[\"female\"]==0]\n df_female = dataf[dataf[\"female\"]==1]\n\n df_merged = pd.merge(df_female, df_fert, how=\"left\", on=\"age\")\n dataf_out = pd.concat([df_merged, df_male], axis=0, join=\"outer\")\n dataf_out.fillna(0, inplace=True)\n\n\n # dataf_out.sort_values([\"pid\", \"year\"], inplace=True)\n # dataf_out[\"cum_births\"] = dataf_out.groupby(\"pid\")[\"birth\"].apply(lambda x: x.cumsum())\n \n return dataf_out\n\n\ndef make_features(dataf, birth):\n dataf = dataf.copy()\n \n # Add birth features\n if birth:\n dataf = birth_features(dataf)\n else:\n pass\n \n # Adding hourly wages to estimation\n periods = [\"t1\", \"t2\"]\n for h in periods:\n name = \"hourly_wage_\" + h\n \n dataf[name] = dataf[\"gross_earnings_\" + h] / dataf[\"hours_\" + h]\n dataf[name].fillna(0, inplace=True)\n dataf.loc[dataf[\"hours_\" + h] == 0, name] = 0\n \n # Adding hours and gross earnings difference\n for v in [\"hours\", \"gross_earnings\"]:\n dataf[\"diff_\"+v] = dataf[v + \"_t1\"] - dataf[v + \"_t2\"]\n \n \n return dataf\n\n\n# Getting dataframe into right shape\ndef getdf(dataf):\n dataf = dataf.copy()\n\n # Only keeping those with more than two consective years\n condition = dataf.groupby('pid')['year'].count()>2\n dataf = dataf.set_index('pid')[condition]\n year_list = dataf['year'].unique()\n\n # Making space\n dataf['hours_t1'] = np.NaN\n dataf['gross_earnings_t1'] = np.NaN\n\n # Final dataframe for output\n dataf_out = 
pd.DataFrame()\n\n # For each year use the previous year's values to fill up t-1 and t-2 columns\n for i in np.sort(year_list)[2:]:\n df_now = dataf[dataf['year'] == i].copy()\n df_yesterday = dataf[dataf['year'] == (i-1)].copy()\n df_twoyesterdays = dataf[dataf['year'] == (i-2)].copy()\n\n df_now['retired_t1'] = df_yesterday['retired']\n df_now['working_t1'] = df_yesterday['working']\n df_now['fulltime_t1'] = df_yesterday['fulltime']\n df_now['hours_t1'] = df_yesterday['hours']\n df_now['hours_t2'] = df_twoyesterdays['hours']\n df_now['gross_earnings_t1'] = df_yesterday['gross_earnings']\n df_now['gross_earnings_t2'] = df_twoyesterdays['gross_earnings']\n df_now['employment_status_t1'] = df_yesterday['employment_status']\n df_now['employment_status_t2'] = df_twoyesterdays['employment_status']\n\n dataf_out = pd.concat([dataf_out, df_now])\n\n dataf_out.reset_index(inplace=True)\n dataf_out.dropna(inplace=True)\n return dataf_out\n\n\ndef get_dependent_var(dataf, dep_var):\n dataf = dataf.copy()\n\n dataf.rename(columns={dep_var: 'dep_var'}, inplace=True)\n return dataf\n\ndef _prepare_classifier(dataf):\n dataf = dataf.copy()\n\n y = dataf['dep_var']\n X = dataf.drop('dep_var', axis=1)\n\n train, test = train_test_split(dataf, test_size = 0.35, stratify = dataf[\"dep_var\"])\n \n X_train = train.drop(\"dep_var\", axis=1)\n X_test = test.drop(\"dep_var\", axis=1)\n y_train = train[\"dep_var\"]\n y_test = test[\"dep_var\"]\n\n # Making weights\n weights_train = X_train['personweight']\n X_train.drop('personweight', axis=1, inplace=True)\n\n weights_test = X_test['personweight']\n X_test.drop('personweight', axis=1, inplace=True)\n\n # When having weights and interaction effects, drop interaction w/ weights\n if \"personweight_interacted\" in X.columns.tolist():\n X_train.drop('personweight_interacted', axis=1, inplace=True)\n X_test.drop('personweight_interacted', axis=1, inplace=True)\n else:\n pass\n\n # Scaling\n X_train_scaled = StandardScaler().fit_transform(np.asarray(X_train))\n X_test_scaler = StandardScaler().fit(np.asarray(X_test))\n X_test_scaled = X_test_scaler.transform(np.asarray(X_test))\n\n # Coeffs feature_names\n feature_names = X_train.columns.tolist()\n\n # For Standard Part:\n X_train = sm.add_constant(X_train)\n X_test = sm.add_constant(X_test)\n\n # For ML part:\n lgb_train = lgb.Dataset(X_train_scaled, y_train,\n weight = weights_train)\n lgb_test = lgb.Dataset(X_test_scaled, y_test,\n weight = weights_test)\n\n # Return dictionary with needed values\n out_dici = {'X_train': X_train_scaled,\n 'X_test': X_test_scaled,\n 'X_scaler': X_test_scaler,\n 'y_train': y_train,\n 'y_test': y_test,\n 'lgb_train': lgb_train,\n 'lgb_test': lgb_test,\n 'features': feature_names,\n 'weights': weights_train}\n return out_dici\n\ndef _prepare_regressor(dataf):\n dataf = dataf.copy()\n\n y = dataf['dep_var']\n X = dataf.drop('dep_var', axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.05)\n\n # Making weights\n weights_train = X_train['personweight']\n X_train.drop('personweight', axis=1, inplace=True)\n\n weights_test = X_test['personweight']\n X_test.drop('personweight', axis=1, inplace=True)\n\n # Scaling\n X_train_scaled = StandardScaler().fit_transform(np.asarray(X_train))\n X_test_scaler = StandardScaler().fit(np.asarray(X_test))\n X_test_scaled = X_test_scaler.transform(np.asarray(X_test))\n y_train_scaled = StandardScaler().fit_transform(np.asarray(y_train).reshape(-1,1))\n\n # Saving the scaler of the test data to convert the predicted values 
\n\ndef get_dependent_var(dataf, dep_var):\n    dataf = dataf.copy()\n\n    dataf.rename(columns={dep_var: 'dep_var'}, inplace=True)\n    return dataf\n\ndef _prepare_classifier(dataf):\n    dataf = dataf.copy()\n\n    y = dataf['dep_var']\n    X = dataf.drop('dep_var', axis=1)\n\n    train, test = train_test_split(dataf, test_size = 0.35, stratify = dataf[\"dep_var\"])\n    \n    X_train = train.drop(\"dep_var\", axis=1)\n    X_test = test.drop(\"dep_var\", axis=1)\n    y_train = train[\"dep_var\"]\n    y_test = test[\"dep_var\"]\n\n    # Making weights\n    weights_train = X_train['personweight']\n    X_train.drop('personweight', axis=1, inplace=True)\n\n    weights_test = X_test['personweight']\n    X_test.drop('personweight', axis=1, inplace=True)\n\n    # When having weights and interaction effects, drop interaction w/ weights\n    if \"personweight_interacted\" in X.columns.tolist():\n        X_train.drop('personweight_interacted', axis=1, inplace=True)\n        X_test.drop('personweight_interacted', axis=1, inplace=True)\n    else:\n        pass\n\n    # Scaling: fit the scaler on the training data only and reuse it for the\n    # test data, so no information leaks from the test set into the features\n    X_scaler = StandardScaler().fit(np.asarray(X_train))\n    X_train_scaled = X_scaler.transform(np.asarray(X_train))\n    X_test_scaled = X_scaler.transform(np.asarray(X_test))\n\n    # Coeffs feature_names\n    feature_names = X_train.columns.tolist()\n\n    # For Standard Part:\n    X_train = sm.add_constant(X_train)\n    X_test = sm.add_constant(X_test)\n\n    # For ML part:\n    lgb_train = lgb.Dataset(X_train_scaled, y_train,\n                            weight = weights_train)\n    lgb_test = lgb.Dataset(X_test_scaled, y_test,\n                           weight = weights_test)\n\n    # Return dictionary with needed values\n    out_dici = {'X_train': X_train_scaled,\n                'X_test': X_test_scaled,\n                'X_scaler': X_scaler,\n                'y_train': y_train,\n                'y_test': y_test,\n                'lgb_train': lgb_train,\n                'lgb_test': lgb_test,\n                'features': feature_names,\n                'weights': weights_train}\n    return out_dici\n\ndef _prepare_regressor(dataf):\n    dataf = dataf.copy()\n\n    y = dataf['dep_var']\n    X = dataf.drop('dep_var', axis=1)\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.05)\n\n    # Making weights\n    weights_train = X_train['personweight']\n    X_train.drop('personweight', axis=1, inplace=True)\n\n    weights_test = X_test['personweight']\n    X_test.drop('personweight', axis=1, inplace=True)\n\n    # Scaling: again fit the scalers on the training data only\n    X_scaler = StandardScaler().fit(np.asarray(X_train))\n    X_train_scaled = X_scaler.transform(np.asarray(X_train))\n    X_test_scaled = X_scaler.transform(np.asarray(X_test))\n\n    # Keeping the y scaler so that predicted values can be converted back later\n    y_scaler = StandardScaler().fit(np.asarray(y_train).reshape(-1,1))\n    y_train_scaled = y_scaler.transform(np.asarray(y_train).reshape(-1,1))\n    y_test_scaled = y_scaler.transform(np.asarray(y_test).reshape(-1,1))\n\n    feature_names = X_train.columns.tolist()\n    y_test_scaled = np.ravel(y_test_scaled)\n    y_train_scaled = np.ravel(y_train_scaled)\n\n    # For Standard Part:\n    X_train = sm.add_constant(X_train)\n    X_test = sm.add_constant(X_test)\n\n    # For ML part:\n    lgb_train = lgb.Dataset(X_train_scaled, y_train,\n                            weight = weights_train)\n    lgb_test = lgb.Dataset(X_test_scaled, y_test,\n                           weight = weights_test)\n\n\n    out_dici = {'X_train': X_train_scaled,\n                'X_test': X_test,\n                'X_scaler': X_scaler,\n                'y_train': y_train_scaled,\n                'y_test': y_test,\n                'y_scaler': y_scaler,\n                'lgb_train': lgb_train,\n                'lgb_test': lgb_test,\n                'features': feature_names,\n                'weights': weights_train}\n    return out_dici\n\n# Interaction effects for standard part\ndef _interact(dataf, estimate):\n    dataf = dataf.copy()\n\n    names = dataf.columns.tolist()\n    if estimate==1:\n        names.remove('dep_var')\n    else:\n        pass\n\n    if \"female\" in names:\n        names.remove('female')\n    else:\n        pass\n\n    for name in names:\n        df_tmp = np.multiply(dataf[name],dataf['female'])\n        var_name = name + \"_interacted\"\n        dataf[var_name] = df_tmp\n    return dataf\n\ndef _age_squared(dataf):\n    dataf = dataf.copy()\n    dataf['age_squared'] = dataf['age'].astype('int64')**2\n    return dataf\n\ndef _add_constant(dataf):\n    dataf = dataf.copy()\n    dataf = sm.add_constant(dataf)\n    return dataf\n\n\n#############################################################################\n#############################################################################\n##############################################################################\ndef data_birth(dataf, estimate=1):\n    dataf = dataf.copy()\n    # dataf = dataf[(dataf['female']==1) & (dataf['child']==0)]\n    dataf = make_features(dataf, True)\n\n    if estimate == 1:\n        dataf= get_dependent_var(dataf, 'birth')\n        vars_retain = ['dep_var',\n                       'education',\n                       'age',\n                       'married',\n                       'n_children',\n                       'hh_youngest_age',\n                       'personweight']\n    elif estimate == 0:\n        vars_retain = ['education',\n                       'age',\n                       'married',\n                       'n_children',\n                       'hh_youngest_age']\n    else:\n        raise ValueError(\"0 is for simulation, 1 for estimation\")\n\n    dataf = dataf[vars_retain]\n    #dataf = _interact(dataf,estimate)\n    return dataf\n\ndef estimate_birth(dataf):\n    dataf = dataf.copy()\n\n    dataf = data_birth(dataf)\n    dataf.dropna(inplace=True)\n    dict = _prepare_classifier(dataf)\n\n    params_m = {'task' : 'train',\n                'boosting_type' : 'gbdt',\n                'n_estimators': 350,\n                'objective': 'binary',\n                'eval_metric': 'logloss',\n                'learning_rate': 0.05,\n                'feature_fraction': [0.9],\n                'num_leaves': 31,\n                'verbose': 0}\n\n    model = LogisticRegression(C=1e9)\n    logit = model.fit(dict['X_train'], dict['y_train'],\n                      sample_weight=dict['weights'])\n\n\n    ml = lgb.train(params_m,\n                   train_set = dict['lgb_train'],\n                   valid_sets = dict['lgb_test'],\n                   feature_name = dict['features'],\n                   early_stopping_rounds = 5)\n\n    pickle.dump(logit,\n                open(model_path / \"birth_logit\", 'wb'))\n    ml.save_model(str(model_path / \"birth_ml.txt\"))\n    pickle.dump(dict['X_scaler'],\n                open(model_path / \"birth_X_scaler\", 'wb'))\n\n\ndef data_retired(dataf, estimate=1):\n    dataf = dataf.copy()\n    dataf = make_features(dataf, False)\n\n    if estimate == 1:\n        dataf= get_dependent_var(dataf, 'retired')\n        vars_retain = ['dep_var',\n                       'age',\n                       'female',\n                       'retired_t1',\n                       'personweight']\n    elif estimate == 0:\n        vars_retain = ['age',\n                       'female',\n                       'retired_t1']\n    else:\n        raise ValueError(\"0 is 
for simulation, 1 for estimation\")\n\n\n dataf = dataf[vars_retain]\n dataf = _interact(dataf,estimate)\n dataf = _age_squared(dataf)\n\n return dataf\n\ndef estimate_retired(dataf):\n dataf = dataf.copy()\n\n dataf = data_retired(dataf)\n dataf.dropna(inplace=True)\n dict = _prepare_classifier(dataf)\n\n params_m = {'boosting_type': 'gbdt',\n 'n_estimators': 350,\n 'objective': 'binary',\n 'eval_metric': 'logloss',\n 'learning_rate': 0.05,\n 'feature_fraction': [0.9],\n 'num_leaves': 31,\n 'verbose': 0}\n\n model = LogisticRegression(C=1e9)\n logit = model.fit(dict['X_train'], dict['y_train'],\n sample_weight=dict['weights'])\n\n ml = lgb.train(params_m,\n train_set = dict['lgb_train'],\n valid_sets = dict['lgb_test'],\n feature_name = dict['features'],\n early_stopping_rounds = 5)\n\n pickle.dump(logit,\n open(model_path / \"retired_logit\", 'wb'))\n ml.save_model(str(model_path / \"retired_ml.txt\"))\n pickle.dump(dict['X_scaler'],\n open(model_path / \"retired_X_scaler\", 'wb'))\n\ndef data_working(dataf, estimate=1):\n dataf = dataf.copy()\n dataf = dataf[dataf['retired']==0]\n dataf = make_features(dataf, False)\n\n\n if estimate == 1:\n dataf= get_dependent_var(dataf, 'working')\n vars_retain = ['dep_var',\n 'fulltime_t1',\n 'working_t1',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income',\n 'hh_frac_working',\n 'female',\n 'age',\n 'personweight']\n elif estimate == 0:\n vars_retain = ['fulltime_t1',\n 'working_t1',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income',\n 'hh_frac_working',\n 'female',\n 'age']\n else:\n raise ValueError(\"0 is for simulation, 1 for estimation\")\n\n dataf = dataf[vars_retain]\n dataf = _interact(dataf,estimate)\n dataf = _age_squared(dataf)\n\n return dataf\n\ndef estimate_working(dataf):\n dataf = dataf.copy()\n\n dataf = data_working(dataf)\n dataf.dropna(inplace=True)\n dict = _prepare_classifier(dataf)\n\n params_m = {'task' : 'train',\n 'boosting_type' : 'gbdt',\n 'n_estimators': 350,\n 'objective': 'binary',\n 'eval_metric': 'logloss',\n 'learning_rate': 0.05,\n 'feature_fraction': [0.9],\n 'num_leaves': 31,\n 'verbose': 0}\n\n model = LogisticRegression(C=1e9)\n logit = model.fit(dict['X_train'], dict['y_train'],\n sample_weight=dict['weights'])\n\n ml = lgb.train(params_m,\n train_set = dict['lgb_train'],\n valid_sets = dict['lgb_test'],\n feature_name = dict['features'],\n early_stopping_rounds = 5)\n\n pickle.dump(logit,\n open(model_path / \"working_logit\", 'wb'))\n ml.save_model(str(model_path / \"working_ml.txt\"))\n pickle.dump(dict['X_scaler'],\n open(model_path / \"working_X_scaler\", 'wb'))\n\ndef data_fulltime(dataf, estimate=1):\n dataf = dataf.copy()\n dataf = dataf[dataf['working']==1]\n dataf = make_features(dataf, False)\n\n\n if estimate == 1:\n dataf= get_dependent_var(dataf, 'fulltime')\n vars_retain = ['dep_var',\n 'fulltime_t1',\n 'working_t1',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income',\n 'hh_frac_working',\n 'female',\n 'age',\n 'personweight']\n elif estimate == 0:\n vars_retain = ['fulltime_t1',\n 'working_t1',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income', 'hh_frac_working',\n 'female',\n 'age']\n else:\n raise ValueError(\"0 is for simulation, 1 for estimation\")\n\n dataf = dataf[vars_retain]\n dataf = _interact(dataf,estimate)\n dataf = _age_squared(dataf)\n\n return dataf\n\ndef estimate_fulltime(dataf):\n dataf = dataf.copy()\n\n dataf = data_fulltime(dataf)\n dataf.dropna(inplace=True)\n dict = _prepare_classifier(dataf)\n\n params_m = {'task' : 'train',\n 'boosting_type' : 'gbdt',\n 
'n_estimators': 350,\n 'objective': 'binary',\n 'eval_metric': 'logloss',\n 'learning_rate': 0.05,\n 'feature_fraction': [0.9],\n 'num_leaves': 31,\n 'verbose': 0}\n\n model = LogisticRegression(C=1e9)\n logit = model.fit(dict['X_train'], dict['y_train'],\n sample_weight=dict['weights'])\n\n ml = lgb.train(params_m,\n train_set = dict['lgb_train'],\n valid_sets = dict['lgb_test'],\n feature_name = dict['features'],\n early_stopping_rounds = 5)\n\n pickle.dump(logit,\n open(model_path / \"fulltime_logit\", 'wb'))\n ml.save_model(str(model_path / \"fulltime_ml.txt\"))\n pickle.dump(dict['X_scaler'],\n open(model_path / \"fulltime_X_scaler\", 'wb'))\n\ndef data_hours(dataf, estimate=1):\n dataf = dataf.copy()\n dataf = dataf[dataf['working']==1]\n dataf = make_features(dataf, False)\n\n\n if estimate == 1:\n dataf= get_dependent_var(dataf, 'hours')\n vars_retain = ['dep_var',\n 'hours_t1',\n 'hours_t2',\n 'fulltime',\n 'fulltime_t1',\n 'gross_earnings_t1',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income',\n 'hh_frac_working',\n 'female',\n 'age',\n 'personweight']\n elif estimate == 0:\n vars_retain = ['hours_t1',\n 'hours_t2',\n 'fulltime',\n 'fulltime_t1',\n 'gross_earnings_t1',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income',\n 'hh_frac_working',\n 'female',\n 'age']\n else:\n raise ValueError(\"0 is for simulation, 1 for estimation\")\n\n dataf = dataf[vars_retain]\n dataf = _age_squared(dataf)\n\n return dataf\n\ndef estimate_hours(dataf):\n dataf = dataf.copy()\n\n dataf = data_hours(dataf)\n dataf.dropna(inplace=True)\n dict = _prepare_regressor(dataf)\n\n params_r = {'boosting_type' : 'gbdt',\n 'n_estimators': 350,\n 'objective' : 'l2',\n 'metric' : 'l2',\n 'num_leaves' : 31,\n 'learning_rate' : 0.15,\n 'feature_fraction': [0.9],\n 'bagging_fraction': [0.8],\n 'bagging_freq': [5],\n 'verbose' : 5}\n\n model = LinearRegression()\n ols = model.fit(dict['X_train'], dict['y_train'],\n sample_weight=dict['weights'])\n\n ml = lgb.train(params_r,\n train_set = dict['lgb_train'],\n valid_sets = dict['lgb_test'],\n feature_name = dict['features'],\n early_stopping_rounds = 5)\n\n pickle.dump(ols,\n open(model_path / \"hours_ols\", 'wb'))\n ml.save_model(str(model_path / \"hours_ml.txt\"))\n pickle.dump(dict['y_scaler'],\n open(model_path / \"hours_y_scaler\", 'wb'))\n pickle.dump(dict['X_scaler'],\n open(model_path / \"hours_X_scaler\", 'wb'))\n\ndef data_earnings(dataf, estimate=1):\n dataf = dataf.copy()\n dataf = dataf[dataf['working']==1]\n dataf = make_features(dataf, False)\n\n\n if estimate == 1:\n dataf= get_dependent_var(dataf, 'gross_earnings')\n vars_retain = ['dep_var',\n 'gross_earnings_t1',\n 'gross_earnings_t2',\n 'fulltime',\n 'hours',\n 'education',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income',\n 'hh_frac_working',\n 'female',\n 'age',\n 'personweight']\n elif estimate == 0:\n vars_retain = ['gross_earnings_t1',\n 'gross_earnings_t2',\n 'fulltime',\n 'hours',\n 'education',\n 'n_children',\n 'hh_youngest_age',\n 'hh_income', 'hh_frac_working',\n 'female',\n 'age']\n else:\n raise ValueError(\"0 is for simulation, 1 for estimation\")\n\n dataf = dataf[vars_retain]\n dataf = _age_squared(dataf)\n\n return dataf\n\ndef estimate_earnings(dataf):\n dataf = dataf.copy()\n\n dataf = data_earnings(dataf)\n dataf.dropna(inplace=True)\n dict = _prepare_regressor(dataf)\n\n params_r = {'boosting_type' : 'gbdt',\n 'n_estimators': 350,\n 'objective' : 'l2',\n 'metric' : 'l2',\n 'num_leaves' : 31,\n 'learning_rate' : 0.15,\n 'feature_fraction': [0.9],\n 'bagging_fraction': 
[0.8],\n                'bagging_freq': [5],\n                'verbose' : 5}\n\n    model = LinearRegression()\n    ols = model.fit(dict['X_train'], dict['y_train'],\n                    sample_weight=dict['weights'])\n\n    ml = lgb.train(params_r,\n                   train_set = dict['lgb_train'],\n                   valid_sets = dict['lgb_test'],\n                   feature_name = dict['features'],\n                   early_stopping_rounds = 5)\n\n    pickle.dump(ols,\n                open(model_path / \"gross_earnings_ols\", 'wb'))\n    ml.save_model(str(model_path / \"gross_earnings_ml.txt\"))\n    pickle.dump(dict['y_scaler'],\n                open(model_path / \"gross_earnings_y_scaler\", 'wb'))\n    pickle.dump(dict['X_scaler'],\n                open(model_path / \"gross_earnings_X_scaler\", 'wb'))\n\n\n###############################################################################\nif __name__ == \"__main__\":\n    df = pd.read_pickle(input_path / 'merged').dropna()\n    df1 = getdf(df)\n\n    estimate_retired(df1)\n    estimate_working(df1)\n    estimate_fulltime(df1)\n    estimate_hours(df1)\n    estimate_earnings(df1)\n    estimate_birth(df1)\n","sub_path":"src/estimation/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":21589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"172652750","text":"from flask import Flask,jsonify,request\n\napp = Flask(__name__)\n\n@app.route('/hello')\ndef hello_world():\n    return \"hello World\"\n\n@app.route('/hi')\ndef hi_there():\n\n    jsy ={\n        'field1' :'yello',\n        'field2':'bloue'\n    }\n    return jsonify(jsy)\n\n\n@app.route('/add',methods=['POST'])\ndef addTwoNumbers():\n    dataDict = request.get_json()\n    # both keys must be present; 'x' and 'y' each need their own membership test\n    if 'x' not in dataDict or 'y' not in dataDict:\n        return \"ERROR\", 400\n    x = dataDict['x']\n    y = dataDict['y']\n    z = x + y\n\n    ret_json = {'z':z}\n\n    return jsonify(ret_json),200\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n
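\n# Example request against /add (hypothetical local run on Flask's default port):\n#\n#   curl -X POST http://127.0.0.1:5000/add -H \"Content-Type: application/json\" -d '{\"x\": 1, \"y\": 2}'\n#   -> {\"z\": 3}\n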
","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"76476599","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('courses', '0009_auto_20150404_2017'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='SemesterEntry',\n            fields=[\n                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n                ('created', models.DateTimeField(auto_now_add=True)),\n                ('ready', models.BooleanField(default=False)),\n            ],\n        ),\n        migrations.AlterModelOptions(\n            name='course',\n            options={'ordering': ('no', 'semester_entry')},\n        ),\n        migrations.AlterModelOptions(\n            name='department',\n            options={'ordering': ('abbr',), 'get_latest_by': 'updated'},\n        ),\n        migrations.AlterModelOptions(\n            name='semester',\n            options={'ordering': ('-year', '-section'), 'get_latest_by': 'updated'},\n        ),\n        migrations.RemoveField(\n            model_name='course',\n            name='semester',\n        ),\n        migrations.RemoveField(\n            model_name='semester',\n            name='created',\n        ),\n        migrations.RemoveField(\n            model_name='semester',\n            name='ready',\n        ),\n        migrations.AddField(\n            model_name='department',\n            name='updated',\n            field=models.DateTimeField(default=datetime.datetime(2015, 4, 5, 11, 4, 55, 301627, tzinfo=utc), auto_now=True),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='semester',\n            name='updated',\n            field=models.DateTimeField(default=datetime.datetime(2015, 4, 5, 11, 5, 1, 55719, tzinfo=utc), auto_now=True),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='semesterentry',\n            name='semester',\n            field=models.ForeignKey(to='courses.Semester'),\n        ),\n        migrations.AddField(\n            model_name='course',\n            name='semester_entry',\n            field=models.ForeignKey(to='courses.SemesterEntry', default=1),\n            preserve_default=False,\n        ),\n    ]\n","sub_path":"courses/migrations/0010_auto_20150405_1905.py","file_name":"0010_auto_20150405_1905.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"347755919","text":"\"\"\"\nCost module\n\nAuthor: Edward Oughton\nDate: April 2019\n\n\"\"\"\nimport math\n\ndef calculate_costs(data, costs, simulation_parameters, site_radius, environment):\n    \"\"\"\n    Calculates the annual total cost using capex and opex.\n\n    Parameters\n    ----------\n    data : list of dicts\n        Contains a list of assets\n    costs : dict\n        Contains the costs of each necessary equipment item.\n    simulation_parameters : dict\n        Contains simulation settings, e.g. sectorization and backhaul distances.\n    site_radius : int\n        The radius of the site area being modelled.\n    environment : string\n        Either urban, suburban or rural.\n\n    Returns\n    -------\n    output : list of dicts\n        Contains a list of assets, with affiliated discounted capex and opex costs.\n\n    \"\"\"\n    inter_site_distance = site_radius * 2\n    site_area_km2 = math.sqrt(3) / 2 * inter_site_distance ** 2 / 1e6\n    sites_per_km2 = 1 / site_area_km2\n\n    for key, value in simulation_parameters.items():\n        if key == 'backhaul_distance_km_{}'.format(environment):\n            backhaul_distance = value\n\n    cost_breakdown = {\n        'single_sector_antenna_2x2_mimo_dual_band': (\n            costs['single_sector_antenna_2x2_mimo_dual_band'] *\n            simulation_parameters['sectorization'] * sites_per_km2\n        ),\n        'single_remote_radio_unit': (\n            costs['single_remote_radio_unit'] *\n            simulation_parameters['sectorization'] * sites_per_km2\n        ),\n        'single_baseband_unit': (\n            costs['single_baseband_unit'] * sites_per_km2\n        ),\n        'router': (\n            costs['router'] * sites_per_km2\n        ),\n        'tower': (\n            costs['tower'] * sites_per_km2\n        ),\n        'civil_materials': (\n            costs['civil_materials'] * sites_per_km2\n        ),\n        'transportation': (\n            costs['transportation'] * sites_per_km2\n        ),\n        'installation': (\n            costs['installation'] * sites_per_km2\n        ),\n        'battery_system': (\n            costs['battery_system'] * sites_per_km2\n        ),\n        'fiber_backhaul_{}'.format(environment): (\n            costs['fixed_fiber_backhaul_per_km'] * backhaul_distance * sites_per_km2\n        ),\n        'microwave_backhaul_1m': (\n            costs['microwave_backhaul_1m'] * sites_per_km2\n        )\n    }\n\n    total_deployment_costs_km2 = 0\n    for key, value in cost_breakdown.items():\n        total_deployment_costs_km2 += value\n\n    output = {\n        'environment': environment,\n        'inter_site_distance': inter_site_distance,\n        'site_area_km2': site_area_km2,\n        'sites_per_km2': sites_per_km2,\n        'results_type': data['results_type'],\n        'path_loss': data['path_loss'],\n        'received_power': data['received_power'],\n        'interference': data['interference'],\n        'sinr': data['sinr'],\n        'spectral_efficiency': data['spectral_efficiency'],\n        'capacity_mbps': data['capacity_mbps'],\n        'capacity_mbps_km2': data['capacity_mbps'],\n        'total_deployment_costs_km2': total_deployment_costs_km2,\n        'sector_antenna_costs_km2': cost_breakdown['single_sector_antenna_2x2_mimo_dual_band'],\n        'remote_radio_unit_costs_km2': cost_breakdown['single_remote_radio_unit'],\n        'baseband_unit_costs_km2': cost_breakdown['single_baseband_unit'],\n        'router_costs_km2': cost_breakdown['router'],\n        'tower_costs_km2': cost_breakdown['tower'],\n        'civil_material_costs_km2': cost_breakdown['civil_materials'],\n        'transportation_costs_km2': cost_breakdown['transportation'],\n        'installation_costs_km2': cost_breakdown['installation'],\n        'battery_system_costs_km2': cost_breakdown['battery_system'],\n        'fiber_backhaul_costs_km2': cost_breakdown['fiber_backhaul_{}'.format(environment)],\n        'microwave_backhaul_1m_costs_km2': cost_breakdown['microwave_backhaul_1m'],\n    }\n\n    return output\n
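\n\n# Worked example (hypothetical numbers, for orientation only): with site_radius = 500 m,\n# inter_site_distance = 1000 m, so site_area_km2 = sqrt(3) / 2 * 1000 ** 2 / 1e6 ~= 0.87 km2\n# and sites_per_km2 = 1 / 0.87 ~= 1.15. Every entry in cost_breakdown is therefore a per-km2\n# figure, obtained by multiplying the unit cost by roughly 1.15 sites per km2.\n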
","sub_path":"src/pysim5g/costs.py","file_name":"costs.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"556699461","text":"# Get arguments from the commandline\nimport sys\nfilename = sys.argv[1]\noutputname = sys.argv[2]\nprint(\"Mining %s\" % filename)\n\n# Open CSV file\noutput = \"\"\nvals = {}\nvalsLs = []\nwith open(filename, \"r\") as f:\n    f.readline()\n    for line in f:\n        spt = str(line).strip().split(\",\")\n        vals[spt[0]] = float(spt[1])\n        if (float(spt[1]) != -1.0):\n            valsLs.append(float(spt[1]))\n\nvalMin = min(valsLs)\nvalMax = max(valsLs)\nfor key in vals:\n    output += \"coloration[\\\"\" + str(key) + \"\\\"] = \" + str(vals[key]) + \";\\n\"\noutput += \"coloration[\\\"Minimum\\\"] = \" + str(valMin) + \";\\n\"\noutput += \"coloration[\\\"Maximum\\\"] = \" + str(valMax) + \";\\n\"\n\nwith open(outputname, \"w\") as f:\n    f.write(output)","sub_path":"convertData.py","file_name":"convertData.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"213586633","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport json\nimport pymysql as DB\n\nimport config\n\n\ndef main():\n    schema = load_schema()\n    with DB.connect(**config.MySQL) as cursor:\n        opts = schema.get('options')\n        tables = schema['tables']\n        queries = [build_query(name, tbl) for name, tbl in tables.items()]\n        for query in queries:\n            cursor.execute(query)\n\n\ndef load_schema():\n    with open('../spec/SQLSchema.json', 'r') as f:\n        schema = json.load(f)\n    return schema\n\n\ndef build_query(tbl_name, table):\n    return 'CREATE TABLE {tbl_name} ({columns}{foreign_key});'.format(\n        tbl_name=tbl_name,\n        columns=', '.join(get_columns(table)),\n        foreign_key=get_foreign_key(table.get('foreign_key'))\n    )\n\n\ndef get_columns(table):\n    cols = []\n    for col in table['columns']:\n        cols.append(\n            '{col_name} {data_type}{default_value}{key_idx}'.format(\n                col_name=col['name'],\n                data_type=get_datatype(col),\n                default_value=get_default(col),\n                key_idx=get_key_index(col),\n            )\n        )\n    return cols\n\n\ndef get_datatype(col):\n    INTEGER = ['TINYINT', 'SMALLINT', 'MEDIUMINT', 'INT', 'INTEGER', 'BIGINT']\n    FLOAT = ['FLOAT', 'DOUBLE', 'DOUBLE PRECISION', 'REAL']\n    DATETIME = ['DATE', 'DATETIME', 'TIMESTAMP', 'TIME', 'YEAR']\n    CHARACTER = ['CHARACTER', 'CHAR', 'CHARACTER VARYING', 'VARCHAR']\n    BINARY = ['BINARY', 'CHAR BYTE', 'VARBINARY']\n    BLOB = ['TINYBLOB', 'BLOB', 'MEDIUMBLOB', 'LONGBLOB']\n    TEXT = ['TINYTEXT', 'TEXT', 'MEDIUMTEXT', 'LONGTEXT']\n\n    low_dtype = col['type']\n    dtype = col['type'].upper()\n    if (dtype in INTEGER + FLOAT):\n        dtype += ' UNSIGNED' if col.get('unsigned') else ''\n        dtype += ' ZEROFILL' if col.get('zerofill') else ''\n    elif dtype in CHARACTER:\n        dtype += '(' + str(col.get('max_length', 128)) + ')'\n        dtype += get_charset(col)\n    elif dtype in BINARY + DATETIME:\n        pass\n    elif dtype in BLOB + TEXT:\n        dtype += get_charset(col)\n    elif dtype in ['ENUM', 'SET']:\n        listname = low_dtype + 'list'\n        dtype += '(' + ', '.join([\"'\" + e + \"'\" for e in col[listname]]) + ')'\n        
if col.get('charset'):\n dtype += ' CHARACTER SET ' + col['charset']\n return dtype\n\n\ndef get_charset(col):\n if col.get('ascii'):\n return ' ASCII'\n elif col.get('unicode'):\n return ' UNICODE'\n elif col.get('binary'):\n return ' BINARY'\n elif col.get('charset'):\n return ' CHARACTER SET {}'.format(col['charset'])\n else:\n return ''\n\n\ndef get_default(col):\n if col.get('default'):\n return ' DEFAULT ' + col['default']\n return ''\n\n\ndef get_key_index(col):\n key = ''\n if col.get('primary key'):\n key += ' PRIMARY KEY'\n if col.get('not null'):\n key += ' NOT NULL'\n if col.get('unique'):\n key += ' UNIQUE'\n return key\n\n\ndef get_foreign_key(fk):\n if fk is not None:\n return ', FOREIGN KEY ({}) REFERENCES {}({}){}{}'.format(\n fk['key'],\n fk['ref_table'],\n fk['ref_col'],\n ' ON DELETE ' + fk.get('on delete') if fk.get('on delete') else '',\n ' ON UPDATE ' + fk.get('on update') if fk.get('on update') else ''\n )\n else:\n return ''\n\nif __name__ == '__main__':\n main()\n","sub_path":"apps/json2mysql.py","file_name":"json2mysql.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"411012647","text":"from training_utils import get_cifar10_dataloaders, evaluate_model, train_model\r\nfrom model_utils import get_cifar_vgg\r\nimport torch\r\n\r\nEPOCHS = 13\r\n\r\ntrainloader, testloader = get_cifar10_dataloaders()\r\nnet = get_cifar_vgg(10)\r\ntrain_model(net, trainloader, testloader, EPOCHS, torch.device(0))\r\naccuracy = evaluate_model(net, testloader, torch.device(0))\r\nprint('Accuracy of the network on the 10000 test images: %d %%' % (accuracy))\r\n\r\ntorch.save(net.state_dict(), \"original_trained_network.pt\")\r\n","sub_path":"train_base_model.py","file_name":"train_base_model.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"441246672","text":"import pandas as pd\nfrom pandas.testing import assert_frame_equal, assert_series_equal\n\nfrom krk_meetings.data_provider.gtfs_static.Merger import Merger\n\n\ndef test_merge_stops_df() -> None:\n stops_df_1 = pd.DataFrame([\n [1, 'Aaa', 50.0, 20.0],\n [2, 'Bbb', 51.0, 21.0],\n [4, 'Ccc', 52.0, 22.0],\n [5, 'Eee', 54.0, 24.0],\n ], columns=['stop_id', 'stop_name', 'stop_lat', 'stop_lon']).set_index('stop_id')\n\n stops_df_2 = pd.DataFrame([\n [1, 'Aaa', 50.2, 20.2],\n [3, 'Bbb', 51.2, 21.2],\n [4, 'Ddd', 53.2, 23.2],\n [6, 'Fff', 55.2, 25.2],\n ], columns=['stop_id', 'stop_name', 'stop_lat', 'stop_lon']).set_index('stop_id')\n\n merger = Merger()\n actual_stops_df, actual_stop_id_offset, actual_stop_id_mapping = merger._merge_stops_df(stops_df_1, stops_df_2)\n\n expected_stops_df = pd.DataFrame([\n [1, 'Aaa', 50.1, 20.1],\n [2, 'Bbb', 51.1, 21.1],\n [4, 'Ccc', 52.0, 22.0],\n [9, 'Ddd', 53.2, 23.2],\n [5, 'Eee', 54.0, 24.0],\n [11, 'Fff', 55.2, 25.2],\n ], columns=['stop_id', 'stop_name', 'stop_lat', 'stop_lon']).set_index('stop_id')\n\n expected_stop_id_mapping = pd.Series([1, 2], index=[6, 8])\n\n assert_frame_equal(actual_stops_df, expected_stops_df)\n assert_series_equal(actual_stop_id_mapping, expected_stop_id_mapping)\n assert actual_stop_id_offset == 5\n","sub_path":"backend/krk_meetings/data_provider/gtfs_static/tests/test_Merger.py","file_name":"test_Merger.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"448621192","text":"from 
typing import Optional, Literal, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import normalize\n\n# install dask if available\ntry:\n import dask.array as da\nexcept ImportError:\n da = None\n\n\nplt.style.use(\"ggplot\")\n\n\ndef toarray(matrix1d):\n return np.array(matrix1d).flatten()\n\n\nsetattr(np.matrix, 'toarray', toarray)\n\n\ndef read_mtx(path: str, gene_col: int = 0) -> Tuple[sparse.spmatrix, pd.DataFrame, pd.DataFrame]:\n genes = pd.read_csv(f'{path}/genes.tsv', sep='\\t', usecols=[gene_col], header=None).set_index(gene_col)\n cells = pd.read_csv(f'{path}/barcodes.tsv', sep='\\t', usecols=[0], header=None).set_index(0)\n\n non_zero = pd.read_csv(f'{path}/matrix.mtx', sep=' ', skiprows=2,\n names=('gene_id', 'cell_id', 'counts'))\n shape = tuple(non_zero.iloc[0])[:2][::-1]\n non_zero.drop(0, inplace=True)\n non_zero.gene_id -= 1\n non_zero.cell_id -= 1\n\n adata = sparse.csc_matrix((non_zero.counts, (non_zero.cell_id, non_zero.gene_id)),\n dtype=np.int16, shape=shape)\n\n return adata, genes, cells\n\n\ndef draw_four_plots(\n adata: sparse.spmatrix,\n genes: pd.DataFrame,\n cells: pd.DataFrame,\n capital: bool,\n min_gene_num: int,\n min_cell_num: int,\n max_gene_num: int,\n max_mt_pct: int\n):\n # df = pd.DataFrame.sparse.from_spmatrix(adata, columns=genes.name)\n \n cell_filter = ((adata != 0).sum(axis=1) >= min_gene_num).toarray()\n gene_filter = ((adata != 0).sum(axis=0) >= min_cell_num).toarray()\n adata_filtered = adata[cell_filter][:, gene_filter]\n genes_filtered = genes[gene_filter].copy()\n cells_filtered = cells[cell_filter].copy()\n \n prefix = 'MT-' if capital else 'mt-'\n mt_filter = genes_filtered.index.str.startswith(prefix)\n \n gene_num_per_cell = (adata_filtered != 0).sum(axis=1).toarray()\n gene_counts_per_cell = adata_filtered.sum(axis=1).toarray()\n mt_pct = adata_filtered[:, mt_filter].sum(axis=1).toarray() / gene_counts_per_cell * 100\n \n g = sns.violinplot(data=gene_num_per_cell)\n g = sns.stripplot(data=gene_num_per_cell, jitter=0.4, size=2, color='.3')\n g.set_title('gene_num_per_cell')\n g.set_xticklabels(labels=[])\n plt.savefig('1.jpg')\n plt.cla()\n \n g = sns.violinplot(data=gene_counts_per_cell)\n g = sns.stripplot(data=gene_counts_per_cell, jitter=0.4, size=2, color='.3')\n g.set_title('gene_counts_per_cell')\n g.set_xticklabels(labels=[])\n plt.savefig('2.jpg')\n plt.cla()\n \n g = sns.violinplot(data=mt_pct)\n g = sns.stripplot(data=mt_pct, jitter=0.4, size=2, color='.3')\n g.set_title('pct_counts_mt')\n g.set_xticklabels(labels=[])\n plt.savefig('3.jpg')\n plt.cla()\n\n # cells_filtered['gene_counts_per_cell'] = gene_counts_per_cell\n # cells_filtered['mt_pct'] = mt_pct\n # cells_filtered['gene_num_per_cell'] = gene_num_per_cell\n cells_filtered = cells_filtered.assign(gene_counts_per_cell=gene_counts_per_cell, mt_pct=mt_pct, gene_num_per_cell=gene_num_per_cell)\n \n sns.scatterplot(data=cells_filtered, x='gene_counts_per_cell', y='mt_pct', s=7, alpha=0.5)\n plt.savefig('4.jpg')\n plt.cla()\n \n sns.scatterplot(data=cells_filtered, x='gene_counts_per_cell', y='gene_num_per_cell', s=7, alpha=0.5)\n plt.savefig('5.jpg')\n plt.cla()\n \n adata_filtered_filtered = adata_filtered[(gene_num_per_cell < max_gene_num) & (mt_pct < max_mt_pct)]\n \n return adata_filtered_filtered, genes_filtered, cells_filtered\n\n\ndef my_get_mean_var(X, axis: Union[Literal['gene', 'cell'], int]):\n if not isinstance(axis, int):\n axis = 0 if axis == 
'gene' else 1\n\n    if isinstance(X, sparse.spmatrix):  # same as sparse.issparse()\n        X_copy = X.copy()\n        X_copy.data **= 2\n        mean = X.mean(axis=axis).toarray()\n        var = X_copy.mean(axis=axis).toarray() - mean ** 2\n        var *= X.shape[axis] / (X.shape[axis] - 1)\n    else:\n        mean = np.mean(X, axis=axis)\n        var = np.var(X, axis=axis, ddof=1)  # a little overhead (mean counted twice, but it's ok.)\n    return mean, var\n'''\nIn standard statistical practice, ddof=1 provides an unbiased estimator of the variance\nof a hypothetical infinite population. ddof=0 provides a maximum likelihood estimate of\nthe variance for normally distributed variables.\n'''\n\n\ndef my_sparse_mean_variance_axis(mtx: sparse.spmatrix, axis: int):\n    if isinstance(mtx, sparse.csr_matrix):\n        ax_minor = 1\n        shape = mtx.shape\n    elif isinstance(mtx, sparse.csc_matrix):\n        ax_minor = 0\n        shape = mtx.shape[::-1]\n    else:\n        raise ValueError('This function only works on sparse csr and csc matrices')\n    if axis == ax_minor:\n        return my_sparse_mean_var_major_axis(\n            mtx.data, mtx.indices, mtx.indptr, *shape, np.float64\n        )\n    else:\n        return my_sparse_mean_var_minor_axis(\n            mtx.data, mtx.indices, *shape, np.float64\n        )\n\n\ndef my_sparse_mean_var_major_axis(\n    data,\n    indices,\n    indptr,\n    major_len,\n    minor_len,\n    dtype\n):\n    means = np.zeros(major_len, dtype=dtype)\n    variances = np.zeros_like(means, dtype=dtype)  # zeros_like keeps the same shape and dtype as means\n    for ind, (startptr, endptr) in enumerate(zip(indptr[:-1], indptr[1:])):\n        counts = endptr - startptr\n        \n        mean = sum(data[startptr:endptr]) / minor_len\n        # deviations of the stored values from this row's mean, plus the implicit\n        # zeros, which each contribute mean ** 2 (population variance over minor_len)\n        variance = (sum((v - mean) ** 2 for v in data[startptr:endptr]) + mean ** 2 * (minor_len - counts)) / minor_len\n        means[ind] = mean\n        variances[ind] = variance\n    \n    return means, variances\n\n\ndef my_sparse_mean_var_minor_axis(\n    data,\n    indices,\n    major_len,\n    minor_len,\n    dtype\n):\n    means = np.zeros(minor_len, dtype=dtype)\n    variances = np.zeros_like(means, dtype=dtype)\n\n    counts = np.zeros(minor_len, dtype=np.int64)\n    \n    for ind, num in zip(indices, data):\n        means[ind] += num\n    \n    means /= major_len\n    \n    for ind, num in zip(indices, data):\n        variances[ind] += (num - means[ind]) ** 2\n        counts[ind] += 1\n    \n    variances += [mean ** 2 * (major_len - count) for mean, count in zip(means, counts)]\n    variances /= major_len\n    \n    return means, variances\n
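\n# Quick sanity check for the two helpers above (a sketch with a hypothetical random\n# matrix; not part of the pipeline). Both should agree with dense numpy results, which\n# use population variance (ddof=0) by default:\n#\n#   m = sparse.random(50, 20, density=0.3, format='csr')\n#   means, variances = my_sparse_mean_variance_axis(m, axis=0)\n#   assert np.allclose(means, m.toarray().mean(axis=0))\n#   assert np.allclose(variances, m.toarray().var(axis=0))\n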
\n\ndef highly_variable_genes_single_batch_seurat(\n    adata: sparse.spmatrix,  # log transformed, base e\n    genes: pd.DataFrame,\n    layer=None,\n    min_disp=0.5,\n    max_disp=np.inf,\n    min_mean=0.0125,\n    max_mean=3,\n    n_top_genes: int = 0,\n    n_bins=20,\n    flavor='seurat'\n) -> None:\n    X = adata.layers[layer] if layer is not None else adata  #.X\n    \n    if flavor == 'seurat':\n        # the input is assumed to be log base e; if it is not, convert it to base e first\n        X = np.expm1(X)\n        # expm1 then restores the original (linear) scale\n    \n    mean, var = my_get_mean_var(X, axis='gene')\n    mean[mean == 0] = 1e-12\n    dispersion = var / mean\n    if flavor == 'seurat':\n        dispersion[dispersion == 0] = np.nan\n        dispersion = np.log(dispersion)\n        mean = np.log1p(mean)\n\n    genes['dispersions'] = dispersion\n    genes['means'] = mean\n    genes['vars'] = var\n    \n    if flavor == 'seurat':\n        genes['mean_bin'] = pd.cut(genes.means, bins=n_bins)\n        disp_grouped = genes.groupby('mean_bin')['dispersions']\n        \n        single_bin_gene = []\n\n        def find_nan_interval(x):\n            if len(x) == 1:\n                single_bin_gene.extend(x.index)\n                std, mean = x.mean(), 0\n            else:\n                mean = x.mean()\n                std = x.std(ddof=1)\n            return (x - mean) / std\n        \n        genes['dispersions_norm'] = disp_grouped.transform(lambda x: find_nan_interval(x))\n        if len(single_bin_gene) > 0:\n            print(\n                f'Gene indices {single_bin_gene} fell into a single bin: their '\n                'normalized dispersion was set to 1.',\n                ' Decreasing `n_bins` will likely avoid this effect.'\n            )\n    \n    if n_top_genes > adata.shape[1]:\n        print(f'`n_top_genes` > `adata.n_var`, returning all genes.')\n        genes['highly_variable'] = np.ones(adata.shape[1], dtype=bool)\n    elif n_top_genes > 0:\n        genes_largest = genes.nlargest(n_top_genes, 'dispersions_norm')\n        disp_cut_off = genes_largest['dispersions_norm'].iloc[-1]\n        genes['highly_variable'] = np.zeros(adata.shape[1], dtype=bool)\n        genes.loc[genes_largest.index, 'highly_variable'] = True\n        print(\n            f'the {n_top_genes} top genes correspond to a '\n            f'normalized dispersion cutoff of {disp_cut_off}'\n        )\n    else:\n        dispersion_norm = genes.dispersions_norm.values.astype('float32')\n        dispersion_norm = np.nan_to_num(dispersion_norm)  # similar to Seurat\n        gene_subset = np.logical_and.reduce(\n            (\n                mean > min_mean,\n                mean < max_mean,\n                dispersion_norm > min_disp,\n                dispersion_norm < max_disp,\n            )\n        )\n        genes['highly_variable'] = gene_subset\n\n    sns.scatterplot(data=genes, x=\"means\", y=\"dispersions\", hue=\"highly_variable\", s=7, alpha=0.5)\n    plt.savefig('6.jpg')\n    plt.cla()\n\n    sns.scatterplot(data=genes, x=\"means\", y=\"dispersions_norm\", hue=\"highly_variable\", s=7, alpha=0.5)\n    plt.savefig('7.jpg')\n    plt.cla()\n\n    return None\n\n\nif __name__ == '__main__':\n    adata, genes, cells = read_mtx('data/hg19', gene_col=1)\n    adata_filtered, genes_filtered, cells_filtered = draw_four_plots(\n        adata, genes, cells, capital=True, min_gene_num=200, min_cell_num=3,\n        max_gene_num=2500, max_mt_pct=5\n    )\n\n    adata_filtered_norm = normalize(adata_filtered, axis=1, norm='l1') * 1e4\n    adata_filtered_norm_log1p = adata_filtered_norm.log1p()\n\n    highly_variable_genes_single_batch_seurat(\n        adata_filtered_norm_log1p, genes_filtered\n    )\n","sub_path":"scanpy/tutorial/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"626868434","text":"#Python 3\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom cgi import parse_header, parse_multipart\nfrom urllib.parse import parse_qs\nfrom nexstar2 import NexstarHandController, NexstarCoordinateMode;\ncoord = NexstarCoordinateMode\n\nn = NexstarHandController('/dev/ttyUSB0')\n\n# HTTPRequestHandler class\nclass testHTTPServer_RequestHandler(BaseHTTPRequestHandler):\n\n    def _set_response(self):\n        self.send_response(200)\n        self.send_header('Content-type', 'text/html')\n        self.end_headers()\n    \n    # GET\n    def do_GET(self):\n        # Send response status code\n        self._set_response()\n        print('Getting position')\n        # Send message back to client\n        message = n.getPosition(coord.RA_DEC)\n        print(message)\n        #print(str(message[0].encode(\"utf8\")))\n        # Write content as utf-8 data\n        self.wfile.write(b'['+bytes(','.join(map(str, message)), 'utf-8')+b']')\n        return\n\n    # POST\n    def do_POST(self):\n        ctype, pdict = parse_header(self.headers['content-type'])\n        pdict['boundary'] = bytes(pdict['boundary'], \"utf-8\")\n        if ctype == 'multipart/form-data':\n            # print(self.rfile)\n            # print(pdict)\n            postvars = parse_multipart(self.rfile, pdict)\n        elif ctype == 'application/x-www-form-urlencoded':\n            length = int(self.headers['content-length'])\n            postvars = parse_qs(\n                self.rfile.read(length), \n                keep_blank_values=1)\n        else:\n            postvars = {}\n        # print(postvars)\n        ra = float(postvars['ra'][0])\n        dec = float(postvars['dec'][0])\n        # print(ra)\n        # print(dec)\n        n.gotoPosition(ra, dec, coord.RA_DEC)\n        message = n.getPosition()\n        
self._set_response()\n        self.wfile.write(b'['+bytes(','.join(map(str, message)), 'utf-8')+b']')\n        print(message)\n        return\n    \ndef run():\n    print('starting server...')\n    \n    # Server settings\n    # Use a high port (8889 here); for port 80, which is normally used for an http server, you need root access\n    #server_address = ('192.168.20.100', 8889)\n    server_address = ('10.8.0.6', 8889)\n    httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)\n    print('running server...')\n    httpd.serve_forever()\n    \n    \nrun()\n","sub_path":"controller/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"84239929","text":"from __future__ import print_function\n# import pdb\n\nclass ListNode(object):\n    def __init__(self, val):\n        self.val = val\n        self.next = None\n\n# class LinkedList(object):\n    # def __init__(self, nums):\ndef createList(nums):\n    \"\"\"\n    create a linked list from a list\n    \"\"\"\n    if not nums:\n        return None\n    dumm = curr = ListNode(0)\n    for n in nums:\n        curr.next = ListNode(n)\n        curr = curr.next\n    return dumm.next\n\ndef printList(head):\n    if not head:\n        print('empty list')\n        return\n    curr = head\n    # pdb.set_trace()\n    while True:\n        if curr.next:\n            print(str(curr.val) + '->', end=\"\")\n        else:\n            # print('\\n')\n            print(str(curr.val))\n            return\n        curr = curr.next\n\ndef _printList():\n    A = []\n    B = [1]\n    C = [1,2]\n    D = [3, 2, 1]\n    printList(createList(A))\n    printList(createList(B))\n    printList(createList(C))\n    printList(createList(D))\n\ndef mergeKlists(lists):\n    \"\"\"\n    merge k sorted lists\n    example:\n    lists = [[1,4,5],[1,3,4],[2,6]]\n    then output: [1, 1, 2, 3, 4, 4, 5, 6]\n    \"\"\"\n    import heapq\n    if not lists: return None\n    q = []\n    for l in lists:\n        if l:\n            # note: under Python 3, equal vals would fall back to comparing\n            # ListNode objects; push an index as a tie-breaker if that matters\n            heapq.heappush(q, (l.val, l))\n\n    dumm = curr = ListNode(0)\n    while q:\n        val, l = heapq.heappop(q)\n        # curr.next = ListNode(val)\n        curr.next = l\n        curr = curr.next\n        if l.next:\n            heapq.heappush(q, (l.next.val, l.next))\n            l = l.next\n    return dumm.next\n\ndef _mergeKlists():\n    lists = [createList([1, 4, 5]), createList([1, 3, 4]), createList([2, 6])]\n    printList(mergeKlists(lists))\n    lists = [createList([]), createList([1])]\n    printList(mergeKlists(lists))\n\nif __name__ == '__main__':\n    # _printList()\n    _mergeKlists()\n\n","sub_path":"LinkedLists.py","file_name":"LinkedLists.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"621661453","text":"import tensorflow as tf\n\n# Create a constant op that produces a 1x2 matrix; the op is added as a node to the default graph\n\n# The constructor's return value represents the output of the constant op\n\nmatrix1 = tf.constant([[3.,3.]])\n\n# Create another constant op that produces a 2x1 matrix\n\nmatrix2 = tf.constant([[2.],[2.]])\n\n# Create a matrix-multiplication matmul op that takes 'matrix1' and 'matrix2' as inputs\n# The return value 'product' represents the result of the matrix multiplication\n\nproduct = tf.matmul(matrix1,matrix2)\n\n# The default graph now has three nodes: two constant() ops and one matmul() op. To actually run\n# the matrix multiplication and get its result, you must launch the graph in a session.
\n\n# Launch the default graph\nsess = tf.Session()\n\n# Call the session's 'run()' method to execute the matmul op, passing 'product' as the argument.\n# As mentioned above, 'product' represents the output of the matmul op, and passing it in tells\n# the method that we want to fetch that output back\n\nresult = sess.run(product)\nprint(result)\n\n# Task done, close the session\nsess.close()
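\n\n# A minimal alternative (assuming TensorFlow 1.x, as used above): run the session as a\n# context manager so it is closed automatically:\n#\n#   with tf.Session() as sess:\n#       result = sess.run(product)\n#       print(result)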
","sub_path":"examples/starts/tensorflow_matmul.py","file_name":"tensorflow_matmul.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"514240837","text":"# train data augment -> 120% of all dataset\n# compare with the vanilla model\n# save_dir -> temp and delete\n\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(\n    rescale=1./255,\n    horizontal_flip=True,\n    vertical_flip=True,\n    width_shift_range=0.1,\n    height_shift_range=0.1,\n    rotation_range=5,\n    zoom_range=1.2,\n    shear_range=0.7,\n    fill_mode='nearest'\n)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\nxy_train = train_datagen.flow_from_directory(\n    '../_data/brain/train',\n    target_size=(150, 150),\n    batch_size=200,\n    class_mode='binary',\n    shuffle=True\n)\n\nxy_test = test_datagen.flow_from_directory(\n    '../_data/brain/test',\n    target_size=(150, 150),\n    batch_size=200,\n    class_mode='binary',\n    shuffle=True\n)\n\nx_train = np.load('./_save/_NPY/k59_rps_x_train.npy')\nx_test = np.load('./_save/_NPY/k59_rps_x_test.npy')\ny_train = np.load('./_save/_NPY/k59_rps_y_train.npy')\ny_test = np.load('./_save/_NPY/k59_rps_y_test.npy')\n\naugment_size = 400\n\nrandidx = np.random.randint(x_train.shape[0], size=augment_size)  # draw 400 random samples from the training set\n\nx_argmented = x_train[randidx].copy()\ny_argmented = y_train[randidx].copy()\n\nx_argmented = x_argmented.reshape(x_argmented.shape[0], 150, 150, 3)  # (400, 150, 150, 3)\nx_train = x_train.reshape(x_train.shape[0], 150, 150, 3)\nx_test = x_test.reshape(x_test.shape[0], 150, 150, 3)\n\nx_argmented = train_datagen.flow(x_argmented, \n                                np.zeros(augment_size),\n                                batch_size=augment_size,\n                                shuffle=False).next()[0]\n\nx_train = np.concatenate((x_train, x_argmented))  # original training set plus the augmented samples\ny_train = np.concatenate((y_train, y_argmented))\n\n# 2. model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D\n\nmodel = Sequential()\nmodel.add(Conv2D(filters = 64, kernel_size=(3,3), input_shape =(150,150,3), activation= 'relu'))\nmodel.add(Conv2D(filters = 64, kernel_size=(3,3), activation= 'relu'))\nmodel.add(MaxPooling2D(2,2))\nmodel.add(Conv2D(filters = 32, kernel_size=(2,2), activation= 'relu'))\nmodel.add(Conv2D(filters = 32, kernel_size=(2,2), activation= 'relu'))\nmodel.add(MaxPooling2D(2,2))\n# model.add(Conv2D(filters = 32, kernel_size=(3,3), activation= 'relu'))\nmodel.add(Flatten())\nmodel.add(Dense(111, activation= 'relu'))\nmodel.add(Dense(64, activation= 'relu'))\nmodel.add(Dense(3, activation= 'softmax'))\n\n# 3. compile train\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nes = EarlyStopping(monitor='val_acc', patience=10, mode='auto', verbose=1)\n\nimport time \n\nstart_time = time.time()\nhist = model.fit(x_train, y_train, epochs=10000, verbose=2,\n            validation_split=0.2, callbacks=[es], steps_per_epoch=32,\n            validation_steps=4)\nend_time = time.time() - start_time\n\n# 4. predict eval \n\nacc = hist.history['acc']\nval_acc = hist.history['val_acc']\nloss = hist.history['loss']\nval_loss = hist.history['val_loss']\n\nloss = model.evaluate(x_test, y_test)\nprint('acc : ',acc[-1])\nprint('val_acc : ',val_acc[-1])\n# print('loss : ',loss[-10])\nprint('val_loss : ',val_loss[-1]) \n\n'''\nwith flow\nacc :  0.8311966061592102\nval_acc :  0.3617021143436432\n\nacc :  0.7606837749481201\nval_acc :  0.3617021143436432\n\nwithout flow\nacc :  0.9805996417999268\nval_acc :  0.523809552192688\n'''\n","sub_path":"01_keras/keras61_08_rps_augment.py","file_name":"keras61_08_rps_augment.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"307917770","text":"'''\nscript that computes the daily TKM for a particular sensor...\nit receives the sensor and returns a list with the sensor, the date and the corresponding TKM\n'''\n\nimport ConnectDB\nimport HandlerQuery\nimport numpy as np\nimport math\n\nclass TKMDiaria (object):\n\t\n\t# class constructor...\n\tdef __init__(self, sensor):\n\t\t\n\t\tself.sensor = sensor\n\t\t\n\t\tself.connex = ConnectDB.ConnectDB()\n\t\tself.handler = HandlerQuery.HandlerQuery()\n\t\tself.ListaFecha = self.getFecha()\n\t\t\n\t\tself.tkmValues = {}\n\t\t\n\t# fetch all the dates...\n\tdef getFecha (self):\n\t\t\n\t\tquery = \"select DISTINCT medicion.fecha from medicion where medicion.sensor =%s\" % self.sensor\n\t\t\n\t\tself.connex.initConnectionDB()\n\t\t\n\t\tListaData = self.handler.queryBasicDataBase(query, self.connex)\n\t\t\n\t\tself.connex.closeConnectionDB()\n\t\t\n\t\t# extract each date and store it in a list....\n\t\tlistaFecha = []\n\t\t\n\t\tfor element in ListaData:\n\t\t\t\n\t\t\tlistaFecha.append(str(element[0]))\n\t\t\n\t\treturn listaFecha\n\t\n\t# fetch the list of measurements for a given date\n\tdef getValueMeasure(self, fecha):\n\t\t\n\t\tquery = \"select medicion.valorMedicion from medicion where medicion.fecha = '%s' AND medicion.sensor =%s\" % (fecha, self.sensor)\n\t\t\n\t\tself.connex.initConnectionDB()\n\t\t\n\t\tListaData = self.handler.queryBasicDataBase(query, self.connex)\n\t\t\n\t\tself.connex.closeConnectionDB()\n\t\t\n\t\t# extract each measurement and store it in a list....\n\t\tlistaMediciones = []\n\t\t\n\t\tfor element in ListaData:\n\t\t\t\n\t\t\tlistaMediciones.append(float(element[0]))\n\t\t\n\t\treturn listaMediciones\n\t\n\t# method that computes the TKM from a given list of measurements...\n\tdef calculateTKMDiaria(self, ListaMedidas):\n\t\t\n\t\tsumTKM=0\n\t\tnumCiclos=0\n\t\tconstanteGases = 0.008314472\n\n\t\t# using the actual formula...\n\t\tfor i in range (60, 101):\n\t\t\t\n\t\t\tsumExponencial = 0\n\t\t\t\n\t\t\tfor medida in ListaMedidas:\n\t\t\t\t\n\t\t\t\tgradoKelvin = medida+273.15\n\t\t\t\t\n\t\t\t\tsumExponencial = sumExponencial + math.exp((i/(constanteGases*gradoKelvin))*-1)\n\t\t\t\n\t\t\tinfoData = sumExponencial/len(ListaMedidas)\n\t\t\t\n\t\t\ttkm = (i/constanteGases)/(math.log(infoData)*-1)\n\t\t\ttkm = tkm - 273.15\n\t\t\tsumTKM = sumTKM+tkm\n\t\t\tnumCiclos = numCiclos+1\n\n\t\tpromedioTKM = sumTKM/numCiclos\n\t\t\n\t\treturn promedioTKM\n\t\n\t# for each date, fetch its measurements and compute the TKM\n\tdef calculateTKMAll(self):\n\t\t\n\t\tfor fecha in self.ListaFecha:\n\t\t\tself.tkmValues.update({str(fecha) : round (self.calculateTKMDiaria(self.getValueMeasure(fecha)),2)})\n\n'''\ndef main():\n\t\n\thora = 
TKMDiaria(\"4\")\n\t\n\thora.calculateTKMAll()\n\t\n\tprint hora.tkmValues\n\t\n\treturn 0\n\t\nif __name__ == '__main__':\n\t\n\tmain()\n'''\n","sub_path":"pythonScripts/scriptInformes/calculateTKMDiaria.py","file_name":"calculateTKMDiaria.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"333759656","text":"import array\nfrom collections import defaultdict\nimport ConfigParser\nimport cPickle as pickle\nimport numpy as np\nimport os\n\n__author__ = 'Matt Pugh'\n__version__ = 1.0\n\nFS_PATHS = 'FileSystemPaths'\nFS_BASE_DIR = 'base_dir'\n\nconfig = ConfigParser.ConfigParser()\nconfig.read('config.ini')\n\nEXT_INFO = 'spr'\nEXT_DATA = 'sdt'\nTYPES = {\n    0: 'B',  # 'unsigned char',\n    2: 'i',  # 'int',\n    3: 'f',  # 'float',\n    5: 'd'   # 'double'\n}\n\nMODE_CHIPS = 1\nMODE_IMAGES = 2\nMODE_TABLES = 3\n\nBASE_PATH = config.get(FS_PATHS, FS_BASE_DIR)\npaths = {\n    MODE_CHIPS: BASE_PATH + 'Chips',\n    MODE_IMAGES: BASE_PATH + 'Images',\n    MODE_TABLES: BASE_PATH + 'Tables'\n}\n\n\ndef get_idx():\n    if os.path.isfile('idx.p'):\n        return pickle.load(open('idx.p'))\n    else:\n        ci = ChipsIndex()\n        pickle.dump(ci, open('idx.p', 'w'))\n        return ci\n\n\ndef normalize_chip(chip, mu, sigma):\n    # element-wise standardization of a chip\n    A = (chip - mu) / sigma\n\n    return A\n\n\nclass ChipsIndex(object):\n\n    SPLIT_TRN = 'trn'\n    SPLIT_TST = 'tst'\n    SPLIT_BOTH = ['trn', 'tst']\n\n    HOM4 = ['A1', 'A2', 'A3', 'A4']\n    HOM38 = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6']\n    HOM56 = ['C1']\n    HET36 = ['D1', 'D2', 'D3', 'D4']\n    HET5 = ['E1', 'E2', 'E3', 'E4', 'E5']\n\n    ALL = ['C1', 'D4']\n\n    def __init__(self, exp='C', do_reshape=False):\n        self.exp = exp\n        self.do_reshape = do_reshape\n        self.vread = vread\n\n        self.x = None\n        self.y = None\n        self.i = None\n        self.normalized = None\n\n        self.__populate()\n\n        self.scoring = {}\n        self._load_scoring_table()\n        self.image_stats = {}\n        self._load_image_stats()\n\n    def _load_scoring_table(self):\n        with open(\"{0}/Experiments_Scoring_Table\".format(paths[MODE_TABLES])) as fp:\n            data = [x for x in fp.readlines() if not x.startswith(\"%\")]\n\n        for line in data:\n            exp, sub_exp, img_area, n_detection_opps = line.split()\n\n            for s in range(int(sub_exp)):\n                key = \"{0}{1}\".format(exp, s)\n                self.scoring[key] = {\n                    'area': img_area,\n                    'n_detections': n_detection_opps\n                }\n\n    def _load_image_stats(self):\n        ndim = 1024 ** 2\n\n        for i in range(1, 135):\n            A = np.reshape(vread('img' + str(i)), [ndim, 1])\n            self.image_stats[i] = (np.mean(A), np.std(A))\n\n    def __populate(self):\n        matches = defaultdict(dict)\n\n        for chip_name in get_chip_names():\n            A = vread(chip_name, MODE_CHIPS)\n            parts = chip_name.split(\"_\")\n            exp_id, exp_letter, exp_split = parts[1], parts[2][0], parts[2][1:]\n\n            if exp_letter != self.exp:  # Only interested in particular experiments\n                continue\n\n            if A.shape[0] % 15 != 0:\n                raise Exception(\"This says it's a C experiment, but rows % 15 != 0\")\n\n            windows = []\n\n            for i in range(A.shape[1]):\n                this_window = A[:,i]\n\n                if self.do_reshape:\n                    this_window = np.reshape(this_window, [15, 15])\n\n                windows.append(this_window)\n\n            matches[exp_id][exp_split] = windows\n\n        self.idx = matches\n\n    def reshape(self, source, target_dims=[15, 15]):\n        if source.shape[0] % target_dims[0] != 0:\n            raise Exception(\"Incorrect dimensions\")\n\n        return np.reshape(source, target_dims)\n\n    def experiments(self):\n        return sorted(self.idx.keys())\n\n    def get_all(self, normalized=False):\n        if self.x is None:\n            c1 = 
self.idx['C1']\n            d4 = self.idx['D4']\n\n            x = [z for z in c1[self.SPLIT_TRN][:]]\n            x.extend([z for z in d4[self.SPLIT_TRN]])\n            x.extend([z for z in d4[self.SPLIT_TST]])\n            x.extend([z for z in c1[self.SPLIT_TST]])\n\n            c1 = self.labels_for('C1')\n            d4 = self.labels_for('D4')\n\n            y = [z for z in c1[self.SPLIT_TRN][:]]\n            y.extend([z for z in d4[self.SPLIT_TRN]])\n            y.extend([z for z in d4[self.SPLIT_TST]])\n            y.extend([z for z in c1[self.SPLIT_TST]])\n\n            c1 = self.image_numbers_for('C1')\n            d4 = self.image_numbers_for('D4')\n\n            i = [z for z in c1[self.SPLIT_TRN][:]]\n            i.extend([z for z in d4[self.SPLIT_TRN]])\n            i.extend([z for z in d4[self.SPLIT_TST]])\n            i.extend([z for z in c1[self.SPLIT_TST]])\n\n            self.x = x\n            self.y = y\n            self.i = i\n\n        if not normalized:\n            return self.x, self.y\n        else:\n            if self.normalized is None:\n                Xn = []\n                imnums = self.i\n\n                for i, x in enumerate(self.x):\n                    Xn.append(normalize_chip(x, *self.image_stats[imnums[i]]))\n\n                self.normalized = Xn\n\n            return self.normalized, self.y\n\n    def training_split_for(self, exp):\n        return self.idx[exp][self.SPLIT_TRN]\n\n    def testing_split_for(self, exp):\n        return self.idx[exp][self.SPLIT_TST]\n\n    def all_for_exp(self, exp):\n        \"\"\"\n        For this method, exp can either be an absolute fold in the cross validation,\n        i.e. C1 -> {C1}, D3 -> {D3}\n        Or, it may simply be the first letter, in which case it'll return all training\n        and testing splits that match the mask exp*\n        i.e. C* -> {C1}, D* -> {D1, D2, D3, D4} etc.\n        \"\"\"\n        matches = [x for x in self.experiments() if x.startswith(exp)]\n        retval = dict.fromkeys(matches)\n\n        for k in matches:\n            retval[k] = {\n                'trn': self.training_split_for(k),\n                'tst': self.testing_split_for(k)\n            }\n\n        return retval\n\n    def image_numbers_for(self, exp):\n        retval = {}\n\n        for k in self.SPLIT_BOTH:\n            exp_name = \"exp_{}_N{}\".format(exp, k)\n            indexes = self.vread(exp_name, mode=MODE_CHIPS)[0]\n            indexes = [int(x) - 1 for x in indexes]  # MATLAB -> Python\n            numbers = [IMG_NUMBERS[exp][k][i] for i in indexes]\n            retval[k] = numbers\n\n        return retval\n\n    def labels_for(self, exp):\n        retval = {}\n\n        for k in self.SPLIT_BOTH:\n            exp_name = \"exp_{}_L{}\".format(exp, k)\n            retval[k] = self.vread(exp_name, mode=MODE_CHIPS)[0]\n\n        return retval\n\n\ndef get_all_pixel_windows():\n    chips = get_chip_names()\n    matches = {}\n\n    for chip_name in chips:\n        A = vread(chip_name, MODE_CHIPS)\n\n        if A.shape[0] % 15 == 0:\n            windows = []\n\n            # one window per column, matching ChipsIndex.__populate\n            for i in range(A.shape[1]):\n                this_window = A[:, i]\n                this_window = np.reshape(this_window, [15, 15])\n                windows.append(this_window)\n\n            matches[chip_name] = windows\n\n    return matches\n\n\ndef get_chip_names():\n    with open(\"{0}/newlist\".format(paths[MODE_CHIPS])) as fp:\n        files = set([x.split('.')[0] for x in fp])\n\n    return sorted(files)\n\n\ndef vread(filename, mode=MODE_IMAGES):\n    with open(\"{0}/{1}.{2}\".format(paths[mode], filename, EXT_INFO)) as idp:\n        lines = [x.strip() for x in idp]\n        ndim = int(lines[0])\n\n        if ndim != 2:\n            raise TypeError(\"Can only read two dimensional data\")\n\n        nc = int(lines[1])\n        nr = int(lines[4])\n        type = int(lines[7])\n\n    try:\n        precision = TYPES[type]\n    except KeyError:\n        raise NotImplementedError(\"Unrecognized data type\")\n\n    with open(\"{0}/{1}.{2}\".format(paths[mode], filename, EXT_DATA)) as fp:\n        A = array.array(precision)\n        A.fromfile(fp, nc * nr)\n\n    A = np.array(A)\n    A = A.reshape((nr, nc))\n\n    return A.transpose()\n\n\nIMG_NUMBERS = {\n    'A1': {\n        ChipsIndex.SPLIT_TRN: [2,3,4],\n        ChipsIndex.SPLIT_TST: [1]\n    },\n    'A2': {\n        ChipsIndex.SPLIT_TRN: [1,3,4],\n        
ChipsIndex.SPLIT_TST: [2]\n },\n\n 'A3': {\n ChipsIndex.SPLIT_TRN: [1,2,4],\n ChipsIndex.SPLIT_TST: [3]\n },\n\n 'A4': {\n ChipsIndex.SPLIT_TRN: [1,2,3],\n ChipsIndex.SPLIT_TST: [4]\n }, #B\n 'B1': {\n ChipsIndex.SPLIT_TRN: [5,6,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],\n ChipsIndex.SPLIT_TST: [7, 8, 9, 10, 11, 12]\n },\n 'B2': {\n ChipsIndex.SPLIT_TRN: [5,6,7,8,9,10,11,12,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],\n ChipsIndex.SPLIT_TST: [13, 14, 15, 16, 17, 18]\n },\n 'B3': {\n ChipsIndex.SPLIT_TRN: [5,6,7,8,9,10,11,12,13,14,15,16,17,18,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],\n ChipsIndex.SPLIT_TST: [19, 20, 21, 22, 23, 24]\n },\n 'B4': {\n ChipsIndex.SPLIT_TRN: [5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,31,32,33,34,35,36,37,38,39,40,41,42],\n ChipsIndex.SPLIT_TST: [25, 26, 27, 28, 29, 30]\n },\n 'B5': {\n ChipsIndex.SPLIT_TRN: [5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,37,38,39,40,41,42],\n ChipsIndex.SPLIT_TST: [31, 32, 33, 34, 35, 36]\n },\n 'B6': {\n ChipsIndex.SPLIT_TRN: [5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36],\n ChipsIndex.SPLIT_TST: [37, 38, 39, 40, 41, 42]\n }, #C\n 'C1': {\n ChipsIndex.SPLIT_TRN: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],\n ChipsIndex.SPLIT_TST: [79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134]\n }, #D\n 'D1': {\n ChipsIndex.SPLIT_TRN: [52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78],\n ChipsIndex.SPLIT_TST: [43,44,45,46,47,48,49,50,51]\n },\n 'D2': {\n ChipsIndex.SPLIT_TRN: [43,44,45,46,47,48,49,50,51,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78],\n ChipsIndex.SPLIT_TST: [52,53,54,55,56,57,58,59,60]\n },\n 'D3': {\n ChipsIndex.SPLIT_TRN: [43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,70,71,72,73,74,75,76,77,78],\n ChipsIndex.SPLIT_TST: [61,62,63,64,65,66,67,68,69]\n },\n 'D4': {\n ChipsIndex.SPLIT_TRN: [43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69],\n ChipsIndex.SPLIT_TST: [70,71,72,73,74,75,76,77,78]\n }, #E\n 'E1': {\n ChipsIndex.SPLIT_TRN: [50,68,69,75],\n ChipsIndex.SPLIT_TST: [46]\n },\n 'E2': {\n ChipsIndex.SPLIT_TRN: [46,68,69,75],\n ChipsIndex.SPLIT_TST: [50]\n },\n 'E3': {\n ChipsIndex.SPLIT_TRN: [46,50,69,75],\n ChipsIndex.SPLIT_TST: [68]\n },\n 'E4': {\n ChipsIndex.SPLIT_TRN: [46,50,68,75],\n ChipsIndex.SPLIT_TST: [69]\n },\n 'E5': {\n ChipsIndex.SPLIT_TRN: [46,50,68,69],\n ChipsIndex.SPLIT_TST: [75]\n },\n}\n","sub_path":"src/pyvov.py","file_name":"pyvov.py","file_ext":"py","file_size_in_byte":10983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"539335665","text":"from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem, QHeaderView\n\n\nclass ResultMessagesTreeWidget(QTreeWidget):\n def __init__(self, ResultMessages):\n super().__init__()\n\n # Store Parameters\n self.ResultMessages = ResultMessages\n\n # Header Setup\n self.setHeaderHidden(True)\n self.setRootIsDecorated(False)\n self.header().setSectionResizeMode(QHeaderView.ResizeToContents)\n\n def FillFromResultMessages(self):\n self.clear()\n for Key, Value in sorted(self.ResultMessages.items(), key=lambda x: int(x[0])):\n 
self.invisibleRootItem().addChild(ResultMessagesWidgetItem(Key, Value))\n\n    def SelectIndex(self, Index):\n        DestinationIndex = self.model().index(Index, 0)\n        self.setCurrentIndex(DestinationIndex)\n        self.scrollToItem(self.currentItem(), self.PositionAtCenter)\n\n\nclass ResultMessagesWidgetItem(QTreeWidgetItem):\n    def __init__(self, Result, Message):\n        super().__init__()\n\n        # Store Parameters\n        self.Result = Result\n        self.Message = Message\n\n        # Set Text\n        self.setText(0, self.Result + \": \" + self.Message)\n","sub_path":"Interface/Widgets/ResultMessagesTreeWidget.py","file_name":"ResultMessagesTreeWidget.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"420821460","text":"from __future__ import print_function\nfrom __future__ import division\nimport dynet as dy\nimport sys\nimport string\n\n# ----- simple Perceptron: using dynet ----- #\n# Name: Hyun A Chung\n\n# ----- reading input functions ----- #\n# read in necessary files\ndef read_files():\n    train_filename = sys.argv[1]\n    train_file = open(\"%s\" %(train_filename), \"r\")\n    test_file = open(\"test_set\", \"r\")\n\n    # read in train file\n    # train_list: list of each lines in train file\n    train_list = []\n    for line in train_file:\n        train_list.append(line)\n\n    # read in test file\n    # test_list: list of each lines in test file\n    test_list = []\n    for line in test_file:\n        test_list.append(line)\n\n    train_file.close()\n    test_file.close()\n\n    return train_list, test_list\n\n\n# read in unique words in the sample data and training instances\ndef read_trainset(train_list):\n    inputs = []\n    targets = []\n    unique_vector = []\n    features_total = 0\n    count = 0\n\n    for line in train_list:\n        count += 1\n        line = line.strip().split()\n\n        # add target to targets list\n        targets.append(int(line.pop(0)))\n\n        # add the current line to inputs list\n        line[:] = [word.strip(string.punctuation) for word in line]\n        inputs.append(line)\n\n        # count number of unique words in the input\n        for word in line:\n            if word not in unique_vector:\n                features_total += 1\n                unique_vector.append(word)\n\n    print(\"%s lines for train\" % count)\n    return inputs, targets, unique_vector, features_total\n\n# create network input vector in respect to number of unique words in the train set\ndef create_inputVector(word_inputs, unique_vector, features_total):\n    network_input = []\n    for line in word_inputs:\n        # set all entries on the vector to 0\n        inputVector = [0] * features_total\n\n        # set position to 1 if the index's unique word exist in the given tweet\n        for word in line:\n            inputVector[unique_vector.index(word)] = 1\n\n        network_input.append(inputVector)\n\n    return network_input\n\n\n# ----- create network functions ----- #\n# create a network for the given input and output\ndef create_network(pWeight, inputs, expected_answer):\n    # new computation graph\n    dy.renew_cg()\n\n    # add parameters to graph as expressions\n    Weight = dy.parameter(pWeight)\n    input_dy = dy.vecInput(len(inputs))\n    input_dy.set(inputs)\n    target_output = dy.scalarInput(expected_answer)\n    output = dy.logistic(dy.tanh(Weight*input_dy))\n    loss = dy.binary_log_loss(output, target_output)\n    return loss\n\n# ----- test network functions ----- #\n# test the network for the given input\ndef test_network(pWeight, input_dy):\n    # add parameters to graph as expressions\n    Weight = dy.parameter(pWeight)\n\n    # return what the network returns\n    output = dy.logistic(dy.tanh(Weight*input_dy))\n    return output\n\n# run test_network function for the given input\n
function for the given input\ndef test(test_list, pWeight, unique_vector, features_total):\n input_dy = dy.vecInput(features_total)\n\n for line in test_list:\n test_line = line.split()\n target = test_line[0]\n test_line = test_line[1:]\n test_vector = [0] * features_total\n all_unique = True\n\n for word in test_line:\n try:\n test_vector[unique_vector.index(word)] = 1\n all_unique = False\n except:\n continue\n\n input_dy.set(test_vector)\n\n if all_unique:\n output = 0\n print( \"%s: %s\" %(target, output))\n else:\n output = test_network(pWeight, input_dy)\n print( \"%s: %s\" %(target, output.value()))\n\n\n# ------- main ------- #\ndef main():\n word_inputs = [] \n targets = []\n unique_vector = []\n network_input = []\n\n # read in input files first\n train_list, test_list = read_files()\n\n # ----- training ----- #\n # analysis the train_list\n word_inputs, targets, unique_vector, features_total = read_trainset(train_list)\n network_input = create_inputVector(word_inputs, unique_vector, features_total)\n\n # add features here and increase features_total\n\n # create parameters\n para_collec = dy.ParameterCollection()\n pWeight = para_collec.add_parameters((1, features_total))\n trainer = dy.SimpleSGDTrainer(para_collec)\n\n seen_instances = 0\n total_loss = 0\n\n for line, target in zip(network_input, targets):\n loss = create_network(pWeight, line, target)\n seen_instances += 1\n total_loss += loss.value()\n loss.backward()\n trainer.update()\n if (seen_instances > 1 and seen_instances % 100 == 0):\n print(\"average loss is: %s\" %(total_loss / seen_instances))\n\n # ----- testing ----- #\n print(\"testing...\")\n test(test_list, pWeight, unique_vector, features_total)\n\n# ---------------------- #\nif __name__ == \"__main__\":\n main()","sub_path":"simple perceptron/perceptron_dynet.py","file_name":"perceptron_dynet.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"119676465","text":"from scipy.spatial import distance as dist\nimport cv2\nimport numpy as np\nimport imutils\nfrom collections import OrderedDict\nfrom utils.preprocessor import Preprocessor\nfrom car_detector import CarDetector\n\n\nclass ColorDetector:\n\n def __init__(self):\n # initialize the colors dictionary, containing the color\n # name as the key and the RGB tuple as the value\n colors = OrderedDict({\n \"black\": (10, 10, 10),\n \"orange\": (255, 150, 0),\n \"gray\": (120, 120, 120),\n \"white\": (220, 220, 220),\n \"red\": (190, 50, 50),\n \"green\": (30, 210, 30),\n \"blue\": (30, 30, 210)})\n # allocate memory for the L*a*b* image, then initialize\n # the color names list\n self.lab = np.zeros((len(colors), 1, 3), dtype=\"uint8\")\n self.colorNames = []\n # loop over the colors dictionary\n for (i, (name, rgb)) in enumerate(colors.items()):\n # update the L*a*b* array and the color names list\n self.lab[i] = rgb\n self.colorNames.append(name)\n # convert the L*a*b* array from the RGB color space\n # to L*a*b*\n self.lab = cv2.cvtColor(self.lab, cv2.COLOR_RGB2LAB)\n\n def label(self, image, c):\n # construct a mask for the contour, then compute the\n # average L*a*b* value for the masked region\n mask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.drawContours(mask, [c], -1, 255, -1)\n mask = cv2.erode(mask, None, iterations=2)\n mean = cv2.mean(image, mask=mask)[:3]\n # initialize the minimum distance found thus far\n minDist = (np.inf, None)\n # loop over the known L*a*b* color values\n for (i, row) in 
enumerate(self.lab):\n            # compute the distance between the current L*a*b*\n            # color value and the mean of the image\n            d = dist.euclidean(row[0], mean)\n            # if the distance is smaller than the current distance,\n            # then update the bookkeeping variable\n            if d < minDist[0]:\n                minDist = (d, i)\n        # return the name of the color with the smallest distance\n        return self.colorNames[minDist[1]]\n\n\ncl = ColorDetector()\n\n\ndef get_color(image_path):\n    # only read from disk when a path is given; otherwise the argument is already an image\n    if type(image_path) == str:\n        image = cv2.imread(image_path)\n    else:\n        image = image_path\n    resized = cv2.resize(image, (800, 800))\n    image = cv2.resize(image, (800, 800))\n    ratio = image.shape[0] / float(resized.shape[0])\n    gray, canny = Preprocessor.gray_blur_canny(image)\n    color_area = {}\n\n    _, cnts, _ = cv2.findContours(\n        canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n    for c in cnts:\n        area = cv2.contourArea(c)\n\n        epsilon = 0.09*cv2.arcLength(c, True)\n        approx = cv2.approxPolyDP(c, epsilon, True)\n        # Process a rectangle\n\n        if area > 2_000 and area < 15_000:\n            x, y, w, h = cv2.boundingRect(c)\n\n            M = cv2.moments(c)\n            cX = int((M[\"m10\"] / M[\"m00\"]) * ratio)\n            cY = int((M[\"m01\"] / M[\"m00\"]) * ratio)\n            # detect the shape of the contour and label the color\n            color = cl.label(image, c)\n            color_area[color] = color_area.get(color, 0) + area\n            # multiply the contour (x, y)-coordinates by the resize ratio,\n            # then draw the contours and the name of the shape and labeled\n            # color on the image\n            c = c.astype(\"float\")\n            c *= ratio\n            c = c.astype(\"int\")\n            text = color\n            cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n            cv2.putText(image, text, (cX, cY),\n                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n    # show the output image\n    cv2.imshow(\"Image\", image)\n    cv2.waitKey(0)\n\n    if color_area:\n        return max(color_area, key=color_area.get)\n\n    return \"\"\n\n\nc = CarDetector()\nimg_path = \"images/ford.jpg\"\nbox = c.detect(img_path)\nimg = cv2.imread(img_path)\nprint(box)\ncar_focus = Preprocessor.crop_image(img, box)\nprint(get_color(car_focus))\n","sub_path":"Computer Vision/car_value_predictor/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"157462466","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport sys\nsys.path.append('./../')\n\nif __name__==\"__main__\":\n    from sw_eqns import *\n    sig2=2\n    \n    sw_eqns(# General parameters for simulation #\n            final_time=30.0,\n            nDOut=30,\n            restart_from_frame=None,\n            # about initial condition\n            A=0.05,\n            sig2=sig2,\n            mwl=0.75)\n","sub_path":"PyClaw/versus_KdV/run_sw_eqns.py","file_name":"run_sw_eqns.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"104905051","text":"\nimport pickle\nimport pkg_resources\n\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport numpy as np\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\n\nfrom data_slicer.cmaps import cmaps\nfrom data_slicer.imageplot import ImagePlot\nfrom data_slicer.cutline import Cutline\n\n#_Parameters____________________________________________________________________\n\nDATA_PATH = pkg_resources.resource_filename('data_slicer', 'data/')\ndatafile = DATA_PATH + 'testdata_100_150_200.p'\n#datafile = '/home/kevin/Documents/qmap/materials/PLCCO/2018_10_SIS/PLCCO_0000.p'\n\n## Visual (keep the unused alternative commented out instead of shadowing it)\n#gloption = 'opaque'\ngloption = 'translucent'\n#cmap = 'viridis'\ncmap = 
'terrain'\n\n#_GUI_setup_____________________________________________________________________\n\n# Initialize the application\napp = QtGui.QApplication([])\n\n# Set up the main window and set a central widget\nwindow = QtGui.QMainWindow()\nwindow.resize(800, 800)\ncentral_widget = QtGui.QWidget()\nwindow.setCentralWidget(central_widget)\n\n# Create a layout\nlayout = QtGui.QGridLayout()\ncentral_widget.setLayout(layout)\n\n# Add the selector view\nselector = ImagePlot()\n#selector = gl.GLViewWidget()\nlayout.addWidget(selector, 1, 0, 1, 1)\n\n# Set up the main 3D view widget\nmain = gl.GLViewWidget()\nlayout.addWidget(main, 0, 0, 1, 1)\n\n# Somehow this is needed for both widgets to be visible\nlayout.setRowStretch(0, 1)\nlayout.setRowStretch(1, 1)\n\nwindow.show()\n\n#_Data_loading_and_presenting___________________________________________________\n\n# Load data\nwith open(datafile, 'rb') as f :\n    data = pickle.load(f)\n#    D = pickle.load(f)\n#data = np.rollaxis(D.data, 2)\n#data = np.rollaxis(data, 2, 1)\n\nnx, ny, nz = data.shape\nprint(nx, ny, nz)\n#x0, y0, z0 = 0, 0, 0\nx0, y0, z0 = [int(2*n/5) for n in [nx, ny, nz]]\n\n# Create Textures\nlevels = [data.min(), data.max()]\ncmap = cmaps[cmap]\nlut = cmap.getLookupTable()\ncuts = [data[x0], data[:,y0], data[:,:,z0]]\ntextures = [pg.makeRGBA(d, levels=levels, lut=lut)[0] for d in cuts]\nplanes = [gl.GLImageItem(texture, glOptions=gloption) for texture in textures] \n\n## Apply transformations to get planes where they need to go\nxscale, yscale, zscale = 1/nx, 1/ny, 1/nz\n# xy plane\nxy = planes[2]\nxy.scale(xscale, yscale, 1)\nxy.translate(-1/2, -1/2, -1/2 + z0*zscale)\nmain.addItem(xy)\n\n# yz plane (appears in the coordinate system along xy)\nyz = planes[0]\nyz.scale(yscale, zscale, 1)\nyz.rotate(90, 0, 0, 1)\nyz.rotate(90, 0, 1, 0)\nyz.translate(-1/2 + x0*xscale, -1/2, -1/2)\n#main.addItem(yz)\n\n# xz plane (appears in the coordinate system along xy)\nxz = planes[1]\nxz.scale(xscale, zscale, 1)\nxz.rotate(90, 1, 0, 0)\nxz.translate(-1/2, -1/2 + y0*yscale, -1/2)\n#main.addItem(xz)\n\n#_Selector______________________________________________________________________\n\n# Set an image in the selector plot\nselector.set_image(cuts[2])\ncutline = Cutline(selector)\ncutline.initialize()\n\n# A plane representing the cutline\ncut, coords = cutline.get_array_region(data, selector.image_item, \n                                       returnCoords=True)\ncut_texture = pg.makeRGBA(cut, levels=levels, lut=lut)[0]\ncutplane = gl.GLImageItem(cut_texture, glOptions=gloption)\n\n# Scale and move it to origin in upright position\n# Upon initialization, this is like an xz plane\ncutplane.scale(xscale, zscale, 1)\ncutplane.rotate(90, 1, 0, 0)\ncutplane.translate(-1/2, 0, -1/2)\ntransform0 = cutplane.transform()\n\nmain.addItem(cutplane)\n\n# Conversion from ROI coordinates to Scene coordinates\nroi_coords = cutline.roi.getLocalHandlePositions()\n# also useful for later\noriginal_roi_x0 = roi_coords[0][1].x()\noriginal_roi_x1 = roi_coords[1][1].x()\n# length in ROI coordinates\nlength_in_roi = np.abs(original_roi_x1 - original_roi_x0)\n# length in data coordinates\n#length_in_data = np.abs(data_coords.bottomLeft().x() - data_coords.topRight().x())\nlength_in_data = np.abs(coords[0,0] - coords[0,-1])\n# conversion in units of \"roi/data\"\nroi_data_conversion = length_in_roi/length_in_data\n# distance from left handle to M in data coords\ndistance_p0_m = length_in_data/2\nprint('Distance: ', distance_p0_m)\n\n\ndef update_texture() :\n    print('==')\n    print('LocalHandles: ', 
cutline.roi.getLocalHandlePositions())\n print('Scene: ', cutline.roi.getSceneHandlePositions())\n print('++')\n transform = cutline.roi.getArraySlice(data, selector.image_item)[1]\n\n cut, coords = cutline.get_array_region(data, selector.image_item, \n returnCoords=True)\n texture = pg.makeRGBA(cut, levels=levels, lut=lut)[0]\n# xy.setTexture(texture)\n cutplane.setData(texture)\n\n ## Find the original center of mass (if no length changes would have been applied)\n # The current handle positions in data coordinates are in p0 and p1\n# bounds = cutline.roi.parentBounds()\n# print(bounds)\n# p0 = bounds.bottomLeft()\n# p1 = bounds.topRight()\n p0 = coords[[0, 1], [0, 0]]\n p1 = coords[[0, 1], [-1, -1]]\n # Find how much they have been stretched or compressed with respect to \n # the original handles\n new_roi_coords = cutline.roi.getLocalHandlePositions()\n delta0 = (original_roi_x0 - new_roi_coords[0][1].x())/roi_data_conversion\n# delta1 = original_roi_x1 - new_roi_coords[1][1].x()\n # Construct a unit vector pointing from P0 to P1\n diff = p1 - p0\n# e_pp = diff / np.sqrt(diff.x()**2 + diff.y()**2)\n e_pp = diff / np.sqrt(diff.dot(diff))\n print('p0: ', p0)\n print('p1: ', p1)\n print('diff', diff)\n print('e_pp: ', e_pp)\n\n # Now the original midpoint is at p0 + e_pp*(distance_p0_m+delta0)\n print('delta0: ', delta0)\n# M = p0 + e_pp*(distance_p0_m + delta0)\n # Somehow, the way the new plane is calculated, delta0 must not be added\n# M = p0 + e_pp*distance_p0_m\n M = p0\n# tx, ty = M.x(), M.y()\n tx, ty = M[0], M[1]\n print('tx, ty: {}, {}'.format(tx, ty))\n\n# tx, ty = [0.5*(p1.x() + p0.x()), 0.5*(p1.y() + p0.y())]\n tx *= xscale\n ty *= yscale\n print('tx, ty: {}, {}'.format(tx, ty))\n\n # Rotate around origin\n try :\n# alpha = np.arctan((p1.y()-p0.y()) / (p1.x()-p0.x()))\n alpha = np.arctan((p1[1]-p0[1]) / (p1[0]-p0[0]))\n except ZeroDivisionError :\n alpha = np.sign(p1[1]-p0[1]) * np.pi/2\n # Correct for special cases\n if p1[0] < p0[0] :\n alpha -= np.sign(p1[1]-p0[1]) * np.pi\n alpha_deg = alpha*180/np.pi\n print('alpha_deg: {}'.format(alpha_deg))\n\n # Get the right scaling\n# beta = alpha%(np.pi/2)\n# print('beta (deg): ', 180/np.pi*beta)\n# critical_angle = np.arctan(ny/nx)\n# if beta < critical_angle :\n# scaling = nx * np.cos(beta)\n# else :\n# scaling = ny * np.sin(beta)\n# print('scaling: ', scaling)\n \n\n # Send to origin and apply rotation and translation\n# new_transform = QtGui.QMatrix4x4()\n# new_transform.translate(tx-1/2, ty-1/2, 0)\n# new_transform.rotate(alpha, 0, 0, 1)\n# new_transform *= transform0\n# cutplane.setTransform(new_transform)\n\n# cutplane.setTransform(QtGui.QMatrix4x4())\n# cutplane.scale(xscale, zscale, 1)\n# cutplane.scale(1/scaling, zscale, 1)\n# cutplane.rotate(90, 1, 0, 0)\n# cutplane.rotate(alpha_deg, 0, 0, 1)\n# cutplane.translate(tx-1/2, ty-1/2, -1/2)\n nt = QtGui.QMatrix4x4()\n nt.translate(tx-1/2, ty-1/2, -1/2)\n nt.scale(xscale, yscale, 1)\n nt.rotate(alpha_deg, 0, 0, 1)\n nt.rotate(90, 1, 0, 0)\n nt.scale(1, zscale, 1)\n cutplane.setTransform(nt)\n\ncutline.sig_region_changed.connect(update_texture)\n\n#main.setCameraPosition\n\n# Draw a coordinate system\n#line_x = gl.GLLinePlotItem(pos=np.array([[0, 0, 0], [1, 0, 0]]), color=(1, 0, 0, 1))\n#line_y = gl.GLLinePlotItem(pos=np.array([[0, 0, 0], [0, 1, 0]]), color=(0, 1, 0, 1))\n#line_z = gl.GLLinePlotItem(pos=np.array([[0, 0, 0], [0, 0, 1]]), color=(0, 0, 1, 1))\n#main.addItem(line_x)\n#main.addItem(line_y)\n#main.addItem(line_z)\naxis = 
gl.GLAxisItem()\nmain.addItem(axis)\n\napp.exec_()\n#if __name__ == '__main__':\n# import sys\n# if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n# QtGui.QApplication.instance().exec_()\n\n\n","sub_path":"data_slicer/tests/cool_test.py","file_name":"cool_test.py","file_ext":"py","file_size_in_byte":8019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"528549519","text":"from unittest import TestCase, main as unittest_main\n\nfrom gramtools.tests.mocks import _MockVcfRecord\nfrom gramtools.commands.discover import discover\nfrom gramtools.commands.genotype.seq_region_map import (\n SeqRegionMapper,\n SearchableSeqRegionsMap,\n)\n\n\nclass TestRebaseVcfRecord(TestCase):\n def test_SingleSNPInNonSite(self):\n # base sequence: T TAT CGG\n # derived sequence: T G CGG\n chrom_sizes = {\"JAC\": 5}\n base_records = [_MockVcfRecord(pos=2, ref=\"TAT\", alts=[\"G\"])]\n region_map = SeqRegionMapper(base_records, chrom_sizes).get_map()\n region_searcher = SearchableSeqRegionsMap(region_map)\n\n discov_record = _MockVcfRecord(pos=3, ref=\"C\", alts=[\"G\"])\n new_vcf_record = discover._rebase_vcf_record(\n discov_record, \"JAC\", region_searcher\n )\n\n result = _MockVcfRecord(\n new_vcf_record.pos, new_vcf_record.ref, new_vcf_record.alts\n )\n expected = _MockVcfRecord(pos=5, ref=\"C\", alts=[\"G\"])\n\n self.assertEqual(expected, result)\n\n def test_variant_in_chromo_with_no_prg_variants(self):\n # chr1 base: T TAT CGG\n # chr1 derived: T G CGG\n # chr2 base: TTTTT\n # chr2 derived: TTTTT\n\n chrom_sizes = {\"chr1\": 7, \"chr2\": 5}\n base_records = [_MockVcfRecord(pos=2, ref=\"TAT\", alts=[\"G\"], chrom=\"chr1\")]\n mapped_regions = SeqRegionMapper(base_records, chrom_sizes).get_map()\n region_searcher = SearchableSeqRegionsMap(mapped_regions)\n\n discov_record = _MockVcfRecord(pos=1, ref=\"TT\", alts=[\"GA\"], chrom=\"chr2\")\n new_vcf_record = discover._rebase_vcf_record(\n discov_record, \"chr2\", region_searcher\n )\n self.assertEqual(discov_record, new_vcf_record)\n\n def test_StartsAtNonSite_EndsAtSite(self):\n # base sequence: T TAT CGG\n # derived sequence: T G CGG\n chrom_sizes = {\"JAC\": 7}\n base_records = [_MockVcfRecord(pos=2, ref=\"TAT\", alts=[\"G\"])]\n mapped_regions = SeqRegionMapper(base_records, chrom_sizes).get_map()\n region_searcher = SearchableSeqRegionsMap(mapped_regions)\n\n discov_record = _MockVcfRecord(pos=1, ref=\"TG\", alts=[\"TAA\"])\n new_vcf_record = discover._rebase_vcf_record(\n discov_record, \"JAC\", region_searcher\n )\n\n result = _MockVcfRecord(\n new_vcf_record.pos, new_vcf_record.ref, new_vcf_record.alts\n )\n expected = _MockVcfRecord(1, \"TTAT\", [\"TAA\"])\n\n self.assertEqual(expected, result)\n\n def test_SiteInBetweenNonSites(self):\n \"\"\"\n A test case where the variation on top of the inferred reference overlaps: a non-variant site, a variant site,\n and a non-variant site in the prg.\n\n What we need is for the rebased ref to include all three sites.\n \"\"\"\n # base sequ: T TAT CGG\n # secondary: T G CGG\n chrom_sizes = {\"JAC\": 7}\n base_records = [_MockVcfRecord(pos=2, ref=\"TAT\", alts=[\"G\"])]\n mapped_regions = SeqRegionMapper(base_records, chrom_sizes).get_map()\n region_searcher = SearchableSeqRegionsMap(mapped_regions)\n\n discov_record = _MockVcfRecord(pos=1, ref=\"TGCG\", alts=[\"GGCT\"])\n\n new_vcf_record = discover._rebase_vcf_record(\n discov_record, \"JAC\", region_searcher\n )\n\n result = _MockVcfRecord(\n new_vcf_record.pos, new_vcf_record.ref, 
new_vcf_record.alts\n        )\n        expected = _MockVcfRecord(pos=1, ref=\"TTATCG\", alts=[\"GGCT\"])\n\n        self.assertEqual(expected, result)\n\n    def test_SNP_OnTopOfIndel(self):\n        \"\"\"\n        A test case where we find a SNP on top of an insertion in the inferred reference.\n\n        What we need is for the rebased alt to include the flanking alt bases, which are implied to be present in the discov_record.\n        \"\"\"\n        # base sequ:      T TAT CGG T     A\n        # secondary:     T G   CGG TCTGC A\n        chrom_sizes = {\"JAC\": 9}\n        base_records = [\n            _MockVcfRecord(pos=2, ref=\"TAT\", alts=[\"G\"]),\n            _MockVcfRecord(pos=8, ref=\"T\", alts=[\"TCTGC\"]),\n        ]\n        mapped_regions = SeqRegionMapper(base_records, chrom_sizes).get_map()\n        region_searcher = SearchableSeqRegionsMap(mapped_regions)\n\n        discov_record = _MockVcfRecord(pos=9, ref=\"G\", alts=[\"A\"])\n\n        new_vcf_record = discover._rebase_vcf_record(\n            discov_record, \"JAC\", region_searcher\n        )\n\n        result = _MockVcfRecord(\n            new_vcf_record.pos, new_vcf_record.ref, new_vcf_record.alts\n        )\n        expected = _MockVcfRecord(8, \"T\", [\"TCTAC\"])\n\n        self.assertEqual(expected, result)\n\n    def test_multiple_deletions(self):\n        \"\"\"\n        A test case where we discover a deletion on top of a deletion in a variant site;\n        as well as an extra deletion in a non-variant site.\n\n        There is also a SNP among the original deletion, to make it plausible that quasimap/infer picks this variant.\n\n        To make it harder, the discovered variation is also reported inside a variant site, so we expect the rebased alt to be elongated.\n\n        We expect the rebased ref to include all deleted bases.\n        \"\"\"\n        # base reference:     CAA C GCTA CAA\n        # inferred reference: C   C GAT  CAA\n\n        chrom_sizes = {\"JAC\": 11}\n        base_records = [\n            _MockVcfRecord(pos=1, ref=\"CAA\", alts=[\"C\"]),\n            _MockVcfRecord(pos=5, ref=\"GCTA\", alts=[\"GAT\"]),\n        ]\n        mapped_regions = SeqRegionMapper(base_records, chrom_sizes).get_map()\n        region_searcher = SearchableSeqRegionsMap(mapped_regions)\n\n        discov_record = _MockVcfRecord(pos=4, ref=\"ATC\", alts=[\"A\"])\n        new_vcf_record = discover._rebase_vcf_record(\n            discov_record, \"JAC\", region_searcher\n        )\n\n        result = _MockVcfRecord(\n            new_vcf_record.pos, new_vcf_record.ref, new_vcf_record.alts\n        )\n        expected = _MockVcfRecord(pos=5, ref=\"GCTAC\", alts=[\"GA\"])\n\n        self.assertEqual(expected, result)\n\n\nif __name__ == \"__main__\":\n    unittest_main()\n","sub_path":"gramtools/tests/commands/discover/test_discover.py","file_name":"test_discover.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"297268480","text":"import yaml\nimport requests\nimport telebot\nfrom bs4 import BeautifulSoup\nfrom yaml.loader import FullLoader\n\n\n# Getting telegram token\nwith open(\"config.yaml\", 'r') as file:\n    data = yaml.load(file, Loader=FullLoader)\n    TELEBOT_TOKEN = data[\"TELEBOT_TOKEN\"]\n    TELEGRAM_GROUP = data[\"TELEGRAM_GROUP\"]\n\n# Configuring Telebot\nbot = telebot.TeleBot(TELEBOT_TOKEN)\nbot.config[\"api_key\"] = TELEBOT_TOKEN\n\n\nclass NotificationBot():\n\n    def __init__(self, base_url) -> None:\n        self.base_url = base_url\n        self.cookies = None\n        self.headers = None\n        self.data = None\n\n        with open(\"itemlist.txt\", 'r') as file:\n            self.currentitems = set([line.strip() for line in file])\n\n    def addHeaders(self, **kwargs):\n        if \"cookies\" in kwargs.keys():\n            self.cookies = kwargs[\"cookies\"]\n        if \"headers\" in kwargs.keys():\n            self.headers = kwargs[\"headers\"]\n        if \"data\" in kwargs.keys():\n            self.data = kwargs[\"data\"]\n
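    # Illustrative usage (editor's sketch; the URL and header values below are invented\n    # placeholders, not taken from the original project):\n    #   bot = NotificationBot(\"https://example.com\")\n    #   bot.addHeaders(headers={\"User-Agent\": \"Mozilla/5.0\"}, cookies={})\n    #   html = bot.makeRequest(bot.base_url)\n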
\n    def makeRequest(self, url, post=False):\n        if not post:\n            response = requests.get(\n                url, headers=self.headers, cookies=self.cookies, data=self.data)\n        else:\n            response = requests.post(\n                url, headers=self.headers, cookies=self.cookies, data=self.data)\n        return response.text\n\n    def send_notification(self, message):\n        bot.send_message(TELEGRAM_GROUP, message)\n","sub_path":"notificationBot.py","file_name":"notificationBot.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"313342226","text":"import itertools\n\nf = open('inputs.txt', 'r')\nr = f.readlines()\n# use a name that does not shadow the built-in sum()\ntotal = 0\n\n# part 1\n# for line in r:\n#     frequency = int(line.replace(\"\\n\", \"\"))\n#     total += frequency\n# print(total)\n\n# part 2\nseen = set()\nfor line in itertools.cycle(r):\n    frequency = int(line.replace(\"\\n\", \"\"))\n    total += frequency\n    if total in seen:\n        print(total)\n        break\n    seen.add(total)\nf.close()\n","sub_path":"Day01/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"452284895","text":"# A.I. project for the Artificial Intelligence and Intelligent Systems course.\n# Depth-First Search (Busca por Profundidade)\n\nimport sys\nimport os\n\n\ndef search(start, target, relation_matrix):\n    '''\n    Function for finding the target in a graph.\n\n    PARAMS:\n    * start -> The initial start position.\n    * target -> The desired target position.\n    * relation_matrix -> The relation (adjacency) matrix.\n\n    * RETURNS: If successful, the list of visited nodes ending at the target; otherwise an empty list\n\n    '''\n\n    stack = [start]\n    route = []\n\n    # While the stack is not empty\n    while len(stack) != 0:\n\n        # Pop the last element from the stack and record it on the route\n        currentCity = stack.pop()\n        route.append(currentCity)\n\n        # Check whether the target has been reached\n        if currentCity == target:\n            return route\n\n        # Push the children related to the current city onto the stack\n        for relation in relation_matrix:\n\n            if relation[0] == currentCity:\n                stack.append(relation[1])\n\n    return []\n\n\nif __name__ == '__main__':\n\n    # Creating the relation matrix\n    relation_matrix = []\n\n    # Creating the start and target variables\n    start = None\n    target = None\n\n    # Opening a read buffer on Teste.txt\n    test_file = open('Teste.txt', \"r\")\n\n    # Reading the contents of the txt\n    content = test_file.readlines()\n\n    # Getting information from each line, except the last 2...\n    for index in range(len(content) - 2):\n\n        line = content[index]\n\n        line = line[:-2]  # Removing the ; and the \\n\n        cities = line.split(', ')  # Splitting the cities\n\n        relation_matrix.append([cities[0], cities[1]])\n\n    # Loading the start and target points\n    start = content[len(content) - 2]\n    start = start.replace(';', '')\n    start = start[:-1]\n\n    target = content[len(content) - 1]\n    target = target.replace(';', '')\n\n    # Computing the best possible route using Depth-First Search\n    print(search(start, target, relation_matrix))\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"240809859","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndef sigmoid(z):\r\n    a = 1 / (1 + np.exp(-z))\r\n\r\n    return a\r\n
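# Quick sanity check (editor's illustrative note, not part of the original tutorial):\r\n# sigmoid(0) is exactly 0.5 and large |z| saturates towards 1 or 0, e.g.\r\n#   sigmoid(np.array([0.0, 10.0, -10.0])) ~ array([0.5, 0.9999546, 0.0000454])\r\n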
\r\ndef initWeight(n):\r\n    W = np.zeros(n)\r\n    np.random.seed(n)\r\n    for i in range(n):\r\n        W[i] = np.random.random()  # call the function; assigning np.random.random itself was a bug\r\n\r\n    return W\r\n\r\ndef lossFunction(m, Y_hat, Y): # m is the sample size, Y_hat the predictions, Y the labels\r\n    first = np.dot(Y, np.log(Y_hat))\r\n    second = np.dot((1 - Y), np.log(1 - Y_hat))\r\n    cost = (-1 / m ) * np.sum(first + second)\r\n\r\n    return cost\r\n\r\ndef MiniBatch_GradientDescent(batch_size,X, Y, Y_hat, W, lr):\r\n    # keep one gradient component per weight; the extra np.sum collapsed the gradient to a scalar\r\n    dW = (1 / batch_size) * np.dot(Y_hat - Y, X)\r\n    W = W - lr * dW\r\n\r\n    return W\r\n\r\ndef train(batch_size, X_batch, Y_batch, Y_hat_batch, m, W, lr):\r\n    # Stopping criterion: a fixed number of iterations\r\n    for iter in range(800):\r\n        W = MiniBatch_GradientDescent(batch_size, X_batch, Y_batch, Y_hat_batch, W, lr)\r\n        if iter % 50 == 0:\r\n            cost = lossFunction(batch_size, Y_hat_batch, Y_batch)\r\n            print('iter = {0}, cost = {1}'.format(iter, cost))\r\n\r\nif __name__ == '__main__':\r\n    data = pd.read_table('LogiReg_data.txt', sep = ',')\r\n    X = data.iloc[:, 0:2]\r\n    X[-1] = pd.Series(np.array([1] * 100)) # append a column of ones to X as the bias b\r\n    Y = data.iloc[:, -1]\r\n\r\n    # Random initialization of the weights:\r\n    W = initWeight(X.shape[1])\r\n\r\n    lr = 0.0002\r\n    batch_size = 64\r\n    # X_batch, Y_batch, Y_hat_batch\r\n    # train(......)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"4-LogiRegr_Tutorial.py","file_name":"4-LogiRegr_Tutorial.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"556900520","text":"\"\"\"\n@author: Joerg Landthaler\n@credits: Christian Osendorfer\n@modify: Daichi Mochihashi\n@origin: Nov, 2011\n@date: Jun, 2013\n@organization: TUM, I6, Machine Learning Group\n@summary: Implementation of the Replicated Softmax model,\n          as presented by R. Salakhutdinov & G.E. Hinton\n          in http://www.mit.edu/~rsalakhu/papers/repsoft.pdf\n@version: 1.0\n@$Id: rsm_numpy.py,v 1.7 2013/06/28 10:48:44 daichi Exp $\n@\n\"\"\"\nimport scipy as sp\nimport numpy as np\nimport re\nimport sys\nimport gzip\nimport getopt\nimport pickle\nimport cPickle\nimport os, sys\nimport logging\n\nclass RSM(object):\n    def train(self, data, stopping_threshold, retrain, model, proto, units, epochs=1000, iter=1, momentum=0.001, rate=0.001, weightinit=0.001, btsz=1):\n        \"\"\"\n        CD-n training of RSM.\n        @param data: a (rowwise) sample matrix. Number of samples should be divisible by btsz.\n        @param units: #latent topics\n        @param epochs: #training epochs\n        @param rate: learning rate\n        @param weightinit: scaling of random weight initialization\n        @param momentum: momentum rate\n        @param btsz: batchsize \n        \"\"\"\n
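        # Editor's note (illustrative, not part of the original script): with a 5-word\n        # vocabulary, a document v = [2, 0, 1, 0, 3] contains D = 6 words in total, and\n        # each hidden unit j activates with p(h_j = 1 | v) = sigmoid(v . w_vh[:, j] + D * w_h[j]);\n        # the h1 = sigmoid(...) line below computes exactly this for a whole batch.\n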
        dictsize = data.shape[1]\n        # initialize weights\n        if retrain is None:\n            w_vh = weightinit * np.random.randn(dictsize, units)\n            w_v = weightinit * np.random.randn(dictsize)\n            w_h = np.zeros((units))\n        else:\n            file = open(retrain, 'rb')\n            reload_result = pickle.load(file)\n            file.close()\n            w_vh = reload_result.get(\"w_vh\")\n            w_v = reload_result.get(\"w_v\")\n            w_h = reload_result.get(\"w_h\")\n\n        # weight updates\n        wu_vh = np.zeros((dictsize, units))\n        wu_v = np.zeros((dictsize))\n        wu_h = np.zeros((units))\n        delta = rate/btsz\n        batches = data.shape[0]/btsz\n        rsm_best_ppl = np.inf\n        boolean_mask_vector = gen_mask_vector(data)\n        vocab = cPickle.load(open('../../datasets/vocab-data/frequent_sorted_vocab.pkl'))\n        combination_id = '%s_%s_%s_%s' % (units, iter, momentum, rate)\n\n        logging.info(\"updates per epoch: %s | total updates: %s\" % (batches, batches*epochs))\n        words = np.sum(data)\n        step_diff = 0\n        done_looping = False\n        epoch = 0\n        while (epoch < epochs) and (not done_looping):\n            epoch = epoch + 1\n            lik = 0\n            np.random.shuffle(data)\n            mse_list = []\n\n            for b in xrange(batches):\n                start = b * btsz\n                # extract a specific paper/document data alone as v1\n                v1 = data[start : start+btsz]\n                # hidden biases scaling factor\n\n                # D is the number of words in a specific paper.\n                D = v1.sum(axis=1)\n                # project into hidden\n                h1 = sigmoid((np.dot(v1, w_vh) + np.outer(D, w_h)))\n                v2 = v1; h2 = h1\n                for i in xrange(iter):\n                    (v2,h2,z) = cdn (v2,h2,w_vh,w_v,w_h,D)\n                    if i == 0:\n                        # perform this once for every b, so 330 times * N epochs\n                        lik += z\n\n                # compute updates\n                wu_vh = wu_vh * momentum + np.dot(v1.T, h1) - np.dot(v2.T, h2)\n                wu_v = wu_v * momentum + v1.sum(axis=0) - v2.sum(axis=0)\n                wu_h = wu_h * momentum + h1.sum(axis=0) - h2.sum(axis=0)\n                # update\n                w_vh += wu_vh * delta\n                w_v += wu_v * delta\n                w_h += wu_h * delta\n                mse_list.append(np.linalg.norm(v2-v1)**2/(v1.shape[1]*v1.shape[0]))\n            mse = np.mean(mse_list)\n            ppl = np.exp (- lik / words)\n            logging.info(\"Epoch[%2d] : PPL = %.02f MSE = %.09f [iter=%d]\" % (epoch, ppl,mse,iter))\n\n            if ppl < rsm_best_ppl:\n                logging.info(\"The rsm_best_ppl so far was:%s\" %rsm_best_ppl)\n                rsm_best_ppl = ppl\n                step_diff = 0\n                interim_best_model = { \"w_vh\" : w_vh,\n                                       \"w_v\" : w_v,\n                                       \"w_h\" : w_h,\n                                       \"rate\" : rate,\n                                       \"iter\" : iter,\n                                       \"batch\" : btsz,\n                                       \"epoch\" : epochs,\n                                       \"init\" : weightinit,\n                                       \"momentum\" : momentum,\n                                       \"ppl\" : ppl,\n                                       \"mse\" : mse,\n                                       \"hiddens\" : units\n                                       }\n                interim_model_file, interim_topic_file = save_model(interim_best_model, epoch, boolean_mask_vector, combination_id, vocab, proto)\n            else:\n                step_diff+=1\n                if step_diff >= stopping_threshold:\n                    done_looping = True\n                    logging.info(\"Early stopping: Finished Training\")\n\n        final_model = { \"w_vh\" : w_vh,\n                        \"w_v\" : w_v,\n                        \"w_h\" : w_h,\n                        \"rate\" : rate,\n                        \"iter\" : iter,\n                        \"batch\" : btsz,\n                        \"epoch\" : epochs,\n                        \"init\" : weightinit,\n                        \"moment\" : momentum,\n                        \"ppl\" : ppl,\n                        \"mse\" : mse,\n                        \"hiddens\" : units\n                        }\n        combination_id = 'final_%s' % combination_id\n        final_model_file, final_topic_file = save_model(final_model, epoch, boolean_mask_vector, combination_id, vocab, proto)\n        return 
{'interim_ppl':interim_best_model.get(\"ppl\"),'interim_mse':interim_best_model.get(\"mse\"), 'interim_model':interim_model_file, 'interim_topics':interim_topic_file, 'final_ppl':final_model.get(\"ppl\"), 'final_mse':final_model.get(\"mse\"), 'final_model':final_model_file ,'final_topics':final_topic_file }\n\ndef save_model(interim_best_model, epoch, boolean_mask_vector, combination_id, vocab, proto):\n interim_W_vh = interim_best_model.get(\"w_vh\")\n interim_w_v = interim_best_model.get(\"w_v\")\n interim_ppl = interim_best_model.get(\"ppl\")\n hiddens = interim_best_model.get(\"hiddens\")\n momentum = interim_best_model.get(\"momentum\")\n rate = interim_best_model.get(\"rate\")\n iter = interim_best_model.get(\"iter\")\n model_dir = \"models\"\n log_dir = \"logs\"\n topic_dir = \"topics\"\n\n # os.system(\"rm top_words_* model_*\")\n saved_model_file = '%s/model_%s_%s.pkl' % (model_dir, combination_id, epoch)\n saved_topic_file = '%s/top_words_%s_%s.csv' % (topic_dir, combination_id, epoch)\n with open(saved_model_file, 'wb') as file:\n pickle.dump (interim_best_model, file, proto)\n\n with open(saved_topic_file,'w') as topout:\n sorted_idx = []\n sorted_idx_masked = []\n sorted_idx_masked_normalized = []\n for i in range(hiddens):\n hidden = np.zeros(hiddens)\n hidden[i] = 1\n prob = visible_activation_probability(hidden, interim_W_vh, interim_w_v)\n prob_masked = prob * boolean_mask_vector\n prob_masked_normalized = prob_masked / prob_masked.sum()\n sorted_idx.append(prob.argsort())\n sorted_idx_masked.append(prob_masked.argsort())\n sorted_idx_masked_normalized.append(prob_masked_normalized.argsort())\n topout.write('topic' + str(i) +',')\n topout.write('\\n')\n\n for i in range(10):\n for j in range(hiddens):\n topout.write(vocab[sorted_idx[j][-i-1]] + \",\")\n topout.write('\\n')\n topout.write('\\n Masked probabilties\\n')\n for i in range(10):\n for j in range(hiddens):\n topout.write(vocab[sorted_idx_masked[j][-i-1]] + \",\")\n topout.write('\\n')\n topout.write('\\n Masked Normalized probabilties\\n')\n for i in range(10):\n for j in range(hiddens):\n topout.write(vocab[sorted_idx_masked_normalized[j][-i-1]] + \",\")\n topout.write('\\n')\n return saved_model_file, saved_topic_file\n#return final_ppl, best_ppl\n\ndef gen_mask_vector(data_matrix):\n # Input: takes Document * dict matrix\n # Output: dict size vector of binary values where 1 represents the word is present in the data, 0 otherwise\n row_wise_sum = np.sum(data_matrix, axis=0)\n boolean_mask = row_wise_sum > 0\n return boolean_mask.astype(np.int)\n\ndef visible_activation_probability(h1, w_vh, w_v):\n v2 = np.exp(np.dot(h1, w_vh.T) + w_v)\n prob = np.exp(v2)\n return prob/prob.sum()\n\n## so lik is obtained every cdn, a cdn is run for every train iteration( update weights), here with minibatch it would be number of documents * epochs. 
more like patience value.\ndef cdn (v1,h1,w_vh,w_v,w_h,D):\n    \"\"\"\n    one-step contrastive divergence: (v1,h1)->(v2,h2).\n    \"\"\"\n    lik = 0\n    btsz = v1.shape[0]\n    # project into visible\n    v2 = np.dot(h1, w_vh.T) + w_v\n    tmp = np.exp(v2)\n    sum = tmp.sum(axis=1)\n    sum = sum.reshape((btsz,1))\n    v2pdf = tmp / sum\n    # perplexity\n    lik += np.nansum(v1 * np.log(v2pdf))\n    # sample from multinomial\n    v2 *= 0\n    for i in xrange(btsz):\n        v2[i] = np.random.multinomial(D[i],v2pdf[i],size=1)\n    # project into hidden\n    h2 = sigmoid(np.dot(v2, w_vh) + np.outer(D, w_h))\n    return (v2,h2,lik)\n\ndef sigmoid(X):\n    \"\"\"\n    sigmoid of X\n    \"\"\"\n    return (1 + sp.tanh(X/2))/2\n","sub_path":"implementation/code_daichi/rsm_training.py","file_name":"rsm_training.py","file_ext":"py","file_size_in_byte":9694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"351434903","text":"import matplotlib.pyplot as mlp\nimport re\n\nif __name__ == '__main__':\n    n, bc, wc = [ ], [ ], [ ]\n    with open('data.csv', 'r') as fp:\n        for line in fp:\n            values = list(map(int, filter(lambda x: len(x), re.split('\\W+', line))))\n            n.append(values[0])\n            bc.append(values[1])\n            wc.append(values[2])\n    mlp.scatter(n, bc)\n    mlp.suptitle('Vector size X Number of instructions')\n    mlp.savefig('best.png')\n    mlp.clf()\n    mlp.scatter(n, wc)\n    mlp.suptitle('Vector size X Number of instructions')\n    mlp.savefig('worst.png')\n    mlp.clf()\n","sub_path":"labs/lab1/ex1/genfig.py","file_name":"genfig.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"543632204","text":"#!/usr/local/bin/python\n\nimport socket\nimport struct\n\nclass Header:\n    def __init__(self, blob):\n        (self.type, self.size, self.custom) = struct.unpack(\"!HHL\", blob)\n\n    def serialize(self):\n        return struct.pack(\"!HHL\", self.type, self.size, self.custom)\n    \nclass Report:\n    def __init__(self, rtype, custom, ip_addr, message):\n        self.type = rtype\n        self.custom = custom\n        self.ip_addr = ip_addr\n        self.message = message\n\n    def serialize(self):\n        # pack the same attributes that __init__ sets (self.type, not the undefined self.error_type)\n        return struct.pack(\"!\" + \"HHL56s\", self.type, self.custom,\n                self.ip_addr, self.message)\n\n    \ndef send_report(header, rtype, custom, ip_addr, message):\n    try:\n        print(\"REPORT\")\n        report = Report(rtype, custom, ip_addr, message)\n        trash_report = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        trash_report.connect((\"downstream\", 9999))\n        header.type = 8\n        header.size = 64\n        print(header.serialize())\n        print(\"\")\n        print(report.type, report.message, sep=':')\n        trash_report.send(header.serialize())\n        trash_report.send(report.serialize())\n        trash_report.close()\n    except:\n        print(\"Report failed\")\n","sub_path":"residential/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"25807688","text":"\"\"\"\nUse Minimal Perfect Hashing (see BBHash) to construct a fast lookup\ntable connecting k-mers in the cDBG to cDBG node IDs.\n\nInput: a directory containing a contigs.fa.gz\n\nOutput: contigs.fa.gz.mphf, a BBHash MPHF savefile; and contigs.fa.gz.indices,\na numpy savez file containing mphf_to_kmer, kmer_to_cdbg, and sizes.\n\nNote: relies on the fact that for a cDBG constructed at a particular k,\nno k-mer will appear in more than one cDBG node and every k-mer will\nbe in at least one cDBG node, i.e. 
k <-> cdbg_id is bijective.\n\"\"\"\nimport sys\nimport os\nimport screed\nimport khmer\nimport argparse\nimport bbhash\nimport numpy\n\n\ndef main(argv):\n p = argparse.ArgumentParser()\n p.add_argument('catlas_prefix')\n p.add_argument('-k', '--ksize', default=31, type=int)\n a = p.parse_args(argv)\n \n kh = khmer.Nodetable(a.ksize, 1, 1)\n\n contigs_filename = os.path.join(a.catlas_prefix, 'contigs.fa.gz')\n mphf_filename = os.path.join(a.catlas_prefix, 'contigs.fa.gz.mphf')\n array_filename = os.path.join(a.catlas_prefix, 'contigs.fa.gz.indices')\n\n # build a list of all k-mers in the cDBG\n all_kmers = list()\n print('reading cDBG nodes from {}'.format(contigs_filename))\n for n, record in enumerate(screed.open(contigs_filename)):\n if n % 50000 == 0 and n:\n print('... contig', n, end='\\r')\n\n kmers = kh.get_kmer_hashes(record.sequence)\n all_kmers.extend(list(kmers))\n\n n_contigs = n + 1\n print('loaded {} contigs.\\n'.format(n_contigs))\n\n # build MPHF (this is the CPU intensive bit)\n print('building MPHF for {} k-mers in {} nodes.'.format(len(all_kmers), n_contigs))\n x = bbhash.PyMPHF(all_kmers, len(all_kmers), 4, 1.0)\n\n # build tables linking:\n # * mphf hash to k-mer hash (for checking exactness)\n # * mphf hash to cDBG ID\n # * cDBG ID to node size (in k-mers)\n\n mphf_to_kmer = numpy.zeros(len(all_kmers), numpy.uint64)\n mphf_to_cdbg = numpy.zeros(len(all_kmers), numpy.uint32)\n sizes = numpy.zeros(n_contigs, numpy.uint32)\n\n print('second pass; reading cDBG nodes from {}'.format(contigs_filename))\n for n, record in enumerate(screed.open(contigs_filename)):\n if n % 50000 == 0 and n:\n print('... contig {} of {}'.format(n, n_contigs), end='\\r')\n\n # node ID is record name, must go from 0 to total-1\n cdbg_id = int(record.name)\n\n # get 64-bit numbers for each k-mer (doesn't really matter what hash)\n kmers = kh.get_kmer_hashes(record.sequence)\n\n # for each k-mer, find its MPHF hashval, & link to info.\n for kmer in kmers:\n mphf = x.lookup(kmer)\n mphf_to_kmer[mphf] = kmer\n mphf_to_cdbg[mphf] = cdbg_id\n\n # record each node size, while we're here.\n sizes[cdbg_id] = len(kmers)\n\n print('loaded {} contigs in pass2.\\n'.format(n_contigs))\n assert n == max(mphf_to_cdbg), (n, max(mphf_to_cdbg))\n\n print('done! 
saving to {} and {}'.format(mphf_filename, array_filename))\n\n x.save(mphf_filename)\n with open(array_filename, 'wb') as fp:\n numpy.savez_compressed(fp,\n mphf_to_kmer=mphf_to_kmer,\n kmer_to_cdbg=mphf_to_cdbg,\n sizes=sizes)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"spacegraphcats/index/index_contigs_by_kmer.py","file_name":"index_contigs_by_kmer.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"107467834","text":"#-------------------------------------------------------------------------------\r\n# Name: sfp_portscan_basic\r\n# Purpose: SpiderFoot plug-in for performing a basic port scan of IP\r\n# addresses identified.\r\n#\r\n# Author: Steve Micallef \r\n#\r\n# Created: 20/02/2013\r\n# Copyright: (c) Steve Micallef 2013\r\n# Licence: GPL\r\n#-------------------------------------------------------------------------------\r\n\r\nimport sys\r\nimport re\r\nimport socket\r\nimport random\r\nfrom sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent\r\n\r\n# SpiderFoot standard lib (must be initialized in setup)\r\nsf = None\r\n\r\nclass sfp_portscan_basic(SpiderFootPlugin):\r\n \"\"\"Scans for commonly open TCP ports on Internet-facing systems.\"\"\"\r\n\r\n # Default options\r\n opts = {\r\n # Commonly used ports on external-facing systems\r\n 'ports': [ 21, 22, 23, 25, 53, 79, 80, 81, 88, 110, 111, \r\n 113, 119, 123, 137, 138, 139, 143, 161, 179,\r\n 389, 443, 445, 465, 512, 513, 514, 515, 631, 636,\r\n 990, 992, 993, 995, 1080, 8080, 8888, 9000 ],\r\n 'timeout': 15,\r\n 'randomize': True\r\n }\r\n\r\n # Option descriptions\r\n optdescs = {\r\n 'ports': \"The TCP ports to scan.\",\r\n 'timeout': \"Seconds before giving up on a port.\",\r\n 'randomize': \"Randomize the order of ports scanned.\"\r\n }\r\n\r\n # Target\r\n baseDomain = None\r\n results = dict()\r\n\r\n def setup(self, sfc, target, userOpts=dict()):\r\n global sf\r\n\r\n sf = sfc\r\n self.baseDomain = target\r\n self.results = dict()\r\n\r\n for opt in userOpts.keys():\r\n self.opts[opt] = userOpts[opt]\r\n\r\n if self.opts['randomize']:\r\n random.shuffle(self.opts['ports'])\r\n\r\n # What events is this module interested in for input\r\n def watchedEvents(self):\r\n return ['IP_ADDRESS']\r\n\r\n # Handle events sent to this module\r\n def handleEvent(self, event):\r\n eventName = event.eventType\r\n srcModuleName = event.module\r\n eventData = event.data\r\n\r\n sf.debug(\"Received event, \" + eventName + \", from \" + srcModuleName)\r\n\r\n # Don't look up stuff twice\r\n if self.results.has_key(eventData):\r\n sf.debug(\"Skipping \" + eventData + \" as already scanned.\")\r\n return None\r\n else:\r\n self.results[eventData] = True\r\n\r\n for port in self.opts['ports']:\r\n if self.checkForStop():\r\n return None\r\n\r\n sf.info(\"Checking port: \" + str(port) + \" against \" + eventData)\r\n try:\r\n sock = socket.create_connection((eventData, port), self.opts['timeout'])\r\n sf.info(\"TCP Port \" + str(port) + \" found to be OPEN.\")\r\n evt = SpiderFootEvent(\"TCP_PORT_OPEN\", eventData + \":\" + str(port), \r\n self.__name__, event)\r\n self.notifyListeners(evt)\r\n sock.close()\r\n except Exception as e:\r\n sf.debug(\"Unable to connect to \" + eventData + \" on port \" + str(port) + \\\r\n \": \" + str(e))\r\n\r\n return None\r\n\r\n# End of sfp_portscan_basic 
class\r\n","sub_path":"modules/sfp_portscan_basic.py","file_name":"sfp_portscan_basic.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"309625986","text":"#!/usr/bin/env python\n# threadobjects.py - thread objects\nfrom threading import Thread\nfrom logging import basicConfig, debug, DEBUG\n\nbasicConfig(level=DEBUG, format=\"(%(threadName)s) %(message)s\",)\n\nclass MyThreadWithArgs(Thread):\n    def __init__(self, args=(), kwargs=None):\n        Thread.__init__(self)\n        self.args = args\n        self.kwargs = kwargs\n\n    def run(self):\n        debug(\"thread args: %s, %s\", self.args, self.kwargs)\n\nfor n in range(3):\n    thread = MyThreadWithArgs(args=(n,n+1), kwargs={\"bob\" : 12, \"sue\" : 8})\n    thread.start()\n\n##################################################\n#\n# $ threadobjects.py\n# (Thread-1) thread args: (0, 1), {'bob': 12, 'sue': 8}\n# (Thread-2) thread args: (1, 2), {'bob': 12, 'sue': 8}\n# (Thread-3) thread args: (2, 3), {'bob': 12, 'sue': 8}\n#\n","sub_path":"learning/training/python/py2/pgms/sec8/threadobjects.py","file_name":"threadobjects.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"173265801","text":"from greenlet import greenlet\n\ndef func1():\n    print(1)    #2: prints 1\n    gr2.switch()#3: switch to func2\n    print(2)    #6: prints 2\n    gr2.switch()#7: switch to func2, resuming right after its last executed position\n\ndef func2():\n    print(3)    #4: prints 3\n    gr1.switch()#5: switch to func1, resuming right after its last executed position\n    print(4)    #8: prints 4\n\ngr1 = greenlet(func1)\ngr2 = greenlet(func2)\ngr1.switch() #1: start executing func1","sub_path":"补充知识_异步编程/grennlet.py","file_name":"grennlet.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"270563214","text":"import logging\n\nimport gevent\nfrom gevent.queue import Queue\nfrom gevent.event import Event\n\nINITIAL_TIMEOUT = 5\nMAX_TIMEOUT = 600\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseService(object):\n    service_type = None\n\n    def __init__(self):\n        self._send_queue = Queue()\n        self._send_queue_cleared = Event()\n        self._send_greenlet = None\n        self.timeout = INITIAL_TIMEOUT\n        self._feedback_queue = Queue()\n\n    def start(self):\n        \"\"\"Start the message sending loop.\"\"\"\n        if self._send_greenlet is None:\n            self._send_greenlet = gevent.spawn(self.save_err, self._send_loop)\n\n    def _send_loop(self):\n        self._send_greenlet = gevent.getcurrent()\n        try:\n            logger.info(\"%s service started\" % self.service_type)\n            while True:\n                message = self._send_queue.get()\n                try:\n                    self.send_notification(message)\n                except Exception:\n                    self.error_sending_notification(message)\n                else:\n                    self.timeout = INITIAL_TIMEOUT\n                finally:\n                    if self._send_queue.qsize() < 1 and \\\n                            not self._send_queue_cleared.is_set():\n                        self._send_queue_cleared.set()\n        except gevent.GreenletExit:\n            pass\n        finally:\n            self._send_greenlet = None\n            logger.info(\"%s service stopped\" % self.service_type)\n\n    def stop(self, timeout=10.0):\n        if (self._send_greenlet is not None) and \\\n                (self._send_queue.qsize() > 0):\n            self.wait_send(timeout=timeout)\n\n        if self._send_greenlet is not None:\n            gevent.kill(self._send_greenlet)\n            self._send_greenlet = None\n        return self._send_queue.qsize() < 1\n\n    def wait_send(self, timeout=None):\n        self._send_queue_cleared.clear()\n        return self._send_queue_cleared.wait(timeout=timeout)\n\n    def queue_notification(self, notification):\n        
self._send_queue.put(notification)\n\n def send_notification(self, notification):\n raise NotImplementedError\n\n def save_err(self, func, *args, **kwargs):\n try:\n func(*args, **kwargs)\n except Exception as e:\n self.last_err = e\n raise\n\n def get_last_error(self):\n return self.last_err\n\n def error_sending_notification(self, notification):\n logger.exception(\"Error while pushing\")\n self._send_queue.put(notification)\n gevent.sleep(self.timeout)\n # approaching Fibonacci series\n timeout = int(round(float(self.timeout) * 1.6))\n self.timeout = min(timeout, MAX_TIMEOUT)\n\n def check_blocking(self):\n if self.timeout == INITIAL_TIMEOUT:\n return False\n return True\n","sub_path":"pulsus/services/base/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"529406407","text":"import os\nimport random\nimport urllib.request\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\ndef get_face_cam_url():\n \"\"\"\n\n :return: returns the link of changing images that'll be used as video\n for our case it's from ipwebcam an android application used as replacement of raspberry cam\n \"\"\"\n url = 'http://192.168.1.104:8080/shot.jpg' # robin\n # url = 'http://192.168.1.105:8080/shot.jpg' # platipus\n # url='http://192.168.43.220:8080/shot.jpg' #home\n # url='http://192.168.20.52:8080/shot.jpg'\n # url='http://192.168.43.1:8080/shot.jpg' #robin hotspot\n # url='http://192.168.28.115:8080/shot.jpg' #mobile hotspot shubham\n return url\n\n\ndef get_a_name(directory_name):\n directory_string = dir_path + directory_name\n directory = os.fsencode(directory_string)\n # print(\"Getting unique id\")\n existing_names = []\n for file_ in os.listdir(directory):\n filename = os.fsdecode(file_)\n if filename.endswith(\".jpg\"):\n existing_names.append(filename[:-4])\n\n while True:\n rand = random.randint(1000, 99999)\n if str(rand) in existing_names:\n continue\n else:\n break\n return str(rand)\n\n\ndef get_photo():\n url = get_face_cam_url()\n dropbox = dir_path + \"/dropbox/\"\n unique_name = get_a_name(\"/dropbox/\") + '.jpg'\n photo_path = dropbox + unique_name\n print(photo_path + url)\n urllib.request.urlretrieve(url, photo_path)\n return photo_path, unique_name\n\n\nif __name__ == '__main__':\n # print(get_photo())\n pass\n","sub_path":"mirengine/mirtools.py","file_name":"mirtools.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"589136801","text":"# -*- coding: utf-8 -*-\nimport time\nfrom time import sleep\nimport sys\n\n# Print iterations progress\ndef progress(count, total, prefix='', suffix=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('%s[%s] %s%s ...%s\\r' % (prefix, bar, percents, '%', suffix))\n sys.stdout.flush() # As suggested by Rom Ruben\n\ndef progress1(ti):\n for it in range(0, ti):\n time.sleep(1)\n sys.stdout.write(\"\\r%d%%\" % it)\n sys.stdout.flush()\n\ndef pfpbc(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):\n \"\"\"\n Call in a loop to create 
terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\nclass ProgressBar:\n def __init__(self):\n # A List of Items\n items = list(range(0, 5))\n l = len(items)\n self.inicall(l, items)\n\n def inicall(self, le, item):\n # Initial call to print 0% progress\n pfpbc(0, le, prefix = 'Progress:', suffix = 'Complete')\n for i, item in enumerate(item):\n # Do stuff...\n sleep(0.1)\n # Update Progress Bar\n pfpbc(i + 1, le, prefix = 'Progress:', suffix = 'Complete')\n\nclass ProgressBar1():\n def __init__(self, end_va):\n end_v = end_va\n self.cli_progress_test(end_v)\n\n def cli_progress_test(self, end_val, bar_length=20):\n for i in range(0, end_val):\n percent = float(i) / end_val\n hashes = '#' * int(round(percent * bar_length))\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write(\"\\rPercent: [{0}] {1}%\".format(hashes + spaces, int(round(percent * 100.0))))\n sys.stdout.flush()\n\n\nif __name__ == \"__main__\":\n ProgressBar()\n print()\n progress1(10)\n print()\n ProgressBar1(10)\n print()\n progress(10, 10, 'Progress', 'Complete')\n\n\n\n","sub_path":"milo/miloSystems/progress/progress2bar.py","file_name":"progress2bar.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"328640237","text":"import os\nimport sys\n# import psutil\nimport logging\nimport json\nimport re\nimport uuid, time, datetime, pprint\nfrom datetime_engine import *\n# from databases.poly_database import *\n# from databases.poly_Mongo import *\n# from databases.poly_dynamo import *\n# from distance import *\nfrom flask import Flask,request, url_for, render_template, Response\n# from config import interpreter, database\n\napp = Flask(__name__)\n\nd = Duckling()\nd.load()\n\n\n@app.route('/')\ndef index():\n return render_template(\"textbox.html\")\n\n\n# emulator/tester\n# @app.route('/', methods=[\"POST\"])\n# def parse_view():\n# text_data = request.form['text']\n# uid = request.form['uid']\n# ret_val = process_identifier(text_data)\n\n# if ret_val == 'new_entry':\n# user_bound = goldenGates(text_data)\n# data = user_bound['uid']\n# return pep_talk(\"response.html\",var1=json.dumps(user_bound['parsed']), var2=data)\n\n# elif ret_val == 'affirm':\n# user_bound = \"Your request is being processed...\"\n# save_as_training_instance(uid)\n# return pep_talk(\"processing.html\",var1=user_bound)\n\n# elif ret_val == 'reset':\n# return render_template(\"textbox.html\")\n\n# elif ret_val == 'update':\n# new_parsed = transformer(text_data, uid)\n# return pep_talk(\"response.html\", var1=json.dumps(new_parsed['parsed']), var2=new_parsed['uid'])\n\n# else:\n# user_bound = osiris(ret_val, uid)\n# return str(user_bound)\n\n\n# # REST method\n# @app.route('/parse')\n# def parse_rest():\n# text_data = request.args.get('text')\n# uid = 
request.args.get('uid')\n\n# print(\"received a parse request, text = \" + text_data)\n\n# ret_val = process_identifier(text_data)\n# print(\"return type = \" + ret_val)\n\n# if ret_val == 'new_entry':\n# entity_to_return = goldenGates(text_data)\n# print(\"returning entity 2: \" + \"\\n\" + json.dumps(entity_to_return, indent=1))\n# return app.response_class(json.dumps(entity_to_return), content_type='application/json')\n\n# elif ret_val == 'update':\n# entity_to_return = transformer(text_data, uid)\n# print(\"returning entity: \" + \"\\n\" + json.dumps(entity_to_return, indent=1))\n# return app.response_class(entity_to_return, content_type='application/json')\n\n# else:\n# return \"Error, didn't know what to do\"\n\n\n@app.route('/datetime')\ndef date():\n d_string = request.args.get('date_string')\n return Response(datetime_engine(d_string), mimetype='application/json')\n\n# @app.route('/distance')\n# def w_distance():\n\n# text = request.args.get('text').lower().strip()\n# return Response(json.dumps(distance(text)), mimetype='application/json')\n\n\n# def process_identifier(text):\n# x = interpreter.parse(text)\n# value = x['intent']['name']\n\n# if value == 'affirm':\n# return value\n\n# elif value == 'None':\n# return 'update'\n\n# elif value == 'negation':\n\n# if x['entities'] != []:\n# return x['entities'][0]['value']\n\n# else:\n# return 'reset'\n\n# else:\n# return 'new_entry'\n\n\n# def osiris(new_value, uid):\n\n# try:\n\n# old_text = load_old_text(database, {'uid':uid})\n# new_text = old_text + \" \" + new_value\n# new_parsed = goldenGates(new_text)\n\n# if new_parsed['past_lives'] != []:\n\n# if new_parsed['past_lives'][0] != old_text:\n# new_parsed['past_lives'].append(old_text)\n\n# else:\n# new_parsed['past_lives'].append(old_text)\n\n# update_database(new_parsed)\n# data = new_parsed['uid']\n\n# return pep_talk(\"response.html\", var1=json.dumps(new_parsed['parsed']), var2=data)\n\n# except:\n# return \"Patience and perseverence.\"\n\n\n# def transformer(text, uid):\n\n# try:\n\n# entry = find_previous_entry(database, {'uid':uid})\n# old_text = entry['text']\n# new_text = old_text+ \" \" + text\n# new_parsed = goldenGates(new_text)\n\n# if new_parsed['past_lives'] != []:\n\n# if new_parsed['past_lives'][0] != old_text:\n# new_parsed['past_lives'].append(old_text)\n\n# else:\n# new_parsed['past_lives'].append(old_text)\n\n# update_database(new_parsed)\n# return new_parsed\n\n# except Exception as e:\n# return str(e)\n\n\n# def pep_talk(template,var1=None, var2=None):\n# return render_template(template, var1=var1, var2=var2)\n\n\n# purgables = [\"extractor\"]\n\n\n# def goldenGates(text_to_parse):\n# recall = check_database(database, text_to_parse)\n\n# if recall != False:\n# return recall # suitable entry exists. Return said entry. 
Process complete.\n#     else:\n#         new_entry = {\n#             'text': text_to_parse,\n#             '_id' : str(uuid.uuid4()),\n#             'date': str(datetime.datetime.now()),\n#             'past_lives': []\n#         }\n\n#         insert_one(database, new_entry)\n\n#         uid = new_entry['_id']\n#         x = parser(new_entry['text'],uid,new_entry['date'],new_entry['past_lives'])\n#         return x\n\n\n# def parser(text, uid, date_time, past_life):\n#     parse = interpreter.parse(text)\n#     parsed_data = {'parsed': parse, 'uid': uid, 'date': date_time, 'past_lives': past_life}\n\n#     with open(\"./nsa/event_listener.txt\", \"a\") as myfile:\n#         myfile.write(str(parsed_data)+\"\\n\\n\")\n\n#     res = update_database(parsed_data)\n#     parsed = time_formalizer(parse)\n#     parsed_data['parsed'] = parsed\n\n#     if parsed_data['parsed']['entities'] != []:\n\n#         for i in range(0,len(parsed_data['parsed']['entities'])):\n\n#             if \"extractor\" in parsed_data['parsed']['entities'][i]:\n#                 parsed_data['parsed']['entities'][i].pop('extractor')\n\n#     return parsed_data\n\n\n# def update_database(new_data):\n#     update_db(database, new_data)\n\n\n# def save_as_training_instance(uid):\n#     find_clean_and_save(database, {'uid': uid})\n\n# with app.test_request_context():\n#     print(url_for('parse', text='make your text queries here'))\n\ndef time_formalizer(parsed_data):\n\n    for i in range(0, len(parsed_data['entities'])):\n\n        if parsed_data['entities'][i]['entity'] == 'date_time':\n            value = parsed_data['entities'][i]['value']\n            formal = formalizer_helper(value)\n            parsed_data['entities'][i]['value'] = formal\n\n    return parsed_data\n\n\ndef formalizer_helper(time_string):\n    parsed = d.parse(time_string)\n\n    for i in range(0,len(parsed)):\n\n        if parsed[i]['dim'] == 'time':\n            new_value = parsed[i]['value']['value']\n            return new_value\n\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"grassroot-nlu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"502462922","text":"# -*- coding: utf-8 -*-\n'''\n    2019-12-18 BOJ-3052 (Remainder)\n\n    I implemented this separately in C, C++, and Python.\n    C++ is still the easiest for me; the C++ code flows right out, but with Python, not quite yet.....\n'''\n
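# Worked example (editor's illustration, not from the original solution): the ten\n# inputs 1..10 leave ten distinct remainders mod 42, so the answer is 10; inputs\n# such as 42 and 84 both leave remainder 0 and would be counted only once.\n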
{"seq_id":"502462922","text":"# -*- coding: utf-8 -*-\n'''\n 2019-12-18 BOJ-3052 (Remainders)\n\n I implemented this one separately in C, C++ and Python.\n For me C++ is still the easiest: C++ code just flows out,\n but with Python I am not quite there yet.....\n'''\n\nnum = [0] * 43\ncnt = 0\n\nfor i in range(1, 11):\n a = int(input())\n num[a % 42] += 1\n\nfor i in num:\n if(i != 0):\n cnt += 1\n\nprint(cnt)\n","sub_path":"BOJ-3052(나머지).py","file_name":"BOJ-3052(나머지).py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"212561510","text":"#!/usr/bin/env python \n# author : dengxiangyu \n# date: 2018-1-9\n\n\nimport sys\nimport os\nimport json\nimport time\nimport shutil\nfrom aliyunsdkcore import client\nfrom aliyunsdkcms.request.v20170301 import QueryMetricListRequest\n\n\n\n#https://help.aliyun.com/document_detail/28619.html?spm=5176.doc51936.6.664.Q5sQGo\n\n\ninstance_list = [('r-bp18ce30a05fe2a4','prod-his-cloud-redis','cn-hangzhou'),\n ('r-bp18ce30a05fe2a4','prod-his-odcp-redis','cn-hangzhou'),\n ('r-bp16ad425a8a5b94','beta-dui-cloud-redis','cn-hangzhou'),\n ('r-bp19c5dfa4391b94','beta-dui-odcp-redis','cn-hangzhou'),\n ('r-bp1974c9e27528a4','prod-dui-cloud-redis','cn-hangzhou'),\n ('r-bp18c8eccfc83434','prod-dui-odcp-redis','cn-hangzhou')]\n\nclient_key = \"*******\"\nclient_value = \"*****\"\n\ndef GetRdsInstanceDescribe(instance_id,metric_list):\n for metric in metric_list:\n clt = client.AcsClient(client_key,client_value, instance_id[2]) \n request = QueryMetricListRequest.QueryMetricListRequest()\n request.set_accept_format('json')\n request.set_Project('acs_kvstore')\n request.set_Metric(metric)\n timestamp = int(time.time()) - 600 \n tmp = time.localtime(timestamp)\n start_time = time.strftime('%Y-%m-%d %H:%M:%S',tmp)\n timestamp_start = int(time.mktime(time.strptime(start_time, \"%Y-%m-%d %H:%M:%S\"))) * 1000\n request.set_StartTime(timestamp_start)\n request.set_Dimensions(\"{'instanceId': '%s'}\" % instance_id[0])\n request.set_Period('60')\n result = clt.do_action_with_exception(request)\n response = json.loads(result)\n print (\"duimonitor_redis{name=\\\"%s\\\",mode=\\\"%s\\\",region=\\\"%s\\\"} %s\" \\\n %(instance_id[1],metric,instance_id[2],response['Datapoints'][-1]['Average'])).strip()\n \n \n\ndef Controller():\n sys.stdout = open(\"/tmp/redis.log\", \"w\")\n metric_list = ['MemoryUsage','ConnectionUsage','IntranetInRatio','IntranetOutRatio',\\\n 'IntranetIn','IntranetOut','FailedCount','CpuUsage','UsedMemory']\n for instance_id in instance_list:\n GetRdsInstanceDescribe(instance_id,metric_list)\n sys.stdout.flush()\n shutil.move(\"/tmp/redis.log\",\"/tmp/lastest_redis.log\")\n \n\nif __name__ == '__main__':\n Controller()\n\n","sub_path":"promethues/monitor_redis.py","file_name":"monitor_redis.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"246726215","text":"import os\nfrom unittest import TestCase\n\nfrom utils.inference import get_data_loader, MsgpackFileDataset\n\ndir_path = \"/home/wuyuanyi/nndata/nncrystal/validation/small extrapolation/extrapolation_20_30_1/\"\nfile_path = os.path.join(dir_path, \"images.bin\")\n\nclass TestDataLoader(TestCase):\n def test_get_data_loader(self):\n dataset = MsgpackFileDataset(file_path)\n loader = get_data_loader(dataset)\n\n for name, data in loader:\n print(name, data.shape)\n return\n","sub_path":"projects/nncrystal/test/test_data_loader.py","file_name":"test_data_loader.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"146173882","text":"import Zope\nimport 
unittest\nimport OFS.Folder, OFS.SimpleItem\nimport Acquisition\nfrom Products.CMFCore.CatalogTool import *\nfrom Products.CMFCore.PortalContent import PortalContent\n\n\nclass DummyContent( PortalContent, OFS.SimpleItem.Item ):\n \"\"\"\n \"\"\"\n meta_type = 'Dummy'\n\nclass CatalogToolTests( unittest.TestCase ):\n\n def setUp( self ):\n get_transaction().begin()\n \n def tearDown( self ):\n get_transaction().abort()\n\n def test_processActions( self ):\n \"\"\"\n Tracker #405: CatalogTool doesn't accept optional third\n argument, 'idxs', to 'catalog_object'.\n \"\"\"\n tool = CatalogTool()\n dummy = DummyContent()\n\n tool.catalog_object( dummy, '/dummy' )\n tool.catalog_object( dummy, '/dummy', [ 'SearchableText' ] )\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest( unittest.makeSuite( CatalogToolTests ) )\n return suite\n\ndef run():\n unittest.TextTestRunner().run(test_suite())\n\nif __name__ == '__main__':\n run()\n","sub_path":"CMF/tags/CMF-1_2-release/CMFCore/tests/test_CatalogTool.py","file_name":"test_CatalogTool.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"339331694","text":"# -*- coding: utf8 -*- \nimport wx\nclass MyFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self,None,-1,u\"10185102136 张靖\",size=(800,600))\n panel=wx.Panel(self,-1)\n #wx.StaticText(panel,-1,u\"进程ID : 模块名:\",pos=(0,305))\n icon=wx.Icon(name=\"icon1.ico\",type=wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n self.menuBar=wx.MenuBar()\n menu=wx.Menu()\n self.IdCommand=menu.Append(-1,u\"退出\\tCtrl+Shift+Delete\")\n self.Bind(wx.EVT_MENU,self.OnCommand,self.IdCommand)\n self.menuBar.Append(menu,u\"File\")\n self.color=wx.Menu()\n self.color.Append(201,u\"图标1\\tCtrl+X\",u\"\",wx.ITEM_RADIO)\n self.color.Append(202,u\"图标2\\tCtrl+Y\",u\"\",wx.ITEM_RADIO)\n self.color.Append(203,u\"图标3\\tCtrl+Z\",u\"\",wx.ITEM_RADIO)\n self.Bind(wx.EVT_MENU_RANGE,self.OnColor,id=201,id2=203)\n self.menuBar.Append(self.color,u\"&图标(&I)\")\n self.menuBar.Check(201,True)\n self.SetBackgroundColour(u\"Gray\")\n self.flag1=0\n self.flag2=0\n self.flag3=0\n self.flag4=0\n self.tag=201\n self.hello1=wx.StaticText(self,-1,\"\",pos=(200,200))\n self.hello2=wx.StaticText(self,-1,\"\",pos=(200,300))\n self.hello3=wx.StaticText(self,-1,\"\",pos=(300,200))\n self.hello4=wx.StaticText(self,-1,\"\",pos=(300,300))\n self.hello5=wx.StaticText(self,-1,\"\",pos=(100,100))\n self.hello5.SetLabel(u\"当前用位图1\")\n control=wx.Menu()\n control.Append(301,u\"显示1\\tCtrl+1\",u\"\",wx.ITEM_CHECK)\n control.Append(302,u\"显示2\\tCtrl+2\",u\"\",wx.ITEM_CHECK)\n control.Append(303,u\"显示3\\tCtrl+3\",u\"\",wx.ITEM_CHECK)\n control.Append(304,u\"显示4\\tCtrl+4\",u\"\",wx.ITEM_CHECK)\n self.menuBar.Append(control,u\"显示(&D)\")\n self.Bind(wx.EVT_MENU_RANGE,self.OnControl,id=301,id2=304)\n self.changeable=True\n menu=wx.Menu()\n IdAbout=menu.Append(-1,u\"程序信息(&I)\\tF1\",)\n self.Bind(wx.EVT_MENU,self.OnHelp,IdAbout)\n self.menuBar.Append(menu,u\"关于(&A)\")\n self.SetMenuBar(self.menuBar)\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n def OnPaint(self,event):\n #print(1) \n if (self.flag1==1):\n self.hello1.SetLabel(u\"显示信息1\")\n if (self.flag1==0):\n self.hello1.Destroy()\n self.hello1=wx.StaticText(self,-1,u\"\",pos=(200,200))\n if (self.flag2==1):\n self.hello2.SetLabel(u\"显示信息2\")\n if (self.flag2==0):\n self.hello2.Destroy()\n self.hello2=wx.StaticText(self,-1,u\"\",pos=(200,300))\n if (self.flag3==1):\n self.hello3.SetLabel(u\"显示信息3\")\n if 
(self.flag3==0):\n self.hello3.Destroy()\n self.hello3=wx.StaticText(self,-1,u\"\",pos=(300,200))\n if (self.flag4==1):\n self.hello4.SetLabel(u\"显示信息4\")\n if (self.flag4==0):\n self.hello4.Destroy()\n self.hello4=wx.StaticText(self,-1,u\"\",pos=(300,300))\n def OnCommand(self,evt):\n self.Close()\n def OnColor(self,evt):\n item=self.GetMenuBar().FindItemById(evt.GetId())\n text=item.GetItemLabel()\n if(wx.MessageBox(u\"You selected item '%s'\" %text,u\"Color Menu\",wx.YES_NO,self)==wx.YES):\n if self.changeable:\n self.tag=evt.GetId()\n if(self.tag==201):\n icon=wx.Icon(name=\"icon1.ico\",type=wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n self.menuBar.EnableTop(2,True)\n self.hello5.SetLabel(u\"当前用位图1\")\n if(self.tag==202):\n icon=wx.Icon(name=\"icon2.ico\",type=wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n self.menuBar.EnableTop(2,True)\n self.hello5.SetLabel(u\"当前用位图2\")\n if(self.tag==203):\n icon=wx.Icon(name=\"icon3.ico\",type=wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n self.menuBar.EnableTop(2,False)\n self.hello5.SetLabel(u\"当前用位图3\")\n else:\n self.menuBar.Check(self.tag,True)\n def OnControl(self,evt):\n if evt.GetId()==301:\n self.flag1=1-self.flag1\n if evt.GetId()==302:\n self.flag2=1-self.flag2\n if evt.GetId()==303:\n self.flag3=1-self.flag3\n if evt.GetId()==304:\n self.flag4=1-self.flag4\n self.Refresh()\n '''\n if (self.flag1==1):\n hello=wx.StaticText(self,-1,u\"显示信息1\",pos=(200,200))\n if (self.flag2==1):\n wx.StaticText(self,-1,u\"显示信息2\",pos=(300,200))\n if (self.flag3==1):\n wx.StaticText(self,-1,u\"显示信息3\",pos=(200,300))\n if (self.flag4==1):\n wx.StaticText(self,-1,u\"显示信息4\",pos=(300,300))\n '''\n def OnHelp(self,evt):\n wx.MessageBox(u\"10185102136 张靖\",u\"张靖\",\n wx.OK|wx.ICON_INFORMATION,self)\n def OnClose(self,evt):\n self.Close()\nif __name__=='__main__':\n app=wx.App()\n frame=MyFrame()\n frame.Show(True)\n app.MainLoop()\n","sub_path":"10185102136-张靖-LAB4-wx菜单图标/wx菜单图标.py","file_name":"wx菜单图标.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"592138863","text":"# Copyright 2017 The Australian National University\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom util import fncptr_from_c_script, mu_instance_via_ctyeps, may_spawn_proc\nimport ctypes\n\n@may_spawn_proc\ndef test_select():\n fnp, _ = fncptr_from_c_script('test_select.c', 'test_fnc', [ctypes.c_byte])\n assert fnp(0) == 20\n assert fnp(1) == 10\n\n@may_spawn_proc\ndef test_commoninst_pin():\n mu = mu_instance_via_ctyeps()\n fnp, _ = fncptr_from_c_script(\"test_commoninst_pin.c\", 'test_pin')\n assert fnp() == 6\n","sub_path":"tests/test_jit/test_otherops.py","file_name":"test_otherops.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"96036267","text":"import binascii\nimport logging\nfrom scapy.all import *\nfrom common.logger import LogConfig\n\nclass SnmpFuzzer():\n def __init__(self):\n # 
main configuration items for the SNMP fuzz test\n # target_ip -- IP address of the SNMP server host\n # target_port -- SNMP service port\n # snmp_hex_string -- hex string of the SNMP application-layer data, captured from an arbitrary SNMP packet\n # log_type -- log type: \"console\" prints logs to the console, \"file\" writes logs to a file\n # log_file_prefix -- log file name prefix, ignored when log_type is \"console\"\n self.config = {\n \"target_ip\":\"10.10.6.98\",\n \"target_port\":161,\n \"snmp_hex_string\":\"302202010104067075626c6963a01502047c07ac6c0201000201003007300506012b0500\",\n 'log_type': 'console',\n 'log_file_prefix': 'snmp'\n }\n # set up the log format\n LogConfig(log_type=self.config[\"log_type\"], log_file_prefix=self.config[\"log_file_prefix\"])\n\n def fuzzSnmp(self):\n snmp_hex_string_length = self.config[\"snmp_hex_string\"].__len__()\n # iterate over every byte of the payload template\n for index in range(int(snmp_hex_string_length/2)):\n # try every value 0-255 for this byte\n for value in range(0x100):\n # part of the payload before the current byte\n snmp_hex_string_payload_pre = binascii.a2b_hex(self.config[\"snmp_hex_string\"][0:index*2])\n # the byte currently being fuzzed\n value_hex = value.to_bytes(1,\"big\")\n # part of the payload after the current byte\n snmp_hex_string_payload_post = binascii.a2b_hex(self.config[\"snmp_hex_string\"][index*2+2:])\n # build the SNMP application-layer payload\n snmp_hex_string_payload = snmp_hex_string_payload_pre + value_hex + snmp_hex_string_payload_post\n # build the final SNMP packet to send\n udp_packet = IP(dst=self.config[\"target_ip\"])/UDP(sport=9876,dport=self.config[\"target_port\"])/snmp_hex_string_payload\n # send the packet and receive the response\n response_packet = sr1(udp_packet)\n logging.info(f\"{index *2}-{value_hex}-{response_packet.show()}\")\n\n\nif __name__ == \"__main__\":\n snmp_fuzzer = SnmpFuzzer()\n snmp_fuzzer.fuzzSnmp()\n","sub_path":"fuzzers/snmp_fuzzer.py","file_name":"snmp_fuzzer.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
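The nested loops in `fuzzSnmp` above perform exhaustive single-byte mutation of a fixed payload template. The same idea as a standalone generator (a sketch; the function name and signature are illustrative, not part of the record):

```python
def single_byte_mutations(payload: bytes):
    """Yield every variant of payload with one byte replaced by 0x00..0xFF."""
    for index in range(len(payload)):
        for value in range(0x100):
            yield payload[:index] + bytes([value]) + payload[index + 1:]
```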
{"seq_id":"144386581","text":"'''\nQuestion:\nWrite a program that computes the net amount of a bank account based on a transaction log from console input. The transaction log format is shown as follows:\nD 100\nW 200\n\nD means deposit while W means withdrawal.\nSuppose the following input is supplied to the program:\nD 300\nD 300\nW 200\nD 100\nThen, the output should be:\n500\n'''\n\nres = 0\nwhile True:\n inpt = input()\n if inpt:\n lineparsed = inpt.split(' ')\n if lineparsed[0] == 'D':\n res += int(lineparsed[1])\n else:\n res -= int(lineparsed[1])\n else:\n break\n\nprint(res)","sub_path":"ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"512762744","text":"##############################################################################\n#\n# Copyright (c) 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Browser Menu Directives Tests\n\n$Id$\n\"\"\"\n\nfrom StringIO import StringIO\nfrom unittest import TestCase, main, makeSuite\n\nfrom zope.configuration.xmlconfig import xmlconfig, XMLConfig\nfrom zope.publisher.browser import TestRequest\nfrom zope.app.tests.placelesssetup import PlacelessSetup\nfrom zope.app.publisher.browser.globalbrowsermenuservice \\\n import globalBrowserMenuService\n\nimport zope.app.publisher.browser\n\ntemplate = \"\"\"\n %s\n \"\"\"\n\nclass Test(PlacelessSetup, TestCase):\n\n def setUp(self):\n super(Test, self).setUp()\n XMLConfig('meta.zcml', zope.app.publisher.browser)()\n\n def test(self):\n xmlconfig(StringIO(template % (\n \"\"\"\n \n\n \n \n \n\n \n \n \n\n \n \n \n \n\n \n \n \n \n \n \n\n \n \n \n \"\"\")))\n\n\n from zope.app.publisher.browser.tests.test_globalbrowsermenuservice \\\n import TestObject\n\n menu = globalBrowserMenuService.getMenu('test_id', TestObject(),\n TestRequest())\n\n def d(n):\n return {'action': \"a%s\" % n,\n 'title': \"t%s\" % n,\n 'description': \"\",\n 'selected': '',\n 'extra': None,\n }\n\n self.assertEqual(list(menu), [d(5), d(6), d(3), d(2), d(1)])\n\n first = globalBrowserMenuService.getFirstMenuItem(\n 'test_id', TestObject(), TestRequest())\n\n self.assertEqual(first, d(5))\n\n\ndef test_suite():\n return makeSuite(Test)\n\nif __name__=='__main__':\n main(defaultTest='test_suite')\n","sub_path":"Zope3/tags/ZopeX3-3.0.0a2/src/zope/app/publisher/browser/tests/test_globalbrowsermenuservicedirectives.py","file_name":"test_globalbrowsermenuservicedirectives.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"222322752","text":"#### BAUS Filter with Gaussian resampling for the Lorenz 3 variable system ###\n\nfrom pylab import *\nfrom L63RK4 import L63RK4\nfrom resamp_gauss import resamp_gauss\n\ndef BAUS_gauss_resample_RK(truth, obs, R, initial_cloud,h=0.01, a=10.0,\n b=8.0/3.0, r=28.0,tfin=2000,tanl=20):\n \n \n \"\"\"Returns [mean_state, mean_error,avg_mean_error,rsn] for\n mandatory inputs truth trajectory, observations, and initial cloud.\n \n [state_dimension, time_step] = shape(truth) \n [state_dimension, Nanl] = shape(obs)\n [state_dimension, state_dimension] = shape(R)\n [particle_number, state_dimension] = shape(initial_cloud)\n [state_dimension, time_step] = shape(mean_state)\n [state_dimension, time_step] = shape(mean_error)\n [time_step] = shape(avg_mean_error)\n # of times resampler triggered = rsn\n \n Optional arguments include the time step=h=0.01, sigma=a=10, beta=8/3, \n rho=28, and tanl=20, the analysis interval. The duration of the\n experiment is tfin=2000. The particle cloud is propagated between\n analysis times, at which point the observation is incorporated through\n the BAUS update and the Bayesian update of the weights. Resampling is\n initiated according to resamp_gauss when Neff is less than the threshold.\"\"\"
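    # Restating the docstring's shape conventions for Lorenz-63
    # (state_dimension = 3): truth is (3, tfin+1), obs is (3, Nanl),
    # R is (3, 3) and initial_cloud is (particle_number, 3). A call could
    # look like this (illustrative only, not executed here):
    #
    #   cloud = multivariate_normal(truth[:, 0], R, 200)
    #   mean_state, mean_error, avg_err, rsn = \
    #       BAUS_gauss_resample_RK(truth, obs, R, cloud)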
\n \n # Time Step\n #h = 0.01\n\n # parameters nature\n #a=10.0 # sigma\n #b=8.0/3.0 # beta\n #r=28.0 # rho\n\n # EXPERIMENT DURATION\n #tfin = 2000\n #particle_number = 200\n\n # ASSIMILATION INTERVAL (timesteps)\n #tanl = 20\n\n # NUMBER OF ANALYSES\n Nanl = int(tfin/tanl)\n\n #number of resample steps\n rsn = 0\n #number of failsafe steps\n fsn = 0\n\n #Set initial trajectory matrices and weights\n [particle_number, state_dimension] = shape(initial_cloud) \n part = zeros([particle_number,state_dimension,tfin+1])\n part[:,:,0] = initial_cloud\n part_weights = ones(particle_number)*(1.0/particle_number)\n mean_state = zeros([state_dimension,tfin+1])\n bred_traj = zeros([state_dimension,tanl+1])\n b_step = zeros([state_dimension,2])\n \n #loop over number of analyses\n for j in range(Nanl):\n\n #propagate each particle to next analysis step\n for i in range(particle_number):\n part[i,:,j*tanl:(j+1)*tanl+1] = L63RK4(tanl,h,\n part[i,2,j*tanl],\n part[i,1,j*tanl],\n part[i,0,j*tanl],\n a,r,b)\n\n #Calculate the backward trajectory of current observation back to the\n #previous analysis time - these form base points \n #for the bred vector cloud\n bred_traj = L63RK4(45,-h,obs[2,j],obs[1,j],obs[0,j],a,r,b)\n bred_traj = fliplr(bred_traj)\n #A bred vector cloud is created and propagated along the trajectory \n b_cloud = multivariate_normal([0,0,0],R)\n \n for i in range(tanl): \n #Scale the pertubation vector and add it to the base point\n b_cloud = ((b_cloud/sqrt(b_cloud.dot(b_cloud)))*\n .01+bred_traj[:,i]) \n #Forward propagate the perturbation \n b_step[:,:] = L63RK4(1,h,b_cloud[2],b_cloud[1],b_cloud[0],a,r,b)\n #Take the difference of the forward perturbation and the forward\n #base point, \n b_cloud = b_step[:,1] - bred_traj[:,i+1]\n\n #Normal unstable Lyapunov direction defined \n L = b_cloud/sqrt(b_cloud.dot(b_cloud))\n #Calculate Mean at each propagated state up to analysis time step\n for i in range(tanl):\n cloud_step = part[:,:,j*tanl+i]\n weighted_cloud = (cloud_step.T*part_weights).T\n mean_state[:,j*tanl+i] = (mean(weighted_cloud,0)*particle_number)\n\n #AUS performed to each particle at observation time: \n #each innovation vector has the component spanned by the unstable \n #Lyapunov direction removed prior to weight update \n for i in range(particle_number): \n innov = obs[:,j] - part[i,:,(j+1)*tanl]\n #project innovation into unstable subspace \n proj = innov.dot(L)*L\n #correct the particle so the innovation has no component in u-s\n part[i,:,(j+1)*tanl] = part[i,:,(j+1)*tanl] + proj\n #update the innovation for the new particle \n innov = innov - proj \n part_weights[i] = (part_weights[i]*exp(-0.5*\n (innov.dot(inv(R))).dot(innov)))\n \n part_weights = part_weights/sum(part_weights)\n
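        # The trigger below uses the effective sample size
        # N_eff = 1 / sum(w_i**2): it equals particle_number for uniform
        # weights and decays toward 1 as the weights degenerate.
        # As a standalone helper (illustrative; the script inlines it):
        def _effective_sample_size(w):
            return 1.0 / w.dot(w)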
\n #Resampling step if number of effective samples falls below threshold \n n_eff = 1/(part_weights.dot(part_weights))\n if (n_eff < 2) or isnan(n_eff):\n if isnan(n_eff):\n fsn = fsn + 1\n part_weights = ones(particle_number)*(1.0/particle_number)\n rsn = rsn + 1\n part[:,:,(j+1)*tanl] = resamp_gauss(part[:,:,(j+1)*tanl],\n part_weights,.0004,\n .01/particle_number)\n \n for i in range(particle_number): \n innov = obs[:,j] - part[i,:,(j+1)*tanl]\n #project innovation into unstable subspace \n proj = innov.dot(L)*L\n #correct the particle again with AUS\n part[i,:,(j+1)*tanl] = part[i,:,(j+1)*tanl] + proj \n \n part_weights = ones(particle_number)*(1.0/particle_number)\n \n #Find mean state with updated weights \n weighted_cloud = (part[:,:,(j+1)*tanl].T*part_weights).T \n mean_state[:,(j+1)*tanl] = mean(weighted_cloud,0)*particle_number\n \n print(fsn)\n print(rsn - fsn)\n #mean_error_av = mean_error*mean_error\n mean_error = abs(mean_state - truth)\n error_dist = zeros(tfin+1) \n avg_mean_error = zeros(tfin+1)\n for i in range(tfin+1):\n error_dist[i] = sqrt(mean_error[:,i].dot(mean_error[:,i]))\n avg_mean_error[i] = mean(error_dist[0:i+1])\n return(mean_state,mean_error, avg_mean_error,rsn)","sub_path":"BAUS_gauss_resample_RK_fun.py","file_name":"BAUS_gauss_resample_RK_fun.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
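The per-particle weight update in the BAUS record above is a Gaussian observation likelihood. Factored out on its own (a sketch; `innov` is the innovation after the AUS projection and `R` the observation covariance, as in the record, which uses pylab's `inv` for the same computation):

```python
import numpy as np

def gaussian_weight_update(weight, innov, R):
    # w_i <- w_i * exp(-0.5 * innov^T R^-1 innov); normalize over all particles afterwards
    return weight * np.exp(-0.5 * innov @ np.linalg.inv(R) @ innov)
```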
{"seq_id":"533811877","text":"# -*- coding: utf-8 -*-\nfrom binascii import hexlify, unhexlify\n\nfrom raiden.blockchain.abi import (\n CONTRACT_MANAGER,\n CONTRACT_ENDPOINT_REGISTRY,\n)\nfrom raiden.constants import (\n DISCOVERY_REGISTRATION_GAS,\n)\nfrom raiden.exceptions import (\n AddressWithoutCode,\n TransactionThrew,\n UnknownAddress,\n)\nfrom raiden.network.rpc.transactions import (\n check_transaction_threw,\n)\nfrom raiden.settings import (\n DEFAULT_POLL_TIMEOUT,\n)\nfrom raiden.utils import (\n address_encoder,\n isaddress,\n pex,\n)\n\n\nclass Discovery(object):\n \"\"\"On chain smart contract raiden node discovery: allows registering\n endpoints (host, port) for your ethereum-/raiden-address and looking up\n endpoints for other ethereum-/raiden-addresses.\n \"\"\"\n\n def __init__(\n self,\n jsonrpc_client,\n discovery_address,\n startgas,\n gasprice,\n poll_timeout=DEFAULT_POLL_TIMEOUT):\n\n if not isaddress(discovery_address):\n raise ValueError('discovery_address must be a valid address')\n\n result = jsonrpc_client.call(\n 'eth_getCode',\n address_encoder(discovery_address),\n 'latest',\n )\n\n if result == '0x':\n raise AddressWithoutCode('Discovery address {} does not contain code'.format(\n address_encoder(discovery_address),\n ))\n\n proxy = jsonrpc_client.new_contract_proxy(\n CONTRACT_MANAGER.get_abi(CONTRACT_ENDPOINT_REGISTRY),\n address_encoder(discovery_address),\n )\n\n self.address = discovery_address\n self.proxy = proxy\n self.client = jsonrpc_client\n self.startgas = startgas\n self.gasprice = gasprice\n self.poll_timeout = poll_timeout\n\n def register_endpoint(self, node_address, endpoint):\n if node_address != self.client.sender:\n raise ValueError(\"node_address doesn't match this node's address\")\n\n transaction_hash = self.proxy.registerEndpoint.transact(\n endpoint,\n gasprice=self.gasprice,\n startgas=DISCOVERY_REGISTRATION_GAS,\n )\n\n self.client.poll(\n unhexlify(transaction_hash),\n timeout=self.poll_timeout,\n )\n\n receipt_or_none = check_transaction_threw(self.client, transaction_hash)\n if receipt_or_none:\n raise TransactionThrew('Register Endpoint', receipt_or_none)\n\n def endpoint_by_address(self, node_address_bin):\n node_address_hex = hexlify(node_address_bin)\n endpoint = self.proxy.findEndpointByAddress.call(node_address_hex)\n\n if endpoint == '':\n raise UnknownAddress('Unknown address {}'.format(pex(node_address_bin)))\n\n return endpoint\n\n def address_by_endpoint(self, endpoint):\n address = self.proxy.findAddressByEndpoint.call(endpoint)\n\n if set(address) == {'0'}: # the 0 address means nothing found\n return None\n\n return unhexlify(address)\n\n def version(self):\n return self.proxy.contract_version.call()\n","sub_path":"raiden/network/proxies/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"598013371","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# __author__ = 'TesterCC'\n# __time__ = '17/12/13 23:11'\n\n# Maizi Academy Tornado beginner tutorial 01\n\nimport tornado.httpserver\nimport tornado.ioloop\n\n\ndef handle_request(request):\n message = \"Hello! Tornado\"\n request.write(\"HTTP/1.1 200 OK\\r\\nContent-Length:%d\\r\\n\\r\\n%s\" % (len(message), message))\n request.finish()\n\nif __name__ == '__main__':\n # instantiate an HTTP server object\n http_server = tornado.httpserver.HTTPServer(handle_request)\n # listen on socket port 7777\n http_server.listen(7777) # visit to 127.0.0.1:7777\n # start the event loop\n tornado.ioloop.IOLoop.instance().start()","sub_path":"tornado_demo/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"127955500","text":"import cv2\nimport torch\nimport unet\nimport crnn\nimport sys\nimport os\n# =================================================================\nfrom torch.utils.data import DataLoader\nfrom graph.dataloader import demo_preprocess, get_page_demo, data_process, ToTensor\nfrom graph.layers import GraphConvolution\nfrom graph.model import GCN\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport json \nfrom sklearn.metrics import f1_score, accuracy_score\n# =================================================================\nfrom explainer.GNNExplainer import GNNExplainer\nfrom explainer.explainerVisualizer import *\nfrom explainer.converter import *\n\nfrom graph.model import GCN\n\n\nimport matplotlib.pyplot as plt\n# =================================================================\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nimg_folder = './images'\nlayout_ocr_folder = './part1_output'\n\nlist_images = sorted(os.listdir('./images'))\n\nwith open('./data/train_list.json','r') as fp:\n\ttrain_list = json.load(fp)\nprint(train_list[1])\n\ndataset = data_process(data_list = train_list[1:2], transform = ToTensor())\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef layout_ocr(img_folder, out_folder='part1_output'):\n\tif not os.path.exists(out_folder):\n\t\tos.makedirs(out_folder)\n\n\tunet_model = unet.Model()\n\tunet_model.load_state_dict(torch.load('weights/unet.pt', map_location='cpu'))\n\n\tcrnn_model = crnn.Model()\n\tcrnn_model.load_state_dict(torch.load('weights/crnn.pt', map_location='cpu'))\n\n\timg_names = os.listdir(img_folder)\n\n\tfor img_name in img_names:\n\t\tim = os.path.join(img_folder, img_name)\n\t\tim = cv2.imread(im)\n\n\t\tm, h = unet.segment(unet_model, im)\n\t\tboxes = unet.detect_lines(m, h, im.shape)\n\n\t\tlabel_file = open(os.path.join(out_folder, img_name[:-3] + 'txt'), 'w')\n\n\t\tfor box in boxes:\n\t\t\tx1, y1 = box[0]\n\t\t\tx2, y2 = box[1]\n\n\t\t\ttext_line = im[y1:y2, x1:x2]\n\t\t\ttext_line = cv2.cvtColor(text_line, cv2.COLOR_BGR2GRAY)\n\t\t\ttext = crnn.ocr(crnn_model, text_line)\n\n\t\t\tif text == '':\n\t\t\t\tcontinue\n\n\t\t\tfor c in (x1, y1, x2, y1, x2, y2, x1, y2):\n\t\t\t\tlabel_file.write(str(c) + ', ')\n\t\t\tlabel_file.write(text)\n\t\t\tlabel_file.write('\\n')\n\n\t\tlabel_file.close()\n\n# ===============================================================================\n# print(\"Layout - 
OCR ... \")\n# # layout_ocr(img_folder, layout_ocr_folder)\n# print(\"Organizing ... \")\n# list_files = sorted(os.listdir(layout_ocr_folder))\n# print(list_files)\n\n# graph_dataset = demo_preprocess(data_list = list_files[1:2])\n\n# for i, batch in enumerate(graph_dataset):\n# \tprint(batch[0].size(), batch[1].size())\n\n\n# todo: Load trained GCN\nprint(\"Loading trained GCN ...\")\ndevice = 'cpu'\nmodel = GCN()\nmodel.load_state_dict(torch.load('./model12.pth.tar', map_location=torch.device('cpu')))\nprint(model)\nprint('Done!')\n\n# ==============================================================================================\n\ndef use_gcn(model, dataset, device):\n\tmodel.to(device)\n\n\tfor item in dataset:\n\t\t[V, A] = item\n\t\tV = V.to(device)\n\t\tA = A.to(device)\n\n\t\twith torch.no_grad():\n\t\t\toutput = model.eval([V, A])\n\t\t\treturn output\n\ndef process_image(image, pos, label):\n\t# Box the bboxes\n\tfor i, item in enumerate(pos):\n\t\tif label[i] == 0:\n\t\t\tcolor = (0,0,0) # black\n\t\tif label[i] == 1:\n\t\t\tcolor = (255,0,0) # red - company\n\t\tif label[i] == 2:\n\t\t\tcolor = (0,255,0) # green - date\n\t\tif label[i] == 3:\n\t\t\tcolor = (0,0,255) # blue - address\n\t\tif label[i] == 4:\n\t\t\tcolor = (0,255,255) # cyan - total\n\t\timage = cv2.rectangle(image, (item[0],item[1]), (item[4],item[5]), color = color, thickness = 2)\n\n\treturn image\n\n# out = use_gcn(model, graph_dataset, device)\n# prediction = out[0].numpy().argmax(axis = 1)\n\n# print(prediction)\n\n# image = cv2.imread('./images/'+list_images[3])\n# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# s, pos = get_page_demo(list_files[3])\n# for i in s:\n# \tprint(i)\n\n# image = process_image(image, pos, prediction)\n# plt.imshow(image)\n# plt.show()\n\n# ======================================================================================\n\n# todo: Demo\nWANT_TO_TEST = [0]\nTEST_ID = 'X00016469612'\nexplainer = GNNExplainer(model, epochs=200, lr=0.01)\n\n# ==============================================\n# list_id = []\n# for item in list_files:\n# \tlist_id.append(item[:-4])\n# list_id = sorted(list_id)\n# ==============================================\n\nfor i_test, (sample, name) in enumerate(zip(dataset, train_list[1:2])):\n\t# if i_test not in WANT_TO_TEST: continue\n\n\t# todo: Load pre-processed data\n\t\"\"\"\n\t\tV (Tensor): N x F --> number of nodes x number of features \t(this case: N x 600)\n\t\tA (Tensor): L x N x N (this case: L = 4)\n\t\tedge_index (Tensor): 2 x number_of_edges\n\t\tedge_type (Tensor): number_of_edges\n\t\"\"\"\n\t[V, A], label = sample\n\tedge_index, edge_type = convert_adj_to_edge_index(A)\n\t# print(edge_index)\n\n\t# todo: calculate predicted labels of nodes --> identify which nodes to be explained\n\twith torch.no_grad():\n\t\tpred = model.eval([V.to(device), A.to(device)])\n\tpred = pred[0].argmax(axis=1).cpu().numpy()\n\tprint(pred)\n\n\tindeces_of_nodes_to_explained = []\n\tvalues_of_nodes_to_explained = []\n\tfor i in range(pred.shape[0]):\n\t\tif pred[i] != 0:\n\t\t\tindeces_of_nodes_to_explained.append(i)\n\t\t\tvalues_of_nodes_to_explained.append(pred[i])\n\n\tprint(\"Nodes to be explained: \", indeces_of_nodes_to_explained)\n\tprint(\"With respective label: \", values_of_nodes_to_explained)\n\n\t# todo: explain each nodes\n\tvis = imageVisualizer_temp(name) # change the ID of the test here!!!\n\tedge_threshold = 0.9\n\n\tfor node_id in indeces_of_nodes_to_explained:\n\t\tfeature_mask, edge_mask = explainer.explain_node(node_id, V, 
A)\n\t\t\"\"\"\n\t\t\tfeature_mask (tensor): size = number_of_features; value in range[0,1]\n\t\t\tedge_mask (tensor): size = number_of_edge; value in range[0,1]\n\t\t\"\"\"\n\t\tfor z in range(edge_mask.size(0)):\n\t\t\tif edge_mask[z] > edge_threshold:\n\t\t\t\t# print(pred[node_id], int(edge_index[0][z]), int(edge_index[1][z]))\n\t\t\t\tvis.add_edge(pred[node_id], (int(edge_index[0][z]), int(edge_index[1][z])))\n\n\t\tprint(feature_mask.min(), feature_mask.max())\n\t\timportant_features = take_n_most_important(feature_mask)\n\t\tfor feature_id in important_features: vis.add_feature(pred[node_id], feature_id)\n\n\tvis.draw_important_box(indeces_of_nodes_to_explained, values_of_nodes_to_explained)\n\tvis.draw_edges()\n\tvis.show()\n\t# vis.save_image()\n\nprint(\"Explained\")\n","sub_path":"run_gcn.py","file_name":"run_gcn.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"331119930","text":"class Vampire:\n\n coven = []\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.in_coffin = True\n self.drank_blood_today = False\n\n def __str__(self):\n return f\"Name: {self.name}\\nAge: {self.age}\\nIn Coffin: {self.in_coffin}\\nDrank Blood Today: {self.drank_blood_today}\"\n\n def __repr__(self):\n return f\"{self.name} (Drank Blood: {self.drank_blood_today} In Coffin: {self.in_coffin})\"\n\n @classmethod\n def create(cls, name, age):\n vampire = Vampire(name, age)\n cls.coven.append(vampire)\n\n @classmethod\n def sunrise(cls):\n dead_vampires = []\n for vampire in cls.coven:\n if vampire.drank_blood_today is False or vampire.in_coffin is False:\n dead_vampires.append(vampire)\n\n for vampire in dead_vampires:\n cls.coven.remove(vampire)\n # if vampire.drank_blood_today or vampire.in_coffin:\n # pass\n # else:\n # cls.coven.remove(vampire)\n\n @classmethod\n def sunset(cls):\n for vampire in cls.coven:\n vampire.drank_blood_today = False\n vampire.in_coffin = False\n\n def drink_blood(self):\n self.drank_blood_today = True\n\n def go_home(self):\n self.in_coffin = True\n\n\nVampire.create(\"Dracula\", 1000)\nVampire.create(\"Selene\", 1500)\nVampire.create(\"Blade\", 2000)\nVampire.create(\"The Count\", 500)\n\nfor vampire in Vampire.coven:\n print()\n print(vampire)\n\nprint()\nprint(\n Vampire.coven\n) # [Dracula (Drank Blood: False In Coffin: True), Selene (Drank Blood: False In Coffin: True), Blade (Drank Blood: False In Coffin: True), The Count (Drank Blood: False In Coffin: True)]\n\nprint()\nprint(\"----sunset----\")\nVampire.sunset()\n\nprint()\nprint(\n Vampire.coven\n) # [Dracula (Drank Blood: False In Coffin: False), Selene (Drank Blood: False In Coffin: False), Blade (Drank Blood: False In Coffin: False), The Count (Drank Blood: False In Coffin: False)]\n\nprint()\nprint(\"----drink blood vampire 1 & 3----\")\nVampire.coven[0].drink_blood()\nVampire.coven[2].drink_blood()\n\nprint()\nprint(\n Vampire.coven\n) # [Dracula (Drank Blood: True In Coffin: False), Selene (Drank Blood: False In Coffin: False), Blade (Drank Blood: True In Coffin: False), The Count (Drank Blood: False In Coffin: False)]\n\nprint()\nprint(\"----go home vampire 3 & 4----\")\nVampire.coven[2].go_home()\nVampire.coven[3].go_home()\n\nprint()\nprint(\n Vampire.coven\n) # [Dracula (Drank Blood: True In Coffin: False), Selene (Drank Blood: False In Coffin: False), Blade (Drank Blood: True In Coffin: True), The Count (Drank Blood: False In Coffin: 
True)]\n\nprint()\nprint(\"----sunrise----\")\nVampire.sunrise()\n\nprint()\nprint(\n Vampire.coven\n) # [Dracula (Drank Blood: True In Coffin: False), Blade (Drank Blood: True In Coffin: True), The Count (Drank Blood: False In Coffin: True)]\n\nfor vampire in Vampire.coven:\n print()\n print(vampire)\n","sub_path":"oop-class-method-variables/vampires/vampire.py","file_name":"vampire.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"439865037","text":"import io\nimport os\nimport re\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef read(filename):\n filename = os.path.join(os.path.dirname(__file__), filename)\n text_type = type(u\"\")\n with io.open(filename, mode=\"r\", encoding=\"utf-8\") as fd:\n return re.sub(text_type(r\":[a-z]+:`~?(.*?)`\"), text_type(r\"``\\1``\"), fd.read())\n\n\nsetup(\n name=\"amsterdam-schema-tools\",\n version=\"0.5.0\",\n url=\"https://github.com/amsterdam/schema-tools\",\n license=\"Mozilla Public 2.0\",\n author=\"Jan Murre\",\n author_email=\"jan.murre@catalyz.nl\",\n description=\"Tools to work with Amsterdam schema.\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=[\n \"geoalchemy2\",\n \"psycopg2\",\n \"click\",\n \"jsonschema\",\n \"ndjson\",\n \"shapely\",\n \"python-string-utils\",\n \"python-dateutil\",\n \"requests\",\n \"jinja2\",\n \"mappyfile\",\n ],\n extras_require={\n \"tests\": [\"pytest\", \"pytest-cov\", \"pytest-sqlalchemy\", \"requests-mock\"],\n \"django\": [\n \"django >= 3.0.4\",\n \"django-postgres-unlimited-varchar >= 1.1.0\",\n \"django-gisserver >= 0.5\",\n ],\n },\n tests_require=[\"pytest\", \"pytest-cov\", \"pytest-sqlalchemy\", \"requests-mock\"],\n entry_points=\"\"\"\n [console_scripts]\n schema=schematools.cli:main\n \"\"\",\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n python_requires=\">=3.7\",\n)\n","sub_path":"pypi_install_script/amsterdam-schema-tools-0.5.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"511208563","text":"#Code was modified from https://github.com/soupault/scikit-inpaint\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom scipy.sparse.linalg import spsolve\r\nimport scipy.ndimage as ndi\r\nfrom scipy.ndimage.filters import laplace\r\nfrom skimage import img_as_float\r\nfrom skimage.measure import label\r\nimport sys, os, time\r\n\r\nclass Biharmonic():\r\n\r\n\r\n def _get_neighborhood(self, nd_idx, radius, nd_shape):\r\n bounds_lo = (nd_idx - radius).clip(min=0)\r\n bounds_hi = (nd_idx + radius + 1).clip(max=nd_shape)\r\n return bounds_lo, bounds_hi\r\n\r\n\r\n def _biharmonic_single_channel(self, img, mask, out, limits):\r\n # Initialize sparse matrices\r\n matrix_unknown = sparse.lil_matrix((np.sum(mask), out.size))\r\n matrix_known = sparse.lil_matrix((np.sum(mask), out.size))\r\n\r\n # Find indexes of masked points in flatten array\r\n mask_i = np.ravel_multi_index(np.where(mask), mask.shape)\r\n\r\n # Find masked points and prepare them to be easily enumerate over\r\n 
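        # For intuition: the per-point coefficients built a few lines below
        # come from applying the discrete Laplacian twice to a unit impulse,
        # i.e. the 13-point bilaplacian stencil (centre 20, axis neighbours
        # -8, diagonals 2, two-away points 1). Quick check (illustrative):
        #
        #   impulse = np.zeros((5, 5)); impulse[2, 2] = 1
        #   laplace(laplace(impulse))   # reproduces the stencil above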
mask_pts = np.array(np.where(mask)).T\r\n\r\n # Iterate over masked points\r\n for mask_pt_n, mask_pt_idx in enumerate(mask_pts):\r\n # Get bounded neighborhood of selected radius\r\n b_lo, b_hi = self._get_neighborhood(mask_pt_idx, 2, out.shape)\r\n\r\n # Create biharmonic coefficients ndarray\r\n neigh_coef = np.zeros(b_hi - b_lo)\r\n neigh_coef[tuple(mask_pt_idx - b_lo)] = 1\r\n neigh_coef = laplace(laplace(neigh_coef))\r\n\r\n # Iterate over masked point's neighborhood\r\n it_inner = np.nditer(neigh_coef, flags=['multi_index'])\r\n for coef in it_inner:\r\n if coef == 0:\r\n continue\r\n tmp_pt_idx = np.add(b_lo, it_inner.multi_index)\r\n tmp_pt_i = np.ravel_multi_index(tmp_pt_idx, mask.shape)\r\n\r\n if mask[tuple(tmp_pt_idx)]:\r\n matrix_unknown[mask_pt_n, tmp_pt_i] = coef\r\n else:\r\n matrix_known[mask_pt_n, tmp_pt_i] = coef\r\n\r\n # Prepare diagonal matrix\r\n flat_diag_image = sparse.dia_matrix((out.flatten(), np.array([0])),\r\n shape=(out.size, out.size))\r\n\r\n # Calculate right hand side as a sum of known matrix's columns\r\n matrix_known = matrix_known.tocsr()\r\n rhs = -(matrix_known * flat_diag_image).sum(axis=1)\r\n\r\n # Solve linear system for masked points\r\n matrix_unknown = matrix_unknown[:, mask_i]\r\n matrix_unknown = sparse.csr_matrix(matrix_unknown)\r\n result = spsolve(matrix_unknown, rhs)\r\n\r\n # Handle enormous values\r\n result = np.clip(result, *limits)\r\n\r\n result = result.ravel()\r\n\r\n # Substitute masked points with inpainted versions\r\n for mask_pt_n, mask_pt_idx in enumerate(mask_pts):\r\n out[tuple(mask_pt_idx)] = result[mask_pt_n]\r\n\r\n return out\r\n\r\n\r\n def biharmonic(self, img, mask, multichannel=False):\r\n \"\"\"Inpaint masked points in image with biharmonic equations.\r\n Parameters\r\n ----------\r\n img : (M[, N[, ..., P]][, C]) ndarray\r\n Input image.\r\n mask : (M[, N[, ..., P]]) ndarray\r\n Array of pixels to be inpainted. Have to be the same shape as one\r\n of the 'img' channels. Unknown pixels have to be represented with 1,\r\n known pixels - with 0.\r\n multichannel : boolean, optional\r\n If True, the last `img` dimension is considered as a color channel,\r\n otherwise as spatial.\r\n Returns\r\n -------\r\n out : (M[, N[, ..., P]][, C]) ndarray\r\n Input image with masked pixels inpainted.\r\n References\r\n ----------\r\n .. 
[1] N.S.Hoang, S.B.Damelin, \"On surface completion and image inpainting\r\n by biharmonic functions: numerical aspects\",\r\n http://www.ima.umn.edu/~damelin/biharmonic\r\n Examples\r\n --------\r\n >>> img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))\r\n >>> mask = np.zeros_like(img)\r\n >>> mask[2, 2:] = 1\r\n >>> mask[1, 3:] = 1\r\n >>> mask[0, 4:] = 1\r\n >>> out = inpaint_biharmonic(img, mask)\r\n \"\"\"\r\n # Defect image over the same region in each color channel\r\n print ('Start Time: ', time.asctime())\r\n image_defect = img.copy()\r\n for layer in range(image_defect.shape[-1]):\r\n image_defect[np.where(mask)] = 0\r\n\r\n if img.ndim < 1:\r\n raise ValueError('Input array has to be at least 1D')\r\n\r\n img_baseshape = img.shape[:-1] if multichannel else img.shape\r\n if img_baseshape != mask.shape:\r\n raise ValueError('Input arrays have to be the same shape')\r\n\r\n if np.ma.isMaskedArray(img):\r\n raise TypeError('Masked arrays are not supported')\r\n\r\n img = img_as_float(img)\r\n mask = mask.astype(np.bool)\r\n\r\n # Split inpainting mask into independent regions\r\n kernel = ndi.morphology.generate_binary_structure(mask.ndim, 1)\r\n mask_dilated = ndi.morphology.binary_dilation(mask, structure=kernel)\r\n mask_labeled, num_labels = label(mask_dilated, return_num=True)\r\n mask_labeled *= mask\r\n\r\n if not multichannel:\r\n img = img[..., np.newaxis]\r\n\r\n out = np.copy(img)\r\n\r\n for idx_channel in range(img.shape[-1]):\r\n known_points = img[..., idx_channel][~mask]\r\n limits = (np.min(known_points), np.max(known_points))\r\n\r\n for idx_region in range(1, num_labels+1):\r\n mask_region = mask_labeled == idx_region\r\n self._biharmonic_single_channel(\r\n img[..., idx_channel], mask_region,\r\n out[..., idx_channel], limits)\r\n\r\n if not multichannel:\r\n out = out[..., 0]\r\n \r\n print ('End Time: ', time.asctime())\r\n return out\r\n\r\n #from skimage import data\r\n #import matplotlib.pyplot as plt\r\n #import cv2\r\n\r\n #image_orig = data.astronaut()[0:200, 0:200]\r\n #image_orig = cv2.imread(\"BoyJump.png\")\r\n\r\n # Create mask with three defect regions: left, middle, right respectively\r\n\r\n #mask = np.zeros(image_orig.shape[:-1])\r\n #mask[100:150, 0:50] = 1\r\n #mask[160:180, 70:155] = 1\r\n #mask[30:60, 170:195] = 1\r\n\r\n #mask = cv2.imread(\"BoyJumpMask.png\")\r\n\r\n #image_result = biharmonic(image_orig, mask)\r\n\r\n #fig, axes = plt.subplots(ncols=2, nrows=2)\r\n #ax = axes.ravel()\r\n\r\n #ax[0].set_title('Original image')\r\n #cv2.normalize(image_orig, image_orig, 0, 255, cv2.NORM_MINMAX)\r\n #ax[0].imshow(image_orig)\r\n\r\n #ax[1].set_title('Mask')\r\n #ax[1].imshow(mask, cmap=plt.cm.gray)\r\n #cv2.normalize(mask, mask, 0, 255, cv2.NORM_MINMAX)\r\n\r\n #ax[3].set_title('Inpainted image')\r\n #ax[3].imshow(image_result)\r\n #cv2.normalize(image_result, image_result, 0, 255, cv2.NORM_MINMAX)\r\n #for a in ax:\r\n # a.axis('off')\r\n\r\n #fig.tight_layout()\r\n #plt.show()","sub_path":"Biharmonic/Source/Biharmonic.py","file_name":"Biharmonic.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"399100845","text":"edges={}\nmstedges={}\ndef find(parent,vno):\n return parent[vno]\ndef union(parent,c1,c2):\n for i in range(len(parent)):\n if parent[i]==c2:\n parent[i]=c1\nn=int(input('Enter the no of vertices '))\nstructure=int(input('Enter 0 for Entering adjacency matrix or 1 for Entering edges'))\nif structure:\n no=int(input('Enter the 
no of edges '))\n for i in range(no):\n a,b,c=int(input('Enter V1 ')),int(input('Enter V2 ')),int(input('Enter Weight'))\n edges[c]=(a,b)\nelse:\n g=[0 for i in range(n)]\n g=[g[:] for i in range(n)]\n for i in range(n):\n print('Enter the data for {} row'.format(i+1))\n for j in range(n):\n g[i][j]=int(input())\n for i in range(1,n):\n for j in range(0,i):\n if g[i][j]!=0:\n edges[g[i][j]]=(i,j)\nmstv=0\nparent=[]\nfor i in range(n):\n parent.append(i)\nfor i in sorted(edges.keys()):\n c1=find(parent,edges[i][0])\n c2=find(parent,edges[i][1])\n if c1 != c2:\n mstedges[i]=edges[i]\n mstv+=1\n union(parent,c1,c2)\nprint('No of vertices',mstv)\nprint('Total cost of edges in MST',sum(mstedges.keys()))\nprint('The edges are.....')\nfor i in sorted(mstedges.keys()):\n print(str(mstedges[i][0])+' -- '+str(mstedges[i][1])+' == '+str(i))\n","sub_path":"KrushkalAlgorithmPython3.py","file_name":"KrushkalAlgorithmPython3.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"650406700","text":"import webapp2\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/html'\n self.response.write(\n '
Welcome!
'\n 'Please go '\n 'here instead.')\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n], debug=True)\n","sub_path":"app_engine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"640156125","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 18 22:12:56 2021\r\n\r\n@author: James Hiu\r\n\"\"\"\r\nclass Solution(object):\r\n def strStr(self, haystack, needle):\r\n \"\"\"\r\n :type haystack: str\r\n :type needle: str\r\n :rtype: int\r\n \"\"\"\r\n \r\n #look for a sub-string in a string\r\n #Question requirement, if no substring is provided (needle), return 0\r\n if len(needle)==0:\r\n return 0\r\n \r\n #Step 2\r\n #Use the index function to identify the first occurrance of the substring in the string\r\n #If the string does not contain the substring return -1 as requested\r\n try:\r\n x=haystack.index(needle)\r\n except:\r\n x=-1\r\n \r\n return x\r\n\r\n","sub_path":"Easy/Implement strStr().py","file_name":"Implement strStr().py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"311806959","text":"\"\"\" main.py \n\nAbout\n------\no Sunlight Foundation's APIs\n\nRequirements\n-------------\no Python 3.5.1\no Requests\n\nStable Date\n------------\nApr-08-2016\n\n\"\"\"\nfrom congress import Legislators, Bills\nfrom capitol_words import Text\n\n\ndef test1():\n\t\"\"\" Legislators().locate_zip(zip_code) \"\"\"\n\tfrom congress import Legislators\n\t\n\tLegislators = Legislators()\n\tresults = Legislators.locate_zip(21201)\n\n\tfor person in results:\n\t\tprint(person['title'], person['first_name'], person['last_name'], ' -- In Office:', person['in_office'])\n\n\ndef test2():\n\t\"\"\" Legislators().locate_state(state_code) \"\"\"\n\tfrom congress import Legislators\n\n\tLegislators = Legislators()\n\tresults = Legs.locate_state('MD')\n\n\tfor person in results:\n\t\tprint(person['title'], person['first_name'], person['last_name'], ' -- In Office:', person['in_office'])\n\n\ndef test3():\n\t\"\"\" Bills().all(per_page, page) \"\"\"\n\tfrom congress import Bills\n\n\tBills = Bills()\n\t# no return value yet\n\tBills.all(per_page=2, page=3)\n\n\ndef test4():\n\t\"\"\" Bills().search(query) \"\"\"\n\tfrom congress import Bills\n\n\tBills = Bills()\n\tresults = Bills.search('data')\n\n\ndef test5():\n\t\"\"\" Bills().info(bill_id) \"\"\"\n\tfrom congress import Bills\n\t\n\tBills = Bills()\n\tinfo = Bills.info('s2765-114')\n\tprint('Congress Number:', info[0]['congress'])\n\n\n\ndef test6():\n\t\"\"\" Bills().history(bill_id) \"\"\"\n\tfrom congress import Bills\n\n\t#: test these 3 different bill_ids\n\t#: active, inactive, \n\t#: s1927-113, s2765-114, hr1410-113th\n\tBills = Bills()\n\thistory = Bills.history('hr308-114')\n\tprint('Enacted:', history['enacted'])\n\n\n\ndef test7():\n\t\"\"\" Bills().count(congress_number) \"\"\"\n\tfrom congress import Bills\n\n\tBills = Bills()\n\tcount = Bills.count(114)\n\n\n\ndef test8():\n\t\"\"\" Text.search(parameters) \"\"\"\n\tfrom capitol_words import Text\n\n\tText = Text()\n\t# All the parameters you can supply\n\t# parameters = {\t\n\t# \t\t'phrase': '',\n\t# \t\t'title':'',\n\t# \t\t'state': '',\n\t# \t\t'party': '[R, D, I]',\n\t# \t\t'date': 'YYYY-MM-DD',\n\t# \t\t'start_date': '',\n\t# \t\t'end_date': '',\n\t# # sorting parameters\n\t# \t\t'page': '0',\n\t# \t\t'per_page': '20',\n\t# \t\t'sort': 'date asc' 
# 'date desc'\n\t# }\n\tparameters = {\n\t\t\t\t 'phrase': 'encryption', \n\t\t\t\t 'party': 'R', \n\t\t\t\t 'start_date': '2015-01-01', \n\t\t\t\t 'sort': 'date asc',\n\t\t\t\t 'page': '0'\n\t\t\t\t }\n\n\tcount, results = Text.search(parameters)\n\ndef test9():\n\t\"\"\" Text().party_count(parameters) \"\"\"\n\tfrom capitol_words import Text\n\n\tText = Text()\n\t# dont give the 'party' parameter in the party_count() method\n\tparameters = {'phrase': \"security\"}\n\tText.party_count(parameters)\n\t# returns nothing\n\ndef test10():\n\t\"\"\" Text().scatter(data) \"\"\"\n\tfrom capitol_words import Text\n\n\t# get 'data' from the party_count method, this graphs speach frequency for a given pattern\n\tdata = {'R': 10, 'D': 250}\n\tText().scatter(phrase=\"test phrase\", data=data)\n\n\n\nif __name__ == '__main__':\n\t# comment out which tests you dont want to use\n\n\tprint('\\n\\n============================== Test 1 ==============================')\n\tprint('---------------- Legislators().locate_zip(zip_code) ----------------')\n\ttest1()\n\n\tprint('\\n\\n============================== Test 2 ==============================')\n\tprint('---------------- Legislators().locate_state(state_code) ------------')\n\ttest1()\n\n\tprint('\\n\\n============================== Test 3 ==============================')\n\tprint('---------------------------- Bills().all() -------------------------')\n\ttest3()\t\n\n\tprint('\\n\\n============================== Test 4 ==============================')\n\tprint('---------------------------- Bills().search(query) -------------------------')\n\ttest4()\n\n\tprint('\\n\\n============================== Test 5 ==============================')\n\tprint('-------------------------- Bills().info(bill_id) -----------------------')\n\ttest5()\t\t\n\n\tprint('\\n\\n============================== Test 6 ==============================')\n\tprint('------------------------ Bills().history(bill_id) ----------------------')\n\ttest6()\t\t\n\n\tprint('\\n\\n============================== Test 7 ==============================')\n\tprint('------------------------ Bills().count(bill_id) ----------------------')\n\ttest7()\t\t\n\n\tprint('\\n\\n============================== Test 8 ==============================')\n\tprint('------------------------ Text().search(parameters) ----------------------')\n\ttest8()\t\n\n\n\tprint('\\n\\n============================== Test 9 ==============================')\n\tprint('------------------------ Text().party_count(parameters) ----------------------')\n\ttest9()\t\n\n\n\tprint('\\n\\n============================== Test 10 ==============================')\n\tprint('------------------------ Text().scatter(parameters) ----------------------\\n')\n\ttest10()\t\n\n\n\n\n\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"14055285","text":"'''\nAuthor: Zhou Hao\nDate: 2021-04-22 13:57:07\nLastEditors: Zhou Hao\nLastEditTime: 2021-04-22 14:56:43\nDescription: file content\nE-mail: 2294776770@qq.com\n'''\n#\n# @lc app=leetcode.cn id=51 lang=python3\n#\n# [51] N 皇后\n#\n\n# @lc code=start\nclass Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n board = ['.'* n ] * n #初始化空棋盘\n \n def isValid(board,row,col):\n '''判断当前位置能不能放置皇后'''\n #因为皇后是从上往下放的,所以不用检查下面的位置,不用检查当前行\n\n #判断当前列上面是否冲突\n for i in range(row):\n if board[i][col] == 'Q':\n return True\n # 右上\n for i,j in zip(range(row-1, -1, -1), 
range(col+1, n)):\n if(board[i][j] == 'Q'):\n return True\n # upper-left diagonal\n for i,j in zip(range(row-1,-1,-1), range(col-1, -1, -1)):\n if(board[i][j] == 'Q'):\n return True\n\n return False\n\n\n def dfs(board,row):\n if row == n:\n res.append(board[:])\n return \n\n for col in range(n):\n # prune: skip this column if the position conflicts\n if isValid(board,row,col):\n continue\n \n board[row] = '.'*col + 'Q' + '.'* (n-1-col)\n dfs(board,row+1)\n board[row] = '.'*n\n\n\n res = []\n dfs(board,0)\n return res\n# @lc code=end\n\n","sub_path":"51.n-皇后.py","file_name":"51.n-皇后.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"319220550","text":"# -*- coding:utf-8 -*-\nfrom xml.dom import minidom\nimport pandas as pd\nimport re\ndf=pd.read_excel('student.xls')\ndoc = minidom.Document()\ncomment_text = doc.createComment(u\"\"\"学生信息表 \"id\" : [名字, 数学, 语文, 英文]\"\"\")\nroot_node = doc.createElement('root')\ndoc.appendChild(root_node)\nstudent_node = doc.createElement('students')\nroot_node.appendChild(student_node)\ndict1 = {'1':str(df['1'].values),'2':str(df['2'].values),'3':str(df['3'].values)}\nk = re.sub(r'\"','',str(dict1))\nprint(k)\nname_text=doc.createTextNode(k)\nstudent_node.appendChild(comment_text)\nstudent_node.appendChild(name_text)\ntry:\n with open('student.xml','w',encoding='UTF-8') as fh:\n # writexml(): the first argument is the target file object, the second is the indentation of the root node,\n # the third is the indentation of other child nodes, the fourth sets the newline style, and the fifth sets the encoding of the XML content.\n doc.writexml(fh,indent='',addindent='\\t',newl='\\n',encoding='UTF-8')\n print('XML written OK!')\nexcept Exception as err:\n print('Error: {0}'.format(err))","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"544910193","text":"\"\"\"\nA disjoint-set data structure is a data structure that keeps track of a set of\nelements partitioned into a number of disjoint (non-overlapping) subsets. A\nunion-find algorithm is an algorithm that performs two useful operations on\nsuch a data structure:\n\nFind: Determine which subset a particular element is in. This can be used for\ndetermining if two elements are in the same subset.\n\nUnion: Join two subsets into a single subset.\n\"\"\"\n# pylint: disable=too-few-public-methods\n\n\nclass UnionFind(object):\n \"\"\"\n Class that implements the union-find structure with\n union by rank and find with path compression\n\n :param size: Total number of elements in union set\n \"\"\"\n\n def __init__(self, size):\n self.size = size\n self.parent = list(range(size))\n self.rank = [-1 for _ in range(size)]\n\n def find(self, elem):\n \"\"\"\n The idea is to flatten the tree when find() is called.\n When find() is called for an elem, root of the\n tree is returned. The find() operation traverses up from\n elem to find root.\n This optimization is called Path Compression.\n The idea of path compression is to make the found root\n as parent of elem so that we don't have to traverse all\n intermediate nodes again. If elem is root of a subtree,\n then path (to root) from all nodes under elem also\n compresses.\n \"\"\"\n if not elem == self.parent[elem]:\n self.parent[elem] = self.find(self.parent[elem])\n return self.parent[elem]\n
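    # An equivalent iterative find() with full path compression; it avoids
    # Python's recursion limit on long parent chains (a sketch, not used
    # by the rest of the class):
    def find_iterative(self, elem):
        root = elem
        while self.parent[root] != root:
            root = self.parent[root]
        while self.parent[elem] != root:
            self.parent[elem], elem = root, self.parent[elem]
        return root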
\n def union(self, set1, set2):\n \"\"\"\n Technically the rank is an upper bound for the height\n of a tree. The rank is not the height because during\n a find operation with path compression the height of\n a tree might become smaller, whereas the rank is not\n updated in the find function.\n Instead of simply linking the tree of set1 to the tree\n of set2, we will first compare their ranks. The tree\n with smaller rank is then linked to the tree with\n greater rank. This is called union by rank.\n \"\"\"\n x_root = self.find(set1)\n y_root = self.find(set2)\n if x_root == y_root:\n return\n if self.rank[x_root] > self.rank[y_root]:\n self.parent[y_root] = x_root\n else:\n self.parent[x_root] = y_root\n if self.rank[x_root] == self.rank[y_root]:\n self.rank[y_root] += 1\n\n def has_same_root(self, set1, set2):\n \"\"\"\n Determine if elements have same root\n \"\"\"\n x_root = self.find(set1)\n y_root = self.find(set2)\n return x_root == y_root\n\n def disjoint_sets(self):\n \"\"\"\n Get all disjoint sets\n \"\"\"\n my_dict = {}\n for node in range(self.size):\n root = self.find(node)\n if root not in my_dict:\n my_dict[root] = set([node])\n else:\n my_dict[root].add(node)\n\n return list(my_dict.values())\n\n def __str__(self):\n return \"Index: \"\\\n + str(list(range(self.size)))\\\n + \"\\n\"\\\n + \"Parent: \"\\\n + \"\".join(str(self.parent))\n\n __repr__ = __str__\n\n\ndef main():\n \"\"\"\n Running the code\n \"\"\"\n # Part a)\n union_find = UnionFind(9)\n\n print(\"Initial Set:\")\n print(union_find)\n\n union_find.union(2, 3)\n union_find.union(4, 3)\n union_find.union(6, 5)\n\n msg = (\n \"\\n\"\n \"Parent array after \"\n \"union(2, 3), \"\n \"union(4, 3) \"\n \"and union(6, 5):\"\n )\n\n print(msg)\n print(union_find)\n print(union_find.has_same_root(2, 3))\n\n # Part b)\n union_find.union(2, 4)\n print(\"\\nParent array after union(2, 4)\")\n print(union_find)\n\n # Part c)\n union_find.find(2)\n print(\"\\nParent array after find(2)\")\n print(union_find)\n\n # Part d)\n print(\"\\nDisjoint sets: \")\n print(\"\\n\".join([str(my_set) for my_set in union_find.disjoint_sets()]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"disjoin_sets_union_find.py","file_name":"disjoin_sets_union_find.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
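A quick usage sketch for the `UnionFind` class above (the edges and component count are illustrative and easy to verify by hand):

```python
uf = UnionFind(5)
uf.union(0, 1)
uf.union(3, 4)
assert uf.has_same_root(0, 1)
assert not uf.has_same_root(1, 2)
print(uf.disjoint_sets())  # three components: {0, 1}, {2} and {3, 4}
```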
= self.config.get(\"filenamepart\")\n condition_expr = self.config.get(\"conditionexpr\")\n dest_filename = self.config.get(\"destfilename\")\n group_fields = self.config.get(\"groupfield\")\n condition_mutex = self.config.get(\"conditionmutex\")\n dest_dir = self.config.get(\"destdir\")\n need_null = self.config.get(\"neednulldestfile\")\n SFN = source_filename\n OFN = output_filename\n HEAD = dest_filename\n SPLIT = \".\"\n of_temp_dir = dest_dir + \"/\" + self.process_id + \"/\"\n if not os.path.exists(of_temp_dir):\n os.mkdir(of_temp_dir)\n logging.info(\"%s not exist, make it\" % of_temp_dir)\n out_filename = \"\"\n self.xdrFile = xdr_file\n str_sql = \"SELECT id FROM sorttable%s \" % self.process_id\n if condition_expr != \"\": # 判断该rule是否有查询条件\n # 按省查询\n if \"{\" in condition_expr: # 判断是否是按省查询\n prov_list = re.findall('\\{(.*?)\\\\}', condition_expr) # 截取出省代码列表\n factor = condition_expr.split(\"{\")[0]\n # 被该分拣条件选中的话单不再被其他分拣条件分拣\n if condition_mutex == \"true\":\n for prov in prov_list[0].split(\",\"):\n str_sql = \"SELECT id FROM sorttable%s \" % self.process_id\n str_sql = str_sql + \" WHERE \" + factor + prov\n cur.execute(str_sql)\n contents = cur.fetchall()\n if not contents:\n continue\n filename = \"\"\n PROV = prov\n filename_part_list = filename_part.split(\",\")\n for part in filename_part_list:\n if part.startswith(\"$\"):\n part = part.strip(\"$\")\n filename += locals()[part]\n else:\n filename += part\n out_filename = of_temp_dir + filename\n self.do_to_onefile(out_filename, contents)\n output_list.append(out_filename)\n # 从表中删除符合该分拣条件的数据\n delete_sql = \"DELETE FROM sorttable\" + self.process_id + \" WHERE + factor + prov\"\n cur.execute(delete_sql)\n # 被该分拣条件选中的话单可以被其他分拣条件分拣\n else:\n for prov in prov_list[0].split(\",\"):\n str_sql = \"SELECT id FROM sorttable%s \" % self.process_id\n str_sql = str_sql + \" WHERE \" + factor + prov\n cur.execute(str_sql)\n contents = cur.fetchall()\n if not contents:\n continue\n filename = \"\"\n PROV = prov\n filename_part_list = filename_part.split(\",\")\n for part in filename_part_list:\n if part.startswith(\"$\"):\n part = part.strip(\"$\")\n filename += locals()[part]\n else:\n filename += part\n out_filename = of_temp_dir + filename\n self.do_to_onefile(out_filename, contents)\n # for content in contents:\n # str_sql_update = \"UPDATE sorttable set FLAG=1 where id = %s\" % content[0]\n # try:\n # cur.execute(str_sql_update)\n # except Exception as e:\n # logging.error(\"update error:%s\" % e)\n content_0_list = []\n for content in contents:\n content_0_list.append(str(content[0]))\n if len(content_0_list) > 0:\n content_0_str = \",\".join(content_0_list)\n str_sql_update = \"UPDATE sorttable\" + self.process_id + \\\n \" set FLAG=1 where id in (\" + content_0_str + \")\"\n try:\n cur.execute(str_sql_update)\n except Exception as e:\n logging.error(\"update error:%s\" % e)\n sys.exit()\n output_list.append(out_filename)\n\n return output_list\n # 不按省查询\n else:\n filename = \"\"\n filename_part_list = filename_part.split(\",\")\n for part in filename_part_list:\n if part.startswith(\"$\"):\n part = part.strip(\"$\")\n filename += locals()[part]\n else:\n filename += part\n out_filename = of_temp_dir + filename\n str_sql = str_sql + \" WHERE \" + condition_expr\n try:\n cur.execute(str_sql)\n except Exception as e:\n logging.error(\"execute sql err: %s\" % e)\n sys.exit()\n contents = cur.fetchall()\n if not contents:\n if need_null == \"0\":\n return\n if group_fields == \"\" or group_fields is None:\n\n 
self.do_to_onefile(out_filename, contents)\n output_list.append(out_filename)\n else:\n self.do_to_onefile(out_filename, contents)\n output_list.append(out_filename)\n if condition_mutex == \"true\":\n try:\n cur.execute(\"DELETE FROM sorttable%s WHERE %s\" % (self.process_id, condition_expr))\n except Exception as e:\n logging.error(\"execute sql err: %s\" % e)\n sys.exit()\n return output_list\n # 该rule无查询条件\n else:\n str_sql = \" SELECT id FROM sorttable%s where FLAG == '' \" % self.process_id\n try:\n cur.execute(str_sql)\n except Exception as e:\n logging.error(\"execute sql err: %s\" % e)\n sys.exit()\n contents = cur.fetchall()\n if not contents:\n if condition_mutex == \"true\":\n try:\n cur.execute(\"DELETE FROM sorttable%s\" % self.process_id)\n logging.info(\"DELETE FROM sorttable%s\" % self.process_id)\n except Exception as e:\n logging.error(\"execute sql err: %s\" % e)\n sys.exit()\n return\n filename = \"\"\n filename_part_list = filename_part.split(\",\")\n for part in filename_part_list:\n if part.startswith(\"$\"):\n part = part.strip(\"$\")\n filename += locals()[part]\n else:\n filename += part\n out_filename = of_temp_dir + filename\n if group_fields == \"\" or group_fields is None:\n self.do_to_onefile(out_filename, contents)\n output_list.append(out_filename)\n else:\n self.do_to_onefile(out_filename, contents)\n output_list.append(out_filename)\n if condition_mutex == \"true\":\n try:\n cur.execute(\"DELETE FROM sorttable%s\" % self.process_id)\n logging.info(\"DELETE FROM sorttable%s\" % self.process_id)\n except Exception as e:\n logging.error(\"execute sql err: %s\" % e)\n sys.exit()\n return output_list\n\n def do_to_onefile(self, ofn, contents, xdr_file=None):\n \"\"\"\n 写入文件\n :param ofn: 输出文件名\n :param contents: 文件内容\n :param xdr_file:\n :return:\n \"\"\"\n\n file_content = []\n ori_file_content = self.xdrFile.get_contents()\n if xdr_file is None and self.xdrFile is None:\n sys.exit()\n elif xdr_file:\n ori_file_content = xdr_file.get_contents()\n elif self.xdrFile:\n ori_file_content = self.xdrFile.get_contents()\n if contents:\n for content in contents:\n # try:\n # ori_file_content[content[0]][130] = os.path.basename(ofn)\n # line = \";\".join(ori_file_content[content[0]]) # + \"\\n\"\n # except IndexError as e:\n # logging.error(\"change xdr's filename err:%s\" % e)\n # sys.exit()\n try:\n line = \";\".join(ori_file_content[content[0]])\n file_content.append(line)\n except Exception as e:\n logging.error(\"build xdr err:%s\" % e)\n sys.exit()\n file_len = len(file_content)\n out_xdr_file = XdrFile()\n out_xdr_file.open_xdr_file(ofn, 'a')\n out_xdr_file.write_xdr_file(file_content)\n out_xdr_file.close_xdr_file()\n logging.info(\"write file:%s,line num:%d\" % (ofn, file_len))\n","sub_path":"rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":11123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"487775112","text":"import json\nimport urllib\nimport os\n\nfrom .models import Part, Seller, SellerPart\nfrom datetime import datetime\nfrom django.conf import settings\nfrom .settings import BOM_CONFIG\n\n\ndef request(suburl):\n try:\n OCTOPART_API_KEY = settings.BOM_CONFIG['octopart_api_key']\n except KeyError as e:\n raise ValueError('No API key found on server. Contact administrator for help.')\n\n if not OCTOPART_API_KEY:\n raise ValueError('No API key found on server. 
Contact administrator for help.')\n\n    url = 'https://octopart.com/api/v3/' + suburl\n    url += '&apikey=' + OCTOPART_API_KEY\n\n    try:\n        data = urllib.request.urlopen(url).read()\n    except Exception as e:\n        raise\n\n    return json.loads(data)\n\n\ndef match_part(manufacturer_part, organization):\n    query = [{'mpn': manufacturer_part.manufacturer_part_number}]\n\n    suburl = 'parts/match?queries=%s' \\\n             % urllib.parse.quote(json.dumps(query))\n    try:\n        response = request(suburl)\n    except urllib.error.URLError:\n        return []\n\n    # need for each part: digi-key, mouser prices, moqs, lead times\n    DIGI_KEY_SELLER_ID = '459'\n    MOUSER_SELLER_ID = '2401'\n\n    seller_parts = []\n\n    # print mpn's\n    for result in response['results']:\n        for item in result['items']:\n            for offer in item['offers']:\n                if (offer['seller']['id'] == DIGI_KEY_SELLER_ID or\n                        offer['seller']['id'] == MOUSER_SELLER_ID):\n                    seller_name = offer['seller']['name']\n                    seller, created = Seller.objects.get_or_create(\n                        name__iexact=seller_name,\n                        organization=organization,\n                        defaults={'name': seller_name})\n                    ltd = offer['factory_lead_days']\n                    if 'USD' in offer['prices']:\n                        for price in offer['prices']['USD']:\n                            try:\n                                moq = price[0]\n                                price = price[1]\n                                seller_parts.append(\n                                    SellerPart(\n                                        seller=seller,\n                                        manufacturer_part=manufacturer_part,\n                                        minimum_order_quantity=moq,\n                                        unit_cost=price,\n                                        lead_time_days=ltd,\n                                        data_source='octopart'))\n                            except Exception as e:\n                                raise\n\n    return seller_parts\n\n\ndef get_latest_datasheets(manufacturer_part_number):\n    query = [{'mpn': manufacturer_part_number}]\n\n    suburl = 'parts/match?queries=%s' \\\n             % urllib.parse.quote(json.dumps(query)) + '&include[]=datasheets'\n    response = request(suburl)\n\n    datasheets = {}\n\n    for result in response['results']:\n        for item in result['items']:\n            for datasheet in item['datasheets']:\n                try:\n                    if datasheet['metadata']['last_updated'] is not None:\n                        lu = datetime.strptime(datasheet['metadata']['last_updated'], '%Y-%m-%dT%H:%M:%SZ')\n                    else:\n                        lu = None\n\n                    if datasheet['metadata']['date_created'] is not None:\n                        dc = datetime.strptime(datasheet['metadata']['date_created'], '%Y-%m-%dT%H:%M:%SZ')\n                    else:\n                        dc = None\n\n                    last_updated = lu if lu is not None and lu > dc else dc\n                    name = datasheet['attribution']['sources'][0]['name']\n                    url = datasheet['url']\n                    num_pages = datasheet['metadata']['num_pages']\n                    if (name not in datasheets) or (\n                            last_updated is not None and datasheets[name]['last_updated'] < last_updated):\n                        datasheets[name] = {\n                            'url': url,\n                            'last_updated': last_updated,\n                            'num_pages': num_pages,\n                        }\n                except TypeError as e:\n                    continue\n\n    return datasheets\n","sub_path":"bom/octopart.py","file_name":"octopart.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"313588134","text":"'''\nGiven an array of size N that represents a Tree in such a way that array indexes are values in tree nodes \nand array values give the parent node of that particular index (or node). \nThe value of the root node index would always be -1 as there is no parent for root. 
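As an aside on how construction proceeds: if createNode were called for index 6
before its ancestors existed, it would recursively create 5, then 3, 1 and 0
(the root) first, attaching each node as its parent's first free child on the
way back. A doctest-style illustration of the finished tree, with the output
worked out by hand from the code below:

    >>> root = createTree([-1, 0, 0, 1, 1, 3, 5])
    >>> inorder(root)
    6 5 3 1 4 0 2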
\nConstruct the standard linked representation of Binary Tree from this array representation.\n\nInput:\nN = 7\nTree = -1 0 0 1 1 3 5\nOutput: 0 1 2 3 4 5 6\nExplanation:For the array parent[] = {-1,\n0, 0, 1, 1, 3, 5}; the tree generated\nwill have a sturcture like \n 0\n / \\\n 1 2\n / \\\n 3 4\n /\n 5\n/\n6\n\nhttps://www.geeksforgeeks.org/construct-a-binary-tree-from-parent-array-representation/\n\n'''\n\nclass Node:\n \n def __init__(self, data):\n \n self.data = data\n self.left = None\n self.right = None \n \ndef createNode(parent, i, created, root): \n \n # If this node is already created \n if created[i] is not None: \n return\n \n # Create a new node and set created[i] \n created[i] = Node(i) \n \n # If 'i' is root, change root pointer and return \n if parent[i] == -1: \n root[0] = created[i] # root[0] denotes root of the tree \n return\n \n # If parent is not created, then create parent first \n if created[parent[i]] is None: \n createNode(parent, parent[i], created, root ) \n \n # Find parent pointer \n p = created[parent[i]] \n \n # If this is first child of parent \n if p.left is None: \n p.left = created[i] \n # If second child \n else: \n p.right = created[i] \n \n\n\ndef createTree(parent):\n \n n = len(parent)\n \n root = [None]\n \n created = [None for i in range(n + 1)]\n \n for i in range(n):\n createNode(parent, i, created, root)\n \n return root[0]\n\ndef inorder(root): \n if(root is not None): \n inorder(root.left) \n print(root.data, end = \" \") \n inorder(root.right)\n \nparent = [-1, 0, 0, 1, 1, 3, 5] \nroot = createTree(parent)\ninorder(root) \n\n","sub_path":"geeksforgeeks/tree/24_Construct_Binary_Tree_from_Parent_Array.py","file_name":"24_Construct_Binary_Tree_from_Parent_Array.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"614009141","text":"import logging\nimport magic\nimport os\nimport sys\n\n# Make sure the current directory is in the\n# path so that we can run this from anywhere.\nthis_dir = os.path.dirname(__file__)\nif this_dir not in sys.path:\n sys.path.insert(0, this_dir)\n\nimport BaseSandboxParser\nimport SpenderCuckooParser\nimport CuckooParser\nimport VxstreamParser\nimport WildfireParser\n\nclass BaseAlert():\n def __init__(self, config, whitelister=None):\n # Initiate logging.\n self.logger = logging.getLogger()\n\n # Save the config. This should be a ConfigParser object.\n self.config = config\n\n # Save the whitelister. This should be a Whitelist object.\n self.whitelister = whitelister\n\n # A list of Indicator objects for the alert.\n self.iocs = []\n \n # When did the alert happen?\n self.time = \"\"\n \n # What tool generated the alert?\n self.tool = \"\"\n \n # What kind of alert is this?\n self.type = \"\"\n \n # What is the alert's name?\n self.name = \"\"\n \n # What is alert's description?\n self.description = \"\"\n \n # Does the alert have a URL to view it?\n self.url = \"\"\n \n # EmailParser object if the alert was created in response to an e-mail.\n self.email = None\n \n # Any BaseSandboxParser results associated with the alert.\n # The structure is expected to be:\n # {\"md5_of_sample\": [BaseSandboxParser1, BaseSandboxParser2, BaseSandboxParser3]}\n self.sandbox = {}\n \n # Sort the sandbox reports by their URLs. 
This helps make sure they\n # are displayed in a consistent order for anything using them.\n for sample in self.sandbox:\n self.sandbox[sample].sort(key=lambda x: x.sandbox_url)\n\n # Override __get/setstate__ in case someone\n # wants to pickle an object of this class.\n def __getstate__(self):\n d = dict(self.__dict__)\n if \"logger\" in d:\n del d[\"logger\"]\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def get_file_mimetype(self, file_path):\n if os.path.exists(file_path):\n return magic.from_file(file_path, mime=True)\n else:\n return \"\"\n \n def add_sandbox(self, json_path):\n if isinstance(json_path, str):\n if os.path.exists(json_path):\n try:\n sandbox_name = BaseSandboxParser.detect_sandbox(json_path)\n except Exception:\n # Log and skip this sandbox report if it couldn't be detected.\n self.logger.exception(\"Error detecting sandbox: \" + json_path)\n\n sandbox_report = None\n if sandbox_name == \"spendercuckoo\":\n try:\n sandbox_report = SpenderCuckooParser.SpenderCuckooParser(self.config, json_path, whitelister=self.whitelister)\n except Exception:\n # Log and skip this sandbox report if it couldn't be parsed.\n self.logger.exception(\"Error parsing Spender Cuckoo report: \" + json_path)\n\n elif sandbox_name == \"cuckoo\":\n try:\n sandbox_report = CuckooParser.CuckooParser(self.config, json_path, whitelister=self.whitelister)\n except Exception:\n # Log and skip this sandbox report if it couldn't be parsed.\n self.logger.exception(\"Error parsing Cuckoo report: \" + json_path)\n\n elif sandbox_name == \"vxstream\":\n try:\n sandbox_report = VxstreamParser.VxstreamParser(self.config, json_path, whitelister=self.whitelister)\n except Exception:\n # Log and skip this sandbox report if it couldn't be parsed.\n self.logger.exception(\"Error parsing VxStream report: \" + json_path)\n\n elif sandbox_name == \"wildfire\":\n try:\n sandbox_report = WildfireParser.WildfireParser(self.config, json_path, whitelister=self.whitelister)\n except Exception:\n # Log and skip this sandbox report if it couldn't be parsed.\n self.logger.exception(\"Error parsing Wildfire report: \" + json_path)\n \n # Continue if we successfully parsed a sandbox report.\n if sandbox_report:\n # Check if this sample has already been added to the sandbox dictionary.\n if sandbox_report.md5 in self.sandbox:\n # Add the report if it isn't already there.\n if not sandbox_report in self.sandbox[sandbox_report.md5]:\n self.sandbox[sandbox_report.md5].append(sandbox_report)\n # Since this is a new sample, set up a list for it.\n else:\n self.sandbox[sandbox_report.md5] = [sandbox_report]\n","sub_path":"integralutils/BaseAlert.py","file_name":"BaseAlert.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"182793484","text":"from flask import Blueprint\nfrom flask import request, jsonify, json\nfrom flask_jwt import jwt_required, current_identity\nfrom flasgger import swag_from\nfrom services.conference_gallery_service import ConferenceGalleryService\nfrom utils.util import model_to_dict\n# from utils.s3_util import upload\n\nconference_gallery_service = ConferenceGalleryService()\nblueprint = Blueprint(\"conference_gallery\", __name__)\n\n# @blueprint.route(\"/conferencegallery\", methods=[\"PUT\"])\n# @jwt_required()\n# @swag_from('../../spec/conferencegallery/save.yml')\n# def conferencegallery_put():\n# try:\n# print(request.form['conferenceId'])\n# print(request.files['file'])\n# data = 
request.form\n# # req_json = json.loads(request.data)\n# req_data = {}\n# req_data['conferenceId'] = data['conferenceId']\n# req_data['backgroundImg'] = upload(request.files['file'], 'conferenceGallery')\n# req_data['aboutConferenceImg'] = upload(request.files['file'], 'conferenceGallery')\n#\n# res_data = conference_gallery_service.save(req_data)\n# res_json = {'status': 1, 'data': res_data}\n# except Exception as e:\n# if e.args:\n# res_data = e.args[0]\n# else:\n# res_data = e\n# res_json = {'status': 0, 'error': res_data}\n# return jsonify(res_json)\n\n@blueprint.route(\"/conferencegallery\", methods=[\"POST\"])\n# @jwt_required()\n@swag_from('../../spec/conferencegallery/search.yml')\ndef conferencegallery_post():\n try:\n req_json = json.loads(request.data)\n req_data = req_json.get('data', None)\n conference_gallery_service.session_info = current_identity\n res_data = conference_gallery_service.search(req_data)\n res_json = {'status': 1, 'data': res_data}\n except Exception as e:\n print(e)\n if e.args:\n res_data = e.args[0]\n else:\n res_data = e\n res_json = {'status': 0, 'error': res_data}\n return jsonify(res_json)\n\n@blueprint.route(\"/conferencegallery\", methods=[\"GET\"])\n# @jwt_required()\n@swag_from('../../spec/conferencegallery/entity.yml')\ndef conferencegallery_get():\n try:\n conference_gallery_service.session_info = current_identity\n _id = request.args['id']\n res_data = conference_gallery_service.model(_id)\n res_json = {'status': 1, 'data': res_data }\n except Exception as e:\n print(e)\n if e.args:\n res_data = e.args[0]\n else:\n res_data = e\n res_json = {'status': 0, 'error': res_data}\n return jsonify(res_json)\n\n# @blueprint.route(\"/conferencegallery\", methods=[\"DELETE\"])\n# @jwt_required()\n# @swag_from('../../spec/conferencegallery/delete.yml')\n# def conferencegallery_delete():\n# try:\n# conference_gallery_service.session_info = current_identity\n# id = request.args['id']\n# res_data = conference_gallery_service.delete(id)\n# res_json = {'status': 1, 'data': res_data}\n# except Exception as e:\n# if e.args:\n# res_data = e.args[0]\n# else:\n# res_data = e\n# res_json = {'status': 0, 'error': res_data}\n# return jsonify(res_json)","sub_path":"src/main/conference_gallery.py","file_name":"conference_gallery.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"269988596","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Sébastien Reuiller\"\n# __licence__ = \"Apache License 2.0\"\n\n# Python 3, prerequis : pip install pySerial influxdb\n#\n# Exemple de trame:\n# {\n# 'OPTARIF': 'HC..', # option tarifaire\n# 'IMAX': '007', # intensité max\n# 'HCHC': '040177099', # index heure creuse en Wh\n# 'IINST': '005', # Intensité instantanée en A\n# 'PAPP': '01289', # puissance Apparente, en VA\n# 'MOTDETAT': '000000', # Mot d'état du compteur\n# 'HHPHC': 'A', # Horaire Heures Pleines Heures Creuses\n# 'ISOUSC': '45', # Intensité souscrite en A\n# 'ADCO': '000000000000', # Adresse du compteur\n# 'HCHP': '035972694', # index heure pleine en Wh\n# 'PTEC': 'HP..' 
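# Editor's aside: each teleinfo line ends with a checksum character. In the
# commonly documented "historic" mode it is the sum of the ASCII codes of the
# label, the separator and the value, masked to the low 6 bits, plus 0x20.
# The exact span of summed characters varies by meter mode, so treat this
# helper as an assumption to verify against your meter:
def teleinfo_checksum(label, value):
    payload = label + ' ' + value
    return chr((sum(ord(c) for c in payload) & 0x3F) + 0x20)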
# Période tarifaire en cours\n# }\n\n\nimport serial\nimport logging\nimport time\nimport requests\nfrom datetime import datetime\nfrom influxdb import InfluxDBClient\n\n# clés téléinfo\nint_measure_keys = ['IMAX', 'HCHC', 'IINST', 'PAPP', 'ISOUSC', 'ADCO', 'HCHP']\n\n# création du logguer\nlogging.basicConfig(filename='/var/log/teleinfo/releve.log', level=logging.INFO, format='%(asctime)s %(message)s')\nlogging.info(\"Teleinfo starting..\")\n\n# connexion a la base de données InfluxDB\nclient = InfluxDBClient('localhost', 8086)\ndb = \"teleinfo\"\nconnected = False\nwhile not connected:\n try:\n logging.info(\"Database %s exists?\" % db)\n if not {'name': db} in client.get_list_database():\n logging.info(\"Database %s creation..\" % db)\n client.create_database(db)\n logging.info(\"Database %s created!\" % db)\n client.switch_database(db)\n logging.info(\"Connected to %s!\" % db)\n except requests.exceptions.ConnectionError:\n logging.info('InfluxDB is not reachable. Waiting 5 seconds to retry.')\n time.sleep(5)\n else:\n connected = True\n\n\ndef add_measures(measures, time_measure):\n points = []\n for measure, value in measures.items():\n point = {\n \"measurement\": measure,\n \"tags\": {\n \"host\": \"raspberry\",\n \"region\": \"linky\"\n },\n \"time\": datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n \"fields\": {\n \"value\": value\n }\n }\n points.append(point)\n\n client.write_points(points)\n\n\ndef main():\n with serial.Serial(port='/dev/ttyS0', baudrate=1200, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,\n bytesize=serial.SEVENBITS, timeout=1) as ser:\n\n logging.info(\"Teleinfo is reading on /dev/ttyS0..\")\n\n trame = dict()\n\n # boucle pour partir sur un début de trame\n line = ser.readline()\n while b'\\x02' not in line: # recherche du caractère de début de trame\n line = ser.readline()\n\n # lecture de la première ligne de la première trame\n line = ser.readline()\n\n while True:\n line_str = line.decode(\"utf-8\")\n ar = line_str.split(\" \")\n try:\n key = ar[0]\n if key in int_measure_keys :\n value = int(ar[1])\n else:\n value = ar[1]\n\n checksum = ar[2]\n trame[key] = value\n if b'\\x03' in line: # si caractère de fin dans la ligne, on insère la trame dans influx\n del trame['ADCO'] # adresse du compteur : confidentiel!\n time_measure = time.time()\n\n # insertion dans influxdb\n add_measures(trame, time_measure)\n\n # ajout timestamp pour debugger\n trame[\"timestamp\"] = int(time_measure)\n logging.debug(trame)\n\n trame = dict() # on repart sur une nouvelle trame\n except Exception as e:\n logging.error(\"Exception : %s\" % e)\n line = ser.readline()\n\n\nif __name__ == '__main__':\n if connected:\n main()\n\n\n","sub_path":"teleinfo.py","file_name":"teleinfo.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"132421789","text":"import CI\nimport CI.builder.system\n\ndescription = 'configure Vim user settings'\n\nfeatures = CI.Features(\n settings = CI.List(CI.String()),\n inject_rc = CI.File(),\n backup_directory = CI.Directory(),\n)\n\nCI.builder.system.features.packages = [\n 'vim',\n]\n\ndef actions(runner):\n if features.backup_directory:\n yield CI.action.CreateDirectory(features.backup_directory)\n if features.settings:\n yield CI.action.InjectText('~/.vimrc', '\"castiron: custom', features.settings)\n if features.inject_rc:\n yield CI.action.InjectText('~/.vimrc', '\"castiron: private', ['source %s' % 
features.inject_rc])\n","sub_path":"lib/CI/builder/vim.py","file_name":"vim.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"450101689","text":"# -*- coding: utf-8 -*-\n'''\n陌生人识别模型和表情识别模型的结合的主程序\n\n用法:\npython checkingstrangersandfacialexpression.py\npython checkingstrangersandfacialexpression.py --filename tests/room_01.mp4\n'''\n\n# 导入包\nimport argparse\nfrom oldcare.facial import FaceUtil\nfrom PIL import Image, ImageDraw, ImageFont\nfrom oldcare.utils import fileassistant\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nimport cv2\nimport time\nimport numpy as np\nimport os\nimport imutils\nimport subprocess\n\n# 得到当前时间\ncurrent_time = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))\nprint('[INFO] %s 陌生人检测程序和表情检测程序启动了.'%(current_time))\n\n# 传入参数\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--filename\", required=False, default = '',\n\thelp=\"\")\nargs = vars(ap.parse_args())\ninput_video = args['filename']\n\n# 全局变量\nfacial_recognition_model_path = 'info/face_recognition_hog.pickle' #jian ce ren lian\nfacial_expression_model_path = 'models/face_expression_seven_class.hdf5' #fenxi qinggan\n\noutput_stranger_path = 'supervision/strangers'\noutput_smile_path = 'supervision/smile'\n\npeople_info_path = 'info/people_info.csv'\nfacial_expression_info_path = 'info/facial_expression_info_seven_class.csv'\n# your python path\npython_path = '/root/anaconda3/envs/tensorflow/bin/python'\n\n# 全局常量\nFACIAL_EXPRESSION_TARGET_WIDTH = 48\nFACIAL_EXPRESSION_TARGET_HEIGHT = 48\n\nVIDEO_WIDTH = 1024\nVIDEO_HEIGHT = 720\n\nANGLE = 20\n\n# 得到 ID->姓名的map 、 ID->职位类型的map、\n#摄像头ID->摄像头名字的map、表情ID->表情名字的map\nid_card_to_name, id_card_to_type = fileassistant.get_people_info(\n people_info_path)\nfacial_expression_id_to_name=fileassistant.get_facial_expression_info(\n facial_expression_info_path)\n\n# 控制陌生人检测\nstrangers_timing = 0 # 计时开始\nstrangers_start_time = 0 # 开始时间\nstrangers_limit_time = 2 # if >= 2 seconds, then he/she is a stranger.\n\n# 控制微笑检测\nfacial_expression_timing = 0 # 计时开始\nfacial_expression_start_time = 0 # 开始时间\nfacial_expression_limit_time = 2 # if >= 2 seconds, he/she is smiling\n\n# 初始化摄像头\nif not input_video:\n\tvs = cv2.VideoCapture(0)\n\ttime.sleep(2)\nelse:\n\tvs = cv2.VideoCapture(input_video)\n\n# 初始化人脸识别模型\nfaceutil = FaceUtil(facial_recognition_model_path)\nfacial_expression_model = load_model(facial_expression_model_path,compile = False)\n \nprint('[INFO] 开始检测陌生人和表情...')\n# 不断循环\ncounter = 0\nwhile True:\n\tcounter += 1\n # grab the current frame\n\t(grabbed, frame) = vs.read()\n\n\t# if we are viewing a video and we did not grab a frame, then we\n\t# have reached the end of the video\n\tif input_video and not grabbed:\n\t\tbreak\n \n\tif not input_video:\n\t\tframe = cv2.flip(frame, 1)\n \n\tframe = imutils.resize(frame, width = VIDEO_WIDTH, \n height = VIDEO_HEIGHT)#压缩,加快识别速度\n\n\t# if counter%10!=0:\n\t# \tcv2.imshow(\"Checking Strangers and Ole People's Face Expression\",\n\t# \t\t\t frame)\n\t#\n\t# \t# Press 'ESC' for exiting video\n\t# \tk = cv2.waitKey(1) & 0xff\n\t# \tcontinue\n\n\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)#grayscale,表情识别\n\n\t# if True:\n\t# \tcv2.imshow(\"Checking Strangers and Ole People's Face Expression\",\n\t# \t\t\t gray)\n\t# \tcontinue\n\n \n\tface_location_list, names = faceutil.get_face_location_and_name(\n frame)\n \n # 得到画面的四分之一位置和四分之三位置,并垂直划线\n\tone_fourth_image_center 
= (int(VIDEO_WIDTH/4), \n int(VIDEO_HEIGHT/4))\n\tthree_fourth_image_center = (int(VIDEO_WIDTH/4*3), \n int(VIDEO_HEIGHT/4*3))\n \n\tcv2.line(frame, (one_fourth_image_center[0], 0), \n (one_fourth_image_center[0], VIDEO_HEIGHT), \n (0, 255, 255), 1)\n\tcv2.line(frame, (three_fourth_image_center[0], 0), \n (three_fourth_image_center[0], VIDEO_HEIGHT), \n (0, 255, 255), 1)\n \n\t# 处理每一张识别到的人脸\n\tfor ((left, top, right, bottom), name) in zip(face_location_list, \n names):\n \n\t\t# 将人脸框出来\n\t\trectangle_color = (0, 0, 255)\n\t\tif id_card_to_type[name] == 'old_people':\n\t\t rectangle_color = (0, 0, 128)\n\t\telif id_card_to_type[name] == 'employee':\n\t\t rectangle_color = (255, 0, 0)\n\t\telif id_card_to_type[name] == 'volunteer':\n\t\t rectangle_color = (0, 255, 0)\n\t\telse:\n\t\t pass\n\t\tcv2.rectangle(frame, (left, top), (right, bottom),\n rectangle_color, 2)\n \n # 陌生人检测逻辑\n\t\tif 'Unknown' in names: # alert\n\t\t\tif strangers_timing == 0: # just start timing\n\t\t\t\tstrangers_timing = 1\n\t\t\t\tstrangers_start_time = time.time()\n\t\t\telse: # already started timing\n\t\t\t\tstrangers_end_time = time.time()\n\t\t\t\tdifference = strangers_end_time - strangers_start_time\n \n\t\t\t\tcurrent_time = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))\n \n\t\t\t\tif difference < strangers_limit_time:\n\t\t\t\t\tprint('[INFO] %s, 房间, 陌生人仅出现 %.1f 秒. 忽略.' %(current_time,difference))\n\t\t\t\telse: # strangers appear\n\t\t\t\t\tevent_desc = '陌生人出现!!!'\n\t\t\t\t\tevent_location = '房间'\n\t\t\t\t\tprint('[EVENT] %s, 房间, 陌生人出现!!!' %(current_time))\n\t\t\t\t\tcv2.imwrite(os.path.join(output_stranger_path, \n 'snapshot_%s.jpg' %(time.strftime('%Y%m%d_%H%M%S'))), frame)# snapshot\n \n\t\t\t\t\t# insert into database\n\t\t\t\t\tcommand = '%s inserting.py --event_desc %s --event_type 2 --event_location %s' %(python_path, event_desc, event_location)\n\t\t\t\t\tp = subprocess.Popen(command, shell=True)\n \n\t\t\t\t\t# 开始陌生人追踪\n\t\t\t\t\tunknown_face_center = (int((right + left)/2), \n int((top + bottom)/2))\n \n\t\t\t\t\tcv2.circle(frame, (unknown_face_center[0], \n unknown_face_center[1]), 4, (0, 255, 0), -1)\n \n\t\t\t\t\tdirection = ''\n # face locates too left, servo need to turn right, \n #so that face turn right as well\n\t\t\t\t\tif unknown_face_center[0]three_fourth_image_center[0]:\n\t\t\t\t\t direction = 'left'\n \n # adjust to servo\n\t\t\t\t\tif direction:\n\t\t\t\t\t print('%d-摄像头需要 turn %s %d 度' %(counter, \n direction, ANGLE))\n \n\t\telse: # everything is ok\n\t\t\tstrangers_timing = 0\n \n\t\t# 表情检测逻辑\n\t\t# 如果不是陌生人,且对象是老人\n\t\tif name != 'Unknown' and id_card_to_type[name] =='old_people':\n\t\t\t# 表情检测逻辑\n\t\t\troi = gray[top:bottom, left:right]\n\t\t\troi = cv2.resize(roi, (FACIAL_EXPRESSION_TARGET_WIDTH, \n FACIAL_EXPRESSION_TARGET_HEIGHT))\n\t\t\troi = roi.astype(\"float\") / 255.0\n\t\t\troi = img_to_array(roi)\n\t\t\troi = np.expand_dims(roi, axis=0)\n \n\t\t\t# determine facial expression\n\t\t\temotions = ['Angry','Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']\n\t\t\temotion_value_list = facial_expression_model.predict(roi)[0]\n\n\t\t\tfacial_expression_label = emotions[np.argmax(emotion_value_list)]\n \n\t\t\tif facial_expression_label == 'Happy': # alert\n\t\t\t\tif facial_expression_timing == 0: # just start timing\n\t\t\t\t\tfacial_expression_timing = 1\n\t\t\t\t\tfacial_expression_start_time = time.time()\n\t\t\t\telse: # already started timing\n\t\t\t\t\tfacial_expression_end_time = time.time()\n\t\t\t\t\tdifference = facial_expression_end_time 
- facial_expression_start_time\n\n\t\t\t\t\tcurrent_time = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))\n\t\t\t\t\tif difference < facial_expression_limit_time:\n\t\t\t\t\t print('[INFO] %s, 房间, %s仅笑了 %.1f 秒. 忽略.' %(current_time, id_card_to_name[name], difference))\n\t\t\t\t\telse: # he/she is really smiling\n\t\t\t\t\t event_desc = '%s正在笑' %(id_card_to_name[name])\n\t\t\t\t\t event_location = '房间'\n\t\t\t\t\t print('[EVENT] %s, 房间, %s正在笑.' %(current_time, id_card_to_name[name]))\n\t\t\t\t\t cv2.imwrite(os.path.join(output_smile_path,\n 'snapshot_%s.jpg' %(time.strftime('%Y%m%d_%H%M%S'))), frame)# snapshot\n\n\t\t\t\t\t # insert into database\n\t\t\t\t\t command = '%s inserting.py --event_desc %s --event_type 0 --event_location %s --old_people_id %d' %(python_path, event_desc, event_location, int(name))\n\t\t\t\t\t p = subprocess.Popen(command, shell=True)\n\n\t\t\telse: # everything is ok\n\t\t\t facial_expression_timing = 0\n \n\t\telse: # 如果是陌生人,则不检测表情\n\t\t\tfacial_expression_label = ''\n \n \n # 人脸识别和表情识别都结束后,把表情和人名写上 \n #(同时处理中文显示问题)\n\t\timg_PIL = Image.fromarray(cv2.cvtColor(frame, \n cv2.COLOR_BGR2RGB)) \n \n\t\tdraw = ImageDraw.Draw(img_PIL)\n\t\tfinal_label = id_card_to_name[name] + ': ' + facial_expression_id_to_name[facial_expression_label] if facial_expression_label else id_card_to_name[name]\n\t\tdraw.text((left, top - 30), final_label, \n font=ImageFont.truetype('NotoSansCJK-Black.ttc',40), \n fill=(255,0,0)) # linux\n \n # 转换回OpenCV格式 \n\t\tframe = cv2.cvtColor(np.asarray(img_PIL),cv2.COLOR_RGB2BGR)\n \n\t# show our detected faces along with smiling/not smiling labels\n\tcv2.imshow(\"Checking Strangers and Ole People's Face Expression\",\n frame)\n\n\t# Press 'ESC' for exiting video\n\tk = cv2.waitKey(1) & 0xff \n\tif k == 27:\n\t\tbreak\n\n# cleanup the camera and close any open windows\nvs.release()\ncv2.destroyAllWindows()\n\n","sub_path":"cv_part/camera_monitoring/checkingstrangersandfacialexpression_seven_class.py","file_name":"checkingstrangersandfacialexpression_seven_class.py","file_ext":"py","file_size_in_byte":10122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"155163150","text":"from setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nimport versioneer\n\n\nversioneer.VCS = 'git'\nversioneer.versionfile_source = 'SALib/_version.py'\nversioneer.versionfile_build = None\nversioneer.tag_prefix = 'v' # tags are like 1.2.0\nversioneer.parentdir_prefix = 'SALib-' # dirname like 'myproject-1.2.0'\n\n\nclass NoseTestCommand(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # Run nose ensuring that argv simulates running nosetests directly\n import nose\n nose.run_exit(argv=['nosetests'])\n\n\ndef setup_package():\n # Assemble additional setup commands\n cmdclass = versioneer.get_cmdclass()\n cmdclass['test'] = NoseTestCommand\n\n setup(\n name='SALib',\n packages=find_packages(exclude=[\"*tests*\"]),\n author=\"Jon Herman and Will Usher\",\n author_email=\"jdherman8@gmail.com\",\n license=open('LICENSE.md').read(),\n tests_require=['nose'],\n install_requires=[\n \"numpy>1.7\",\n \"scipy\",\n \"matplotlib>1.4\",\n ],\n \n extras_require = {\n \"gurobipy\": [\"gurobipy\",]\n },\n \n # Two arguments required by Versioneer\n version = versioneer.get_version(),\n cmdclass=cmdclass,\n url=\"https://github.com/SALib/SALib\",\n 
long_description=open('README.md').read(),\n description=(\n 'Tools for sensitivity analysis. Contains Sobol, Morris, and FAST methods.'),\n # entry_points = {\n # 'console_scripts': [\n # 'salib = SALib.bin.salib:main',\n # ]\n # },\n classifiers=[\n # 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n # 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n # 'Programming Language :: Python :: 2.7',\n # 'Programming Language :: Python :: 2 :: Only',\n 'Topic :: Scientific/Engineering :: Mathematics',\n ],)\n\n\nif __name__ == \"__main__\":\n setup_package()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"276812638","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\nimport codecs\nimport logging\nimport os\n\nfrom pkg_resources import iter_entry_points\nfrom six import string_types\nimport yaml\n\nfrom tecodo.constants import ENV, FACTERS, VARIABLES, LAST, FIRST, RUN, LOGGER_NAME\nfrom tecodo.constants import COMMANDS\nfrom tecodo.run_selections import unfold_runs\nfrom tecodo.translation import ugettext as _\nfrom tecodo.utils import ConfigDict, ConfigList\n\n__author__ = 'flanker'\nlogger = logging.getLogger(LOGGER_NAME)\n\n\nclass BaseConfiguration(object):\n \"\"\" Store global configuration\n\n >>> obj = BaseConfiguration(configuration_files=[], extra_variables={})\n >>> obj.load()\n >>> obj.get_runs()\n []\n\n \"\"\"\n default_global_values = {'filters_dir': './tecodo_filters', }\n\n def __init__(self, configuration_files=None, extra_variables=None):\n self.configuration_files = configuration_files or []\n self.extra_jinja_variables = extra_variables or {}\n self.run = None\n self.config_jinja_variables = {}\n # config_jinja_variables[run] = ConfigDict()\n self.config_facters = {}\n self.config_shell_env_variables = {}\n self.config_commands = {}\n self.available_facter_classes = {}\n self.available_commands_classes = {}\n\n def get_runs(self):\n \"\"\" return the list of all run names\n :return:\n :rtype: :class:`list` of `str`\n \"\"\"\n if self.run is None:\n return []\n return unfold_runs(self.run)\n\n def load(self):\n \"\"\" load all configuration files and\n \"\"\"\n self._load_facters_and_commands()\n self.config_jinja_variables = {FIRST: ConfigDict(), LAST: ConfigDict(), }\n self.config_facters = {FIRST: ConfigList(), LAST: ConfigList(), }\n self.config_shell_env_variables = {FIRST: ConfigDict(), LAST: ConfigDict(), }\n self.config_commands = {FIRST: ConfigList(), LAST: ConfigList(), }\n attribs = {VARIABLES: (self.config_jinja_variables, dict), FACTERS: (self.config_facters, list),\n ENV: (self.config_shell_env_variables, dict), COMMANDS: (self.config_commands, list), }\n for config_index, config_file in enumerate(self.configuration_files):\n if not config_file:\n continue\n if not os.path.isfile(config_file):\n logger.debug(_('File %(path)s does not exist.') % {'path': config_file})\n continue\n with codecs.open(config_file, 'r', encoding='utf-8') as fd:\n content = yaml.load(fd)\n if not isinstance(content, dict):\n raise ValueError(_('%(path)s: file does not represent a dict.') % {'path': config_file})\n for path, path_values 
in content.items():\n if path_values is None:\n continue\n elif path == RUN:\n assert isinstance(path_values, string_types)\n self.run = path_values\n continue\n assert isinstance(path_values, dict)\n self.config_jinja_variables.setdefault(path, ConfigDict())\n self.config_facters.setdefault(path, ConfigList())\n self.config_shell_env_variables.setdefault(path, ConfigDict())\n self.config_commands.setdefault(path, ConfigList())\n for kind, sub_values in path_values.items():\n if kind not in attribs or sub_values is None:\n continue\n d, cls = attribs[kind]\n if not isinstance(sub_values, cls):\n raise ValueError(_('%(path)s: %(value)r is not a %(cls)s') % {'value': sub_values,\n 'path': config_file, 'cls': cls.__name__, })\n assert isinstance(sub_values, cls)\n d[path].set_source(config_index)\n d[path].update(sub_values)\n logger.info(_('File %(path)s loaded.') % {'path': config_file})\n\n def _load_facters_and_commands(self):\n \"\"\" load installed plugins for facters and commands\n :return:\n :rtype:\n \"\"\"\n for entry_point in iter_entry_points('tecodo.facter'):\n value = entry_point.load()\n if value.name is not None:\n self.available_facter_classes[value.name] = value\n for entry_point in iter_entry_points('tecodo.command'):\n value = entry_point.load()\n if value.name is not None:\n self.available_commands_classes[value.name] = value\n\n @staticmethod\n def _merge_config_dict(paths, dict_of_config_dicts):\n result = {}\n for k, v in dict_of_config_dicts[FIRST].items():\n result[k] = v\n for path in paths:\n if path in dict_of_config_dicts:\n for k, v in dict_of_config_dicts[path].items():\n result[k] = v\n for k, v in dict_of_config_dicts[LAST].items():\n result[k] = v\n return result\n\n @staticmethod\n def _parse_actions(action_value, available_action_classes):\n txt_value = {'r': action_value}\n if isinstance(action_value, string_types):\n return {'action': action_value, }\n elif isinstance(action_value, dict):\n result = {}\n for k, v in action_value.items():\n if k in available_action_classes:\n if 'action' in result:\n raise ValueError(_('Ambiguous action: %(r)r') % txt_value)\n result['action'] = k\n result['__params__'] = v\n else:\n result[k] = v\n if 'action' not in result:\n raise ValueError(_('Unable to determine action: %(r)r') % txt_value)\n else:\n raise ValueError(_('Invalid action %(r)s. 
Should be str or dict') % txt_value)\n return result\n\n def _merge_config_list(self, paths, dict_of_config_lists, available_actions):\n \"\"\"\n :param paths:\n :type paths:\n :param dict_of_config_lists:\n :type dict_of_config_lists:\n :param available_actions: dict whose keys are the valid action\n :type available_actions: :class:`dict`\n :return:\n :rtype:\n \"\"\"\n result = []\n for k in dict_of_config_lists[FIRST]:\n result.append(self._parse_actions(k, available_actions))\n for path in paths:\n if path in dict_of_config_lists:\n for k in dict_of_config_lists[path]:\n result.append(self._parse_actions(k, available_actions))\n for k in dict_of_config_lists[LAST]:\n result.append(self._parse_actions(k, available_actions))\n return result\n\n def get_jinja_variables_from_config(self, paths):\n result = self._merge_config_dict(paths, self.config_jinja_variables)\n result.update(self.extra_jinja_variables)\n return result\n\n def get_shell_env_variables_from_config(self, paths):\n return self._merge_config_dict(paths, self.config_shell_env_variables)\n\n def get_facter_names_from_config(self, paths):\n return self._merge_config_list(paths, self.config_facters, self.available_facter_classes)\n\n def get_command_names_from_config(self, paths):\n return self._merge_config_list(paths, self.config_commands, self.available_commands_classes)\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","sub_path":"tecodo/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"466120080","text":"import sys\nsys.path.append('/home/zhouyj/software/PAL')\nimport data_pipeline as dp\n\nclass Config(object):\n def __init__(self):\n\n # Template cut\n self.win_len = [15,25] # cut window length\n self.min_sta = 4 # min sta num for a template events\n self.max_sta = 15\n self.get_data_dict = dp.get_data_dict\n self.get_sta_dict = dp.get_sta_dict\n # MFT params\n self.temp_win_det = [1.,11.] # win for detection, pre & post P\n self.temp_win_p = [0.5,1.5] # win for p pick, pre & post P\n self.temp_win_s = [0.5,2.5] # win for s pick, pre & post S\n self.trig_thres = 0.25 # cc thres for det & peak expansion\n self.expand_len = 2. # win len for cc peak expansion\n self.det_gap = 5. 
# gap sec for detection\n    self.pick_win_p = [1.5, 1.5]  # win for P pick\n    self.pick_win_s = [2.5, 2.5]  # win for S pick\n    self.chn_p = [2]\n    self.chn_s = [0,1]\n    self.amp_win = [1, 4]\n    # data process\n    self.to_prep = False  # False if templates are preprocessed \n    self.samp_rate = 50\n    self.freq_band = [2.,40.]\n    self.num_workers = 10\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"262517817","text":"n, m, v = list(map(int, input().split()))\nln_st = [list(map(int,input().split())) for i in range(m)]\ngp = [[] for i in range(1001)]\ngp_q = []\nfor i in ln_st:\n    gp[i[0]].append(i[1])\n    gp[i[1]].append(i[0])\n    if i[0] not in gp_q:\n        gp_q.append(i[0])\n    if i[1] not in gp_q:\n        gp_q.append(i[1])\nfor i in gp_q:\n    gp[i].sort(reverse=True)\n\nstack = [v]\nqueue = [v]\nst_vis = []\nq_vis = []\n\nwhile stack:\n    chk = stack.pop()\n    if chk not in st_vis:\n        st_vis.append(chk)\n        for i in gp[chk]:\n            if i not in st_vis:\n                stack.append(i)\nprint(' '.join(map(str,st_vis)))\n\nfor i in gp_q:\n    gp[i].sort()\n\nwhile queue:\n    chk = queue.pop(0)\n    if chk not in q_vis:\n        q_vis.append(chk)\n        for i in gp[chk]:\n            if i not in q_vis:\n                queue.append(i)\nprint(' '.join(map(str, q_vis)))\n","sub_path":"Solving_Problem/daily_222/0917/graph_1260.py","file_name":"graph_1260.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"89423781","text":"##############################################################################\n#\n# Copyright (c) 2011 Vifib SARL and Contributors. All Rights Reserved.\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# guarantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\nimport os\nimport pkg_resources\nfrom logging import Formatter\n\nclass KumoTestBed(object):\n\n  def run_default(self, recipe):\n    self.run_kumo_cloud(recipe)\n\n  def run_kumo_cloud(self, recipe):\n    \"\"\" Deploy kumofs system on a cloud. 
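    The GUID list below wires up one manager, four servers and five testers;
    repeated GUIDs simply co-host several partitions on one machine. A sketch
    of how a consumer could recover the roles from the joined string (the key
    name is the one published below, the slicing is an editor's illustration):

        guids = kumo_cloud_config['computer_guid_list'].split(':')
        manager, servers, testers = guids[0], guids[1:5], guids[5:]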
\"\"\"\n\n kumo_cloud_config = {}\n kumo_cloud_config.update(recipe.options)\n kumo_cloud_config.update(recipe.parameter_dict)\n\n kumo_cloud_config['address'] = recipe.getGlobalIPv6Address()\n kumo_cloud_config['report_path'] = recipe.log_directory\n\n kumo_cloud_config.setdefault('max_server', 4)\n kumo_cloud_config.setdefault('max_tester', 5)\n kumo_cloud_config.setdefault('nb_thread', 32)\n kumo_cloud_config.setdefault('nb_request', 1024000)\n kumo_cloud_config.setdefault('erp5_publish_url', '')\n kumo_cloud_config.setdefault('erp5_publish_project', '')\n\n computer_guid_list = []\n computer_guid_list.append(\"COMP-23\") # manager\n computer_guid_list.append(\"COMP-13\") # server 1\n computer_guid_list.append(\"COMP-14\") # server 2\n computer_guid_list.append(\"COMP-20\") # server 3\n computer_guid_list.append(\"COMP-19\") # server 4\n computer_guid_list.append(\"COMP-23\") # tester 1\n computer_guid_list.append(\"COMP-22\") # tester 2\n computer_guid_list.append(\"COMP-14\") # tester 3\n computer_guid_list.append(\"COMP-20\") # tester 4\n computer_guid_list.append(\"COMP-19\") # tester 5\n\n kumo_cloud_config.setdefault('computer_guid_list', \":\".join(computer_guid_list))\n\n kumo_cloud_config['software_release_url'] = recipe.software_release_url\n kumo_cloud_config['server_url'] = recipe.server_url\n kumo_cloud_config['key_file'] = recipe.key_file\n kumo_cloud_config['cert_file'] = recipe.cert_file\n kumo_cloud_config['computer_id'] = recipe.computer_id\n kumo_cloud_config['computer_partition_id'] = recipe.computer_partition_id\n kumo_cloud_config['plugin_name'] = 'kumo'\n\n kumo_cloud_connection = {}\n kumo_cloud_connection['url'] = \"http://[\"+kumo_cloud_config['address']+\"]:5000/\"\n kumo_cloud_connection['computer_guid_list'] = kumo_cloud_config['computer_guid_list']\n recipe.computer_partition.setConnectionDict(kumo_cloud_connection)\n\n nosqltester_manager_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumotester_manager_run.in'))\n nosqltester_manager_runner_path = recipe.createRunningWrapper(\"kumotester_manager\",\n recipe.substituteTemplate(nosqltester_manager_wrapper_template_location, kumo_cloud_config))\n\n return [nosqltester_manager_runner_path]\n\n def run_all(self, recipe):\n \"\"\" Run all services on one machine. 
\"\"\"\n all_config = {}\n all_config.update(recipe.options)\n\n ipaddress = \"[%s]\" % recipe.getGlobalIPv6Address()\n\n all_config['manager_address'] = ipaddress\n all_config['manager_port'] = 19700\n all_config['server_address'] = ipaddress\n all_config['server_port'] = 19800\n all_config['server_listen_port'] = 19900\n all_config['server_storage'] = os.path.join(recipe.data_root_directory, \"kumodb.tch\")\n all_config['gateway_address'] = ipaddress\n all_config['gateway_port'] = 11411\n all_config['manager_log'] = os.path.join(recipe.log_directory, \"kumo-manager.log\")\n all_config['server_log'] = os.path.join(recipe.log_directory, \"kumo-server.log\")\n all_config['gateway_log'] = os.path.join(recipe.log_directory, \"kumo-gateway.log\")\n\n manager_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_manager_run.in'))\n manager_runner_path = recipe.createRunningWrapper(\"kumo-manager\",\n recipe.substituteTemplate(manager_wrapper_template_location, all_config))\n server_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_server_run.in'))\n server_runner_path = recipe.createRunningWrapper(\"kumo-server\",\n recipe.substituteTemplate(server_wrapper_template_location, all_config))\n gateway_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_gateway_run.in'))\n gateway_runner_path = recipe.createRunningWrapper(\"kumo-gateway\",\n recipe.substituteTemplate(gateway_wrapper_template_location, all_config))\n\n return [manager_runner_path, server_runner_path, gateway_runner_path]\n\n def run_kumo_manager(self, recipe):\n \"\"\" Run the kumofs manager. \"\"\"\n manager_config = {}\n manager_config.update(recipe.options)\n\n manager_config['manager_address'] = \"[%s]\" % recipe.getGlobalIPv6Address()\n manager_config['manager_port'] = 19700\n manager_config['manager_log'] = os.path.join(recipe.log_directory, \"kumo-manager.log\")\n\n manager_connection = {}\n manager_connection['address'] = manager_config['manager_address']\n manager_connection['port'] = manager_config['manager_port']\n recipe.computer_partition.setConnectionDict(manager_connection)\n\n manager_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_manager_run.in'))\n manager_runner_path = recipe.createRunningWrapper(\"kumo-manager\",\n recipe.substituteTemplate(manager_wrapper_template_location, manager_config))\n\n return [manager_runner_path]\n\n def run_kumo_server(self, recipe):\n \"\"\" Run the kumofs server. 
\"\"\"\n server_config = {}\n server_config.update(recipe.options)\n server_config.update(recipe.parameter_dict)\n\n server_config['server_address'] = \"[%s]\" % recipe.getGlobalIPv6Address()\n server_config['server_port'] = 19800\n server_config['server_listen_port'] = 19900\n server_config['server_storage'] = os.path.join(recipe.var_directory,\"kumodb.tch\")\n server_config['server_log'] = os.path.join(recipe.log_directory, \"kumo-server.log\")\n\n server_connection = {}\n server_connection['address'] = server_config['server_address']\n recipe.computer_partition.setConnectionDict(server_connection)\n\n server_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_server_run.in'))\n server_runner_path = recipe.createRunningWrapper(\"kumo-server\",\n recipe.substituteTemplate(server_wrapper_template_location, server_config))\n\n return [server_runner_path]\n\n def run_kumo_gateway(self, recipe):\n \"\"\" Run the kumofs gateway. \"\"\"\n gateway_config = {}\n gateway_config.update(recipe.options)\n gateway_config.update(recipe.parameter_dict)\n\n gateway_config['gateway_address'] = \"[%s]\" % recipe.getGlobalIPv6Address()\n gateway_config['gateway_port'] = 11411\n gateway_config['gateway_log'] = os.path.join(recipe.log_directory, \"kumo-gateway.log\")\n\n gateway_connection = {}\n gateway_connection['address'] = gateway_config['gateway_address']\n gateway_connection['port'] = gateway_config['gateway_port']\n recipe.computer_partition.setConnectionDict(gateway_connection)\n\n gateway_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_gateway_run.in'))\n gateway_runner_path = recipe.createRunningWrapper(\"kumo-gateway\",\n recipe.substituteTemplate(gateway_wrapper_template_location, gateway_config))\n\n return [gateway_runner_path]\n\n def run_kumo_tester(self, recipe):\n \"\"\" Run the kumofs tester. \"\"\"\n tester_config = {}\n tester_config.update(recipe.options)\n tester_config.update(recipe.parameter_dict)\n\n tester_config['tester_address'] = recipe.getGlobalIPv6Address()\n # tester_config['url'] = \"http://%s:5000/\" % tester_config['tester_address']\n # tester_config['start_url'] = \"http://%s:5000/start\" % tester_config['tester_address']\n tester_config['report_path'] = recipe.log_directory\n config_dict['binary'] = \"%s -g -l %s -p %s -t %s %s\" % (config_dict['memstrike_binary'],\n config_dict['gateway_address'].strip(\"[]\"),\n str(config_dict['gateway_port']),\n str(config_dict['nb_thread']),\n str(config_dict['nb_request']))\n tester_config['log_directory'] = recipe.log_directory\n tester_config['compress_method'] = \"bz2\"\n\n tester_connection = {}\n tester_connection['url'] = \"http://%s:5000/\" % tester_config['tester_address']\n recipe.computer_partition.setConnectionDict(tester_connection)\n\n tester_wrapper_template_location = pkg_resources.resource_filename(\n 'slapos.recipe.nosqltestbed', os.path.join(\n 'template', 'nosqltester_run.in'))\n tester_runner_path = recipe.createRunningWrapper(\"nosqltester\",\n recipe.substituteTemplate(tester_wrapper_template_location, tester_config))\n\n return [tester_runner_path]\n\n def run_kumo_tester_and_gateway(self, recipe):\n \"\"\" Run the kumofs tester and gateway on the same partition. 
\"\"\"\n address = recipe.getGlobalIPv6Address()\n\n config_dict = {}\n config_dict.update(recipe.options)\n config_dict.update(recipe.parameter_dict)\n\n # Gateway part\n config_dict['gateway_address'] = \"[%s]\" % address\n config_dict['gateway_port'] = 11411\n config_dict['gateway_log'] = os.path.join(recipe.log_directory, \"kumo-gateway.log\")\n\n # Tester part\n config_dict['tester_address'] = address\n config_dict['report_path'] = recipe.log_directory\n config_dict['binary'] = \"%s -g -l %s -p %s -t %s %s\" % (config_dict['memstrike_binary'],\n config_dict['gateway_address'].strip(\"[]\"),\n str(config_dict['gateway_port']),\n str(config_dict['nb_thread']),\n str(config_dict['nb_request']))\n config_dict['log_directory'] = recipe.log_directory\n config_dict['compress_method'] = \"bz2\"\n\n connection_dict = {}\n # connection_dict['address'] = config_dict['gateway_address']\n # connection_dict['port'] = config_dict['gateway_port']\n connection_dict['url'] = \"http://%s:5000/\" % config_dict['tester_address']\n recipe.computer_partition.setConnectionDict(connection_dict)\n\n gateway_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'kumo_gateway_run.in'))\n gateway_runner_path = recipe.createRunningWrapper(\"kumo-gateway\",\n recipe.substituteTemplate(gateway_wrapper_template_location, config_dict))\n\n tester_wrapper_template_location = pkg_resources.resource_filename(\n 'slapos.recipe.nosqltestbed', os.path.join(\n 'template', 'nosqltester_run.in'))\n tester_runner_path = recipe.createRunningWrapper(\"nosqltester\",\n recipe.substituteTemplate(tester_wrapper_template_location, config_dict))\n\n return [gateway_runner_path, tester_runner_path]\n\n def run_memstrike_set(self, recipe):\n \"\"\" Run memstrike in set mode. 
\"\"\"\n memstrike_config = {}\n memstrike_config.update(recipe.options)\n memstrike_config.update(recipe.parameter_dict)\n\n memstrike_config['gateway_address'] = memstrike_config['gateway_address'].strip(\"[]\")\n\n memstrike_connection = {}\n memstrike_connection['status'] = \"OK\"\n recipe.computer_partition.setConnectionDict(memstrike_connection)\n\n memstrike_wrapper_template_location = pkg_resources.resource_filename(\n __name__, os.path.join(\n 'template', 'memstrike_run.in'))\n memstrike_runner_path = recipe.createRunningWrapper(\"memstrike_set\",\n recipe.substituteTemplate(memstrike_wrapper_template_location, memstrike_config))\n\n return [memstrike_runner_path]\n\n","sub_path":"slapos/recipe/nosqltestbed/kumo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"248390394","text":"from strange_case.registry import Registry\nfrom strange_case.nodes.jinja import JinjaNode\nfrom strange_case.nodes import Processor\nimport types\n\n\ndef bind(object, name=None):\n def dec(function):\n my_name = name or function.__name__\n\n setattr(object, my_name, types.MethodType(function, object))\n return dec\n\n\ndef toc_processor(config, source_path, target_path):\n toc_processor = Processor(config)\n options = {\n 'entries': int(config.get('toc', {}).get('entries', [])),\n 'maxdepth': int(config.get('toc', {}).get('maxdepth', 3)),\n 'numbered': int(config.get('toc', {}).get('numbered', False)),\n 'titlesonly': int(config.get('toc', {}).get('titlesonly', False)),\n 'glob': int(config.get('toc', {}).get('glob', False)),\n 'hidden': int(config.get('toc', {}).get('hidden', [])),\n }\n\n @bind(toc_processor)\n def populate(self, site):\n ret = []\n page_config = self.config_copy(True) # copy *all* config, even name and title.\n node = JinjaNode(page_config, source_path, target_path)\n ret.append(node)\n return ret\n\n return (toc_processor, )\n\n\nRegistry.register('toc', toc_processor)\n","sub_path":"strange_case/extensions/toc_ext.py","file_name":"toc_ext.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"573312680","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n if not s:\n return 0\n left = 0\n window = set()\n n = len(s)\n maxlen = 0\n nowlen = 0\n for i in range(n):\n nowlen += 1\n #滑动窗口法\n while s[i] in window:\n window.remove(s[left])\n left += 1\n nowlen -= 1\n if nowlen > maxlen:maxlen = nowlen\n window.add(s[i])\n return maxlen\n","sub_path":"刷题笔记/腾讯企业题库LeetCode/数组与字符串/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"185577888","text":"import sys\nimport timeit\nimport pprint\n\nsys.stdin = open('../../problem_list.txt', 'r')\n\nstart_time = timeit.default_timer()\n\n\ndef solution(N, K):\n genuine_count = 0\n temp_index = 0\n Nsoldiers = N - 2\n status_soldiers = [0] + [1 for i in range(N - 2)]\n while Nsoldiers != 2:\n while genuine_count != K:\n temp_index += 1\n temp_index %= N - 1\n if status_soldiers[temp_index]:\n genuine_count += 1\n status_soldiers[temp_index] = 0\n Nsoldiers -= 1\n genuine_count = 0\n print(\"the answer is\")\n for i in range(N - 1):\n if status_soldiers[i]:\n print(i)\n print(\"done\")\n\n\nsolution(41, 3)\n\nend_time = timeit.default_timer()\n\nprint('running time: 
{}'.format(end_time - start_time))\n","sub_path":"Coding_Test/Joara/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"287605971","text":"# -*- coding: utf-8 -*-\nimport json\nimport re\nfrom scrapy.http.request import Request\nfrom scrapy_redis import defaults\nfrom scrapy_redis.utils import bytes_to_str\nfrom scrapy import log\nfrom WeiXinSpider.items import UserItem\nfrom WeiXinSpider.scrapy_redis.spiders import RedisSpider\nfrom WeiXinSpider.server import Utils\nimport logging\n\nlogger = logging.getLogger(__name__)\nclass SouGouSpider(RedisSpider):\n name = 'sougou'\n start_urls = ['http://weixin.sougou.com/']\n redis_key = 'sougou:start_urls'\n hurry_key = 'sougou:hurry_urls'\n sg_count = -1\n\n def __init__(self):\n self.Utils = Utils()\n urls = self.Utils.get_query_url() #json类型\n # self.ids = ids\n self.Utils.qurey_url2redis(start_urls=urls) #写入redis中 start_urls hurry_urls\n\n def next_requests(self):\n \"\"\"Returns a request to be scheduled or none.\"\"\"\n use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)\n fetch_one = self.server.spop if use_set else self.server.lpop #出来的是str\n # XXX: Do we need to use a timeout here?\n found = 0\n while found < self.redis_batch_size:\n if self.server.exists(self.hurry_key):\n data = json.loads(fetch_one(self.hurry_key))\n else:\n data = json.loads(fetch_one(self.redis_key))\n first_id = 0\n if int(data['id'])-1 == first_id:\n self.sg_count += 1\n logger.info(\"已轮循{}次\".format(self.sg_count))\n if data:\n self.server.rpush(\"sougou:start_urls\", json.dumps(data))\n\n if not data:\n # Queue empty.\n # if self.Utils.redis_con.exists(\"sougou:dupefilter\"):\n # self.Utils.redis_con.delete(\"sougou:dupefilter\")\n break\n req = self.make_request_from_data(data)\n if req:\n yield req\n found += 1\n else:\n self.logger.debug(\"Request not made from data: %r\", data)\n\n if found:\n self.logger.debug(\"Read %s requests from '%s'\", found, self.redis_key)\n\n\n\n def make_request_from_data(self, data):\n \"\"\"Returns a Request instance from data coming from Redis.\n\n By default, ``data`` is an encoded URL. You can override this method to\n provide your own message decoding.\n\n Parameters\n ----------\n data : bytes\n Message from redis.\n\n \"\"\"\n #传进来的是字符串 需转为dict\n # data = eval(data)\n url = bytes_to_str(data['url'], self.redis_encoding)\n return self.make_requests_from_url(url , data['id'])\n\n def make_requests_from_url(self, url, id):\n \"\"\" This method is deprecated. 
\"\"\"\n return Request(url, dont_filter=True, meta={'id': int(id)})\n # 'dont_redirect': True,\n\n def parse(self, response):\n user_item = UserItem()\n r = re.match(r'.*&query=(.[^&]*)&.*', response.url)\n if r:\n keyword = r.group(1)\n if response.status == 200:\n if 'antispider' not in response.url:\n user_item = UserItem()\n # 初始化id account status1\n user_item['id'] = response.meta.get('id')\n user_item['status1'] = 0\n # user_item['status2'] = 0\n # user_item['status3'] = 0\n user_item['status1'] = 1\n user_item['account'] = keyword\n logger.info('query keyword is {}'.format(keyword))\n user_item['keyword'] = ''\n user_item['certified_text'] = ''\n user_item['description'] = ''\n user_item['hs_url'] = ''\n user_item['name'] = ''\n list2 = response.css('ul.news-list2')\n if list2 and len(list2) > 0:\n logger.info(\"{}代理有效\".format(response.meta.get(\"proxy\")))\n li_nodes = list2.css('li')\n for li_node in li_nodes:\n url = li_node.css('.gzh-box2 .txt-box .tit a::attr(href)').extract_first().strip()\n name = li_node.css('.gzh-box2 .txt-box .tit a::text').extract_first()\n account = li_node.css('.gzh-box2 .txt-box .info label::text').extract_first()\n dl_nodes = li_node.css('dl')\n certified_text = ''\n description = ''\n if dl_nodes:\n description = li_node.css('dl:nth-child(2) > dd::text').extract_first()\n if len(dl_nodes) == 3:\n dt_text_tmp = li_node.css('dl:nth-child(3) > dt::text').extract()\n text = dt_text_tmp[1]\n dd_text = li_node.css('dl:nth-child(3) > dd::text').extract_first()\n if dd_text:\n certified_text = text + dd_text\n if account == keyword:\n user_item['hs_url'] = url\n user_item['account'] = account\n user_item['certified_text'] = certified_text\n user_item['description'] = description\n user_item['name'] = name\n user_item['keyword'] = account\n if user_item['hs_url']:\n logger.info('匹配成功 hs_url:{}'.format(user_item['hs_url']))\n user_item['status1'] = 2\n #将id url 以字典形式存储\n # self.Utils.redis_con.rpush('HS_URLS', json.dumps(dict(id=user_item['id'], url=url)))\n # redis_con = Redis(host=REDIS_HOST, db=REDIS_DB+1)\n # redis_con.set(user_item['id'],json.dumps(dict(hs_url=url, account=user_item['account'])))\n # redis_con = Redis(host=REDIS_HOST, db=1)\n # redis_con.set(user_item['id'],json.dumps(dict(id=user_item['id'],url=url,account=user_item['account'])))\n #status1<0 保存数据库not found\n else:\n logger.info('{}没有匹配结果!'.format(keyword))\n user_item['status1'] = -2\n\n elif '暂无' in response.body:\n logger.info('{}没有搜索结果!'.format(keyword))\n user_item['status1'] = -1\n else:\n logger.info('系统错误!')\n user_item['status1'] = -1\n yield user_item\n else:#输入验证码 换代理\n #pop proxy\n logger.info('请输入验证码!')\n self.Utils.pop_proxy()\n yield Request(response.url, callback=self.parse)\n\n else:\n logger.info('其他情况!account={0},code={1}'.format(user_item['account'],response.code))\n self.Utils.pop_proxy()\n yield Request(response.url, callback=self.parse)\n\n","sub_path":"WeiXinSpider/spiders/sougou.py","file_name":"sougou.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"267269423","text":"import pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom utils.save_models import Save\nfrom utils.functions import mean, total\n\n\nclass NoScreeningNoMRI(Save):\n\n \"\"\"Build cohort of non-screened using pre-2019 guidelines\"\"\"\n\n def __init__(self, params):\n self.cohort_list = []\n self.outcomes_list = []\n self.simulations = defaultdict(list)\n self.run_model = 
self._run_model(params)\n\n def _run_model(self, params):\n\n # Loop through age cohorts\n for year in np.arange(55, 70):\n\n incidence = params.pca_incidence[:, year-45:]\n pca_mortality = params.pca_death_baseline[:, year-45:]\n mortality_other_causes = params.death_other_causes[:, year-45:]\n localised_stage = params.stage_local_ns_psa[:, year-45:]\n advanced_stage = params.stage_adv_ns_psa[:, year-45:]\n tx_costs_local = params.tx_costs * params.tx.localised.values\n tx_costs_adv = params.tx_costs * params.tx.advanced.values\n\n # Year 1 in the model\n #####################\n age = np.arange(year, 90)\n length_df = len(age)\n\n # Cohorts, numbers 'healthy', and incident cases\n cohort = np.array([np.repeat(params.pop.loc[year, :], length_df)] * params.sims)\n pca_alive = np.array([np.zeros(length_df)] * params.sims)\n healthy = cohort - pca_alive\n pca_incidence = healthy * incidence\n\n # Deaths\n pca_death = ((pca_alive * pca_mortality)\n + (healthy * pca_mortality))\n\n pca_death_other = ((pca_incidence\n + pca_alive\n - pca_death)\n * mortality_other_causes)\n\n healthy_death_other = ((healthy - pca_incidence)\n * mortality_other_causes)\n\n total_death = (pca_death\n + pca_death_other\n + healthy_death_other)\n\n # Prevalent cases & life-years\n pca_prevalence_ns = (pca_incidence\n - pca_death\n - pca_death_other)\n\n lyrs_pca_nodiscount = pca_prevalence_ns * 0.5\n\n # Treatment costs\n costs_tx = np.array([np.zeros(length_df)] * params.sims)\n\n costs_tx[:, 0] = ((pca_incidence[:, 0]\n * localised_stage[:, 0].T\n * tx_costs_local.T).sum(axis=0)\n\n + (pca_incidence[:, 0]\n * advanced_stage[:, 0].T\n * tx_costs_adv.T).sum(axis=0)\n\n * params.relative_cost_clinically_detected[:, 0])\n\n # Year 2 onwards\n ################\n total_cycles = length_df\n for i in range(1, total_cycles):\n\n # Cohorts, numbers 'healthy', and incident cases\n cohort[:, i] = cohort[:, i-1] - total_death[:, i-1]\n\n # PCa alive at the beginning of the year\n pca_alive[:, i] = (pca_alive[:, i-1]\n + pca_incidence[:, i-1]\n - pca_death[:, i-1]\n - pca_death_other[:, i-1])\n\n healthy[:, i] = cohort[:, i] - pca_alive[:, i]\n\n pca_incidence[:, i] = healthy[:, i] * incidence[:, i]\n\n # Deaths\n pca_death[:, i] = ((pca_alive[:, i] * pca_mortality[:, i])\n + (healthy[:, i] * pca_mortality[:, i]))\n\n pca_death_other[:, i] = ((pca_incidence[:, i]\n + pca_alive[:, i]\n - pca_death[:, i])\n * mortality_other_causes[:, i])\n\n healthy_death_other[:, i] = ((healthy[:, i] - pca_incidence[:, i])\n * mortality_other_causes[:, i])\n\n total_death[:, i] = (pca_death[:, i]\n + pca_death_other[:, i]\n + healthy_death_other[:, i])\n\n # Prevalent cases & life-years\n pca_prevalence_ns[:, i] = (pca_incidence[:, i]\n + pca_alive[:, i]\n - pca_death[:, i]\n - pca_death_other[:, i])\n\n lyrs_pca_nodiscount[:, i] = ((pca_prevalence_ns[:, i-1]\n + pca_prevalence_ns[:, i])\n * 0.5)\n\n # Costs\n costs_tx[:, i] = ((pca_incidence[:, i]\n * localised_stage[:, i].T\n * tx_costs_local.T).sum(axis=0)\n\n + (pca_incidence[:, i]\n * advanced_stage[:, i].T\n * tx_costs_adv.T).sum(axis=0)\n\n * params.relative_cost_clinically_detected[:, i])\n\n ##############\n # Life-years #\n ##############\n\n # Life-years ('healthy')\n lyrs_healthy_nodiscount_ns = healthy - (0.5*(healthy_death_other+pca_incidence))\n lyrs_healthy_discount_ns = lyrs_healthy_nodiscount_ns * params.discount_factor[:total_cycles]\n\n # Life-years with prostate cancer\n lyrs_pca_discount_ns = lyrs_pca_nodiscount * params.discount_factor[:total_cycles]\n\n # Total 
life-years\n ##################\n lyrs_nodiscount_ns = lyrs_healthy_nodiscount_ns + lyrs_pca_nodiscount\n lyrs_discount_ns = lyrs_healthy_discount_ns + lyrs_pca_discount_ns\n\n #########\n # QALYs #\n #########\n\n # QALYs in the healthy\n qalys_healthy_nodiscount_ns = lyrs_healthy_nodiscount_ns * params.utility_background[:, year-45:]\n qalys_healthy_discount_ns = lyrs_healthy_discount_ns * params.utility_background[:, year-45:]\n\n # QALYs with prostate cancer\n qalys_pca_nodiscount_ns = lyrs_pca_nodiscount * params.utility_pca[:, year-45:]\n qalys_pca_discount_ns = lyrs_pca_discount_ns * params.utility_pca[:, year-45:]\n\n # Total QALYs\n #############\n qalys_nodiscount_ns = qalys_healthy_nodiscount_ns + qalys_pca_nodiscount_ns\n qalys_discount_ns = qalys_healthy_discount_ns + qalys_pca_discount_ns\n\n ###############\n # PSA testing #\n ###############\n\n # Cost of PSA testing\n n_psa_tests_ns = ((pca_incidence/params.p_biopsy_ns[:, year-45:])\n * params.n_psa_tests[:, year-45:])\n\n cost_psa_testing_nodiscount_ns = (n_psa_tests_ns\n * params.cost_psa[:, year-45:]\n * params.relative_cost_clinically_detected[:, year-45:])\n\n cost_psa_testing_discount_ns = cost_psa_testing_nodiscount_ns * params.discount_factor[:total_cycles]\n\n ############\n # Biopsies #\n ############\n\n # Cost of suspected cancer - biopsies\n n_biopsies_ns = pca_incidence / params.p_biopsy_ns[:, year-45:]\n\n cost_biopsy_nodiscount_ns = (n_biopsies_ns\n * params.cost_biopsy[:, year-45:]\n * params.relative_cost_clinically_detected[:, year-45:])\n\n cost_biopsy_discount_ns = cost_biopsy_nodiscount_ns * params.discount_factor[:total_cycles]\n\n #######\n # MRI #\n #######\n\n # Number and costs of MRI\n n_mri_ns = pca_incidence * params.mri_biopsy_first[:, year-45:]\n\n # MRI costs are indicative only - they are not used in total\n # cost calculations as they are taken into account under staging\n cost_mri_nodiscount_ns = (n_mri_ns\n * np.array([params.cost_mri]).T\n * params.relative_cost_clinically_detected[:, year-45:])\n\n cost_mri_discount_ns = cost_mri_nodiscount_ns * params.discount_factor[:total_cycles]\n\n ###########\n # Staging #\n ###########\n\n # Cost of staging\n cost_staging_nodiscount_ns = (params.cost_assessment\n * advanced_stage.T\n * pca_incidence.T\n * params.relative_cost_clinically_detected[:, year-45:].T).T\n\n cost_staging_discount_ns = cost_staging_nodiscount_ns * params.discount_factor[:total_cycles]\n\n #############\n # EOL Costs #\n #############\n\n # Cost in last 12 months of life\n cost_eol_nodiscount_ns = (params.cost_pca_death * pca_death.T).T\n cost_eol_discount_ns = cost_eol_nodiscount_ns * params.discount_factor[:total_cycles]\n\n ###################\n # Treatment costs #\n ###################\n\n # Costs of treatment\n cost_tx_discount_ns = costs_tx * params.discount_factor[:total_cycles]\n\n # Amalgamated costs\n cost_nodiscount_ns = (cost_psa_testing_nodiscount_ns\n + cost_mri_nodiscount_ns\n + cost_biopsy_nodiscount_ns\n + cost_staging_nodiscount_ns\n + costs_tx\n + cost_eol_nodiscount_ns)\n\n cost_discount_ns = (cost_psa_testing_discount_ns\n + cost_mri_discount_ns\n + cost_biopsy_discount_ns\n + cost_staging_discount_ns\n + cost_tx_discount_ns\n + cost_eol_discount_ns)\n\n #######################################\n # Generate dataframes of the outcomes #\n #######################################\n\n # Generate a mean dataframe for each age cohort\n cohort = pd.DataFrame({\n 'age': age,\n 'pca_cases': mean(pca_incidence),\n 'deaths_other': 
mean((pca_death_other+healthy_death_other)),\n 'deaths_pca': mean(pca_death),\n 'pca_alive': mean(pca_alive),\n 'healthy': mean(healthy),\n 'psa_tests': mean(n_psa_tests_ns),\n 'n_mri': mean(n_mri_ns),\n 'n_biopsies': mean(n_biopsies_ns),\n 'lyrs_healthy_nodiscount': mean(lyrs_healthy_nodiscount_ns),\n 'lyrs_healthy_discount': mean(lyrs_healthy_discount_ns),\n 'lyrs_pca_discount': mean(lyrs_pca_discount_ns),\n 'total_lyrs_discount': mean(lyrs_discount_ns),\n 'qalys_healthy_discount': mean(qalys_healthy_discount_ns),\n 'qalys_pca_discount': mean(qalys_pca_discount_ns),\n 'total_qalys_discount': mean(qalys_discount_ns),\n 'cost_psa_testing_discount': mean(cost_psa_testing_discount_ns),\n 'cost_mri_discount': mean(cost_mri_discount_ns),\n 'cost_biopsy_discount': mean(cost_biopsy_discount_ns),\n 'cost_staging_discount': mean(cost_staging_discount_ns),\n 'cost_treatment_discount': mean(cost_tx_discount_ns),\n 'costs_eol_discount': mean(cost_eol_discount_ns),\n 'total_cost_discount': mean(cost_discount_ns)\n })\n\n # Totals for each age cohort\n outcomes = pd.DataFrame({\n 'cohort_age_at_start': [year],\n 'pca_cases': [total(pca_incidence)],\n 'pca_deaths': [total(pca_death)],\n 'deaths_other_causes': [total((pca_death_other+healthy_death_other))],\n 'lyrs_healthy_discounted': [total(lyrs_healthy_discount_ns)],\n 'lyrs_pca_discounted': [total(lyrs_pca_discount_ns)],\n 'lyrs_undiscounted': [total(lyrs_nodiscount_ns)],\n 'lyrs_discounted': [total(lyrs_discount_ns)],\n 'qalys_healthy_discounted': [total(qalys_healthy_discount_ns)],\n 'qalys_pca_discounted': [total(qalys_pca_discount_ns)],\n 'qalys_undiscounted': [total(qalys_nodiscount_ns)],\n 'qalys_discounted': [total(qalys_discount_ns)],\n 'cost_psa_testing_undiscounted': [total(cost_psa_testing_nodiscount_ns)],\n 'cost_psa_testing_discounted': [total(cost_psa_testing_discount_ns)],\n 'cost_mri_undiscounted': [total(cost_mri_nodiscount_ns)],\n 'cost_mri_discounted': [total(cost_mri_discount_ns)],\n 'cost_biopsy_undiscounted': [total(cost_biopsy_nodiscount_ns)],\n 'cost_biopsy_discounted': [total(cost_biopsy_discount_ns)],\n 'cost_staging_undiscounted': [total(cost_staging_nodiscount_ns)],\n 'cost_staging_discounted': [total(cost_staging_discount_ns)],\n 'cost_eol_undiscounted': [total(cost_eol_nodiscount_ns)],\n 'cost_eol_discounted': [total(cost_eol_discount_ns)],\n 'cost_treatment_undiscounted': [total(costs_tx)],\n 'cost_treatment_discounted': [total(cost_tx_discount_ns)],\n 'costs_undiscounted': [total(cost_nodiscount_ns)],\n 'costs_discounted': [total(cost_discount_ns)],\n 'n_psa_tests': [total(n_psa_tests_ns)],\n 'n_mri': [total(n_mri_ns)],\n 'n_biopsies': [total(n_biopsies_ns)],\n 'overdiagnosis': [0]\n })\n\n self.simulations['qalys'].append(np.sum(qalys_discount_ns, axis=1))\n self.simulations['lyrs'].append(np.sum(lyrs_discount_ns, axis=1))\n self.simulations['costs'].append(np.sum(cost_discount_ns, axis=1))\n self.simulations['pca_deaths'].append(np.sum(pca_death, axis=1))\n self.simulations['pca_cases'].append(np.sum(pca_incidence, axis=1))\n self.simulations['cost_mri'].append(np.sum(cost_mri_discount_ns, axis=1))\n self.simulations['cost_biopsy'].append(np.sum(cost_biopsy_discount_ns, axis=1))\n self.simulations['n_mri'].append(np.sum(n_mri_ns, axis=1))\n self.simulations['n_biopsies'].append(np.sum(n_biopsies_ns, axis=1))\n self.simulations['n_psa_tests'].append(np.sum(n_psa_tests_ns, axis=1))\n self.simulations['overdiagnosis'].append([0])\n\n self.cohort_list.append(cohort)\n self.outcomes_list.append(outcomes)\n\n return 
self.cohort_list, self.outcomes_list, self.simulations\n","sub_path":"code/models/no_screening_noMRI.py","file_name":"no_screening_noMRI.py","file_ext":"py","file_size_in_byte":15115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"491199966","text":"import matplotlib.pyplot as plt\n\nx = [1, 2, 3, 4, 5, 6, 7, 8]\ny = [5, 2, 4, 2, 1, 4, 5, 2]\n# s is the marker size: the marker area in points squared\n# marker sets the marker style; 'o' is a circle\n# color sets the color; 'k' means black\nplt.scatter(x, y, label='skitscat', color='k', s=25, marker='o')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Interesting Graph\\nCheck it out')\nplt.legend()\nplt.savefig('5.png')\nplt.show()\n","sub_path":"kesci/1 matplotlib 的数据可视化/5 散点图.py","file_name":"5 散点图.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"404917771","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import convolve\nfrom scipy.integrate import cumtrapz\n\n#parameters\ntime = 4 #event duration, in hours\nl = 8 #x-length (m)\nw = 8 #y-length (m)\nh = 3 #z-length (m)\nx_o = 4 #x-coordinate of source\ny_o = 4 #y-coordinate of source\nv = 0.15 #air velocity (m/s) from left to right. \nR = 5 #aerosol generation rate (particles/s)\nx = 8 #x-coordinate evaluated\ny = 8 #y-coordinate evaluated\np = 1.3*10**(-4) #breathing rate (m^3/s)\nk = 0.0069 #infectivity constant for dose-response model\nQ = 0.0002 # Air exchange rate (s^-1)\nK = 0.0053 # Eddy diffusion coefficient (m^2/s)\nd = 1.7*10**(-4) #deactivation rate (s^-1)\ns = 1.1*10**(-4) #settling rate (s^-1)\n\n#time-axis\nt_end = 60*60*time #in seconds\ndelta_t = 1 #(s) time-steps\nn_t = int(t_end/delta_t)\nt = np.linspace(delta_t,t_end,n_t)\n\n#initialize source function. 
multiply by delta_t to discretize the function.\nS = delta_t * np.full(len(t), R)\n\n#Impulse function I\n#I_y is an approximation of the sum of y-exponentials in the impulse function\nI_y = np.exp(-((y-y_o)**2)/(4*K*t)) + np.exp(-((y+y_o)**2)/(4*K*t))\nfor n in range(1,4):\n I_y += np.exp(-((y-y_o - 2*n*w)**2)/(4*K*t)) + np.exp(-((y+y_o + 2*n*w)**2)/(4*K*t))\n I_y += np.exp(-((y-y_o + 2*n*w)**2)/(4*K*t)) + np.exp(-((y+y_o - 2*n*w)**2)/(4*K*t))\n#I_x is an approximation of the sum of x-exponentials in the impulse function\nI_x = np.exp(-((x-x_o-v*t)**2)/(4*K*t)) + np.exp(-((x+x_o+v*t)**2)/(4*K*t))\nm = int(v*3600/(2*l) *time) #no of times the particles travel around the recirculation loop\nfor n in range(1,m+1):\n I_x += np.exp(-((x-x_o -v*t + 2*n*l)**2)/(4*K*t)) + np.exp(-((x+x_o+v*t - 2*n*l)**2)/(4*K*t))\n I_x += np.exp(-((x-x_o -v*t - 2*n*l)**2)/(4*K*t)) + np.exp(-((x+x_o+v*t + 2*n*l)**2)/(4*K*t))\nsinks = np.exp(-(Q+d+s)*t) #effect of ventilation, deactivation and settling sinks\nI = 1/(4*np.pi*K*t) * I_y * I_x * sinks #Impulse function\n\n#Convolve S and I to find C\nC = convolve(S,I)[0:len(t)] / (h/2)\ndose = p * cumtrapz(C,t)\nP = 1 - np.exp(-dose * k)\n\nt = t/60 #convert time-axis to minutes for plotting of graph\n\nP = P * 100 #convert probability to percent for plotting of graph\n#plot Probability versus time\nplt.plot(t[1:],P)\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\nplt.xlabel(\"Time (min)\")\nplt.ylabel(\"Probability ($\\%$)\")\nplt.show()\n","sub_path":"Prob-point.py","file_name":"Prob-point.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"602165477","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 10 12:26:44 2018\r\n@author: John\r\n\"\"\"\r\n\r\nfrom eqn_functions import f\r\n\r\n# Classical Runge-Kutta RK4 solver.\r\ndef RK4(x_old, f, step_size, params):\r\n j1 = f(x_old, params)\r\n j2 = f(x_old + (step_size/2)*j1, params) \r\n j3 = f(x_old + (step_size/2)*j2, params)\r\n j4 = f(x_old + (step_size)*j3, params)\r\n \r\n x_new = x_old + (step_size/6)*(j1 + 2*j2 + 2*j3 + j4)\r\n \r\n return x_new\r\n\r\n\r\ndef get_IC(test_x, step_size, params):\r\n\r\n flag = 0\r\n while flag == 0:\r\n sol = RK4(test_x, f, step_size, params) # Runge-Kutta method\r\n \r\n if test_x != sol:\r\n test_x = sol\r\n else:\r\n flag = 1\r\n x0 = test_x\r\n \r\n return x0\r\n ","sub_path":"week of Feb 22/get_IC.py","file_name":"get_IC.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"443672200","text":"from django.conf.urls import url\r\nfrom . 
import views\r\n\r\n\r\nurlpatterns = [\r\n url(r'^$', views.index, name=\"index_page\"),\r\n url(r'^offerrides/$', views.offer_ride, name=\"offer_ride\"),\r\n url(r'^takerides/$', views.take_ride, name=\"take_ride\"),\r\n url(r'^view_requests/$', views.view_requests, name=\"view_requests\"),\r\n url(r'^profile/$', views.view_profile, name=\"view_profile\"),\r\n url(r'^update_profile/$', views.update_profile, name=\"update_profile\"),\r\n url(r'^request_ride/(?P\\d+)/$', views.request_ride, name=\"request_ride\"),\r\n url(r'^contact_us/$', views.contact_us, name=\"contact_us\"),\r\n url(r'^validate_request/(?P\\d+)/$', views.validate_ride_request, name=\"validate_ride_request\"),\r\n url(r'^view_rides/$', views.view_user_rides, name=\"view_user_rides\"),\r\n url(r'^cancel_ride/(?P\\d+)/$', views.cancel_ride, name=\"cancel_ride\"),\r\n url(r'^edit_ride/(?P\\d+)/$', views.edit_rides, name=\"edit_ride\"),\r\n]","sub_path":"CarzRideOn/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"365999511","text":"#-*- coding:utf-8 -*-\n\nimport sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nfrom selenium.common.exceptions import UnexpectedAlertPresentException\nfrom selenium.webdriver.common.keys import Keys\nimport os\n\n# os.system('pause')\n\nbtn_list = {\n '실습교육':'//*[@id=\"content-box\"]/ul[1]/li[1]/div[1]/p[5]/a',\n '실험전후안전':'//*[@id=\"content-box\"]/ul[1]/li[2]/div[1]/p[5]/a',\n '안전관리실무2':'//*[@id=\"content-box\"]/ul[1]/li[3]/div[1]/p[5]/a',\n '안전의식':'//*[@id=\"content-box\"]/ul[1]/li[4]/div[1]/p[5]/a'\n}\n\nsub_btn_list = {\n '실습교육':['//*[@id=\"content-box\"]/div/ul[1]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[2]/li/h4/span/a'],\n '실험전후안전':['//*[@id=\"content-box\"]/div/ul[1]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[2]/li/h4/span/a'],\n '안전관리실무2':['//*[@id=\"content-box\"]/div/ul[1]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[2]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[3]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[4]/li/h4/span/a'],\n '안전의식':['//*[@id=\"content-box\"]/div/ul[1]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[2]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[3]/li/h4/span/a',\n '//*[@id=\"content-box\"]/div/ul[4]/li/h4/span/a']\n\n}\n\nprint(\"================================\")\nprint(\"안전교육 자동화를 시작합니다\")\nprint(\"================================\")\n\ninfo = []\n\ntry:\n with open(\"secu.txt\", \"r\") as f_r:\n line = f_r.readline()\n info = line.split(',')\n\nexcept Exception as e:\n with open(\"secu.txt\", \"w\") as f_w:\n id = input(\"id : \")\n pw = input(\"pw : \")\n total = id + \",\" + pw\n f_w.write(total)\n info = [id, pw]\n print(\"================================\")\n\nid = info[0]\npw = info[1]\n\nchromdriver = './chromedriver'\n\noptions = webdriver.ChromeOptions()\n# options.add_argument(\"headless\")\noptions.add_argument(\"disable-gpu\")\noptions.add_argument(\"lang=ko_KR\")\n\ndriver = webdriver.Chrome(chromdriver, options=options)\n\ndriver.get('https://edu.labs.go.kr/')\n\ndriver.find_element_by_name('loginDTO.userId').send_keys(id)\ndriver.find_element_by_name('loginDTO.userPass').send_keys(pw)\n\nlogin = driver.find_element_by_xpath('//*[@id=\"body_login_btn\"]')\nprint(\"로그인 
합니다\")\nprint(\"================================\")\nlogin.click()\n\ncount = 0\n\nwhile(True):\n if count != 0:\n driver.get('https://edu.labs.go.kr/')\n count = count + 1\n\n mypage = driver.find_element_by_xpath('//*[@id=\"main02\"]/div[1]/fieldset/ul/li[1]/a')\n print(\"마이페이지로 갑니다\")\n print(\"================================\")\n mypage.click()\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n lecture_list = soup.select('div.floatL > p.mT10')\n\n lecture_text_list = []\n\n for lecture in lecture_list:\n lecture_text_list.append(lecture.text)\n\n print(\"학습하실 강좌 리스트\\n\")\n\n for lecture in lecture_text_list:\n print(lecture)\n print(\"================================\")\n\n study_bool = True\n\n while(study_bool):\n # print(\"3, 4번만 가능합니다.\")\n num = input(\"1. 실습교육 \\n2. 실험전후안전 \\n3. 안전관리실무2 \\n4. 안전의식 \\n 선택 : \")\n\n if num < \"0\" or num > \"4\":\n print(\"다시\")\n continue\n\n print(\"================================\")\n\n if num == \"1\":\n btn_code = btn_list['실습교육']\n elif num == \"2\":\n btn_code = btn_list['실험전후안전']\n elif num == \"3\":\n btn_code = btn_list['안전관리실무2']\n elif num == \"4\":\n btn_code = btn_list['안전의식']\n\n study = driver.find_element_by_xpath(btn_code)\n study.click()\n\n driver.switch_to.frame('listContentsInfoFrame')\n\n if num == \"1\":\n sub_btn_codes = sub_btn_list['실습교육']\n elif num == \"2\":\n sub_btn_codes = sub_btn_list['실험전후안전']\n elif num == \"3\":\n sub_btn_codes = sub_btn_list['안전관리실무2']\n elif num == \"4\":\n sub_btn_codes = sub_btn_list['안전의식']\n\n study_bool = False\n sub_study_bool = True\n\n parent_window = driver.current_window_handle\n print(parent_window)\n\n while(sub_study_bool):\n sub_study_html = driver.page_source\n sub_sutdy_soup = BeautifulSoup(sub_study_html, 'html.parser')\n sub_lecture_list = sub_sutdy_soup.select('li > h4 > a')\n\n sub_lecture_text_list = []\n\n for lecture in sub_lecture_list:\n sub_lecture_text_list.append(lecture.text.strip())\n\n print(\"학습하실 강좌 리스트\\n\")\n\n for i, lecture in enumerate(sub_lecture_text_list):\n print(str(i) + \". 
\" + lecture)\n\n all_window = driver.window_handles\n print(all_window)\n\n sub_num = input(\"선택 : \")\n sub_num = int(sub_num)\n if sub_num not in range(len(sub_lecture_list)):\n print(\"다시\")\n continue\n print(\"================================\")\n\n sub_study = driver.find_element_by_xpath(sub_btn_codes[sub_num])\n sub_study.click()\n\n driver.implicitly_wait(10)\n\n all_window = driver.window_handles\n print(all_window)\n\n child_window = all_window[-1]\n\n driver.switch_to.window(child_window)\n print(driver.current_window_handle)\n\n driver.switch_to.frame('contentsMain')\n\n driver.maximize_window()\n\n sub_study_bool = False\n\n while(True):\n try:\n time.sleep(2)\n\n if sub_num < 2:\n video_html = driver.page_source\n video_soup = BeautifulSoup(video_html, 'html.parser')\n times = video_soup.select('div.vjs-duration.vjs-time-control.vjs-control > div')\n print(times)\n time_min_sec = []\n\n times = times[0].text\n\n time_min_sec = times.split('Time')\n\n time_min_sec = time_min_sec[1].split(\":\")\n\n min = int(time_min_sec[0])\n sec = int(time_min_sec[1])\n total_time = (min * 60) + sec + 3\n\n print(time_min_sec)\n\n else:\n # driver.implicitly_wait(300)\n video_html = driver.page_source\n video_soup = BeautifulSoup(video_html, 'html.parser')\n times = video_soup.select('div.time > span.time--duration')\n\n time_min_sec = []\n\n times = times[0].text\n # print(\"times :\", times)\n time_min_sec = times.split(\":\")\n\n # print(\"time :\", time)\n\n min = int(time_min_sec[0])\n sec = int(time_min_sec[1])\n total_time = (min * 60) + sec + 3\n\n try:\n if sub_num < 2:\n mute_btn = driver.find_element_by_xpath('/html/body/footer/nav/ul/li[13]/i[2]')\n else:\n mute_btn = driver.find_element_by_xpath('/html/body/div/div[3]/div[6]/div[10]')\n\n mute_btn.click()\n\n except:\n pass\n\n # driver.implicitly_wait(total_time)\n time.sleep(total_time)\n # print(\"sleep\")\n if sub_num < 2:\n print(\"0\")\n next_btn = driver.find_element_by_xpath('/html/body/footer/nav/ul/li[17]/i')\n else:\n next_btn = driver.find_element_by_xpath('/ html / body / div / div[3] / div[6] / div[13]')\n next_btn.click()\n\n\n except UnexpectedAlertPresentException as e:\n print(e.__dict__[\"msg\"])\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n driver.switch_to.frame('listContentsInfoFrame')\n\n break\n\n except Exception as e:\n print(e)\n continue\n\n\n next = input(\"강의를 다 보셨으면 0번을 누르세요.\\n 아니라면 아무키나 입력하세요\")\n\n if(next==\"0\"):\n driver.quit()\n else:\n pass\n\n # # driver.switch_to_default_content()\n # # driver.switch_to.frame('ec141b1710aa0db35ed5a5d406a9d641')\n # driver.find_element_by_class_name(\"close\").click()\n # # driver.find_element_by_xpath('//*[@id=\"modal1_box\"]/div/div[1]/button').send_keys(Keys.ENTER)\n # # close_btn = driver.find_element_by_xpath('//*[@id=\"modal1_box\"]/div/div[1]/button')\n # # close_btn.click()\n # driver.switch_to_default_content()\n # # driver.switch_to.frame('listContentsInfoFrame')\n # # driver.switch_to.window(window_name=parent_window)\n print(\"================================\")\n\n","sub_path":"crawling/edu_lab.py","file_name":"edu_lab.py","file_ext":"py","file_size_in_byte":9619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"132349127","text":"import networkx as nx\nimport os\nimport pickle\nimport numpy as np\nfrom sklearn import preprocessing\nfrom scipy.stats.mstats import kruskalwallis\nfrom scipy.stats import kstest\n\npathname1 = 
\"E:\\\\脑电python2\\\\PCC矩阵\\\\\"\npathname3 = \"E:\\\\脑电python2\\\\KS\\\\\"\n\nfor i in range(0, 32): # 1~32的循环\n if (i + 1) < 10:\n filename1 = \"s0%d.dat\" % (i + 1)\n if (i + 1) >= 10:\n filename1 = \"s%d.dat\" % (i + 1)\n\n if (i + 1) < 10:\n pathname2 = \"s0%d\\\\\" % (i + 1)\n if (i + 1) >= 10:\n pathname2 = \"s%d\\\\\" % (i + 1)\n pathname12 = pathname1 + pathname2\n pathname4 = pathname3 + pathname2\n # if not os.path.exists(pathname4): # 创建文件夹\n # os.mkdir(pathname4)\n\n for j in range(0, 40): # 0~40的循环\n if (i + 1) < 10:\n filename2 = \"s0%d-%d.dat\" % ((i + 1), (j + 1))\n if (i + 1) >= 10:\n filename2 = \"s%d-%d.dat\" % ((i + 1), (j + 1))\n print(pathname12 + filename2)\n s = open(pathname12 + filename2, \"rb\")\n x = pickle.load(s, encoding=\"latin1\") # x是字典,键是'data''labels',值是两个列表\n\n Theta = x['Theta'] # 取出键对应的值\n Alpha = x['Alpha']\n Beta1 = x['Beta1']\n Beta2 = x['Beta2']\n labels = x['labels']\n # Theta\n data1Theta = np.array(Theta['data1'])\n data2Theta = np.array(Theta['data2'])\n data3Theta = np.array(Theta['data3'])\n data4Theta = np.array(Theta['data4'])\n data5Theta = np.array(Theta['data5'])\n data6Theta = np.array(Theta['data6'])\n data7Theta = np.array(Theta['data7'])\n data8Theta = np.array(Theta['data8'])\n data9Theta = np.array(Theta['data9'])\n data0Theta = np.array(Theta['data0'])\n # Alpha\n data1Alpha = np.array(Alpha['data1'])\n data2Alpha = np.array(Alpha['data2'])\n data3Alpha = np.array(Alpha['data3'])\n data4Alpha = np.array(Alpha['data4'])\n data5Alpha = np.array(Alpha['data5'])\n data6Alpha = np.array(Alpha['data6'])\n data7Alpha = np.array(Alpha['data7'])\n data8Alpha = np.array(Alpha['data8'])\n data9Alpha = np.array(Alpha['data9'])\n data0Alpha = np.array(Alpha['data0'])\n # Beta1\n data1Beta1 = np.array(Beta1['data1'])\n data2Beta1 = np.array(Beta1['data2'])\n data3Beta1 = np.array(Beta1['data3'])\n data4Beta1 = np.array(Beta1['data4'])\n data5Beta1 = np.array(Beta1['data5'])\n data6Beta1 = np.array(Beta1['data6'])\n data7Beta1 = np.array(Beta1['data7'])\n data8Beta1 = np.array(Beta1['data8'])\n data9Beta1 = np.array(Beta1['data9'])\n data0Beta1 = np.array(Beta1['data0'])\n # Beta2\n data1Beta2 = np.array(Beta2['data1'])\n data2Beta2 = np.array(Beta2['data2'])\n data3Beta2 = np.array(Beta2['data3'])\n data4Beta2 = np.array(Beta2['data4'])\n data5Beta2 = np.array(Beta2['data5'])\n data6Beta2 = np.array(Beta2['data6'])\n data7Beta2 = np.array(Beta2['data7'])\n data8Beta2 = np.array(Beta2['data8'])\n data9Beta2 = np.array(Beta2['data9'])\n data0Beta2 = np.array(Beta2['data0'])\n\n Theta_average_clustering = []\n Theta_diameter = []\n Theta_global_efficiency = []\n Theta_local_efficiency = []\n Theta_average_path = []\n\n Alpha_average_clustering = []\n Alpha_diameter = []\n Alpha_global_efficiency = []\n Alpha_local_efficiency = []\n Alpha_average_path = []\n\n Beta1_average_clustering = []\n Beta1_diameter = []\n Beta1_global_efficiency = []\n Beta1_local_efficiency = []\n Beta1_average_path = []\n\n Beta2_average_clustering = []\n Beta2_diameter = []\n Beta2_global_efficiency = []\n Beta2_local_efficiency = []\n Beta2_average_path = []\n\n\n def addTheta(average_clustering, diameter, global_efficiency, local_efficiency, average_path):\n # Theta_average_clustering = []\n # Theta_diameter = []\n # Theta_global_efficiency = []\n # Theta_local_efficiency = []\n # Theta_average_path = []\n Theta_average_clustering.append(average_clustering)\n Theta_diameter.append(diameter)\n Theta_global_efficiency.append(global_efficiency)\n 
Theta_local_efficiency.append(local_efficiency)\n Theta_average_path.append(average_path)\n Theta_list = [Theta_average_clustering, Theta_diameter, Theta_global_efficiency,\n Theta_local_efficiency, Theta_average_path]\n # Theta_list = [Theta_average_clustering]\n return Theta_list\n\n def addAlpha(average_clustering, diameter, global_efficiency, local_efficiency, average_path):\n # Alpha_average_clustering = []\n # Alpha_diameter = []\n # Alpha_global_efficiency = []\n # Alpha_local_efficiency = []\n # Alpha_average_path = []\n Alpha_average_clustering.append(average_clustering)\n Alpha_diameter.append(diameter)\n Alpha_global_efficiency.append(global_efficiency)\n Alpha_local_efficiency.append(local_efficiency)\n Alpha_average_path.append(average_path)\n Alpha_list = [Alpha_average_clustering, Alpha_diameter, Alpha_global_efficiency,\n Alpha_local_efficiency, Alpha_average_path]\n return Alpha_list\n\n def addBeta1(average_clustering, diameter, global_efficiency, local_efficiency, average_path):\n # Beta1_average_clustering = []\n # Beta1_diameter = []\n # Beta1_global_efficiency = []\n # Beta1_local_efficiency = []\n # Beta1_average_path = []\n Beta1_average_clustering.append(average_clustering)\n Beta1_diameter.append(diameter)\n Beta1_global_efficiency.append(global_efficiency)\n Beta1_local_efficiency.append(local_efficiency)\n Beta1_average_path.append(average_path)\n Beta1_list = [Beta1_average_clustering, Beta1_diameter, Beta1_global_efficiency,\n Beta1_local_efficiency, Beta1_average_path]\n return Beta1_list\n\n def addBeta2(average_clustering, diameter, global_efficiency, local_efficiency, average_path):\n # Beta2_average_clustering = []\n # Beta2_diameter = []\n # Beta2_global_efficiency = []\n # Beta2_local_efficiency = []\n # Beta2_average_path = []\n Beta2_average_clustering.append(average_clustering)\n Beta2_diameter.append(diameter)\n Beta2_global_efficiency.append(global_efficiency)\n Beta2_local_efficiency.append(local_efficiency)\n Beta2_average_path.append(average_path)\n Beta2_list = [Beta2_average_clustering, Beta2_diameter, Beta2_global_efficiency,\n Beta2_local_efficiency, Beta2_average_path]\n return Beta2_list\n\n\n def attribute(xx, band):\n xxx = np.array(xx)\n shold = [0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]\n for k in shold:\n print(k)\n print('*****')\n data_binarized = preprocessing.Binarizer(threshold=k).transform(xxx) # 二值化\n data = np.nonzero(data_binarized)\n row = data[0]\n col = data[1]\n di = zip(row, col)\n list_di = list(di)\n G = nx.Graph()\n G.add_edges_from(list_di)\n\n a = []\n for i in range(0, 32):\n for j in range(0, 32):\n while nx.has_path(G, i, j) == False:\n a.append(j)\n break\n if len(a) == 0:\n # print('!!!')\n average_clustering = nx.average_clustering(G)\n diameter = nx.diameter(G)\n global_efficiency = nx.global_efficiency(G)\n local_efficiency = nx.local_efficiency(G)\n average_path = nx.average_shortest_path_length(G)\n\n if band == 'Theta':\n print('Theta')\n list_group = addTheta(average_clustering, diameter, global_efficiency,\n local_efficiency, average_path)\n if band == 'Alpha':\n print('Alpha')\n list_group = addAlpha(average_clustering, diameter, global_efficiency,\n local_efficiency, average_path)\n if band == 'Beta1':\n print('Beta1')\n list_group = addBeta1(average_clustering, diameter, global_efficiency,\n local_efficiency, average_path)\n if band == 'Beta2':\n print('Beta2')\n list_group = addBeta2(average_clustering, diameter, global_efficiency,\n local_efficiency, 
average_path)\n print(list_group)\n Kruskawallis_test(list_group)\n # KS_test(list_group)\n\n\n else:\n print('ERROR----------------')\n\n\n def Kruskawallis_test(list_groups):\n # Perform the Kruskal-Wallis test,返回True表示有显著差异,返回False表示无显著差异\n # print(\"Use kruskawallis test:\")\n h, p = kruskalwallis(list_groups)\n # print(\"H value:\", h)\n # print(\"p\", p)\n\n # Print the results\n if p < 0.05:\n print('Has difference -----√')\n return True\n else:\n print('No difference -----×')\n return False\n\n def KS_test(list_groups):\n print('ks------------------------------------')\n p = kstest(list_groups, 'norm')\n print(p)\n # Print the results\n # if p < 0.05:\n # print('Has difference -----√')\n # return True\n # else:\n # print('No difference -----×')\n # return False\n\n print(data1Theta)\n\n attribute(data1Theta, 'Theta')\n attribute(data2Theta, 'Theta')\n attribute(data3Theta, 'Theta')\n attribute(data4Theta, 'Theta')\n attribute(data5Theta, 'Theta')\n attribute(data6Theta, 'Theta')\n attribute(data7Theta, 'Theta')\n attribute(data8Theta, 'Theta')\n attribute(data9Theta, 'Theta')\n attribute(data0Theta, 'Theta')\n\n attribute(data1Alpha, 'Alpha')\n attribute(data2Alpha, 'Alpha')\n attribute(data3Alpha, 'Alpha')\n attribute(data4Alpha, 'Alpha')\n attribute(data5Alpha, 'Alpha')\n attribute(data6Alpha, 'Alpha')\n attribute(data7Alpha, 'Alpha')\n attribute(data8Alpha, 'Alpha')\n attribute(data9Alpha, 'Alpha')\n attribute(data0Alpha, 'Alpha')\n\n attribute(data1Beta1, 'Beta1')\n attribute(data2Beta1, 'Beta1')\n attribute(data3Beta1, 'Beta1')\n attribute(data4Beta1, 'Beta1')\n attribute(data5Beta1, 'Beta1')\n attribute(data6Beta1, 'Beta1')\n attribute(data7Beta1, 'Beta1')\n attribute(data8Beta1, 'Beta1')\n attribute(data9Beta1, 'Beta1')\n attribute(data0Beta1, 'Beta1')\n\n attribute(data1Beta2, 'Beta2')\n attribute(data2Beta2, 'Beta2')\n attribute(data3Beta2, 'Beta2')\n attribute(data4Beta2, 'Beta2')\n attribute(data5Beta2, 'Beta2')\n attribute(data6Beta2, 'Beta2')\n attribute(data7Beta2, 'Beta2')\n attribute(data8Beta2, 'Beta2')\n attribute(data9Beta2, 'Beta2')\n attribute(data0Beta2, 'Beta2')\n\n\n\n\n\n","sub_path":"KSTest.py","file_name":"KSTest.py","file_ext":"py","file_size_in_byte":12022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"376963992","text":"import h5py\nimport scipy.io as sio\nfrom scipy.ndimage.interpolation import zoom\nimport numpy as np\nimport os\n\ndirname = 'examples/nyu_depth_v2/data'\n\nif not os.path.exists(dirname):\n os.makedirs(dirname)\n\ndata_path = \"data/nyu_depth_v2/nyu_depth_v2_labeled.mat\"\nsplit_path = \"data/nyu_depth_v2/splits.mat\"\n\ntrain_file = os.path.join(dirname,'train.h5')\ntest_file = os.path.join(dirname, 'test.h5')\n\ndata = h5py.File(data_path)\n# data have keys:\n# #refs#,\n# #subsystem#,\n# accelData,\n# depths,\n# images,\n# instances,\n# labels,\n# names,\n# namesToIds,\n# rawDepthFilenames,\n# rawDepths,\n# rawRgbFilenames,\n# sceneTypes,\n# scenes\nimages = np.array(data['images'], dtype=np.float32)\nnew_images = np.zeros((1449, 3, 240, 320))\n# images.shape = (1449, 3, 640, 480) in N x C x W x H form\n# for n in xrange(images.shape[0]):\n# images[n][0] = images[n][0] - 122.54631805\n# images[n][1] = images[n][1] - 104.78344727\n# images[n][2] = images[n][2] - 100.0244751\nfor n in xrange(images.shape[0]):\n img_ = images[n].transpose((2,1,0))\n img_ = zoom(img_, (0.5, 0.5, 1.0))\n # img_ = np.array(img_, dtype=np.float32) # must convert to float32, or the substraction is 
invalid\n img_ = img_ - 128 # approx substract mean\n # new_images[n] = img_[6:234, 8:312, :].transpose((2,0,1)) # crop center size 228 x 304\n new_images[n] = img_.transpose((2,0,1))\n \ndepths = np.array(data['depths'], dtype=np.float32)\nnew_depths = np.zeros((1449, 1, 240, 320))\n# the original depths.shape = (1449, 640, 480) in N x W x H\nfor n in xrange(depths.shape[0]):\n dep_ = depths[n].transpose((1,0))\n dep_ = zoom(dep_, 0.5)\n # dep_ = imresize(dep_, size=(240, 320), interp='bicubic')\n # dep_ = dep_[1::2, 1::2]\n # dep_ = dep_[6:234, 8:312][1::4, 1::4][1:56, 1:75] # crop + downsampling + crop\n new_depths[n] = dep_[np.newaxis, :, :]\n\nsplit = sio.loadmat(split_path)\n# split.keys() = ['testNdxs', 'trainNdxs', '__version__', '__header__', '__globals__']\ntrainIdx = np.squeeze(split['trainNdxs']) - 1\ntestIdx = np.squeeze(split['testNdxs']) - 1\n# start from 1 to 1449, need to decrease 1 to fit pythonic index\n\nwith h5py.File(train_file, 'w') as f:\n f['data'] = new_images[trainIdx]\n f['label'] = new_depths[trainIdx]\n\nwith h5py.File(test_file, 'w') as f:\n f['data'] = new_images[testIdx]\n f['label'] = new_depths[testIdx]\n\nwith open(os.path.join(dirname, 'train.txt'), 'w') as f:\n f.write(train_file + '\\n')\n f.write(train_file + '\\n')\n\nwith open(os.path.join(dirname, 'test.txt'), 'w') as f:\n f.write(test_file + '\\n')\n f.write(test_file + '\\n')\n\n","sub_path":"examples/nyu_depth_v2/convert_nyu_dataset.py","file_name":"convert_nyu_dataset.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"612977494","text":"#from Tkinter import Tk, Label, Button, Scale\nimport Tkinter as tk\nimport data_generators_WIN\n\n# try using\n# - Frame: \n# - Checkbutton\n# - Radiobutton\n# - Listbox\n# - Entry\n\n\nclass MyFirstGUI:\n def __init__(self, master):\n self.master = master\n master.title(\"Data Generator\")\n self.snr_value = 0\n\n self.label = tk.Label(master, text=\"Control Data Modulation\")\n self.label.pack()\n\n self.qpsk_button = tk.Button(master, text=\"Gen QPSK\", command=self.send_QPSK)\n self.qpsk_button.pack()\n \n self.bpsk_button = tk.Button(master, text=\"Gen BPSK\", command=self.send_BPSK)\n self.bpsk_button.pack()\n\n self.close_button = tk.Button(master, text=\"Close\", command=master.quit)\n self.close_button.pack()\n\n self.scale_test = tk.Scale(master, from_=-20, to=20, variable=self.snr_value)\n self.scale_test.pack()\n \n def send_QPSK(self):\n print(\"Sending QPSK, SNR: \" + str(self.snr_value))\n gen_qpsk(snr_value)\n \n def send_BPSK(self):\n print(\"Sending BPSK, SNR: \" + str(self.snr_value))\n\nroot = tk.Tk()\nmy_gui = MyFirstGUI(root)\ntk.root.mainloop()\n","sub_path":"python27/gui/GUI_mod.py","file_name":"GUI_mod.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"453291967","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport sklearn\nfrom sklearn import preprocessing\n\n#Note: Change 3 columns(Diabetes,Taste.Preferences,Food.Preferences) in uaha.csv manually to avoid out of scope error\ndftrain=pd.read_csv(\"uaha.csv\")\ndftrain=dftrain.drop(['Timestamp','Age','Locality','Height(cms)','Weight(kgs)',],axis=1)\n\ndatalikes=dftrain['Any specific Likes?'].str.get_dummies(sep=',')\ndataftypes=dftrain['Food Types ( You can select more than one)'].str.get_dummies(sep=',')\ndataallergies=dftrain['Allergies(leave blank if 
none)'].str.get_dummies(sep=',')\n\n\n\ndf=pd.concat([dftrain, dataftypes,dataallergies,datalikes], axis=1)\ndf=df.drop(['Allergies(leave blank if none)','Food Types ( You can select more than one)','Any specific Likes?'],axis=1)\n\n#Food type\ndf[\"FoodType.Egg\"]=df.iloc[:,4]|df.iloc[:,8] #EGG 5 9 1\ndf[\"FoodType.NonVegetarian\"]=df.iloc[:,5]|df.iloc[:,9]\ndf[\"FoodType.Seafood\"]=df.iloc[:,8]|df.iloc[:,10]\ndf[\"FoodType.Vegan\"]=df.iloc[:,9]|df.iloc[:,11]\ndf[\"FoodType.Vegetarian\"]=df.iloc[:,12]\n\n\n\n#Allergie 17 28\n\ndf[\"Allergies.Corn\"]=df.iloc[:,13]|df.iloc[:,20]\ndf[\"Allergies.Eggs\"]=df.iloc[:,14]|df.iloc[:,21]\ndf[\"Allergies.Fish\"]=df.iloc[:,15]|df.iloc[:,22]\ndf[\"Allergies.Gelatin\"]=df.iloc[:,16]|df.iloc[:,23]\ndf[\"Allergies.Peanuts\"]=df.iloc[:,17]|df.iloc[:,26]\ndf[\"Allergies.Soy\"]=df.iloc[:,18]|df.iloc[:,27]\ndf[\"Allergies.Wheat\"]=df.iloc[:,19]|df.iloc[:,28]\ndf[\"Allergies.Milk\"]=df.iloc[:,24]\ndf[\"Allergies.None\"]=df.iloc[:,25]\n\n#Likes\ndf[\"Likes.Chinese\"]=df.iloc[:,29]|df.iloc[:,34]\ndf[\"Likes.Lebanese\"]=df.iloc[:,30]|df.iloc[:,35]\ndf[\"Likes.Maharashtrian\"]=df.iloc[:,31]|df.iloc[:,36]\ndf[\"Likes.Punjabi\"]=df.iloc[:,32]|df.iloc[:,38]\ndf[\"Likes.Western\"]=df.iloc[:,33]|df.iloc[:,40]\ndf[\"Likes.None\"]=df.iloc[:,37]\ndf[\"Likes.Thai\"]=df.iloc[:,39]\n\ndf=df.drop(df.iloc[:,4:41],axis=1) #until 43 not including 43\n\nleR = preprocessing.LabelEncoder()\ndf['Recommended.Package'] = leR.fit_transform(df['Recommended.Package'])\n#print(leR.classes_) #to see the order\n\nleD = preprocessing.LabelEncoder()\ndf['Diabetes'] = leD.fit_transform(df['Diabetes'])\n#print(leD.classes_)\n\nleT = preprocessing.LabelEncoder()\ndf['Taste.Preferences'] = leT.fit_transform(df['Taste.Preferences'])\n#print(leT.classes_)\n\ndf['BMI'] = df['BMI'].round(3) # reduce float\n\n'''\nOne hot: 1 true 0 false\nLabel Encoding:\n\nRecommended Package:0-16\n['BMI-High' 'BMI-Low' 'Chinese-Normal' 'Chinese-Vegan'\n 'Chinese-Vegetarian' 'Diabetes' 'Lebenese-Nor' 'Lebenese-Vegeterain'\n 'Maharashtrian-Nor' 'Maharashtrian-Vegetarian' 'Punjabi-Normal'\n 'Punjabi-Vegan' 'Punjabi-Vegetarian' 'Thai-Nor' 'Thai-Veg' 'Western Nor'\n 'Western-Veg']\n\n Diabetes:['No' 'Yes']\nTaste Preference:['Bland' 'Medium-Spicy' 'None' 'Spicy']\n\n\n Note change 3 columns manually.\n'''\n#df.to_csv(\"x.csv\")\n\n\n\n\n#df.to_csv(\"x.csv\")\n\n\n#IMPLLEMENTING BOOSTEDTREESCLASSIFIER TENSORFLOW\n\ndftrain = pd.read_csv(\"x.csv\")\ndfeval = pd.read_csv(\"x.csv\")\ny_train = dftrain.pop('Recommended.Package') #put as target and pop from dataset\ny_eval = dfeval.pop('Recommended.Package')\n\n\n\nfc = tf.feature_column\nCATEGORICAL_COLUMNS = [ 'Taste.Preferences','Diabetes','FoodType.Egg',\t'FoodType.NonVegetarian',\t'FoodType.Seafood'\t,'FoodType.Vegan',\t'FoodType.Vegetarian',\t'Allergies.Corn',\t'Allergies.Eggs',\t'Allergies.Fish',\t'Allergies.Gelatin',\t'Allergies.Peanuts',\t'Allergies.Soy',\t'Allergies.Wheat',\t'Allergies.Milk',\t'Allergies.None',\t'Likes.Chinese',\t'Likes.Lebanese',\t'Likes.Maharashtrian',\t'Likes.Punjabi',\t'Likes.Western','Likes.None',\t'Likes.Thai']\nNUMERIC_COLUMNS = ['BMI']\n\ndef one_hot_cat_column(feature_name, vocab):\n return tf.feature_column.indicator_column(\n tf.feature_column.categorical_column_with_vocabulary_list(feature_name,\n vocab))\n\nfeature_columns = []\nfor feature_name in CATEGORICAL_COLUMNS:\n # Need to one-hot encode categorical features.\n vocabulary = dftrain[feature_name].unique()\n feature_columns.append(one_hot_cat_column(feature_name, 
vocabulary))\n\n\n\n\n\nfor feature_name in NUMERIC_COLUMNS:\n feature_columns.append(tf.feature_column.numeric_column(feature_name,\n dtype=tf.float32))\n\n\n\nclassifier = tf.estimator.BoostedTreesClassifier(\n feature_columns=feature_columns,\n n_batches_per_layer=10\n)\n\n\n\nNUM_EXAMPLES = len(y_train)\n\ndef make_input_fn(X, y, n_epochs=1000, shuffle=True):\n def input_fn():\n dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))\n if shuffle:\n dataset = dataset.shuffle(NUM_EXAMPLES)\n # For training, cycle thru dataset as many times as need (n_epochs=None).\n dataset = dataset.repeat(n_epochs)\n # In memory training doesn't use batching.\n dataset = dataset.batch(NUM_EXAMPLES)\n return dataset\n return input_fn\n\ntrain_input_fn = make_input_fn(dftrain, y_train)\neval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)\n\n\n\nn_batches = 1\nest = tf.estimator.DNNClassifier(hidden_units=[64,32],feature_columns=feature_columns,\n n_classes=17)\n\n# The model will stop training once the specified number of trees is built, not\n# based on the number of steps.\nest.train(train_input_fn, steps=100)\n\n# Eval.\nresults = est.evaluate(eval_input_fn)\n\nprint('Accuracy : ', results['accuracy'])\n\n","sub_path":"ML/Backup/dnnclassifier.py","file_name":"dnnclassifier.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"324776395","text":"from google.cloud import vision\nfrom google.protobuf.json_format import MessageToJson\nimport base64\nimport json\n\ndef dniverification(request):\n request_json = request.get_json()\n if request_json and 'img' in request_json:\n data = document_ocr(request_json['img'])\n print('----------------')\n print(request_json['name'])\n print(request_json['dni'])\n print(json.loads(data)['textAnnotations'][0]['description'])\n print('----------------')\n return data\n else:\n return f'Error!'\n\ndef document_ocr(image):\n client = vision.ImageAnnotatorClient()\n response = client.annotate_image({'image': {'content': base64.b64decode(image)}, 'features': [{'type': vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION}],})\n return MessageToJson(response)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"178398136","text":"from core import SDK\nfrom prometheus_client import start_http_server, Gauge\nimport time\nimport os\nfrom threading import Thread\nimport datetime as dt\nimport random\nfrom google.protobuf import empty_pb2\nfrom dialog_api import sequence_and_updates_pb2\nimport requests\nimport urllib.parse\n\nUPLOAD_SPEED = Gauge('file_uploading_mbps', 'File upload speed')\nDOWNLOAD_SPEED = Gauge('file_downloading_mbps', 'File download speed')\nSHARING_UPLOAD_SPEED = Gauge('sharing_uploading_mbps', 'Sharing upload speed')\n\nuploaded = False\ndownloaded = False\nsharing_uploaded = False\nuploaded_location = None\n\n\ndef create_file(length=1_000_000):\n alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n result = ''\n for _ in range(length):\n result += alphabet[random.randint(0, len(alphabet)-1)]\n\n filename = 'testfile_' + result[:10]\n\n with open(filename, 'w+') as f:\n f.write(result)\n\n return filename\n\n\ndef bot2_seq_handler():\n global downloaded\n for update in bot2.updates.SeqUpdates(empty_pb2.Empty()):\n up = sequence_and_updates_pb2.UpdateSeqUpdate()\n 
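# update.update wraps the serialized update payload; the next line decodes its raw bytes into a typed UpdateSeqUpdate so the documentMessage fields can be read.\n 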
up.ParseFromString(update.update.value)\n if up.updateMessage.message.documentMessage.file_id != 0:\n file_id = up.updateMessage.message.documentMessage.file_id\n access_hash = up.updateMessage.message.documentMessage.access_hash\n bot2.download_file(file_id, access_hash)\n downloaded = True\n break\n\n\ndef upload_handler():\n global uploaded\n global uploaded_location\n location = bot1.upload_file(file)\n if location:\n uploaded = True\n uploaded_location = location\n\n\ndef sharing_upload_handler():\n global sharing_uploaded\n data = {\n 'peer': 'private_' + str(bot2_outpeer.id) + ':' + str(bot2_outpeer.access_hash),\n }\n files = {\n 'file': open(file, 'rb')\n }\n r = requests.post(\n os.environ['SHARING_URL'] + '/v1/messaging?token=' + urllib.parse.quote(bot1.token),\n data=data,\n files=files,\n )\n\n print(r.text)\n if r.status_code == 200:\n sharing_uploaded = True\n\n\nif __name__ == '__main__':\n bot1 = SDK(os.environ.get('BOT_ENDPOINT'))\n bot2 = SDK(os.environ.get('BOT_ENDPOINT'))\n\n token1 = os.environ.get('FIRST_BOT_TOKEN')\n token2 = os.environ.get('SECOND_BOT_TOKEN')\n\n start_http_server(8082)\n\n counter = 0\n while True:\n try:\n print('counter =', counter, dt.datetime.now())\n counter += 1\n\n uploaded = False\n downloaded = False\n sharing_uploaded = False\n uploaded_location = None\n\n bot1_user_info = bot1.bot_authorize(token1)\n bot2_user_info = bot2.bot_authorize(token2)\n\n bot2_outpeer = bot1.find_user_outpeer_by_nick(bot2_user_info.user.data.nick.value)\n\n file = create_file()\n\n upload_start_time = time.time()\n bot2_upload_thread = Thread(target=upload_handler()).start()\n\n while True:\n if time.time() - upload_start_time >= 10:\n UPLOAD_SPEED.set(0)\n break\n if uploaded:\n print('file uploaded at ' + str(dt.datetime.now()) + ': ', uploaded_location.file_id)\n UPLOAD_SPEED.set(os.path.getsize(file) / (time.time() - upload_start_time) / 1024 / 1024)\n break\n\n bot1.send_file_by_location(bot2_outpeer, file, uploaded_location)\n\n bot2_thread = Thread(target=bot2_seq_handler).start()\n download_start_time = time.time()\n\n while True:\n if time.time() - download_start_time >= 10:\n DOWNLOAD_SPEED.set(0)\n break\n if downloaded:\n print('file downloaded at ' + str(dt.datetime.now()) + ': ', uploaded_location.file_id)\n DOWNLOAD_SPEED.set(os.path.getsize(file) / (time.time() - download_start_time) / 1024 / 1024)\n break\n\n sharing_upload_start_time = time.time()\n bot2_sharing_thread = Thread(target=sharing_upload_handler).start()\n\n while True:\n if time.time() - sharing_upload_start_time >= 10:\n SHARING_UPLOAD_SPEED.set(0)\n break\n if sharing_uploaded:\n print('sharing file uploaded at ' + str(dt.datetime.now()))\n SHARING_UPLOAD_SPEED.set(\n os.path.getsize(file) / (time.time() - sharing_upload_start_time) / 1024 / 1024\n )\n break\n\n bot1.logout()\n bot2.logout()\n os.remove(file)\n time.sleep(120)\n except Exception as e:\n print(e)\n continue\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"132100438","text":"# Problem Set 6: Simulating robots\n\nimport math\nimport random\n\nimport ps6_visualize as visualize\nimport pylab\nimport numpy\n\nclass Position(object):\n \"\"\"\n A Position represents a location in a two-dimensional room.\n \"\"\"\n def __init__(self, x, y):\n \"\"\"\n Initializes a position with coordinates (x, y).\n \"\"\"\n self.x = x\n self.y = y\n def getX(self):\n return self.x\n def 
getY(self):\n return self.y\n def getNewPosition(self, angle, speed):\n \"\"\"\n Computes and returns the new Position after a single clock-tick has\n passed, with this object as the current position, and with the\n specified angle and speed.\n\n Does NOT test whether the returned position fits inside the room.\n\n angle: float representing angle in degrees, 0 <= angle < 360\n speed: positive float representing speed\n\n Returns: a Position object representing the new position.\n \"\"\"\n old_x, old_y = self.getX(), self.getY()\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = round((old_x + delta_x),2)\n new_y = round((old_y + delta_y),2)\n return Position(new_x, new_y)\n\n# === Problem 1\n\nclass RectangularRoom(object):\n \"\"\"\n A RectangularRoom represents a rectangular region containing clean or dirty\n tiles.\n\n A room has a width and a height and contains (width * height) tiles. At any\n particular time, each of these tiles is either clean or dirty.\n \"\"\"\n def __init__(self, width, height):\n \"\"\"\n Initializes a rectangular room with the specified width and height.\n\n Initially, no tiles in the room have been cleaned.\n\n width: an integer > 0\n height: an integer > 0\n \"\"\"\n self.robots=[]\n self.width=width\n self.height=height\n self.floordictionary={}\n for x in range(width):\n for y in range(height):\n self.floordictionary[x,y]=1\n \n def cleanTileAtPosition(self, pos):\n \"\"\"\n Mark the tile under the position POS as cleaned.\n\n Assumes that POS represents a valid position inside this room.\n\n pos: a Position\n \"\"\"\n self.floordictionary[int(pos.x),int(pos.y)]=0 \n\n def isTileCleaned(self, m, n):\n \"\"\"\n Return True if the tile (m, n) has been cleaned.\n\n Assumes that (m, n) represents a valid tile inside the room.\n\n m: an integer\n n: an integer\n returns: True if (m, n) is cleaned, False otherwise\n \"\"\"\n return self.floordictionary[m,n]==0\n \n def getNumTiles(self):\n \"\"\"\n Return the total number of tiles in the room.\n\n returns: an integer\n \"\"\"\n return len(self.floordictionary)\n\n def getNumCleanedTiles(self):\n \"\"\"\n Return the total number of clean tiles in the room.\n\n returns: an integer\n \"\"\"\n cleaned=[]\n for k in self.floordictionary.keys():\n if self.floordictionary[k]==0:\n cleaned.append(k)\n return len(cleaned)\n \n def getRandomPosition(self):\n \"\"\"\n Return a random position inside the room.\n\n returns: a Position object.\n \"\"\"\n # fractional offset is at most 0.9, so coordinates stay inside [0, width) x [0, height)\n return Position(float((random.randrange(self.width)+ random.randrange(10)*.1)),\n float((random.randrange(self.height)+ random.randrange(10)*.1)))\n \n def isPositionInRoom(self, pos):\n \"\"\"\n Return True if pos is inside the room.\n\n pos: a Position object.\n returns: True if pos is in the room, False otherwise.\n \"\"\"\n if pos.x>=self.width or pos.x<0:\n return False\n elif pos.y>=self.height or pos.y<0:\n return False\n else:\n return True\n\nclass Robot(object):\n \"\"\"\n Represents a robot cleaning a particular room.\n\n At all times the robot has a particular position and direction in the room.\n The robot also has a fixed speed.\n\n Subclasses of Robot should provide movement strategies by implementing\n updatePositionAndClean(), which simulates a single time-step.\n \"\"\"\n def __init__(self, name, room, speed):\n \"\"\"\n Initializes a Robot with the given speed in the specified room. 
The\n robot initially has a random direction and a random position in the\n room. The robot cleans the tile it is on.\n\n room: a RectangularRoom object.\n speed: a float (speed > 0)\n \"\"\"\n self.name=name\n room.robots.append(self)\n self.room=room\n self.position=room.getRandomPosition()\n self.speed=speed\n self.direction=random.randrange(360)\n room.cleanTileAtPosition(self.position)\n \n def getRobotPosition(self):\n \"\"\"\n Return the position of the robot.\n\n returns: a Position object giving the robot's position.\n \"\"\"\n return self.position\n \n def getRobotDirection(self):\n \"\"\"\n Return the direction of the robot.\n\n returns: an integer d giving the direction of the robot as an angle in\n degrees, 0 <= d < 360.\n \"\"\"\n return self.direction\n\n def setRobotPosition(self, position):\n \"\"\"\n Set the position of the robot to POSITION.\n\n position: a Position object.\n \"\"\"\n try:\n self.position=position\n except:\n print(\"not a valid position\")\n\n def setRobotDirection(self, direction):\n \"\"\"\n Set the direction of the robot to DIRECTION.\n\n direction: integer representing an angle in degrees\n \"\"\"\n self.direction=direction\n\n def updatePositionAndClean(self):\n \"\"\"\n Simulate the raise passage of a single time-step.\n\n Move the robot to a new position and mark the tile it is on as having\n been cleaned.\n \"\"\"\n testPos=self.position.getNewPosition(self.direction, self.speed)\n if not self.room.isPositionInRoom(testPos):\n self.setRobotDirection(random.randrange(1,361,1))\n print(self.name + \" bounces \" + str(self.direction)+\" degrees\")\n self.updatePositionAndClean()\n \n else:\n self.position=self.position.getNewPosition(self.direction, self.speed)\n self.room.cleanTileAtPosition(self.position)\n \n# === Problem 2 \n# Fixed issues in problem 1, now uses float->int conversion to identify panel to be cleaned in room\n# Idea = write a recursive element so it tests validitiy on each\n# Validiated to work up to problem 3. method updatepostion copied to class, remove out of range if needed\n\nclass StandardRobot(Robot):\n \"\"\"\n A StandardRobot is a Robot with the standard movement strategy.\n\n At each time-step, a StandardRobot attempts to move in its current direction; when\n it hits a wall, it chooses a new direction randomly.\n \"\"\"\n def updatePositionAndClean(self):\n \"\"\"\n Simulate the passage of a single time-step.\n\n Move the robot to a new position and mark the tile it is on as having\n been cleaned.\n \"\"\"\n testPos=self.position.getNewPosition(self.direction, self.speed)\n if not self.room.isPositionInRoom(testPos):\n self.setRobotDirection(random.randrange(361))\n## print(self.name + \" bounces \" + str(self.direction)+\" degrees\")\n self.updatePositionAndClean()\n \n else:\n self.position=self.position.getNewPosition(self.direction, self.speed)\n self.room.cleanTileAtPosition(self.position)\n \n\n# === Problem 3\n# How to approach? 
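\n#\n# Two small contract slips in the classes above are worth noting: the docstrings\n# promise a direction d with 0 <= d < 360, yet Robot bounces with\n# random.randrange(1,361,1) (spans 1..360) and StandardRobot with\n# random.randrange(361) (spans 0..360), so both can return 360. A draw that\n# matches the stated contract, as a minimal sketch:\n#\n#     direction = random.randrange(360)    # uniform over 0..359, i.e. 0 <= d < 360\n#\n# Likewise, isPositionInRoom rejects pos.x == 0 and pos.y == 0 even though tile\n# (0, 0) is part of the grid built in __init__; the conventional check is\n# inclusive at the lower edge:\n#\n#     return 0 <= pos.x < self.width and 0 <= pos.y < self.height\n#\n# 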
First get multiple robots going all from simulation function - check\n# figure out how to generate and record data + coverage - check\n# iterate over a number of simulations and figure out how to mean data - check\n## This is fully operational, like a Death Star ready to destroy the rebel fleet\n## \n\ndef runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n \"\"\"\n Runs NUM_TRIALS trials of the simulation and returns the mean number of\n time-steps needed to clean the fraction MIN_COVERAGE of the room.\n\n The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with\n speed SPEED, in a room of dimensions WIDTH x HEIGHT.\n\n num_robots: an int (num_robots > 0)\n speed: a float (speed > 0)\n width: an int (width > 0)\n height: an int (height > 0)\n min_coverage: a float (0 <= min_coverage <= 1.0)\n num_trials: an int (num_trials > 0)\n robot_type: class of robot to be instantiated (e.g. Robot or\n RandomWalkRobot)\n \"\"\"\n resultDict={}\n totalArray=0\n for trial in range(num_trials):\n## anim=visualize.RobotVisualization(num_robots,width,height)\n room=RectangularRoom(width,height)\n counter=0\n for n in range(num_robots):\n n=robot_type(\"robot-\"+str(n),room,speed)\n while (room.getNumCleanedTiles()/len(room.floordictionary) < min_coverage):\n for r in room.robots:\n r.updatePositionAndClean()\n## anim.update(room,room.robots)\n counter+=1\n resultDict[trial]=counter\n## print(str(counter) + \" seconds\")\n## print(str(room.getNumCleanedTiles())+ \" / \" + str(len(room.floordictionary)))\n for x,y in resultDict.items():\n totalArray+=y\n return totalArray / len(resultDict) \n\n\n# === Problem 4\n#\n# 1) How long does it take to clean 80% of a 20×20 room with each of 1-10 robots?\n#\n# 2) How long does it take two robots to clean 80% of rooms with dimensions \n#\t 20×20, 25×16, 40×10, 50×8, 80×5, and 100×4?\n\n\ndef showPlot1():\n \"\"\"\n Produces a plot showing dependence of cleaning time on number of robots.\n \"\"\"\n ydata,robotarray=[],[]\n for robots in range(1,11): \n ydata+=[runSimulation(robots, 1, 20, 20, 0.8, 20, StandardRobot)] \n robotarray+=[robots] \n\n print(ydata)\n pylab.plot(robotarray,ydata)\n pylab.title('Seconds to Clean Room by Robots Used')\n pylab.xlabel('Robots')\n pylab.ylabel('Seconds')\n pylab.show()\n\n\ndef showPlot2():\n \"\"\"\n Produces a plot showing dependence of cleaning time on room shape.\n \"\"\"\n \n dimensions=[(20,20),(25,16),(40,10),(50,8),(80,5),(100,4)]\n ydata,ratio=[],[]\n\n for t in dimensions: \n ydata+=[runSimulation(2, 1, t[0], t[1], 0.8, 20, StandardRobot)] \n ratio+=[t[0]/t[1]]\n\n print(ydata)\n pylab.plot(ratio,ydata)\n pylab.title('Seconds to Clean Room by Room Dimensions Ratio')\n pylab.xlabel('Room Dimension Ratio(width/height)')\n pylab.ylabel('Seconds')\n pylab.show()\n\n\n### === Problem 5\n##\n\n \nclass RandomWalkRobot(Robot):\n \"\"\"\n A RandomWalkRobot is a robot with the \"random walk\" movement strategy: it\n chooses a new direction at random after each time-step.\n \"\"\"\n def updatePositionAndClean(self):\n \"\"\"\n Simulate the passage of a single time-step.\n\n Move the robot to a new position and mark the tile it is on as having\n been cleaned.\n \"\"\"\n \n testPos=self.position.getNewPosition(random.randrange(361), self.speed)\n if not self.room.isPositionInRoom(testPos):\n self.updatePositionAndClean()\n \n else:\n self.position=testPos\n self.room.cleanTileAtPosition(self.position)\n \n\n### === Problem 6\n##\n### For the parameters tested below (cleaning 
80% of a 20x20 square room),\n### RandomWalkRobots take approximately twice as long to clean the same room as\n### StandardRobots do.\n\n \ndef showPlot3():\n \"\"\"\n Produces a plot comparing the two robot strategies.\n \"\"\"\n ydata,robotarray,zdata=[],[],[]\n for robots in range(1,11): \n ydata+=[runSimulation(robots, 1, 20, 20, 0.8, 20, StandardRobot)]\n zdata+=[runSimulation(robots, 1, 20, 20, 0.8, 20, RandomWalkRobot)]\n robotarray+=[robots] \n\n\n pylab.plot(robotarray,ydata,linewidth=3.0,color=\"red\", label=\"Standard\")\n pylab.plot(robotarray,zdata,linewidth=3.0,color=\"black\", label=\"Random\") \n pylab.title('Seconds to Clean Room by Robots Used')\n pylab.xlabel('Robots')\n pylab.ylabel('Seconds')\n pylab.show()\n\n### \n### Get some - problem vanquished\n\n\n","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":12506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"608564788","text":"from enum import IntEnum, unique\n\n@unique\nclass ImageTagState(IntEnum):\n NOT_READY = 0\n READY_TO_TAG = 1\n TAG_IN_PROGRESS = 2\n COMPLETED_TAG = 3\n INCOMPLETE_TAG = 4\n ABANDONED = 5\n\n# An entity class for a VOTT image\nclass ImageInfo(object):\n def __init__(self, image_name, image_location, height, width):\n self.image_name = image_name\n self.image_location = image_location\n self.height = height\n self.width = width\n\n\n# Entity class for Tags stored in DB\nclass ImageTag(object):\n def __init__(self, image_id, x_min, x_max, y_min, y_max, classification_names):\n self.image_id = image_id\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.classification_names = classification_names\n \n @staticmethod\n def fromJson(dictionary):\n if dictionary.items():\n image_tag = ImageTag(dictionary[\"image_id\"], dictionary[\"x_min\"], dictionary[\"x_max\"], dictionary[\"y_min\"], dictionary[\"y_max\"], dictionary[\"classification_names\"])\n return image_tag\n\n#This class doesn't have box and image confidence because they are human curated labels\nclass AnnotatedLabel(object):\n def __init__(self, image_id, classification_id, x_min, x_max, y_min, y_max):\n self.image_id = image_id\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.classification_id = classification_id\n\n\nclass ImageLabel(object):\n def __init__(self,image_id, imagelocation,image_height: int, image_width: int, labels: list, user_folder=None):\n self.image_id = image_id\n self.imagelocation = imagelocation\n self.image_height = image_height\n self.image_width = image_width\n self.user_folder = user_folder\n self.labels = labels\n \n @staticmethod\n def fromJson(dictionary):\n tags = []\n if (isinstance(dictionary[\"labels\"], dict)):\n tags = [ImageTag.fromJson(dictionary[\"labels\"])]\n elif (isinstance(dictionary[\"labels\"], list)):\n tags = [ImageTag.fromJson(label) for label in dictionary[\"labels\"]]\n\n image_label = ImageLabel(dictionary[\"image_id\"], dictionary[\"imagelocation\"], dictionary[\"image_height\"], dictionary[\"image_width\"], tags, dictionary.get(\"user_folder\"))\n return image_label\n\n\nclass Tag(object):\n def __init__(self,classificationname, x_min: float, x_max: float, y_min: float, y_max: float):\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.classificationname = classificationname\n\n def convert_to_relative(self, width, height):\n self.x_min = self.x_min/width\n self.x_max = 
self.x_max/width\n self.y_min = self.y_min/height\n self.y_max = self.y_max/height\n\n\nclass PredictionLabel(AnnotatedLabel):\n def __init__(self, training_id, image_id, classification_id, x_min, x_max, y_min, y_max, \n image_height, image_width, box_confidence=0, image_confidence= 0):\n super().__init__(image_id, classification_id, x_min, x_max, y_min, y_max)\n self.training_id = training_id\n self.image_height = image_height\n self.image_width = image_width\n self.box_confidence = box_confidence\n self.image_confidence = image_confidence\n\n def convert_to_absolute(self):\n self.x_min = self.x_min*self.image_width\n self.x_max = self.x_max*self.image_width\n self.y_min = self.y_min*self.image_height\n self.y_max = self.y_max*self.image_height\n\nclass TrainingSession(object):\n def __init__(self, description, model_url, avg_perf: float, class_perf: dict):\n self.description = description\n self.model_url = model_url\n self.avg_perf = avg_perf\n self.class_perf = class_perf \n","sub_path":"functions/pipeline/shared/db_access/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"298462208","text":"\nimport argparse\nimport os\nfrom collections import namedtuple\n\n'''\nFor more information about types and locales\nplease visit the website:\n\nhttp://en.wikipedia.org/wiki/Wikipedia:GEO#type:T\n\nI separated the insert and extract script in order to\navoid that redundand or useless data flows into the database.\nTherefore, please always check the .csv before inserting it into\nMongoDB.\n'''\n\n# which languages does the script accept.\n# if you need more languages than English, German and Spanish\n# then add the language ISO code here.\nACCEPTED_LANGUAGES = set(['en', 'de', 'es'])\n\ndef parse_args():\n description = 'Save geo entities in MongoDB'\n\n parser = argparse.ArgumentParser(description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--source-csv', '-s', type=str, dest='source_file', required=True,\n help='The path of the CSV file.')\n\n parser.add_argument('--languages', '-l', type=str, dest='languages',\n required=True, nargs='+',\n help='Which languages should be extracted - use ISO codes!')\n\n parser.add_argument('--mongo-url', '-u', type=int, dest='mongo_url',\n default='localhost',\n help='The url of the MongoDB')\n\n parser.add_argument('--mongo-port', '-p', type=int, default=5010, dest='port',\n help='Which port to the MongoDB should be used.')\n\n parser.add_argument('--destination_file', '-d', type=str, default='/tmp/entities.csv',\n help='Where the resulting csv should be saved.', dest='destination_file')\n\n args = parser.parse_args()\n src = args.source_file\n languages = args.languages\n\n # some basic check ups for the script.\n assert all(lang in ACCEPTED_LANGUAGES for lang in languages), \\\n 'Invalid language codes provided: %s' % languages\n\n assert os.path.isfile(src), 'Source file does not exist: %s!' 
% src\n\n ParserArguments = namedtuple('ParserArguments', ['source_file',\n 'mongo_url',\n 'mongo_port'\n 'languages'])\n return ParserArguments(source_file=src,\n mongo_url=args.mongo_url,\n mongo_port=args.port,\n languages=languages)\n","sub_path":"helping_network/helping_network_website/scripts/geo/insert_csv_into_mongo.py","file_name":"insert_csv_into_mongo.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"321615547","text":"import openpyxl\n\ntheFile = openpyxl.load_workbook('Customers1.xlsx')\nprint(theFile.sheetnames)\ncurrentSheet = theFile['customers 1']\nprint(currentSheet['B4'].value)\n\n\"\"\"As you can see, this code prints all sheets by their names. It then selects the sheet that is named “customers 1”\n and saves it to a currentSheet variable. In the last line, the code prints the value that is located in the B4 \n position of the “customers 1” sheet.\n\"\"\"\n\nimport openpyxl\n\ntheFile = openpyxl.load_workbook('Customers1.xlsx')\nallSheetNames = theFile.sheetnames\n\nprint(\"All sheet names {} \" .format(theFile.sheetnames))\n\n\nfor x in allSheetNames:\n print(\"Current sheet name is {}\" .format(x))\n currentSheet = theFile[x]\n print(currentSheet['B4'].value)\n\n\"\"\"Read the file\nGet all sheet names\nLoop through all sheets\nIn the last step, the code will print values that are located in B4 fields of each found sheet inside the workbook.\n\nhttps://www.freecodecamp.org/news/how-to-create-read-update-and-search-through-excel-files-using-python-c70680d811d4/\n\"\"\"\n","sub_path":"open_excelfile.py","file_name":"open_excelfile.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"560366060","text":"# encoding: utf-8\n\nfrom collections import namedtuple\n\n\"\"\" special\n\"\"\"\n\nSpecial = namedtuple('Special', [\n 'NUM1', 'NUM2', 'NUM3', 'NUM4', 'NUM5', 'NUM6', 'NUM7',\n 'LOC',\n 'EMJ',\n 'ABU',\n 'CTAM', 'CTA',\n 'ADD',\n 'AGR',\n 'TRS', 'TRS3RD', 'TRSBHV', 'TRSSUS',\n 'RES', 'RESS',\n 'VIP',\n 'WHO',\n 'PLG',\n 'OGM',\n 'MNY',\n 'URL',\n 'AUD',\n 'HAO',\n 'SYS',\n 'SALE',\n 'TSP',\n 'NEED',\n 'HVN',\n 'FIT',\n],\ndefaults=[\n '[NUM]-1', '[NUM]-2', '[NUM]-3', '[NUM]-4', '[NUM]-5', '[NUM]-6', '[NUM]-7',\n '[LOC]',\n '[EMJ]',\n '[ABU]',\n '[CTA-M]', '[CTA]',\n '[ADD]',\n '[AGR]',\n '[TRS]', '[TRS-3RD]', '[TRS-BHV]', '[TRS-SUS]',\n '[RES]', '[RES-S]',\n '[VIP]',\n '[WHO]',\n '[PLG]',\n '[OGM]',\n '[MNY]',\n '[URL]',\n '[AUD]',\n '[HAO]',\n '[SYS]',\n '[SALE]',\n '[TSP]',\n '[NEED]',\n '[HVN]',\n '[FIT]',\n])\n\nspecial = Special()\n\n\n\"\"\" special num\n\"\"\"\n\nSpecialNum = namedtuple('SpecialNum', [\n 'NUM1', 'NUM2', 'NUM3', 'NUM4', 'NUM5', 'NUM6', 'NUM7',\n],\ndefaults=[\n\tspecial.NUM1, special.NUM2, special.NUM3, special.NUM4, special.NUM5, special.NUM6, special.NUM7,\n])\n\nspecialnum = SpecialNum()\n\n\n\"\"\" special 0\n\"\"\"\n\nSpecial0 = namedtuple('Special0', [\n\t'EMJ', 'NUM1', 'NUM2', 'NUM3', 'NUM4', 'NUM5', 'NUM6', 'NUM7', 'ABU', 'WHO', 'AUD', 'HVN', 'FIT',\n],\ndefaults=[\n\tspecial.EMJ, special.NUM1, special.NUM2, special.NUM3, special.NUM4, special.NUM5, special.NUM6, special.NUM7, special.ABU, special.WHO, special.AUD, special.HVN, special.FIT,\n])\n\nspecial0 = Special0()\n\n\n\"\"\" special 1\n\"\"\"\n\nSpecial1 = namedtuple('Special1', [\n\t'OGM', 'TRS3RD', 'TSP',\n],\ndefaults=[\n\tspecial.OGM, special.TRS3RD, special.TSP,\n])\n\nspecial1 = 
Special1()\n\n\n\"\"\" special 2\n\"\"\"\n\nSpecial2 = namedtuple('Special2', [\n\t'LOC', 'CTAM', 'CTA', 'TRS', 'TRSBHV', 'TRSSUS', 'RES', 'RESS', 'VIP', 'PLG', 'MNY', 'URL', 'ADD', 'AGR', 'HAO', 'SALE', 'NEED',\n],\ndefaults=[\n\tspecial.LOC, special.CTAM, special.CTA, special.TRS, special.TRSBHV, special.TRSSUS, special.RES, special.RESS, special.VIP, special.PLG, special.MNY, special.URL, special.ADD, special.AGR, special.HAO, special.SALE, special.NEED,\n])\n\nspecial2 = Special2()\n\n\n\"\"\" special risk\n\"\"\"\n\nSpecialRisk = namedtuple('SpecialRisk', [\n\t'OGM', 'TRS3RD', 'TSP', 'TRS', 'TRSBHV', 'TRSSUS', 'RES', 'VIP', 'PLG', 'MNY', 'URL', 'HAO', 'SALE',\n],\ndefaults=[\n\tspecial.OGM, special.TRS3RD, special.TSP, special.TRS, special.TRSBHV, special.TRSSUS, special.RES, special.VIP, special.PLG, special.MNY, special.URL, special.HAO, special.SALE,\n])\n\nspecialrisk = SpecialRisk()\n\n\ndef pprint(nt):\n\tfor k, v in nt._asdict().items():\n\t print(k, v)\n\tprint()\n\n\nif __name__ == '__main__':\n\tpprint(special)\n\tpprint(specialnum)\n\tpprint(special0)\n\tpprint(special1)\n\tpprint(special2)\n","sub_path":"ads/tokenizer/special.py","file_name":"special.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"172411911","text":"# coding: utf-8\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.template import loader\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom imagestore.qiniu_manager import(\n get_extension,\n handle_uploaded_file,\n upload,\n url,\n)\n\nfrom web.models import(\n\tAdvertising\n)\nfrom settings import(\n BACK_PAGE_COUNT, FILED_CHECK_MSG,\n UPLOAD_DIR\n)\nimport os\nimport datetime\nimport time\nimport simplejson\nimport re\nimport requests\n\n\n@staff_member_required(login_url='/admin/login')\ndef index(request):\n\n context = {\n 'module': 'home',\n }\n return render(request, 'super/index.html', context)\n\n\n@staff_member_required(login_url='/admin/login')\ndef advertising_list(request):\n\n clients = Advertising.objects.filter(is_del=False).order_by('order_no')\n\n paginator = Paginator(clients, BACK_PAGE_COUNT)\n page = request.GET.get('page', '')\n\n try:\n clients = paginator.page(page)\n except PageNotAnInteger:\n clients = paginator.page(1)\n except EmptyPage:\n clients = paginator.page(paginator.num_pages)\n\n context = {\n 'module': 'advertising',\n 'clients': clients,\n }\n return render(request, 'super/index/advertising/list.html', context)\n\n\n@staff_member_required(login_url='/admin/login')\ndef advertising_create(request):\n context = {\n 'module': 'advertising',\n }\n\n if request.method == 'POST':\n error = {}\n name = request.POST.get('name', '')\n target_url = request.POST.get('target_url', '')\n\n flag = True\n advertising = Advertising()\n\n if not len(name):\n flag = False\n error['name_msg'] = FILED_CHECK_MSG\n else:\n advertising.name = name\n\n # 进行类型判断\n advertising.target_url = target_url\n\n if flag:\n\n if request.FILES:\n if request.FILES.get('img', None):\n # 上传图片\n img = request.FILES['img']\n ts = int(time.time())\n ext = 
get_extension(img.name)\n key = 'logo_{}.{}'.format(ts, ext)\n handle_uploaded_file(img, key)\n upload(key, os.path.join(UPLOAD_DIR, key))\n advertising.img = key\n\n advertising.save()\n # 序号\n advertising.order_no = advertising.id\n advertising.save()\n\n return HttpResponseRedirect(reverse('web:advertising_list'))\n else:\n context['client'] = advertising\n context['error'] = error\n\n return render(request, 'super/index/advertising/create.html', context)\n\n\n@staff_member_required(login_url='/admin/login')\ndef advertising_edit(request, advertising_id):\n\n advertising = Advertising.objects.filter(id=advertising_id).first()\n context = {\n 'client': advertising,\n 'module': 'advertising',\n }\n\n if request.method == 'POST':\n error = {}\n name = request.POST.get('name', '')\n target_url = request.POST.get('target_url', '')\n\n flag = True\n\n if not len(name):\n flag = False\n error['name_msg'] = FILED_CHECK_MSG\n else:\n advertising.name = name\n\n # 进行类型判断\n advertising.target_url = target_url\n\n if flag:\n\n if request.FILES:\n if request.FILES.get('img', None):\n # 上传图片\n img = request.FILES['img']\n ts = int(time.time())\n ext = get_extension(img.name)\n key = 'logo_{}.{}'.format(ts, ext)\n handle_uploaded_file(img, key)\n upload(key, os.path.join(UPLOAD_DIR, key))\n advertising.img = key\n\n advertising.save()\n\n return HttpResponseRedirect(reverse('web:advertising_list'))\n else:\n context['error'] = error\n\n return render(request, 'super/index/advertising/create.html', context)\n\n\n@staff_member_required(login_url='/admin/login')\ndef advertising_delete(request, advertising_id):\n advertising = Advertising.objects.filter(pk=advertising_id).first()\n if advertising:\n advertising.delete()\n return HttpResponseRedirect(reverse('web:advertising_list'))\n\n\n@staff_member_required(login_url='/admin/login')\ndef advertising_up(request, advertising_id):\n advertising = Advertising.objects.filter(pk=advertising_id).first()\n if advertising:\n before_advertisings = Advertising.objects.filter(\n Q(order_no__lt=advertising.order_no) & ~Q(is_del=True)\n ).order_by('-order_no')\n if before_advertisings:\n # 旧\n before_advertising = before_advertisings[0]\n old_order_no = advertising.order_no\n advertising.order_no = before_advertising.order_no\n advertising.save()\n # 新\n before_advertising.order_no = old_order_no\n before_advertising.save()\n return HttpResponseRedirect(reverse('web:advertising_list'))\n\n\n@staff_member_required(login_url='/admin/login')\ndef advertising_down(request, advertising_id):\n advertising = Advertising.objects.filter(pk=advertising_id).first()\n if advertising:\n after_advertisings = Advertising.objects.filter(\n Q(order_no__gt=advertising.order_no) & ~Q(is_del=True)\n ).order_by('order_no')\n\n if after_advertisings:\n # 旧\n after_advertising = after_advertisings[0]\n old_order_no = advertising.order_no\n advertising.order_no = after_advertising.order_no\n advertising.save()\n # 新\n after_advertising.order_no = old_order_no\n after_advertising.save()\n return HttpResponseRedirect(reverse('web:advertising_list'))\n\n\n@csrf_exempt\ndef ckeditor_upload(request):\n if request.FILES:\n ts = int(time.time())\n img_file = request.FILES.get('upload', '')\n\n checkNum = request.GET.get('CKEditorFuncNum', '')\n ext = get_extension(request.FILES['upload'].name)\n key = 'ckeditor_{}.{}'.format(ts, ext)\n\n handle_uploaded_file(img_file, key)\n # 上传图片到qiniu\n upload(key, os.path.join(UPLOAD_DIR, key))\n return 
HttpResponse(\"\")\n","sub_path":"web/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"638603124","text":"import inspect\r\nimport os\r\nimport re\r\nfrom collections import Counter\r\nfrom matplotlib import pyplot as plt\r\nimport numpy\r\nimport pandas as pd\r\n\r\n\r\ndef main():\r\n os.chdir('E:\\\\Intern Work TeamTact') # change the current root directory\r\n np_methods_list = []\r\n lines_with_np_methods = []\r\n method_count_list = []\r\n fetch_np_methods(np_methods_list)\r\n file_fetch(lines_with_np_methods)\r\n find_methods_in_lines(lines_with_np_methods, np_methods_list, method_count_list)\r\n plot_top_20(method_count_list)\r\n\r\n\r\n# For extracting all the methods in Numpy Package\r\ndef fetch_np_methods(np_methods_list):\r\n np_methods = inspect.getmembers(numpy, inspect.isfunction)\r\n for i in np_methods:\r\n np_methods_list.append(i[0])\r\n\r\n\r\n# for fetching the files from the directories and searching for numpy entries\r\ndef file_fetch(lines_with_np_methods):\r\n # looping over the files through walker\r\n for path, subdirs, files in os.walk(os.getcwd()):\r\n for name in files:\r\n if name.endswith('.py') or name.endswith('.ipynb'): #\r\n with open(os.path.join(path, name), 'r', encoding=\"utf8\") as f: # opening the files\r\n for line in f:\r\n # finding all lines with numpy methods\r\n if re.findall(r'np\\.[a-z]+\\(*', line) or re.findall(r'numpy\\.[a-z]+\\(*',\r\n line): # searching for words like np.* or numpy.*\r\n lines_with_np_methods.append(line.strip()) # removing all the white spaces from the code\r\n\r\n\r\n# finding all the methods in comparision with numpy methods\r\ndef find_methods_in_lines(lines_with_np_methods, np_methods_list, method_count_list):\r\n for i in lines_with_np_methods:\r\n for xs in np_methods_list:\r\n if 'np.' 
+ xs in i:\r\n method_count_list.append(str(xs))\r\n\r\n\r\n# plot top 20 methods used in all files\r\ndef plot_top_20(method_count_list):\r\n letter_counts = Counter(method_count_list) # Making a dictionary of frequencies of methods used\r\n df = pd.DataFrame.from_dict(letter_counts, orient='index')\r\n df = df.nlargest(20, df.columns) # Finding nlargest from the entries\r\n print(df)\r\n df.plot(kind='bar')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Assignment 1/file_fetch.py","file_name":"file_fetch.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"91745850","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport logging\nimport tempfile\nimport subprocess\nimport json\n\nlogging.basicConfig(level=logging.DEBUG, filename = os.path.join(tempfile.gettempdir(), \"make_cdb.log\"))\n\ntry:\n # Prepare command to be executed\n logging.info(\"Called with arguments: \" + ' '.join(sys.argv))\n make_command = [\"make\", \"-q\", \"compile_db\"]\n # Call make\n logging.info(\"Invoking '{}'\".format(' '.join(make_command)))\n output = subprocess.Popen(make_command, stdout=subprocess.PIPE).communicate()[0]\n if output:\n output = output.decode('utf8')\n # Parse response\n logging.info(\"Parsing json response\")\n db = json.loads(output)\n # Spit result\n logging.info(\"Dumping result\")\n json.dump(db, sys.stdout)\nexcept Exception as e:\n # Log any kind of exception\n logging.error(\"Exception occurred\", exc_info=True)\n","sub_path":".vim/conf/plugins/lsp/make_compile_db.py","file_name":"make_compile_db.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"600567734","text":"import face_recognition\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime\r\nglobal n1\r\npath=\"New folder\"\r\nimages=[]\r\nclassname=[]\r\nlist=os.listdir(path)\r\nfor cl in list:\r\n cur_img=cv2.imread(f\"{path}/{cl}\")\r\n images.append(cur_img)\r\n classname.append(os.path.splitext(cl)[0])\r\ndef findencode(images):\r\n encode_list=[]\r\n for imgS in images:\r\n imgS=cv2.cvtColor(imgS,cv2.COLOR_BGR2RGB)\r\n faceencode=face_recognition.face_encodings(imgS)[0]\r\n encode_list.append(faceencode)\r\n return encode_list\r\n\r\ndef markattendance(names):\r\n with open(\"attendance.csv\",\"r+\") as f:\r\n myDataList=f.readlines()\r\n name_list=[]\r\n\r\n\r\n\r\n for line in myDataList:\r\n nameo=line.split(\",\")\r\n print(nameo)\r\n name_list.append(nameo[0])\r\n if names not in name_list:\r\n now=datetime.now()\r\n time=now.strftime(f\"%H:%M:%S\")\r\n f.writelines(f\"{names},{time}\")\r\n\r\n\r\nencodelistknown=findencode(images)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncap=cv2.VideoCapture(0)\r\nwhile True:\r\n success,img=cap.read()\r\n im=cv2.resize(img,(0,0),None,0.25,0.25)\r\n im=cv2.cvtColor(im,cv2.COLOR_BGR2RGB)\r\n faceloc=face_recognition.face_locations(im)\r\n faceenco=face_recognition.face_encodings(im,faceloc)\r\n for f ,fa in zip(faceenco,faceloc):\r\n matches=face_recognition.compare_faces(encodelistknown,f)\r\n matcloc=face_recognition.face_distance(encodelistknown,f)\r\n matchindex=np.argmin(matcloc)\r\n if matches[matchindex]:\r\n name=classname[matchindex].upper()\r\n print(name)\r\n x1,y1,x2,y2=fa\r\n x1, y1 , x2 , y2 =x1*4,y1* 4,x2* 4,y2* 4\r\n cv2.rectangle(img,(y2,x1),(y1,x2),(0,255,0),2)\r\n cv2.rectangle(img, (y2 , x2-35 ), (y1 , x2 ), (0, 
255, 0), cv2.FILLED)\r\n cv2.putText(img,name,(y2+6,x2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)\r\n markattendance(name)\r\n cv2.imshow(\"webcam\",img)\r\n cv2.waitKey(1)\r\n\r\n","sub_path":"facedetct.py","file_name":"facedetct.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"651480147","text":"import projectq\r\nfrom projectq import MainEngine\r\nfrom projectq.backends import ResourceCounter, Simulator\r\nfrom projectq.cengines import DecompositionRuleSet, AutoReplacer, InstructionFilter, TagRemover, LocalOptimizer\r\nfrom projectq.ops import BasicMathGate, get_inverse, QFT, Swap\r\nfrom projectq.setups import restrictedgateset\r\n\r\n\r\ndef high_level_gates(eng, cmd):\r\n g = cmd.gate\r\n if g == QFT or get_inverse(g) == QFT or g == Swap:\r\n return True\r\n if isinstance(g, BasicMathGate):\r\n return True\r\n if isinstance(g, AddConstant):\r\n return True\r\n elif isinstance(g, AddConstantModN):\r\n return True\r\n return False\r\n return eng.next_engine.is_available(cmd)\r\n\r\n\r\ndef get_engine(api=None):\r\n resource_counter = ResourceCounter()\r\n rule_set = DecompositionRuleSet(modules=[projectq.libs.math,\r\n projectq.setups.decompositions])\r\n compilerengines = [AutoReplacer(rule_set),\r\n InstructionFilter(high_level_gates),\r\n TagRemover(),\r\n LocalOptimizer(3),\r\n AutoReplacer(rule_set),\r\n TagRemover(),\r\n LocalOptimizer(3),\r\n resource_counter]\r\n\r\n # make the compiler and run the circuit on the simulator backend\r\n backend = Simulator()\r\n return MainEngine(backend, compilerengines), backend\r\n","sub_path":"src/engines/pq_engine.py","file_name":"pq_engine.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"286931793","text":"#cAssume s is a string of lower case characters.\n#Write a program that prints the longest substring of s in which the letters occur\n# in alphabetical order. For example, if s = 'azcbobobegghakl', then your program should print\n#Longest substring in alphabetical order is: beggh\n#In the case of ties, print the first substring. 
For example, if s = 'abcbcd',\n# then your program should print\n#Longest substring in alphabetical order is: abc\n\ns = 'abcdcd'\nmaxLen = 0\ncurrent = s[0]\nlong = s[0]\n\nfor x in range(len(s) - 1):\n    if s[x + 1] >= s[x]:\n        current += s[x + 1]\n        if len(current) > maxLen:\n            maxLen = len(current)\n            long = current\n    else:\n        current = s[x + 1]\n\nprint('Longest substring in alphabetical order is: ' + long)\n","sub_path":"Week1/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"645407019","text":"import win32api\nimport PIL.ImageGrab\n\nimport datetime\nimport pytz\n\n\ndef getCurrColor(pos=None):\n    # Resolve the default before unpacking; unpacking pos first raises\n    # TypeError whenever the function is called without an argument.\n    if pos is None:\n        pos = win32api.GetCursorPos()\n    x, y = pos\n    grab = PIL.ImageGrab.grab((x, y, x + 1, y + 1))\n    return grab.getpixel((0, 0))\n\n\npos = win32api.GetCursorPos()\ncolor = getCurrColor(pos)\n\npst = pytz.timezone('US/Pacific')\n\nwhile True:\n    nextColor = getCurrColor(pos)\n\n    if color != nextColor:\n        print(datetime.datetime.now(pst))\n\n    color = nextColor\n","sub_path":"specific/Python/Other/discordWatcher.py","file_name":"discordWatcher.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"156990285","text":"# Copyright 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: pdo_contract_enclave_registry.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='pdo_contract_enclave_registry.proto',\n package='',\n syntax='proto3',\n serialized_pb=_b('\\n#pdo_contract_enclave_registry.proto\\\"\\xa7\\x01\\n\\x16PdoContractEnclaveInfo\\x12\\x15\\n\\rverifying_key\\x18\\x01 \\x01(\\t\\x12\\x16\\n\\x0e\\x65ncryption_key\\x18\\x02 \\x01(\\t\\x12\\x10\\n\\x08owner_id\\x18\\x03 \\x01(\\t\\x12\\'\\n\\x1flast_registration_block_context\\x18\\x04 \\x01(\\t\\x12#\\n\\x1bregistration_transaction_id\\x18\\x05 \\x01(\\t\\\"a\\n\\x1dPdoContractEnclaveTransaction\\x12\\x0c\\n\\x04verb\\x18\\x01 \\x01(\\t\\x12\\x15\\n\\rverifying_key\\x18\\x02 \\x01(\\t\\x12\\x1b\\n\\x13transaction_details\\x18\\x03 \\x01(\\x0c\\\"\\xa8\\x01\\n\\x1aPdoContractEnclaveRegister\\x12\\x1b\\n\\x13organizational_info\\x18\\x01 \\x01(\\t\\x12\\x16\\n\\x0e\\x65ncryption_key\\x18\\x02 \\x01(\\t\\x12\\x12\\n\\nproof_data\\x18\\x03 \\x01(\\t\\x12\\x1d\\n\\x15\\x65nclave_persistent_id\\x18\\x04 \\x01(\\t\\x12\\\"\\n\\x1aregistration_block_context\\x18\\x05 \\x01(\\t\\\">\\n\\x18PdoContractEnclaveUpdate\\x12\\\"\\n\\x1aregistration_block_context\\x18\\x01 \\x01(\\tB\\x19\\n\\x15pdo.sawtooth.protobufP\\x01\\x62\\x06proto3')\n)\n\n\n\n\n_PDOCONTRACTENCLAVEINFO = _descriptor.Descriptor(\n name='PdoContractEnclaveInfo',\n full_name='PdoContractEnclaveInfo',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='verifying_key', full_name='PdoContractEnclaveInfo.verifying_key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='encryption_key', full_name='PdoContractEnclaveInfo.encryption_key', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='owner_id', full_name='PdoContractEnclaveInfo.owner_id', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='last_registration_block_context', full_name='PdoContractEnclaveInfo.last_registration_block_context', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='registration_transaction_id', full_name='PdoContractEnclaveInfo.registration_transaction_id', index=4,\n number=5, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n 
message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=40,\n serialized_end=207,\n)\n\n\n_PDOCONTRACTENCLAVETRANSACTION = _descriptor.Descriptor(\n name='PdoContractEnclaveTransaction',\n full_name='PdoContractEnclaveTransaction',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='verb', full_name='PdoContractEnclaveTransaction.verb', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='verifying_key', full_name='PdoContractEnclaveTransaction.verifying_key', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='transaction_details', full_name='PdoContractEnclaveTransaction.transaction_details', index=2,\n number=3, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=209,\n serialized_end=306,\n)\n\n\n_PDOCONTRACTENCLAVEREGISTER = _descriptor.Descriptor(\n name='PdoContractEnclaveRegister',\n full_name='PdoContractEnclaveRegister',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='organizational_info', full_name='PdoContractEnclaveRegister.organizational_info', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='encryption_key', full_name='PdoContractEnclaveRegister.encryption_key', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='proof_data', full_name='PdoContractEnclaveRegister.proof_data', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='enclave_persistent_id', full_name='PdoContractEnclaveRegister.enclave_persistent_id', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='registration_block_context', full_name='PdoContractEnclaveRegister.registration_block_context', index=4,\n number=5, 
type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=309,\n serialized_end=477,\n)\n\n\n_PDOCONTRACTENCLAVEUPDATE = _descriptor.Descriptor(\n name='PdoContractEnclaveUpdate',\n full_name='PdoContractEnclaveUpdate',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='registration_block_context', full_name='PdoContractEnclaveUpdate.registration_block_context', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=479,\n serialized_end=541,\n)\n\nDESCRIPTOR.message_types_by_name['PdoContractEnclaveInfo'] = _PDOCONTRACTENCLAVEINFO\nDESCRIPTOR.message_types_by_name['PdoContractEnclaveTransaction'] = _PDOCONTRACTENCLAVETRANSACTION\nDESCRIPTOR.message_types_by_name['PdoContractEnclaveRegister'] = _PDOCONTRACTENCLAVEREGISTER\nDESCRIPTOR.message_types_by_name['PdoContractEnclaveUpdate'] = _PDOCONTRACTENCLAVEUPDATE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nPdoContractEnclaveInfo = _reflection.GeneratedProtocolMessageType('PdoContractEnclaveInfo', (_message.Message,), dict(\n DESCRIPTOR = _PDOCONTRACTENCLAVEINFO,\n __module__ = 'pdo_contract_enclave_registry_pb2'\n # @@protoc_insertion_point(class_scope:PdoContractEnclaveInfo)\n ))\n_sym_db.RegisterMessage(PdoContractEnclaveInfo)\n\nPdoContractEnclaveTransaction = _reflection.GeneratedProtocolMessageType('PdoContractEnclaveTransaction', (_message.Message,), dict(\n DESCRIPTOR = _PDOCONTRACTENCLAVETRANSACTION,\n __module__ = 'pdo_contract_enclave_registry_pb2'\n # @@protoc_insertion_point(class_scope:PdoContractEnclaveTransaction)\n ))\n_sym_db.RegisterMessage(PdoContractEnclaveTransaction)\n\nPdoContractEnclaveRegister = _reflection.GeneratedProtocolMessageType('PdoContractEnclaveRegister', (_message.Message,), dict(\n DESCRIPTOR = _PDOCONTRACTENCLAVEREGISTER,\n __module__ = 'pdo_contract_enclave_registry_pb2'\n # @@protoc_insertion_point(class_scope:PdoContractEnclaveRegister)\n ))\n_sym_db.RegisterMessage(PdoContractEnclaveRegister)\n\nPdoContractEnclaveUpdate = _reflection.GeneratedProtocolMessageType('PdoContractEnclaveUpdate', (_message.Message,), dict(\n DESCRIPTOR = _PDOCONTRACTENCLAVEUPDATE,\n __module__ = 'pdo_contract_enclave_registry_pb2'\n # @@protoc_insertion_point(class_scope:PdoContractEnclaveUpdate)\n ))\n_sym_db.RegisterMessage(PdoContractEnclaveUpdate)\n\n\nDESCRIPTOR.has_options = True\nDESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\\n\\025pdo.sawtooth.protobufP\\001'))\n# @@protoc_insertion_point(module_scope)\n","sub_path":"python/sawtooth/pdo_protos/pdo_contract_enclave_registry_pb2.py","file_name":"pdo_contract_enclave_registry_pb2.py","file_ext":"py","file_size_in_byte":11073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"185107774","text":"#!/usr/bin/python3\n# -*- coding: utf-8 
-*-\n\nimport os\nfrom random import random\nfrom math import exp, sqrt\n\n# 2.1 - Sauvegarde d'images\n\ndef dim(img):\n \"\"\"Renvoie les dimensions du tableau 'img' qui représente une image\n pgm.\"\"\"\n n = len(img)\n p = len(img[0])\n return (n,p)\n\ndef rectangle_noir(n,p):\n \"\"\"Renvoie un tableau qui représente un rectangle noir de dimensions\n n * p.\"\"\"\n img = []\n for i in range(n):\n img.append([0]*p)\n return img\n\n# Question 1\n\ndef sauve_image(img, N, f):\n \"\"\"Sauve l'image représentée par le tableau img dans le fichier nommé f\n avec le niveau de gris maximal N.\"\"\"\n retour = True\n with open(f,\"w\") as fo:\n retour = (fo.write(\"P2\\n\") == 3)\n retour = (0 < fo.write(\"{} {}\\n\".format(dim(img)[1],dim(img)[0])) <= 14)\n retour = (0 < fo.write(\"{}\\n\".format(N)) <= 6)\n for i in range(dim(img)[0]):\n for j in range(dim(img)[1]):\n retour = (0 < fo.write(\"{}\\n\".format(img[i][j])) <= 7)\n return retour\n\n# Question 2\n\ndef sauve_rectangle_noir(n,p,N,f):\n \"\"\"Sauve un rectangle noir de dimensions n * p dans le fichier f avec le\n niveau de gris maximal N.\"\"\"\n return sauve_image(rectangle_noir(n,p),N,f)\n\n# Question 3\n\ndef rectangle_blanc(n,p,N):\n \"\"\"Renvoie un tableau qui représente un rectangle blanc de dimensions\n n * p avec le niveau de gris maximal N.\"\"\"\n img = []\n for i in range(n):\n img.append([N]*p)\n return img\n\ndef sauve_rectangle_blanc(n,p,N,f):\n \"\"\"Sauve un rectangle blanc de dimensions n * p dans le fichier f avec le\n niveau de gris maximal N.\"\"\"\n return sauve_image(rectangle_blanc(n,p,N),N,f)\n\n# Question 4\n\ndef xor(a,b):\n \"\"\"Renvoie a ou exclusif b.\"\"\"\n return ((not a) and b) or (a and (not b))\n\ndef echiquier(p,N):\n \"\"\"Renvoie tableau représentant un échiquier carré de p pixels de large,\n avec le niveau de gris maximal N.\n\n Fonctionne mieux si p est divisible par 8 car un échiquier a 8 * 8 cases.\"\"\"\n img = []\n tab_temp = []\n for i in range(p):\n for j in range(p):\n if (i // (p // 8)) % 2 == 0:\n a = False\n else:\n a = True\n if (j // (p // 8)) % 2 == 0:\n b = False\n else:\n b = True\n if xor(a,b):\n tab_temp.append(0)\n else:\n tab_temp.append(N)\n img.append(tab_temp)\n tab_temp = []\n return img\n\ndef sauve_echiquier(p,N,f):\n \"\"\"Sauve un échiquier carré de p pixels de large dans le fichier f,\n avec le niveau de gris maximal N.\n\n Fonctionne mieux si p est divisible par 8 car un échiquier a 8 * 8 cases.\"\"\"\n return sauve_image(echiquier(p,N),N,f)\n\n\n# 2.2 - Lecture\n\n# Question 5\n\ndef lit_valeurs(f):\n \"\"\"Lit le contenu du fichier image f et retourne la liste des valeurs\n lues (séparées par des blancs) sous forme d'une liste de chaines de\n caractères. 
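A usage sketch, showing the exact token stream the P2 writer above\n    produces (the '/tmp' path is an arbitrary choice):\n\n        img = [[255, 255, 255], [255, 255, 255]]   # 2 rows x 3 columns\n        sauve_image(img, 255, '/tmp/blanc.pgm')\n        lit_valeurs('/tmp/blanc.pgm')\n        # -> ['P2', '3', '2', '255', '255', '255', '255', '255', '255', '255']\n        #    magic, width, height, max gray level, then one value per pixel\n\n    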
La première valeur est normalement 'P2'.\"\"\"\n with open(f,'r') as fo:\n c = fo.read()\n return c.split()\n\ndef lit_image(f):\n \"\"\"Renvoie un tuple dont le premier élément est un tableau représentant\n l'image pgm nommée f et dont le deuxième élément est le niveau de gris\n maximal de f.\"\"\"\n img = []\n tabInter = []\n c = lit_valeurs(f)\n compteur = 4\n for i in range(int(c[2])):\n for j in range(int(c[1])):\n tabInter.append(c[compteur])\n compteur += 1\n img.append(tabInter)\n tabInter = []\n return (img,c[3])\n\ndef verifieEgalite(f,g):\n with open(f,'r') as f1:\n with open(g,'r') as f2:\n return f1.read() == f2.read()\n\ndef test_lit_image():\n \"\"\"Teste la fonction lit_image\"\"\"\n try:\n a = lit_image(\"joconde.pgm\")\n except FileNotFoundError:\n return False\n #except:\n # assert False\n img = a[0]\n N = a[1]\n sauve_image(img, N, \"joconde_test.pgm\")\n a = verifieEgalite(\"joconde.pgm\",\"joconde_test.pgm\")\n #os.system(\"rm joconde_test.pgm\")\n assert a\n return True\n\n# Question 6\n\ndef negatif(f,g):\n \"\"\"Lit le fichier pgm f et crée le fichier pgm g contenant le négatif de\n f.\"\"\"\n img = lit_image(f)[0]\n N = lit_image(f)[1]\n for i in range(len(img)):\n for j in range(len(img[i])):\n img[i][j] = int(N) - int(img[i][j])\n return sauve_image(img, N, g)\n\ndef test_double_negatif():\n \"\"\"Teste la fonction negatif en l'appliquant deux fois et en vérifiant\n que les fichiers obtenus sont les memes.\"\"\"\n try:\n negatif(\"joconde.pgm\",\"joconde_test.pgm\")\n except FileNotFoundError:\n return False\n #except:\n # assert False\n negatif(\"joconde_test.pgm\",\"joconde_test2.pgm\")\n a = verifieEgalite(\"joconde.pgm\",\"joconde_test2.pgm\")\n #os.system(\"rm joconde_test.pgm joconde_test2.pgm\")\n assert a\n return True\n\ndef imgAlea(n,p,N):\n \"\"\"Le film sur Canal+\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append(int(N*random()))\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea1(n,p,N):\n \"\"\"Vagues\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + i + j) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea2(n,p,N):\n \"\"\"Vagues dans l'autre sens\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + i - j) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea3(n,p,N):\n \"\"\"Cercles concentriques (taches de diffraction ?)\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + i**2 + j**2) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea4(n,p,N):\n \"\"\"Chevrons pas pointus\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + i**2 + 10*j) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea5(n,p,N):\n \"\"\"Coin de mur\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + max(i,j)) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea7(n,p,N):\n \"\"\"Coin de mur franchement pointu\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + max(i,j)**2) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea7(n,p,N):\n \"\"\"Angle de granit\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + max(i,j)**3) % 
N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea8(n,p,N):\n \"\"\"Brouillard londonien\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + int(exp(i)) + j) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea9(n,p,N):\n \"\"\"Écran de télé s'éteignant (tiou !)\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + i*j) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef imgPasTropAlea10(n,p,N):\n \"\"\"Houle brisée sur la plage\"\"\"\n img = []\n tab = []\n for i in range(n):\n for j in range(p):\n tab.append((int((N/2)*random()) + int(sqrt(i)) * int(sqrt(j))) % N+1)\n img.append(tab)\n tab = []\n return (img,N)\n\ndef symetrieAxeVertical(f,g):\n ligne = []\n total = []\n (img, N) = lit_image(f)\n (p,n) = dim(img)\n for i in range(p):\n for j in range(n-1, -1, -1):\n ligne.append(img[i][j])\n total.append(ligne)\n ligne = []\n return sauve_image(total, N, g)\n\ndef rotation180(f,g):\n ligne = []\n total = []\n (img, N) = lit_image(f)\n (p,n) = dim(img)\n for i in range(p-1, -1, -1):\n for j in range(n-1, -1, -1):\n ligne.append(img[i][j])\n total.append(ligne)\n ligne = []\n return sauve_image(total, N, g)\n\ndef symetrieAxeHorizontal(f,g):\n ligne = []\n total = []\n (img, N) = lit_image(f)\n (p,n) = dim(img)\n for i in range(p-1, -1, -1):\n for j in range(n):\n ligne.append(img[i][j])\n total.append(ligne)\n ligne = []\n return sauve_image(total, N, g)\n\n# Question 7\n\ndef rotation90(f,g):\n ligne = []\n total = []\n (img, N) = lit_image(f)\n (p,n) = dim(img)\n for i in range(n-1,-1,-1):\n for j in range(p):\n ligne.append(img[j][i])\n total.append(ligne)\n ligne = []\n return sauve_image(total, N, g)\n\ndef test_rotation90():\n \"\"\"Teste la fonction rotation90 en l'appliquant 4 fois et en vérifiant que\n les fichiers obtenus sont les memes.\"\"\"\n try:\n rotation90(\"joconde.pgm\",\"joconde_test1.pgm\")\n except FileNotFoundError:\n return False\n #except:\n # assert False\n rotation90(\"joconde_test1.pgm\",\"joconde_test2.pgm\")\n rotation90(\"joconde_test2.pgm\",\"joconde_test3.pgm\")\n rotation90(\"joconde_test3.pgm\",\"joconde_test4.pgm\")\n a = verifieEgalite(\"joconde.pgm\",\"joconde_test4.pgm\")\n #os.system(\"rm joconde_test1.pgm joconde_test2.pgm\")\n #os.system(\"rm joconde_test3.pgm joconde_test4.pgm\")\n assert a\n return True\n","sub_path":"TP07/tp07_rennesson_hyenne_complet.py","file_name":"tp07_rennesson_hyenne_complet.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"102482168","text":"class Solution(object):\n def threeSumClosest(self, nums, target):\n nums.sort()\n answer1 = 999999\n answer2 = -999999\n for i in range(len(nums) - 2):\n left = i + 1\n right = len(nums) - 1\n while left < right:\n tmp = nums[i] + nums[left] + nums[right]\n delta = tmp - target\n if delta > 0:\n right -= 1\n answer1 = min(answer1, delta)\n elif delta < 0:\n left += 1\n answer2 = max(answer2, delta)\n elif delta == 0:\n return target\n \n if answer1 < abs(answer2):\n return target + answer1\n else:\n return target + answer2\n","sub_path":"python/3Sum Closest.py","file_name":"3Sum Closest.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"375613324","text":"#Imports\nimport os, sys\n\n#Import sister module.\n# 
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'python-brlcad-tcl')))\nfrom python_brlcad_tcl.brlcad_tcl import *\nfrom examples import toothGen\n\n#Some constants\nendl = '\\n'\nspc = ' '\n\n#The path to output to, based on the location of the current file\nscriptPath = os.path.dirname(__file__)\noutFilePathBase = scriptPath + r'/output/pencilSharpener'\noutFileTCL = outFilePathBase + '.tcl'\noutFileSTL = outFilePathBase + '.stl'\n\n#The quality for the STL output. 0.5 is worse, 0.1 is pretty good, 0.01 makes files large but really good quality...\nstlQuality = 0.001\n\n#Some variables for creating all of the various components\n\ndef createPencilSharpener():\n\n\t#Create the two cylinders for the bottom layer\n\t#Also we're working in Inches\n\tbrl = brlcad_tcl(tcl_filepath = outFileTCL, title = \"Pencil Sharpener Gear\", make_g = False, make_stl = True, stl_quality = stlQuality, units = 'in')\n\t\n\t#Create our cylinders, regions, names, etc...\n\tbottom \t\t\t= 'b'\n\tbottomNegative \t= 'bn'\n\tbottomRing\t\t= 'br'\n\tbottomFinal\t\t= 'bf'\n\t\n\tmiddle\t\t\t= 'm'\n\tmiddleNegative \t= 'mn'\n\tmiddleRing\t\t= 'mr'\n\tmiddleWithGuide\t= 'mh'\n\tmiddleFinal\t\t= 'mf'\n\t\n\tmiddleTop\t\t\t= 'mt'\n\tmiddleTopNegative\t= 'mtn'\n\tmiddleTopRing\t\t= 'mtr'\n\t\n\ttop\t\t\t\t= 't'\n\ttopNegative\t\t= 'tn'\n\ttopRing\t\t\t= 'tr'\n\ttopFinal\t\t= 'tf'\n\t\n\tshavingsHole\t= 'sh'\n\t\n\tleftGuide\t\t= 'l'\n\trightGuide\t\t= 'r'\n\t\n\tgearName\t\t= 'gf'\n\ttoothName\t\t= 'tooth'\n\t\n\tallFinal\t\t= 'finished'\n\n\t#Variables for their sizes and positions, in inches\n\tbottomOuterRadius\t= 0.650\n\tbottomInnerRadius\t= 0.460\n\tbottomHeight \t\t= 0.262\n\tbottomPosition\t\t= (0, 0, 0)\n\t\n\tmiddleOuterRadius\t= 0.502\n\tmiddleInnerRadius\t= 0.4575\n\tmiddleHeight\t\t= 0.275\n\tmiddlePosition\t\t= (0, 0, bottomHeight)\t#Placed right on top of the bottom one\n\t\n\ttopOuterRadius\t\t= 0.49/2\t#Measured diameter with Callipers\n\ttopInnerRadius\t\t= 0.3937/2\t#Measured diameter with Callipers\n\ttopHeight\t\t\t= 0.383\n\ttopPosition\t\t\t= (0, 0, bottomHeight + middleHeight)\n\t\n\tmiddleTopOuterRadius\t= middleOuterRadius\t\t#This acts as the top for the middle, since it is too thin\n\tmiddleTopInnerRadius\t= topInnerRadius\t\t#This is the hole that goes through the top, so the bushel can go through\n\tmiddleTopHeight\t\t\t= -middleHeight*0.25\t#Negative because we're going from the top downward\n\tmiddleTopPosition\t\t= (0, 0, bottomHeight + middleHeight)\n\t\n\tleftGuideOuterRadius\t= 0.0625\t#GET BETTER MEASUREMENT?!?\n\trightGuideOuterRadius\t= leftGuideOuterRadius\n\tleftGuideHeight\t\t\t= topHeight/2 #ALSO GET BETTER\n\trightGuideHeight\t\t= leftGuideHeight\n\tleftGuideYOffset\t\t\t= -leftGuideOuterRadius/2 #Apparently they are offset a little.\n\tleftGuidePosition\t\t= (middleOuterRadius - leftGuideOuterRadius, leftGuideYOffset, bottomHeight + middleHeight)\n\trightGuidePosition\t\t= (-leftGuidePosition[0], leftGuidePosition[1], leftGuidePosition[2]) #Put it on the other side\n\t\n\t#Now we need the box that'll make our pencil shavings hole\n\tholeWidthFull\t= 0.75\n\tholeWidth\t\t= holeWidthFull/2\n\tholeHeightFull\t= middleHeight\n\tholeHeight\t\t= holeHeightFull/2\n\tholeDepthFull\t= middleOuterRadius\n\t\n\tholeCenterHeight = bottomHeight + (middleHeight/2)\n\t\n\t#Create the 8 points that make the box\n\tholePoints\t= (\n\t\t\t\t\t(-holeWidth\t, 0\t, holeCenterHeight - holeHeight),\n\t\t\t\t\t(holeWidth\t, 0\t, holeCenterHeight - 
holeHeight),\n\t\t\t\t\t(holeWidth\t, 0\t, holeCenterHeight + holeHeight),\n\t\t\t\t\t(-holeWidth\t, 0\t, holeCenterHeight + holeHeight),\n\t\t\t\t\t\n\t\t\t\t\t(-holeWidth\t, holeDepthFull\t, holeCenterHeight - holeHeight),\n\t\t\t\t\t(holeWidth\t, holeDepthFull\t, holeCenterHeight - holeHeight),\n\t\t\t\t\t(holeWidth\t, holeDepthFull\t, holeCenterHeight + holeHeight),\n\t\t\t\t\t(-holeWidth\t, holeDepthFull\t, holeCenterHeight + holeHeight),\n\t\t\t\t)\n\t\t\t\t\t\n\t#Get the lines for the gears\n\ttoothHalfWidth = 0.27\n\ttoothDepth = bottomInnerRadius + ((bottomOuterRadius - bottomInnerRadius)/2)\n\ttoothHeight = bottomHeight*0.75\t#3/4 of the height\n\t\n\t#Now we start building all of our shapes\n\t\n\t#Start with bottom...\n\tbrl.rcc(bottom, bottomPosition, (0, 0, bottomHeight), bottomOuterRadius)\n\tbrl.rcc(bottomNegative, bottomPosition, (0, 0, bottomHeight), bottomInnerRadius)\n\t\n\t#Now subtract inner from outer\n\tbrl.region(bottomRing, subtract(bottom, bottomNegative))\n\t\n\t#Now make the middle and cut it into a ring\n\tbrl.rcc(middle, middlePosition, (0, 0, middleHeight), middleOuterRadius)\n\tbrl.rcc(middleNegative, middlePosition, (0, 0, middleHeight), middleInnerRadius)\n\tbrl.region(middleRing, subtract(middle, middleNegative))\n\t\n\t#Now turn the top into a ring\n\tbrl.rcc(top, topPosition, (0, 0, topHeight), topOuterRadius)\n\tbrl.rcc(topNegative, topPosition, (0, 0, topHeight), topInnerRadius)\n\tbrl.region(topRing, subtract(top, topNegative))\n\t\n\t#Now going from top down, we'll turn the top into a top-hat\n\tbrl.rcc(middleTop, middleTopPosition, (0, 0, middleTopHeight), middleTopOuterRadius)\n\tbrl.rcc(middleTopNegative, middleTopPosition, (0, 0, middleTopHeight), middleTopInnerRadius)\n\tbrl.region(middleTopRing, subtract(middleTop, middleTopNegative))\n\t\n\t#And now add it to the top\n\tbrl.region(topFinal, union(topRing, middleTopRing))\n\t\n\t#Next we will add the guides to the middle...\n\tbrl.rcc(leftGuide, leftGuidePosition, (0, 0, leftGuideHeight), leftGuideOuterRadius)\n\tbrl.rcc(rightGuide, rightGuidePosition, (0, 0, rightGuideHeight), rightGuideOuterRadius)\n\t\n\t#Now add them\n\tbrl.region(middleWithGuide, union(middleRing, leftGuide, rightGuide))\n\t\n\t#And now cut the hole for pencil shavings\n\tbrl.arb8(shavingsHole, holePoints)\n\tbrl.region(middleFinal, subtract(middleWithGuide, shavingsHole))\n\t\n\t#Finally, we'll cut the gears out of the bottom...\t\n\t#Get all the content for the gears\n\tgearLines, gearName = toothGen.createGears(gearName = gearName, toFile = False, toothHalfWidth = toothHalfWidth, toothDepth = toothDepth, toothHeight = toothHeight, toothBaseName = toothName)\n\t\n\t#Now add those lines to the object...\n\tlinesFlat = '\\n'.join(gearLines)\t\n\tbrl.add_script_string(linesFlat)\n\t\n\t#Now we want to subtract the gear from the bottom\n\tbrl.region(bottomFinal, subtract(bottomRing, gearName))\n\t\n\t#We now have a bunch of final regions. Combine them into the final gear housing\n\tbrl.region(allFinal, union(bottomFinal, middleFinal, topFinal))\n\t\t\n\t#Now we can convert the .g into a .stl, using the final region. 
save_stl requires a list.\n\tbrl.run_and_save_stl([allFinal])\n\t\nif __name__ == '__main__':\n createPencilSharpener()\n\n'''\nEOF\n'''","sub_path":"examples/pencilSharpener.py","file_name":"pencilSharpener.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"383215707","text":"# **************************\n# Title: Justin Jameson Mailroom 2.py\n# Desc: command-line script that is executable. The script should accomplish the following goals:\n# 1) have a data structure that holds a list of your donors and a history of the amounts they have donated.\n# This structure should be populated with at least five donors, with 1 to 3 donations each.\n#\n# 2) The script should prompt the user to choose from a menu of 3 actions:\n# “Send a Thank You”\n# “Create a Report”\n# “quit”\n# Change Log: (Who, When, What)\n# Justin Jameson, 01282019, created file\n# Justin Jameson, 01282019,\n# 1. Modified listOfDonors to be a dictionary.\n# 2. Modified report to display format properly.\n# 3. Added function to save bulk emails to txt files.\n# **************************\n\nimport sys # imports go at the top of the file\n\n# -- Data -- #\n# declare variables and constants\n# heading for the table\ndonorTableHeading = ['First/Last Name', 'Donation Totals', 'Number of donations', 'Average donation amount']\n# Include Donor Name, total donated, number of donations, and average donation amount as values in each row.\nlistOfDonors = {\"Dana Spam\": [39, 700500, 10], \"Jay Byrd\": [10, 343553235, 90], \"Jo Jo\": [6, 2353],\n \"Kilee Boss\": [235235124], \"Katee Pie\": [3, ], \"Ethon George\": [25, 23423443, 52352]}\n\n\nmainPrompt = \"\\n\".join((\"Be grateful you have a job!\",\n \"Please choose from below options:\",\n \"1 - Send a Thank You note\",\n \"2 - Create a Report\",\n \"3 - Send a letter to all Donors\",\n \"4 - Quit\",\n \">>> \"))\n\nthankYouPrompt = \"\\n\".join((\"Lets get that Thank You note done!\",\n \"Please choose from below options:\",\n \"1 - Enter the recipients full name.\",\n \"2 - Review the List of donors.\",\n \"3 - Quit\",\n \">>> \"))\n\n\n# -- Processing -- #\n# Running Prompts: #\ndef main():\n while True:\n response = input(mainPrompt) # continuously collect user selection\n # redirect to feature functions based on the user selection\n if response == \"1\":\n Send_a_ThankYou(thankYouPrompt)\n elif response == \"2\":\n Create_A_Report(donorTableHeading, listOfDonors)\n elif response == \"3\":\n Send_Bulk_ThankYou(listOfDonors)\n elif response == \"4\":\n exit_program()\n else:\n print(\"Not a valid option!\")\n# End of main function.\n\ndef Send_a_ThankYou(thankYouPrompt):\n \"\"\"\n\n :param thankYouPrompt:\n :return:\n \"\"\"\n while True:\n response = input(thankYouPrompt) # continuously collect user selection\n # redirect to feature functions based on the user selection\n if response == \"1\":\n UpdateListOfDonors(listOfDonors)\n elif response == \"2\":\n # Show a list of the donor names and re-prompt.\n Create_A_Report(donorTableHeading, listOfDonors)\n elif response == \"3\":\n break\n else:\n print(\"Not a valid option!\")\n\n# End of Send a Thank you function.\n\n# perform tasks #\n\n\ndef UpdateListOfDonors(seq):\n # prompt user for a Full Name.\n donorFirstName = input(\"Enter the donors First name or enter to exit: \")\n donorLastName = input(\"Enter the donors Last name: \")\n # prompt for a donation amount and add it to the donation history of the selected user.\n 
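# Note: int(input(...)) raises ValueError on non-numeric input. A minimal\n    # guard (sketch, assuming simple re-prompting is the desired behaviour):\n    #     while True:\n    #         try:\n    #             donorDonationAmount = int(input(\"Enter the Donation amount: \"))\n    #             break\n    #         except ValueError:\n    #             print(\"Please enter a whole number.\")\n    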
donorDonationAmount = int(input(\"Enter the Donation amount: \"))\n    donorFullName = donorFirstName.capitalize() + \" \" + donorLastName.capitalize()\n    # check to see if the name exists\n    if donorFullName in seq:\n        seq[donorFullName].append(donorDonationAmount)\n    # If the name is not in the dict, add that name and use it.\n    else:\n        seq[donorFullName] = [donorDonationAmount]\n    ThankYouLetter(donorFirstName, donorDonationAmount)\n\n\ndef ThankYouLetter(fName, dAmount):\n    print(f\"Thank you {fName}, your donation of $ {dAmount} is appreciated!\")\n\n# End of ThankYouLetter function.\n\ndef Create_A_Report(rHeading, rTable):\n    \"\"\" Create a Report\n    1) print a list of your donors, sorted by total historical donation amount.\n    2) Include Donor Name, total donated, number of donations, and average donation amount as values in each row.\n    3) do not print out all of each donor’s donations, just the summary info.\n    4) Using string formatting, format the output rows as nicely as possible. The end result should be\n    tabular (values in each column should align with those above and below).\n    5) After printing this report, return to the original prompt.\n\n    :param rHeading: Sequence passed in to represent the header information of the table, currently a list.\n    :param rTable: Dict representing donors.\n    :return: working on the return to presentation portion of the script.\n    \"\"\"\n\n    print(\"|{:^17}|\"\"{:^18}\"\"|{:^21}|\"\"{:^25}|\".format(rHeading[0],rHeading[1],rHeading[2],rHeading[3]))\n    print(\"{:_>85}\".format(\"_\"))\n    for k, v in rTable.items():\n        print(\"{:<18}\"\"${:>18,.2f}\"\"{:^22}\"\"${:>25,.2f}\".format(k, sum(v), len(v), (sum(v) / len(v))))\n    print(\"\\n\\n\")\n\n# End of Create_A_Report function.\n\ndef Send_Bulk_ThankYou(lDonors):\n    \"\"\"generates a thank you letter for all donors in listOfDonors and writes each letter to disk as a text file.\n    :param lDonors: pass in a dict of donors.\n    :return: returns nothing, creates txt files.\n    \"\"\"\n    for k, v in lDonors.items():\n        fName = str('C:\\\\Python210\\\\' + k+\".txt\")\n        with open(fName, 'w') as f:\n            f.write(\" Dear {}, \\n Thank you for your donation of \"\"${:,.2f}; it is very appreciated!\"\n                    \"\\n\\n The team!\".format(k, sum(v)))\n\n# End of Send_Bulk_ThankYou function.\n\ndef exit_program():\n    print(\"Thank you for using this program, bye!\")\n    sys.exit()  # exit the interactive script\n\n# End of exit program function.\n\n\n#-- Presentation (Input/Output) --#\n# get user input\n\n\n# send program output\n\n\nif __name__ == \"__main__\":\n    # guards against Justin Jameson Mailroom code running automatically if this module is imported\n    main()\n","sub_path":"students/JustinJameson/JustinJameson_Assignment_04/Justin Jameson Mailroom 2/Justin Jameson Mailroom 2.py","file_name":"Justin Jameson Mailroom 2.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"519321903","text":"#!/usr/bin/env python3\n\nbin_num = input(\"Binary number to convert: \")\npower = 0\ndec = 0\n\nfor i in range(len(bin_num), 0, -1):\n    dec += 2 ** power * int(bin_num[i-1])\n    power += 1\n\nprint(dec)\n","sub_path":"binary_convert/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"8004978","text":"# -*- coding:utf-8 -*-\nfrom django.conf import settings\n\nSPACER_URL = u'%s/common/spacer.gif' % 
settings.MEDIA_URL\n\nDEVICE_SP = u'スマートフォン'\n\nLIST = u\"一覧\"\nDETAIL = u\"詳細\"\nHAVING = u\"所持数\"\nTRAP = u\"クローゼット\"\nLOVE = u\"服従\"\n\nAPP_NAME = u'オーディンバトル'\nAPP_NAME_SHORT = u'オーディン'\n\nMYPAGE = u'巣'\n\nPLAYER = u'プレーヤー'\nPLAYER_RES = u'さん'\nLEVEL = u'レベル' #プレイヤーに使う\nLEVEL_SHORT = u'レベル' #プレイヤーに使う\nLEVEL_VSHORT = u'Lv' #カードに使う\nMASTER = u'マスター'\nMEMBER = u'メンバー'\nPARTY = u'ゲスト' #ボス戦の時の仲間\nFRIENDSHIP = u'アシストP'\nNEWCOMER = u'新クラブ'\nNEWCOMER_TOP = u'新人冒険者'\nQUEST = u'侵略'\nQUEST_PLAY = u'侵略する'\nBOSS_BATTLE = u\"討伐\"\nBATTLE = u'試合'\nBATTLE_TIE = u'練習試合'\nBATTLE_START = u'キックオフ'\nFORMATION = u'フォーメーション'\nCARD = u'選手'\nCARD_COUNT = u'人'\nGASHAPON = u'ガチャ'\nBOOK = u'図鑑'\nWARRECORD = u'戦績'\nHELP = u'ヘルプ'\nCOMPOSITION = u'トレーニング'\nCOMPOSITION_PARTNER = u'トレーニングパートナー'\nSELL = u'移籍'\nSELL_MONEY = u'移籍金'\nRIVAL = u'%sの近い%s' % (LEVEL_SHORT, PLAYER,)\nITEM = u'アイテム'\nMONEY_SHORT = u'育成P'\nMONEY = u'育成ポイント'\nWISHLIST = u'欲しい物'\nTRADE = u'トレード'\nTREASURE = u'ピース'\nTREASURE_EVENT = u'イベントお宝'\nVITALITY = u'体力P'\nSHOP = u'ショップ'\nLIMITED_SHOP = u'限定ショップ'\nCARD_DECK = u'デッキ'\nEXPERIENCE = u'経験値'\nGREET = u'応援'\nTEXT_INPUT_ATTENTION_GREET_COMMNET = u'同じ人にメッセージを残して%sが手に入るのは1日に1回だけです。' % FRIENDSHIP\nTEXT_INPUT_ATTENTION = u'※本名、メアド、電話番号などの個人情報や個人の特定に繋がる情報は入力しないでください。'\nINVITE_MESSAGE = u'始めてみたんだけど、レア選手ほしいから一度やって!お願い!!'\nGIFT = u'プレゼント'\nWISHLIST = u'欲しい物リスト'\nAREA = u'エリア'\nAREA_LONG = u'エリア' #相手のprof\nNOTIFICATION = u'運営からのお知らせ'\nBULKGREET = u'一括エール'\nABILITY = u'スキル'\nPROLOGUE_TIME_SWF = u''\nPROLOGUE_TIME_SWF_PLACE_NAME = u'%sに向かいます'\n\nFRIENDS = u'仲間'\n\nGUILD = u'同盟'\nGUILDLEVEL = u'同盟Lv'\nGUILDSCORE = u'同盟pt'\nGUILD_CONTRIBUTION_SCORE = u'個人同盟pt' \nGUILDINTRO = u'アピールコメント'\nGUILDRANKING = u'ランキング'\nGUILDTITLE = u'称号'\nGUILDGIFT = u'同盟ギフト'\nGUILDRECRUITBOARD = u'募集板'\nGUILDBB = u'戦略室'\nGUILDTROPHY = u'称号'\n\nATTACK_POWER = u'攻撃'\nATTACK_POWER_LONG = u'攻撃コスト'\nDEFENSE_POWER = u'防御'\nDEFENSE_POWER_LONG = u'防御コスト'\n\nPRESENT_PAGE = u'プレゼントページ'\n\nCARD_BIRTH_DAY = u'生年月日'\nCARD_HEIGHT = u'身長'\nCARD_WEIGHT = u'体重'\nCARD_MATCH_SEASON = u'2010-2011'\nCARD_MATCH_NUM = u'出場数'\nCARD_GOAL_NUM = u'得点'\nCARD_NATIONALITY = u'国籍'\n\nCARD_NATIONALITY = u'出身地'\nCARD_HEIGHT_UNIT = u'cm'\nCARD_WEIGHT_UNIT = u'kg'\nCARD_BUST = u'B'\nCARD_WASTE = u'W'\nCARD_HIP = u'H'\n\nimport re\nre_template_text = re.compile(r'\\{\\{T\\.(\\w+)\\}\\}')\n\ndef _template_text_replace_callback(m):\n attribute_name = m.group(1)\n from common import template_text as T\n return getattr(T, attribute_name, m.group(0))\n\ndef replace_template_text(text):\n \"\"\"\n 文��列のテンプレートテキストを置換する。\n テンプレートテキストは、{{T.MONEY}} のような形式で書かなければいけない。\n \"\"\"\n return re_template_text.sub(_template_text_replace_callback, text)\n\n","sub_path":"server/module/common/template_text.py","file_name":"template_text.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"17286021","text":"\n# ifcondicion es muy importante los espacios un espacio de mas da errores\nedad = 18\npago = True #imporante T mayuscula\nif edad > 17:\n print (\"si es mayor de edad\")\n if pago == True:\n print (\"Eres mayor y se ha realizado el pago\")\n else:\n print (\"Eres mayor y no has pagado\")\nelse:\n print (\"no es mayor de edad\")\n","sub_path":"codigo Inicial/IF anidados.py","file_name":"IF anidados.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"403831987","text":"# Copyright 2018 The Chromium 
Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"Quest for running a Telemetry benchmark in Swarming.\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport re\n\nfrom dashboard.pinpoint.models.quest import run_performance_test\n\n_DEFAULT_EXTRA_ARGS = [\n '-v', '--upload-results', '--output-format', 'histograms'\n]\n\n_STORY_REGEX = re.compile(r'[^a-zA-Z0-9]')\n\n\ndef _StoryToRegex(story_name):\n # Telemetry's --story-filter argument takes in a regex, not a\n # plain string. Stories can have all sorts of special characters\n # in their names (see crbug.com/983993) which would confuse a\n # regex. We thus keep only a small set of \"safe chars\"\n # and replace all others with match-any-character regex dots.\n return '^%s$' % _STORY_REGEX.sub('.', story_name)\n\n\ndef ChangeDependentArgs(args, change):\n # For results2 to differentiate between runs, we need to add the\n # Telemetry parameter `--results-label ` to the runs.\n extra_args = list(args)\n extra_args += ('--results-label', str(change))\n if '--story-filter' in extra_args:\n extra_args.append('--run-full-story-set')\n if change.change_args:\n extra_args.extend(change.change_args)\n return extra_args\n\n\nclass RunTelemetryTest(run_performance_test.RunPerformanceTest):\n\n @classmethod\n def _ComputeCommand(cls, arguments):\n # We're moving the definition of which command to run here, instead of\n # relying on what's in the isolate because the 'command' feature is\n # deprecated and will be removed soon (EOY 2020).\n # TODO(dberris): Move this out to a configuration elsewhere.\n command = [\n 'luci-auth', 'context', '--', 'vpython', '../../testing/test_env.py',\n '../../testing/scripts/run_performance_tests.py',\n '../../tools/perf/run_benchmark'\n ]\n relative_cwd = arguments.get('relative_cwd', 'out/Release')\n return relative_cwd, command\n\n def Start(self, change, isolate_server, isolate_hash):\n extra_swarming_tags = {'change': str(change)}\n return self._Start(\n change,\n isolate_server,\n isolate_hash,\n ChangeDependentArgs(self._extra_args, change),\n extra_swarming_tags,\n execution_timeout_secs=None)\n\n @classmethod\n def _ExtraTestArgs(cls, arguments):\n extra_test_args = []\n\n benchmark = arguments.get('benchmark')\n if not benchmark:\n raise TypeError('Missing \"benchmark\" argument.')\n extra_test_args += ('--benchmarks', benchmark)\n\n story = arguments.get('story')\n if story:\n # TODO(crbug.com/982027): Note that usage of \"--run-full-story-set\"\n # and \"--story-filter\"\n # may be replaced with --story= (no regex needed). Support\n # for --story flag landed in\n # https://chromium-review.googlesource.com/c/catapult/+/1869800\n # (Oct 22, 2019)\n # so we cannot turn this on by default until we no longer need to\n # be able to run revisions older than that. 
In the meantime, the\n # following argument plus the --run-full-story-set argument added in\n # Start() accomplish the same thing.\n extra_test_args += ('--story-filter', _StoryToRegex(story))\n\n story_tags = arguments.get('story_tags')\n if story_tags:\n extra_test_args += ('--story-tag-filter', story_tags)\n\n # TODO: Workaround for crbug.com/677843.\n if (benchmark.startswith('startup.warm')\n or benchmark.startswith('start_with_url.warm')):\n extra_test_args += ('--pageset-repeat', '2')\n else:\n extra_test_args += ('--pageset-repeat', '1')\n\n browser = arguments.get('browser')\n if not browser:\n raise TypeError('Missing \"browser\" argument.')\n extra_test_args += ('--browser', browser)\n extra_test_args += _DEFAULT_EXTRA_ARGS\n extra_test_args += super(RunTelemetryTest, cls)._ExtraTestArgs(arguments)\n return extra_test_args\n\n @classmethod\n def _GetSwarmingTags(cls, arguments):\n tags = {}\n benchmark = arguments.get('benchmark')\n if not benchmark:\n raise TypeError('Missing \"benchmark\" argument.')\n tags['benchmark'] = benchmark\n story_filter = arguments.get('story')\n tag_filter = arguments.get('story_tags')\n tags['hasfilter'] = '1' if story_filter or tag_filter else '0'\n if story_filter:\n tags['storyfilter'] = story_filter\n if tag_filter:\n tags['tagfilter'] = tag_filter\n return tags\n","sub_path":"dashboard/dashboard/pinpoint/models/quest/run_telemetry_test.py","file_name":"run_telemetry_test.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"146501472","text":"import bisect\nimport sys\nMAX_N = 1000000\nfrom bisect import*\n\ndef LIS(arr):\n d = [0]*n\n last = []\n for i in range(n):\n if not last or last[-1] < arr[i]:\n last.append(arr[i])\n idx = bisect_left(last,arr[i])\n last[idx] = arr[i]\n d[i] = idx +1\n return d\n\nn = int(input())\na = list(map(int,input().split()))\nd = LIS(a)\nlis = max(d)\nans = []\ncnt = lis\nfor i in range(n-1,-1,-1):\n if d[i]==cnt:\n ans.append(a[i])\n cnt-=1\nans.reverse()\nprint(lis)\nfor v in ans:\n print(v,end=' ')","sub_path":"Gold2/LIS.py","file_name":"LIS.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"430914529","text":"\"\"\"mSleepClusterX Standard\"\"\"\n\n# Authors: Jeffrey Wang\n# License: BSD 3 clause\n\nfrom utils import data\n\n# A mSleepClusterX-type SleepCluster Standard\n#\tBuild: 0\nclass mSleepClusterX():\n\n\tdef __init__(self):\n\t\tself.CHANNELS = {\n\t\t\t\t\t\t\t'EEG':1,\n\t\t\t\t\t\t\t'EMG':2\n\t\t\t\t\t\t}\n\t\tself.EPOCH_SIZE = 5\n\n\t\tself.NORMALIZER = \t\t{\t'EEG': data.maxNormalize, \t'EMG': data.maxNormalize }\n\t\tself.NORMALIZE_ARG = \t{ \t'EEG': None, \t\t\t\t\t'EMG': None \t\t\t\t}\n\n\t\tself.NPERSEG_FACTOR = \t{ \t'EEG': 0.75,\t\t'EMG': 0.75\t\t\t}\n\t\tself.NOVERLAP_FACTOR = \t{ \t'EEG': 0.5, \t\t'EMG': 0.5 \t\t\t}\n\t\tself.DETREND = \t\t\t{ \t'EEG': 'constant', \t'EMG': 'constant' \t}\n\n\t\tself.FEATURES = {\n\t\t\t\t\t\t\t'0.BANDS':\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t'EEG': [\t(0.5, 4), \t(7, 9), \t(11, 40)\t],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'EMG': None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'merge': 'MEAN'\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'1.ENTROPY': \t\t{ \t'EEG': True,\t'EMG': False, \t'merge': 'MAX' \t},\n\t\t\t\t\t\t\t'2.RMS': \t\t\t{ \t'EEG': False,\t'EMG': False, \t'merge': 'MAX' \t},\n\t\t\t\t\t\t\t'3.PERCENTILE': \t{ \t'EEG': None, \t'EMG': 95, \t\t'merge': 
'MEAN'\t},\n\t\t\t\t\t\t\t'4.MEAN':\t\t\t{ \t'EEG': False, \t'EMG': False, \t'merge': 'MEAN'\t}\n\t\t\t\t\t\t}\n","sub_path":"sleepcluster/standards/msleepclusterX.py","file_name":"msleepclusterX.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"477865883","text":"#!/usr/bin/env python\n\nimport os, re, sys, string\n\nfcbs='./fcbs'\nrs='Rs'\n\nclass Run:\n\tdef __init__(self,data,d,m,D2=False):\n\t\tself.data = data\n\t\tself.d = d\n\t\tself.D2 = D2\n\t\tself.m = m\n\t\tself.args = '-v 2 -b 1000'\n\t\tif self.D2: self.args = self.args + ' -D2'\n\t\tself.prog=fcbs\n\n\tdef execline(self):\n\t\treturn '%s -r ../../%s -d %s -m %i %s' \\\n\t\t\t%(self.prog,self.data,self.d,self.m,self.args)\n\n\tdef rundir(self):\n\t\trname = self.data\n\t\trundir = '%s.%s' %(rname,self.d)\n\t\tif(self.D2): rundir = rundir + '.D2'\n\t\trundir = '%s.%i' %(rundir,self.m)\n\t\tif not os.path.exists(rundir): os.mkdir(rundir)\n\t\treturn rundir\n\n\tdef script(self):\n\t\tself.fname = 'sge.%s.csh' %self.rundir()\n\t\tf = open(self.fname,'w')\n\t\tf.write('''\n#!/bin/bash\n#$ -cwd\n#$ -l h_rt=120:00:00\n#$ -M justin.ashworth@uts.edu.au\n#$ -m n\n#$ -N fcbs\n#$ -o log\n#$ -e err\n#$ -S /bin/bash\\n\n''')\n\n\t\tf.write('''\nr=`head -n $SGE_TASK_ID %s | tail -n 1`\n\ncd %s\n\n# multiscale bootstrap levels (run in separate dirs)\nmkdir $r\ncd $r\n\n%s -s $r >& log\n\n''' %(rs,self.rundir(), self.execline()))\n\nruns = [\n#\tRun('data.tab','euclidean',1),\n#\tRun('data.tab','euclidean',4),\n\tRun('data.tab','euclidean',4,True),\n#\tRun('data.tab','pearson1',1),\n#\tRun('data.tab','pearson1',4),\n#\tRun('data.tab','pearson1',4,True),\n#\tRun('data.tab','pearson2',1),\n#\tRun('data.tab','pearson2',4),\n#\tRun('data.tab','pearson2',4,True),\n#\tRun('data.tab','spearman',1),\n#\tRun('data.tab','spearman',4),\n#\tRun('data.tab','spearman',4,True),\n]\n\nfor run in runs:\n\trun.script()\n","sub_path":"make.qsub.scripts.py","file_name":"make.qsub.scripts.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"297511457","text":"import sys\nfrom pathlib import Path\nimport os\nimport boto3\nfrom jawfish import utilities as util\n#%% Logging\nimport logging\nlogger = logging.getLogger()\nlogger.handlers = []\n\n# Set level\nlogger.setLevel(logging.INFO)\n\n# Create formatter\nFORMAT = \"%(asctime)s %(levelno)s - %(module)-15s - %(funcName)-15s - %(message)s\"\nDATE_FMT = \"%Y-%m-%d %H:%M:%S\"\nformatter = logging.Formatter(FORMAT, DATE_FMT)\n\n# Create handler and assign\nhandler = logging.StreamHandler(sys.stderr)\nhandler.setFormatter(formatter)\nlogger.handlers = [handler]\nlogger.critical(\"Logging started\")\n\n#%% Setup S3\nassert 'AWS_PROFILE' in os.environ, \"Ensure AWS_PROFILE is in your environment variables\"\nlogging.info(\"Using profile '{}'\".format(os.environ['AWS_PROFILE']))\n\n\n\n\n#%%\nBUCKET_NAME = \"datacommons-seeding-us-east\"\n\n\n# sess = boto3.session.Session()\n# REGION = sess.region_name\n# if not REGION:\n# REGION = 'us-east-1'\n\ns3 = boto3.resource('s3')\n# s3_client = boto3.client('s3')\nbucket = s3.Bucket(BUCKET_NAME)\nlogging.info(\"Connecting to bucket: {}\".format(bucket.name))\n\nfor obj in bucket.objects.all():\n print(obj.key, obj.last_modified)\ns3_client = boto3.client('s3')\n#%%\nresult = bucket.meta.client.list_objects(Bucket=bucket.name, Delimiter='/')\n\nfor folder in 
result.get('CommonPrefixes'):\n    this_prefix = folder.get('Prefix').replace('/','')\n    # print(this_prefix)\n    # logging.info(\"Dataset {}\".format(this_prefix))\n    for i, obj3 in enumerate(bucket.objects.filter(Prefix=folder.get('Prefix'))):\n        # print(i)\n        # if obj3.key.endswith('.json'):\n        #     print(obj3.key)\n        if obj3.key.endswith('metadata_OEP8.json'):\n            this_url = util.get_key_url(s3.meta.client, bucket, obj3.key)\n            # print(obj3.key, obj3.key.endswith('.json'))\n            print(this_url)\n#%%\n\ndef list_objects(s3, bucket_name = \"datacommons-seeding-us-east\"):\n    logging.info(\"Listing {}\".format(bucket_name))\n    bucket = s3.Bucket(bucket_name)\n\n    for obj in bucket.objects.all():\n        print(obj.key, obj.last_modified)\n\ndef get_s3():\n    return boto3.resource('s3')\n\ndef main():\n    s3 = get_s3()\n    list_objects(s3)\n\nif __name__ == \"__main__\":\n    assert 'AWS_PROFILE' in os.environ, \"Ensure AWS_PROFILE is in your environment variables\"\n    logging.debug(\"Using {} profile\".format(os.environ['AWS_PROFILE']))\n    main()","sub_path":"scripts/superceded/jawfish_OLD/list_bucket.py","file_name":"list_bucket.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"270715215","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport re\n\n__author__ = 'Trofimov Igor'\n\n\ndef exit_with_error(message: str) -> None:\n    \"\"\"\n    Print a message and exit with an error.\n\n    :type message: str\n    :param message: Message printed before the program exits with an error\n    :rtype: None\n    \"\"\"\n    print(message)\n    exit(1)\n\n\ndef send(signal: int, pid: int) -> bool:\n    \"\"\"\n    Send a signal to a process.\n\n    :type signal: int\n    :param signal: Number of the signal being sent\n    :type pid: int\n    :param pid: PID of the process the signal is sent to\n    :rtype: bool\n    :return: True\n    \"\"\"\n    os.kill(pid, signal)\n\n    return True\n\n\ndef loads() -> list:\n    \"\"\"\n    Get the list of all processes in the system.\n\n    :rtype: list\n    :return: List of all processes in the system\n    \"\"\"\n    return os.popen('ps -aux').readlines()[1:]\n\n\ndef range_check(processes: list, start_pid: int, finish_pid: int) -> list:\n    \"\"\"\n    Select the processes whose PID lies within a given range.\n\n    :type processes: list\n    :param processes: List of processes\n    :type start_pid: int\n    :param start_pid: First PID of the range\n    :type finish_pid: int\n    :param finish_pid: Last PID of the range\n    :rtype: list\n    :return: Filtered list of processes\n    \"\"\"\n    if start_pid > finish_pid:\n        exit_with_error('The start PID must be less than the end PID.')\n\n    result = []\n    for process in processes:\n        # The regex extracts the process PID\n        if finish_pid >= int(re.match(r'^.*? +(\\d+) ', process, re.MULTILINE).group(1)) >= start_pid:\n            result.append(process)\n\n    return result\n\n\ndef username_check(processes: list, username: str) -> list:\n    \"\"\"\n    Select the processes owned by a given user.\n\n    :type processes: list\n    :param processes: List of processes\n    :type username: str\n    :param username: Username\n    :rtype: list\n    :return: Filtered list of processes\n    \"\"\"\n    result = []\n    for process in processes:\n        # The regex extracts the process owner\n        if username == re.match(r'^(.*?) 
', process, re.MULTILINE).group(1):\n            result.append(process)\n\n    return result\n\n\ndef type_check(processes: list, process_type: str) -> list:\n    \"\"\"\n    Select the processes of a given type.\n\n    :type processes: list\n    :param processes: List of processes\n    :type process_type: str\n    :param process_type: Process type\n    :rtype: list\n    :return: Filtered list of processes\n    \"\"\"\n    result = []\n    for process in processes:\n        # The regex extracts the process type\n        if process_type == re.match(r'^.* (\\S{1,5}?) +\\d{1,2}:\\d{1,2} +\\d{1,2}:\\d{1,2} ', process, re.MULTILINE).group(\n                1):\n            result.append(process)\n\n    return result\n\n\ndef name_check(processes: list, name: str) -> list:\n    \"\"\"\n    Select the processes with a given name.\n\n    :type processes: list\n    :param processes: List of processes\n    :type name: str\n    :param name: Part of the process name\n    :rtype: list\n    :return: Filtered list of processes\n    \"\"\"\n    result = []\n    for process in processes:\n        # The regex extracts the process name\n        if name in re.match(r'^.* +\\d{1,2}:\\d{1,2} +(.*)$', process, re.MULTILINE).group(1):\n            result.append(process)\n\n    return result\n\n\ndef own_pid_check(processes: list) -> list:\n    \"\"\"\n    Remove this program's own instance from the process list.\n\n    :type processes: list\n    :param processes: List of processes\n    :rtype: list\n    :return: Filtered list of processes\n    \"\"\"\n    result = []\n    for process in processes:\n        # The regex extracts the process PID\n        if os.getpid() != int(re.match(r'^.*? +(\\d+) ', process, re.MULTILINE).group(1)):\n            result.append(process)\n\n    return result\n\n\ndef set_argparse() -> argparse.Namespace:\n    \"\"\"\n    Set up the arguments handled by the program.\n\n    :rtype: instance of the argparse.Namespace class\n    :return: Namespace holding the program arguments\n    \"\"\"\n    # Create the parser object\n    parser = argparse.ArgumentParser(\n        description='A simple manager for bulk control of \\\n        running processes in linux')\n\n    # Signal number\n    parser.add_argument('signal', type=int, action='store',\n                        help='Number of the signal to send')\n    # Process PID\n    parser.add_argument('-p', '--pid', type=int, action='store', dest='pid',\n                        help='PID of the process the signal is sent to')\n    # PID of the last process in the range\n    parser.add_argument('-pr', '--pidrange', type=int, action='store', dest='range',\n                        help='PID of the process that closes the range \\\n        of processes (inclusive) receiving the signal.')\n    # Process names\n    parser.add_argument('-n', '--name', type=str, action='store', dest='name',\n                        help='Full/partial names of the processes receiving the signal.')\n    # Process type\n    parser.add_argument('-t', '--type', type=str, action='store', dest='type',\n                        help='Type of the processes receiving the signal.')\n    # Username\n    parser.add_argument('-u', '--username', type=str, action='store', dest='username',\n                        help='Name of the user whose processes receive the signal.')\n\n    return parser.parse_args()\n\n\ndef main(args: argparse.Namespace) -> None:\n    \"\"\"\n    Implements the main program logic.\n\n    :type args: instance of the argparse.Namespace class\n    :param args: Program arguments\n    :rtype: None\n    \"\"\"\n    processes = loads()\n\n    # Filters\n    # If a PID range is given\n    if args.pid is not None and args.range is not None:\n        processes = range_check(processes, args.pid, args.range)\n    # If a specific PID is given\n    elif args.pid is not None:\n        processes = range_check(processes, args.pid, args.pid)\n    # If a username is given\n    
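# (The checks below narrow `processes` cumulatively; e.g., assuming PIDs\n    # 100-200 owned by user \"alice\", `-p 100 -pr 200 -u alice` applies\n    # range_check first and then username_check to the survivors.)\n    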
if args.username is not None:\n        processes = username_check(processes, args.username)\n    # If a process type is given\n    if args.type is not None:\n        processes = type_check(processes, args.type)\n    # If a name is given\n    if args.name is not None:\n        processes = name_check(processes, args.name)\n    # Remove ourselves from the list\n    processes = own_pid_check(processes)\n\n    success = 0\n    fail = 0\n    # Send the signal to every selected process\n    for process in processes:\n        try:\n            # The regex extracts the process PID\n            send(args.signal, int(re.match(r'^.*? +(\\d+) ', process, re.MULTILINE).group(1)))\n            success += 1\n        except PermissionError:\n            fail += 1\n        except ProcessLookupError:\n            fail += 1\n        except OverflowError:\n            exit_with_error('The signal/PID has an invalid value.')\n\n    print('Selected {0} processes.'.format(len(processes)))\n    print('Signal {0} was successfully sent to {1} processes.'.format(args.signal, success))\n    print('Sending failed for {0} processes.'.format(fail))\n\n\nif __name__ == '__main__':\n    main(set_argparse())\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"274739560","text":"from __future__ import absolute_import, unicode_literals\n\n\nclass QuadrigaError(Exception):\n    \"\"\"Base class for all client exceptions.\"\"\"\n\n\nclass RequestError(QuadrigaError):\n    \"\"\"Raised when an API request to QuadrigaCX fails.\"\"\"\n\n    def __init__(self, response, message, error_code=None):\n        self.url = response.url\n        self.body = response.text\n        self.headers = response.headers\n        self.http_code = response.status_code\n        self.error_code = error_code\n        Exception.__init__(self, message)\n\n\nclass InvalidCurrencyError(QuadrigaError):\n    \"\"\"Raised when an invalid major currency is given.\"\"\"\n\n\nclass InvalidOrderBookError(QuadrigaError):\n    \"\"\"Raised when an invalid order book is given.\"\"\"\n","sub_path":"quadriga/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"543856584","text":"\"\"\" Dynamically builds Python wrappers for Gibbs Seawater Toolbox at runtime \"\"\"\n\nimport ctypes\nfrom os import listdir\nfrom os.path import dirname, realpath, splitext, join\nimport itertools\nimport numpy\n\ninstall_dir = dirname(realpath(__file__))\n\ndef find_gsw(s):\n    return splitext(s)[1] == \".so\" and s.startswith(\"cgsw\")\n\nname = list(filter(find_gsw, listdir(install_dir)))[0]\ncgsw = ctypes.cdll.LoadLibrary(join(install_dir, name))\n\nheader = \\\n\"\"\"\nextern void gsw_add_barrier(double *input_data, double lon, double lat,\n    double long_grid, double lat_grid, double dlong_grid,\n    double dlat_grid, double *output_data);\nextern void gsw_add_mean(double *data_in, double lon, double lat,\n    double *data_out);\nextern double gsw_adiabatic_lapse_rate_from_ct(double sa, double ct, double p);\nextern double gsw_alpha(double sa, double ct, double p);\nextern double gsw_alpha_on_beta(double sa, double ct, double p);\nextern double gsw_alpha_wrt_t_exact(double sa, double t, double p);\nextern double gsw_beta_const_t_exact(double sa, double t, double p);\nextern double gsw_beta(double sa, double ct, double p);\nextern double gsw_c_from_sp(double sp, double t, double p);\nextern double gsw_cabbeling(double sa, double ct, double p);\nextern double gsw_ct_freezing(double sa, double p, double saturation_fraction);\nextern double 
gsw_ct_from_pt(double sa, double pt);\nextern double gsw_ct_from_t(double sa, double t, double p);\nextern double gsw_deltasa_atlas(double p, double lon, double lat);\nextern double gsw_deltasa_from_sp(double sp, double p, double lon, double lat);\nextern double gsw_dynamic_enthalpy(double sa, double ct, double p);\nextern double gsw_enthalpy(double sa, double ct, double p);\nextern double gsw_enthalpy_sso_0_p(double p);\nextern double gsw_enthalpy_t_exact(double sa, double t, double p);\nextern double gsw_cp_t_exact(double sa, double t, double p);\nextern double gsw_entropy_from_t(double sa, double t, double p);\nextern double gsw_entropy_part(double sa, double t, double p);\nextern double gsw_entropy_part_zerop(double sa, double pt0);\nextern double gsw_fdelta(double p, double lon, double lat);\nextern double gsw_gibbs(int ns, int nt, int np, double sa, double t, double p);\nextern double gsw_gibbs_pt0_pt0(double sa, double pt0);\nextern double gsw_grav(double lat, double p);\nextern double gsw_hill_ratio_at_sp2(double t);\nextern int gsw_indx(double *x, int n, double z);\nextern double gsw_internal_energy(double sa, double ct, double p);\nextern void gsw_ipv_vs_fnsquared_ratio(double *sa, double *ct, double *p,\n int nz, double *ipv_vs_fnsquared_ratio, double *p_mid);\nextern double gsw_kappa(double sa, double ct, double p);\nextern double gsw_kappa_t_exact(double sa, double t, double p);\nextern double gsw_latentheat_evap_ct(double sa, double ct);\nextern double gsw_latentheat_evap_t(double sa, double t);\nextern double gsw_latentheat_melting(double sa, double p);\nextern void gsw_nsquared(double *sa, double *ct, double *p, double *lat,\n int nz, double *n2, double *p_mid);\nextern double gsw_pot_rho_t_exact(double sa, double t, double p, double p_ref);\nextern double gsw_pt0_from_t(double sa, double t, double p);\nextern double gsw_pt_from_ct(double sa, double ct);\nextern double gsw_pt_from_t(double sa, double t, double p, double p_ref);\nextern double gsw_rho(double sa, double ct, double p);\nextern void gsw_rho_first_derivatives(double sa, double ct, double p,\n double *drho_dsa, double *drho_dct, double *drho_dp);\nextern double gsw_rho_t_exact(double sa, double t, double p);\nextern double gsw_saar(double p, double lon, double lat);\nextern double gsw_sa_from_rho(double rho, double ct, double p);\nextern double gsw_sa_from_sp_baltic(double sp, double lon, double lat);\nextern double gsw_sa_from_sp(double sp, double p, double lon, double lat);\nextern double gsw_sa_from_sstar(double sstar, double p,double lon,double lat);\nextern double gsw_sigma0(double sa, double ct);\nextern double gsw_sigma1(double sa, double ct);\nextern double gsw_sigma2(double sa, double ct);\nextern double gsw_sigma3(double sa, double ct);\nextern double gsw_sigma4(double sa, double ct);\nextern double gsw_sound_speed(double sa, double ct, double p);\nextern double gsw_sound_speed_t_exact(double sa, double t, double p);\nextern double gsw_specvol_anom(double sa, double ct, double p);\nextern double gsw_specvol(double sa, double ct, double p);\nextern double gsw_specvol_sso_0_p(double p);\nextern double gsw_specvol_t_exact(double sa, double t, double p);\nextern double gsw_sp_from_c(double c, double t, double p);\nextern double gsw_sp_from_sa_baltic(double sa, double lon, double lat);\nextern double gsw_sp_from_sa(double sa, double p, double lon, double lat);\nextern double gsw_sp_from_sk(double sk);\nextern double gsw_sp_from_sr(double sr);\nextern double gsw_sp_from_sstar(double sstar, double p,double 
lon,double lat);\nextern double gsw_sr_from_sp(double sp);\nextern double gsw_sstar_from_sa(double sa, double p, double lon, double lat);\nextern double gsw_sstar_from_sp(double sp, double p, double lon, double lat);\nextern double gsw_t_freezing(double sa, double p, double saturation_fraction);\nextern double gsw_t_from_ct(double sa, double ct, double p);\nextern double gsw_thermobaric(double sa, double ct, double p);\nextern void gsw_turner_rsubrho(double *sa, double *ct, double *p,\n int nz, double *tu, double *rsubrho, double *p_mid);\nextern double gsw_xinterp1(double *x, double *y, int n, double x0);\nextern double gsw_z_from_p(double p, double lat);\n\"\"\"\n\nimportnames = [\"gsw_adiabatic_lapse_rate_from_ct\",\n \"gsw_alpha\",\n \"gsw_alpha_on_beta\",\n \"gsw_alpha_wrt_t_exact\",\n \"gsw_beta_const_t_exact\",\n \"gsw_beta\",\n \"gsw_c_from_sp\",\n \"gsw_cabbeling\",\n \"gsw_ct_freezing\",\n \"gsw_ct_from_pt\",\n \"gsw_ct_from_t\",\n \"gsw_deltasa_atlas\",\n \"gsw_deltasa_from_sp\",\n \"gsw_dynamic_enthalpy\",\n \"gsw_enthalpy\",\n \"gsw_enthalpy_sso_0_p\",\n \"gsw_enthalpy_t_exact\",\n \"gsw_entropy_from_t\",\n \"gsw_entropy_part\",\n \"gsw_entropy_part_zerop\",\n \"gsw_fdelta\",\n \"gsw_gibbs\",\n \"gsw_gibbs_pt0_pt0\",\n \"gsw_grav\",\n \"gsw_hill_ratio_at_sp2\",\n \"gsw_internal_energy\",\n \"gsw_kappa\",\n \"gsw_kappa_t_exact\",\n \"gsw_latentheat_evap_ct\",\n \"gsw_latentheat_evap_t\",\n \"gsw_latentheat_melting\",\n \"gsw_pot_rho_t_exact\",\n \"gsw_pt0_from_t\",\n \"gsw_pt_from_ct\",\n \"gsw_pt_from_t\",\n \"gsw_rho\",\n \"gsw_rho_t_exact\",\n \"gsw_saar\",\n \"gsw_sa_from_rho\",\n \"gsw_sa_from_sp_baltic\",\n \"gsw_sa_from_sp\",\n \"gsw_sa_from_sstar\",\n \"gsw_sigma0\",\n \"gsw_sigma1\",\n \"gsw_sigma2\",\n \"gsw_sigma3\",\n \"gsw_sigma4\",\n \"gsw_sound_speed\",\n \"gsw_sound_speed_t_exact\",\n \"gsw_specvol_anom\",\n \"gsw_specvol\",\n \"gsw_specvol_sso_0_p\",\n \"gsw_specvol_t_exact\",\n \"gsw_sp_from_c\",\n \"gsw_sp_from_sa_baltic\",\n \"gsw_sp_from_sa\",\n \"gsw_sp_from_sk\",\n \"gsw_sp_from_sr\",\n \"gsw_sp_from_sstar\",\n \"gsw_sr_from_sp\",\n \"gsw_sstar_from_sa\",\n \"gsw_sstar_from_sp\",\n \"gsw_t_freezing\",\n \"gsw_t_from_ct\",\n \"gsw_thermobaric\",\n \"gsw_z_from_p\"]\n\nlines = header.split(\"\\n\")\nlines = filter(lambda s: s.startswith(\"extern double\") and s.endswith(\";\"), lines)\n\ndef vectorize(fn, docstring=None):\n \"\"\" Given a function, return a function that maps result over values in\n argument lists. 
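\n\n    Example (illustrative, not part of the original module):\n\n    >>> vec_add = vectorize(lambda a, b: a + b)\n    >>> vec_add([1, 2, 3], 10)\n    array([11, 12, 13])\n    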
\"\"\"\n def wrapper(*args):\n # if all(hasattr(a, \"__iter__\") for a in args):\n # return list(map(fn, *args))\n if any(hasattr(a, \"__iter__\") for a in args):\n n = max(len(a) for a in args if hasattr(a, \"__len__\"))\n vargs = []\n for arg in args:\n if not hasattr(arg, \"__iter__\") or len(arg) == 1:\n vargs.append(itertools.repeat(arg, n))\n elif len(arg) == n:\n vargs.append(arg)\n else:\n raise ValueError(\"Variable length arguments illegal\")\n return numpy.array(list(map(fn, *vargs)))\n else:\n return fn(*args)\n wrapper.__doc__ = fn.__doc__\n return wrapper\n\ndef cname(line):\n return line.split(\" \", 2)[2].split(\"(\", 1)[0]\n\ndef getfuncpointer(name):\n return cgsw.__getattr__(name)\n\ndef argtypes(line):\n args = line.rsplit(\"(\", 1)[1].split(\")\", 1)[0].split(\",\")\n cargs = []\n for arg in args:\n typ = arg.split()[0].strip()\n if typ == \"double\":\n cargs.append(ctypes.c_double)\n elif typ == \"int\":\n cargs.append(ctypes.c_int)\n return tuple(cargs)\n\ndef argnames(line):\n args = line.rsplit(\"(\", 1)[1].split(\")\", 1)[0].split(\",\")\n names = []\n for arg in args:\n names.append(arg.split()[1].strip())\n return tuple(names)\n\ndef restype(line):\n s = line.split(\" \", 2)[1:2][0].strip()\n if s == \"double\":\n return ctypes.c_double\n\ndef addname(line):\n \"\"\" Pull a function from the cgsw namespace into the gsw namespace \"\"\"\n name = line.split(\" \", 2)[2].split(\"(\", 1)[0]\n if name[:4] == \"gsw_\":\n exec(\"{0} = vectorize(cgsw.{1})\".format(name[4:], name), addname.__globals__)\n return\n\nfor line in lines:\n name = cname(line)\n if name in importnames:\n func = getfuncpointer(name)\n func.argtypes = argtypes(line)\n func.restype = restype(line)\n func.__doc__ = name + str(argnames(line))\n addname(line)\n\n","sub_path":"narwhal/gsw.py","file_name":"gsw.py","file_ext":"py","file_size_in_byte":10109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"403123716","text":"def check_pin_code(pinCode):\n nums = pinCode.split('-')\n first = True\n second = True\n third = True\n if nums[0] == '1':\n return 'Некорректен'\n else:\n for i in range(2, int(nums[0])):\n if int(nums[0]) % i == 0:\n first = False\n return 'Некорректен'\n check_second = []\n for i in nums[1]:\n check_second.append(i)\n check_check = check_second.copy()\n check_second.reverse()\n if check_check != check_second:\n second = False\n return 'Некорректен'\n count = 0\n count_2 = -1\n while count < int(nums[2]):\n count_2 += 1\n count = 2 ** count_2\n if count != int(nums[2]):\n third = False\n return 'Некорректен'\n if first and second and third:\n return 'Корректен'\nprint(check_pin_code(input()))","sub_path":"Пин код.py","file_name":"Пин код.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"149160425","text":"import argparse\nimport scipy\nfrom scipy import ndimage\nimport numpy as np\nimport sys\nimport re\nfrom packaging import version\n\nimport torch\nfrom torch.autograd import Variable\nimport torchvision.models as models\nimport torch.nn.functional as F\nfrom torch.utils import data, model_zoo\nfrom model.deeplab import Res_Deeplab\nfrom model.deeplab_multi import DeeplabMulti\nfrom model.deeplab_vgg import DeeplabVGG\nfrom dataset.dark_zurich_dataset import DarkZurichDataSet\nimport os\nfrom PIL import Image\nfrom utils.tool import fliplr\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport yaml\nimport imageio as 
iio\n\ntorch.backends.cudnn.benchmark=True\n\nIMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)\n\nDATA_DIRECTORY = './data/Cityscapes/data'\nDATA_LIST_PATH = './dataset/cityscapes_list/train.txt'\nSAVE_PATH = './data/Dark_zurich/data/pseudo_ohl-1/test'\n\nif not os.path.isdir('./data/Dark_zurich/data/pseudo_ohl-1/'):\n os.makedirs('./data/Dark_zurich/data/pseudo_ohl-1/')\n os.makedirs(SAVE_PATH)\n\nIGNORE_LABEL = 255\nNUM_CLASSES = 19\nRESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth'\nRESTORE_FROM_VGG = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_vgg-ac4ac9f6.pth'\nRESTORE_FROM_ORC = 'http://vllab1.ucmerced.edu/~whung/adaptSeg/cityscapes_oracle-b7b9934.pth'\nSET = 'train' # We generate pseudo label for training set\nINPUT_SIZE = '800,512'\n\nMODEL = 'DeeplabMulti'\n\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\nzero_pad = 256 * 3 - len(palette)\nfor i in range(zero_pad):\n palette.append(0)\n\n\ndef colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n\n return new_mask\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n\n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"DeepLab-ResNet Network\")\n parser.add_argument(\"--model\", type=str, default=MODEL,\n help=\"Model Choice (DeeplabMulti/DeeplabVGG/Oracle).\")\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the Cityscapes dataset.\")\n parser.add_argument(\"--data-list\", type=str, default=DATA_LIST_PATH,\n help=\"Path to the file listing the images in the dataset.\")\n parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\n help=\"The index of the label to ignore during the training.\")\n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\n help=\"Number of classes to predict (including background).\")\n parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\n help=\"Where restore model parameters from.\")\n parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"choose gpu device.\")\n parser.add_argument(\"--batchsize\", type=int, default=4,\n help=\"choose gpu device.\")\n parser.add_argument(\"--set\", type=str, default=SET,\n help=\"choose evaluation set.\")\n parser.add_argument(\"--save\", type=str, default=SAVE_PATH,\n help=\"Path to save result.\")\n parser.add_argument(\"--input-size\", type=str, default=INPUT_SIZE,\n help=\"Comma-separated string with height and width of source images.\")\n return parser.parse_args()\n\ndef save_heatmap(output_name):\n output, name = output_name\n fig = plt.figure()\n plt.axis('off')\n heatmap = plt.imshow(output, cmap='viridis')\n fig.colorbar(heatmap)\n fig.savefig('%s_heatmap.png' % (name.split('.jpg')[0]))\n return\n\ndef main():\n \"\"\"Create the model and start the evaluation process.\"\"\"\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n 
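# The keys read from opts.yaml below (model, norm_style, use_se) are assumed\n    # to have been written by the training run. Note: yaml.load() without an\n    # explicit Loader is deprecated since PyYAML 5.1; yaml.safe_load(stream)\n    # is the usual replacement, assuming opts.yaml holds only plain scalars.\n    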
print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = 
output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save\n\nif __name__ == '__main__':\n with torch.no_grad():\n save_path = main()\n #os.system('python compute_iou.py ./data/Cityscapes/data/gtFine/train %s'%save_path)\n","sub_path":"generate_plabel_dark_zurich.py","file_name":"generate_plabel_dark_zurich.py","file_ext":"py","file_size_in_byte":10763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"109582012","text":"from collections import deque\nfrom time import time\n\nimport numpy as np\nimport pandas as pd\n\nfrom lead_lag.contrast import CrossCorrelationHY\n\n\nclass LeadLag:\n\n def __init__(self,\n arr_1_with_ts: np.array,\n arr_2_with_ts: np.array,\n max_absolute_lag: int,\n verbose: bool,\n specific_lags=None):\n self.contrasts = None\n # lead format format is also useful for plotting.\n self.x, self.y, self.t_x, self.t_y, = convert_to_lead_lag_format(\n arr_1_with_ts, arr_2_with_ts)\n assert len(self.x) == len(self.y)\n if specific_lags is None:\n self.lag_range = np.arange(-max_absolute_lag,\n max_absolute_lag + 1, 1)\n else:\n if sorted(specific_lags) != specific_lags:\n raise Exception(\n 'Make sure the lag list passed as argument is sorted.')\n self.lag_range = np.array(specific_lags)\n self.inference_time = None\n self.cc = CrossCorrelationHY(self.x, self.y, self.t_x, self.t_y,\n self.lag_range, normalize=True, verbose_mode=verbose)\n\n def run_inference(self, multi_threading=True):\n start_time = time()\n if multi_threading:\n self.contrasts = self.cc.fast_inference()\n else:\n self.contrasts = self.cc.slow_inference()\n self.inference_time = time() - start_time\n\n @property\n def lead_lag(self):\n if self.contrasts is None:\n return None\n if np.std(self.contrasts) == 0.0:\n return None\n\n argmax_contrast = np.argmax(self.contrasts)\n return self.lag_range[argmax_contrast], 
self.contrasts[argmax_contrast]\n\n @property\n def llr(self):\n if self.contrasts is None:\n return None\n positive_range_indexes = self.lag_range > 0\n negative_range_indexes = self.lag_range < 0\n positive_contrasts = np.sum(self.contrasts[positive_range_indexes])\n negative_contrasts = np.sum(self.contrasts[negative_range_indexes])\n if negative_contrasts != 0.0:\n llr = positive_contrasts / negative_contrasts\n else:\n llr = np.nan\n return llr\n\n def write_results_to_file(self, output_filename):\n self._contrasts_to_df().to_csv(path_or_buf=output_filename)\n\n def _contrasts_to_df(self):\n df = pd.DataFrame(data=np.transpose(\n [self.lag_range, self.contrasts]), columns=['LagRange', 'Contrast'])\n df.set_index('LagRange', inplace=True)\n return df\n\n def plot_results(self, title=None, kind='line', x=None, y=None, file_name='figure1', scale_factor=1, scale_label='cs', max_lag=None, llr=None):\n import matplotlib.pyplot as plt\n if self.contrasts is not None:\n # self._contrasts_to_df().plot(kind=kind, x=x, y=y)\n plt.rcParams[\"figure.figsize\"] = [6, 4]\n fig, ax = plt.subplots()\n fig.patch.set_facecolor('white')\n\n ax.axvline(linestyle='dashed', color='black', linewidth=0.75)\n\n plt.scatter(self.lag_range * scale_factor, self.contrasts, s=15) \n\n if title is not None:\n plt.title(title)\n \n plt.xlabel(f\"Lag ({scale_label})\")\n plt.ylabel('Cross-Correlation')\n # plt.legend([])\n if max_lag is not None:\n plt.text(0.95, 0.5, f'Lead-Lag Time ({scale_label}): {\"{:.2f}\".format(max_lag * scale_factor)}', transform=plt.gcf().transFigure)\n if llr is not None:\n plt.text(0.95, 0.45, f'Lead-Lag Ratio: {\"{:.2f}\".format(llr)}', transform=plt.gcf().transFigure)\n\n plt.savefig(f\"{file_name}.png\", bbox_inches='tight')\n # plt.savefig(f\"{file_name}.png\")\n plt.show()\n\n def plot_data(self, legend=None, date=None):\n import matplotlib.pyplot as plt\n plt.title(f\"Non-synchronous data with leader / lagger relationship — {date}\")\n plt.xlabel('Time Axis (grid granularity)')\n plt.scatter(self.t_x, self.x[self.t_x], s=0.5, color='lime')\n plt.scatter(self.t_y, self.y[self.t_y], s=0.5, color='blue')\n if legend is None:\n plt.legend(['X(t)', 'Y(t)'])\n else:\n plt.legend(legend)\n plt.show()\n\n\ndef convert_to_lead_lag_format(arr1, arr2):\n assert len(arr1.shape) == 2 # (x, t_x)\n assert len(arr2.shape) == 2 # (y, t_y)\n time_origin = min(arr2[0, 0], arr1[0, 0])\n arr1[:, 0] -= time_origin\n arr2[:, 0] -= time_origin\n time_end = int(max(arr2[-1, 0], arr1[-1, 0]))\n x = np.zeros(shape=time_end + 1) * np.nan\n t_x = []\n for element_slice in arr1:\n x[int(element_slice[0])] = element_slice[1]\n t_x.append(int(element_slice[0]))\n y = np.zeros(shape=time_end + 1) * np.nan\n t_y = []\n for element_slice in arr2:\n y[int(element_slice[0])] = element_slice[1]\n t_y.append(int(element_slice[0]))\n return x, y, t_x, t_y\n\n\nclass RealTimeAggregator:\n\n def __init__(self, history_length):\n self.ts = deque(maxlen=history_length)\n\n def add(self, value: float, timestamp: int):\n self.ts.append((timestamp, value))\n\n def get(self):\n return np.vstack(self.ts)\n","sub_path":"lead_lag/lead_lag.py","file_name":"lead_lag.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"481983594","text":"import matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nfrom numpy import array, arange, log10, where, median, std, mean, concatenate, vstack, argsort\r\nfrom mpl_toolkits.basemap import 
Basemap\r\n\r\nplt.close('all')\r\n# for bilinear sloping segment\r\n\r\n# use historical interface data\r\nusehistoric = True\r\nplt_inter_only = True\r\n\r\nimport pickle\r\nfrom shakemap_tools import *\r\nfrom mapping_tools import distance, reckon\r\nfrom numpy import array, sqrt, nan, isnan, arange, abs, unique, hstack, savetxt, \\\r\n logical_and, mean, median, std, log10, ones, logspace, exp, max, signbit, \\\r\n percentile, random, cov, dot, argsort\r\n#from make_slab_fault import make_slab_matrix\r\nfrom os import path, sep\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\n\r\nfig = plt.figure(figsize=(10,10))\r\nplt.tick_params(labelsize=16)\r\n\r\n# set map\r\nurcrnrlat = 3\r\nllcrnrlat = -25\r\nurcrnrlon = -65\r\nllcrnrlon = -85\r\n\r\nlon_0 = mean([llcrnrlon, urcrnrlon])\r\nlat_1 = percentile([llcrnrlat, urcrnrlat], 25)\r\nlat_2 = percentile([llcrnrlat, urcrnrlat], 75)\r\n'''\r\nm = Basemap(projection='lcc',lat_1=lat_1,lat_2=lat_2,lon_0=lon_0,\\\r\n llcrnrlon=llcrnrlon,llcrnrlat=llcrnrlat, \\\r\n urcrnrlon=urcrnrlon,urcrnrlat=urcrnrlat,\\\r\n rsphere=6371200.,resolution='l',area_thresh=10000)\r\n'''\r\nm = Basemap(projection='merc', \\\r\n llcrnrlon=llcrnrlon,llcrnrlat=llcrnrlat, \\\r\n urcrnrlon=urcrnrlon,urcrnrlat=urcrnrlat,\\\r\n rsphere=6371200.,resolution='l',area_thresh=10000)\r\n \r\n#ax = fig.add_subplot(211)\r\n\r\n#m.drawcoastlines(color='0.5', linewidth=0.5)\r\nm.shadedrelief()\r\n#m.etopo()\r\nmeridians = arange(20,360,5)\r\nm.drawmeridians(meridians,labels=[0,0,0,1], fontsize=14, linewidth=0.5, color='0.5')\r\nparallels = arange(-90,90,5)\r\nm.drawparallels(parallels,labels=[1,0,0,0], fontsize=14, linewidth=0.5, color='0.5')\r\nm.drawcountries(linewidth=0.75, color='0.2')\r\n\r\n\r\n# now load trimmed faults\r\nftxt = open('//Users//tallen//Documents//PAGER//Data_Prep//trimmed_faults_dip_type.csv').readlines()[1:]\r\ndate = []\r\nlon = []\r\nlat = []\r\ntyp = []\r\nmag = []\r\n\r\nfor line in ftxt:\r\n dat = line.strip().split(',')\r\n if dat[-1] == 'i' or dat[-1] == 's' or dat[-1] == 't' \\\r\n or dat[-1] == 'o' or dat[-1] == 'h':\r\n date.append(dat[0])\r\n mag.append(float(dat[3]))\r\n lon.append(float(dat[4]))\r\n lat.append(float(dat[5]))\r\n typ.append(dat[-1])\r\n\r\nevtypes = unique(array(typ))\r\nevidx = argsort(argsort(date)) \r\nevidx += 1 # inc up one\r\n\r\n# get types\r\nilat = []\r\nilon = []\r\nimag = []\r\nslat = []\r\nslon = []\r\nsmag = []\r\ntlat = []\r\ntlon = []\r\ntmag = []\r\nolat = []\r\nolon = []\r\nomag = []\r\n\r\nfor i, t in enumerate(typ):\r\n plttxt = False\r\n x, y = m(lon[i], lat[i]) \r\n xt, yt = m(lon[i]-0.5, lat[i]+0.5)\r\n if t == 'i' or t == 'h':\r\n hi = plt.plot(x, y, '^', markerfacecolor='None', markeredgecolor='k', markeredgewidth=1.5, markersize=12)\r\n plttxt = True\r\n elif t == 's':\r\n hs = plt.plot(x, y, 'H', markerfacecolor='None', markeredgecolor='k', markeredgewidth=1.5, markersize=13)\r\n plttxt = True\r\n elif t == 't':\r\n ht = plt.plot(x, y, 'd', markerfacecolor='None', markeredgecolor='k', markeredgewidth=1.5, markersize=12)\r\n plttxt = True\r\n elif t == 'o':\r\n ho = plt.plot(x, y, 's', markerfacecolor='None', markeredgecolor='k', markeredgewidth=1.5, markersize=12)\r\n plttxt = True\r\n \r\n if plttxt == True:\r\n plt.text(xt, yt, str(evidx[i]), size=13, ha='right', va='center', weight='normal').set_clip_on(True)\r\n \r\nplt.legend((hi[0],hs[0]), ('Interface', 'Intraslab'), \\\r\n fontsize=14, loc='upper right', numpoints=1)\r\n\r\nplt.savefig('subduction_events_C.pdf', format='pdf', dpi=300, 
bbox_inches='tight') \r\nplt.savefig('subduction_events_C.png', format='png', dpi=300, bbox_inches='tight')\r\nplt.show()","sub_path":"2017/2017.BSSA_subduction_scaling/map_subduction_earthquakes_C.py","file_name":"map_subduction_earthquakes_C.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"388304400","text":"\"\"\"\r\nDerived module from dmdbase.py for higher order dmd.\r\n\r\nReference:\r\n- S. L Clainche, J. M. Vega, Higher Order Dynamic Mode Decomposition.\r\nJournal on Applied Dynamical Systems, 16(2), 882-925, 2017.\r\n\"\"\"\r\nfrom past.utils import old_div\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom scipy.linalg import pinv2\r\nfrom mosessvd import MOSESSVD\r\nfrom numba import jit\r\n\r\nfrom .mosesdmdbase import MOSESDMDBase\r\n\r\n\r\ndef pinv(x): return pinv2(x, rcond=10 * np.finfo(float).eps)\r\n\r\n\r\nclass MOSESDMD(MOSESDMDBase):\r\n \"\"\"\r\n MOSES SVD based Higher Order Dynamic Mode Decomposition\r\n\r\n :param int svd_rank: rank truncation in SVD. If 0, the method computes the\r\n optimal rank and uses it for truncation; if a positive number, the method\r\n uses the argument for the truncation; if -1, the method does not\r\n compute truncation.\r\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\r\n is 0, which means TLSQ is not applied.\r\n :param bool exact: flag to compute either exact DMD or projected DMD.\r\n Default is False.\r\n :param bool opt: flag to compute optimal amplitudes. See :class:`DMDBase`.\r\n Default is False.\r\n :param int d: the new order for spatial dimension of the input snapshots.\r\n Default is 1.\r\n :param int chunk_size: the horizontal size for the chunks given to MOSES SVD.\r\n :param numpy.dtype dtype: The desired datatype used for calculations.\r\n (might be removed in the future)\r\n :param boolean projection: Whether to use V or the projection of U for\r\n DMD. The second option is better, but requires more computations.\r\n Default is True.\r\n :param int or string sqrt_K: Choose the method to calculate K. 
Default is True.\r\n \"\"\"\r\n\r\n def __init__(self, svd_rank=0, tlsq_rank=0, exact=False, opt=False, d=1,\r\n chunk_size=None, dtype=np.complex64, projection=True,\r\n sqrt_K=True):\r\n super(MOSESDMD, self).__init__(svd_rank, tlsq_rank, exact, opt)\r\n self.d = d\r\n self.chunk_size = chunk_size\r\n self.U = None\r\n self.s = None\r\n self.V = None\r\n self.K_list = None\r\n self.M = None\r\n self.dtype = dtype\r\n self.projection = projection\r\n self.sqrt_K = sqrt_K\r\n self.K_eigvec = None\r\n\r\n def linsolve(self, A, B):\r\n return np.matmul(B, np.linalg.inv(A))\r\n\r\n # @profile\r\n def fit(self, X):\r\n \"\"\"\r\n Compute the Dynamic Modes Decomposition to the input data.\r\n\r\n :param X: the input snapshots.\r\n :type X: numpy.ndarray or iterable\r\n \"\"\"\r\n if X.dtype != self.dtype:\r\n X = X.astype(self.dtype)\r\n\r\n self._snapshots = X\r\n\r\n n_samples = self._snapshots.shape[1]\r\n\r\n # X, Y = self._compute_tlsq(X, Y, self.tlsq_rank) not implemented\r\n\r\n msvd = MOSESSVD(rank=self.svd_rank)\r\n\r\n # MOSES SVD iteration loop\r\n i = -1\r\n for i in range(self.d-1, self._snapshots.shape[1] - self.chunk_size, self.chunk_size):\r\n chunk = [self._snapshots[:, i-j:i-j+self.chunk_size] for j in range(self.d)]\r\n chunk = np.vstack(chunk)\r\n msvd.update(chunk)\r\n\r\n # final chunk that contains the remaining snapshots\r\n chunk = [self._snapshots[:, i+1-j+self.chunk_size: self._snapshots.shape[1]-j] for j in range(self.d)]\r\n chunk = np.vstack(chunk)\r\n msvd.update(chunk)\r\n\r\n # get the SVD matrices\r\n U, s, V = msvd.S.astype(self.dtype), msvd.Gamma.astype(self.dtype), msvd.Q.astype(self.dtype)\r\n self.U, self.s, self.V = U, s, V\r\n\r\n M = np.zeros((self.svd_rank, self._snapshots.shape[1] - self.d)).astype(self.dtype)\r\n U_conj = np.ascontiguousarray(U.conj().T)\r\n\r\n # calculate M\r\n if self.projection:\r\n for i in range(self.svd_rank):\r\n M[i, :] = self.M_projection_value(self._snapshots, U_conj, i, self.d, self._snapshots.shape[1] - self.d,\r\n self.dtype)\r\n else:\r\n M = s.dot(V.conj().T)\r\n\r\n self.M = M\r\n\r\n # get the time shifted MX and MY\r\n MX = M[:, :-1]\r\n MY = M[:, 1:]\r\n\r\n # calculate the forward and backward operators\r\n Kf = MY.dot(pinv(MX))\r\n Kb = MX.dot(pinv(MY))\r\n Kbinv = pinv(Kb)\r\n if self.sqrt_K == \"mean\":\r\n K = (Kf + Kbinv) / 2\r\n elif self.sqrt_K:\r\n K = sp.linalg.sqrtm(Kf.dot(Kbinv))\r\n else:\r\n K = Kf\r\n self.Atilde = K\r\n K_eigval, K_eigvec = np.linalg.eig(K)\r\n self._eigs = K_eigval\r\n self.K_eigvec = K_eigvec\r\n\r\n # calculate the modes truncated to the original size\r\n self._modes = U[:self._snapshots.shape[0]].dot(K_eigvec.astype(self.dtype))\r\n\r\n # Default timesteps\r\n self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\r\n self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\r\n\r\n self._b = self._compute_amplitudes(self._modes, self._snapshots,\r\n self._eigs, self.opt)\r\n\r\n return self\r\n\r\n def _compute_amplitudes(self, modes, snapshots, eigs, opt):\r\n \"\"\"\r\n Compute the amplitude coefficients. 
If `opt` is False the amplitudes\r\n are computed by minimizing the error between the modes and the first\r\n snapshot; if `opt` is True the amplitudes are computed by minimizing\r\n the error between the modes and all the snapshots, at the expense of\r\n bigger computational cost.\r\n :param numpy.ndarray modes: 2D matrix that contains the modes, stored\r\n by column.\r\n :param numpy.ndarray snapshots: 2D matrix that contains the original\r\n snapshots, stored by column.\r\n :param numpy.ndarray eigs: array that contains the eigenvalues of the\r\n linear operator.\r\n :param bool opt: flag for computing the optimal amplitudes of the DMD\r\n modes, minimizing the error between the time evolution and all\r\n the original snapshots. If false the amplitudes are computed\r\n using only the initial condition, that is snapshots[0].\r\n :return: the amplitudes array\r\n :rtype: numpy.ndarray\r\n References for optimal amplitudes:\r\n Jovanovic et al. 2014, Sparsity-promoting dynamic mode decomposition,\r\n https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document\r\n \"\"\"\r\n if opt:\r\n # compute the vandermonde matrix\r\n omega = old_div(np.log(eigs), self.original_time['dt'])\r\n vander = np.exp(\r\n np.multiply(*np.meshgrid(omega, self.dmd_timesteps))).T\r\n\r\n # perform svd on all the snapshots\r\n # msvd = MOSESSVD(rank=self.svd_rank)\r\n # U, s, V = msvd.iterated_svd(snapshots, b=self.svd_rank+1)\r\n # V = V.conj().T\r\n # U, s, V = np.linalg.svd(self._snapshots, full_matrices=False)\r\n U, s, M = self.U, self.s, self.M\r\n K_eigvec = self.K_eigvec\r\n sinv = np.diag(np.reciprocal(np.diag(s)))\r\n V = np.dot(sinv, M).conj().T\r\n\r\n vander = vander[:,vander.shape[1] - V.shape[0]:]\r\n\r\n P = np.multiply(\r\n np.dot(K_eigvec.conj().T, K_eigvec),\r\n np.conj(np.dot(vander, vander.conj().T)))\r\n tmp = np.dot(V, s.conj().T)\r\n q = np.conj(np.diag(np.dot(np.dot(vander, tmp), K_eigvec)))\r\n\r\n # b optimal\r\n a = np.linalg.solve(P, q)\r\n else:\r\n a = np.linalg.lstsq(modes, snapshots.T[0], rcond=None)[0]\r\n\r\n return a\r\n\r\n @staticmethod\r\n @jit(nopython=True)\r\n def M_projection_value(snapshots, S_conj, index_i, d, length_j, dtype):\r\n \"\"\"\r\n Generates the i-th row from the matrix product of U and the stacked snapshots.\r\n This projects the stacked snapshots to the subspace of U\r\n Parameters\r\n ----------\r\n snapshots : numpy.ndarray\r\n Snapshot matrix\r\n U_conj : numpy.ndarray\r\n Complex conjugate of U matrix. 
It is more efficient to do the\r\n conjugate transpose outside this method\r\n index_i : int\r\n Index i for the M matrix\r\n d : int\r\n stacking depth of the snapshots\r\n dtype : numpy.dtype\r\n Target datatype.\r\n\r\n Returns\r\n -------\r\n value_row : The i-th row of M\r\n\r\n \"\"\"\r\n S_row = S_conj[index_i]\r\n snapshot_length = snapshots.shape[0]\r\n value_row = np.zeros(length_j).astype(dtype)\r\n for index_j in range(length_j):\r\n value = dtype(0)\r\n for m_slice_nr in range(d):\r\n m_slice = snapshots[:, index_j+d-1 - m_slice_nr]\r\n s_slice = S_row[m_slice_nr * snapshot_length : (m_slice_nr+1) * snapshot_length]\r\n value += s_slice.dot(m_slice)\r\n value_row[index_j] = value\r\n return value_row\r\n","sub_path":"pydmd/mosesdmd.py","file_name":"mosesdmd.py","file_ext":"py","file_size_in_byte":9030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"621023385","text":"import os\ncwd = os.getcwd()\nprint('CWD: ', cwd)\n\n# When execute selection in PyCharm\ncsv_file = 'io/sample_data.csv'\nif cwd.endswith('io'):\n # When run the whold module\n print(\"Running in 'io' folder\")\n csv_file = 'sample_data.csv'\n\ntotal_value = 0.0\nwith open(csv_file, 'r') as f:\n header_line = next(f) # skip first header line\n for line in f:\n print(repr(line))\n line = line.strip() # Strip whitespace\n parts = line.split(',')\n parts[0] = parts[0].strip('\"') # Strip double quotes\n parts[1] = parts[1].strip('\"')\n parts[2] = int(parts[2])\n parts[3] = float(parts[3])\n total_value += parts[2]*parts[3]\n print(parts)\n\nprint(\"Total value: \", total_value)\n\n\n\n#=== more demo =====\n# read whole file once\nf = open(csv_file, 'r')\nf\ndata = f.read()\ndata\nf.close()\n\nf = open(csv_file, 'r')\nlines = f.readlines()\nlines\nf.close()\n\n# read file line by line\nf = open(csv_file, 'r')\nfor i, line in enumerate(f, start=1):\n print(f\"Line {i}: {line}\")\nf.close()\n\n# close file automatically by using 'with'\nwith open(csv_file, 'r') as f:\n s = f.read()\n print('File: ', s)\n\nline = ' \"IBM\", 32, 28.88 \\r\\n'\n# String is immutable\nline.strip()\nline\nline = line.strip()\nline\nparts = line.split(\",\")\nparts\n","sub_path":"PythonProgrammingLanguage/io/file_basics.py","file_name":"file_basics.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"198744179","text":"import os\nimport nuke\nimport pyblish.api\nimport pype.api as pype\n\n\n@pyblish.api.log\nclass CollectNukeWrites(pyblish.api.InstancePlugin):\n \"\"\"Collect all write nodes.\"\"\"\n\n order = pyblish.api.CollectorOrder + 0.1\n label = \"Collect Writes\"\n hosts = [\"nuke\", \"nukeassist\"]\n families = [\"render\", \"render.local\", \"render.farm\"]\n\n def process(self, instance):\n\n node = None\n for x in instance:\n if x.Class() == \"Write\":\n node = x\n\n if node is None:\n return\n\n self.log.debug(\"checking instance: {}\".format(instance))\n\n # Determine defined file type\n ext = node[\"file_type\"].value()\n\n # Determine output type\n output_type = \"img\"\n if ext == \"mov\":\n output_type = \"mov\"\n\n # Get frame range\n handles = instance.context.data['handles']\n handle_start = instance.context.data[\"handleStart\"]\n handle_end = instance.context.data[\"handleEnd\"]\n first_frame = int(nuke.root()[\"first_frame\"].getValue())\n last_frame = int(nuke.root()[\"last_frame\"].getValue())\n\n if node[\"use_limit\"].getValue():\n handles = 0\n first_frame = 
int(node[\"first\"].getValue())\n last_frame = int(node[\"last\"].getValue())\n\n # get path\n path = nuke.filename(node)\n output_dir = os.path.dirname(path)\n self.log.debug('output dir: {}'.format(output_dir))\n\n # get version\n version = pype.get_version_from_path(nuke.root().name())\n instance.data['version'] = version\n self.log.debug('Write Version: %s' % instance.data('version'))\n\n # create label\n name = node.name()\n # Include start and end render frame in label\n label = \"{0} ({1}-{2})\".format(\n name,\n int(first_frame),\n int(last_frame)\n )\n\n if 'render' in instance.data['families']:\n if \"representations\" not in instance.data:\n instance.data[\"representations\"] = list()\n\n representation = {\n 'name': ext,\n 'ext': ext,\n \"stagingDir\": output_dir,\n \"anatomy_template\": \"render\"\n }\n\n try:\n collected_frames = os.listdir(output_dir)\n if collected_frames:\n representation['frameStart'] = \"%0{}d\".format(\n len(str(last_frame))) % first_frame\n representation['files'] = collected_frames\n instance.data[\"representations\"].append(representation)\n except Exception:\n instance.data[\"representations\"].append(representation)\n self.log.debug(\"couldn't collect frames: {}\".format(label))\n\n # Add version data to instance\n version_data = {\n \"handles\": handle_start,\n \"handleStart\": handle_start,\n \"handleEnd\": handle_end,\n \"frameStart\": first_frame + handle_start,\n \"frameEnd\": last_frame - handle_end,\n \"version\": int(version),\n \"colorspace\": node[\"colorspace\"].value(),\n \"families\": [instance.data[\"family\"]],\n \"subset\": instance.data[\"subset\"],\n \"fps\": instance.context.data[\"fps\"]\n }\n\n group_node = [x for x in instance if x.Class() == \"Group\"][0]\n deadlineChunkSize = 1\n if \"deadlineChunkSize\" in group_node.knobs():\n deadlineChunkSize = group_node[\"deadlineChunkSize\"].value()\n\n deadlinePriority = 50\n if \"deadlinePriority\" in group_node.knobs():\n deadlinePriority = group_node[\"deadlinePriority\"].value()\n\n instance.data.update({\n \"versionData\": version_data,\n \"path\": path,\n \"outputDir\": output_dir,\n \"ext\": ext,\n \"label\": label,\n \"handles\": handles,\n \"frameStart\": first_frame,\n \"frameEnd\": last_frame,\n \"outputType\": output_type,\n \"colorspace\": node[\"colorspace\"].value(),\n \"deadlineChunkSize\": deadlineChunkSize,\n \"deadlinePriority\": deadlinePriority\n })\n\n self.log.debug(\"instance.data: {}\".format(instance.data))\n","sub_path":"pype/plugins/nuke/publish/collect_writes.py","file_name":"collect_writes.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"241349621","text":"import random\nimport pygame\nimport numpy as np\nfrom pygame.locals import K_LEFT, K_RIGHT, K_UP, K_DOWN\nimport data.dataUtils as data\nfrom square import Square\nfrom snake import Snake\n\nclass Game:\n\n FOOD = 3\n HEAD = 2\n SNAKE = 1\n SPACE = 0\n\n width = data.getConfig(\"width\")\n rows = data.getConfig(\"rows\")\n square_color = data.getConfig(\"squareColor\")\n initial_snake_pos = data.getConfig(\"initialSnakePos\")\n food_color = data.getConfig(\"foodColor\")\n line_color = data.getConfig(\"lineColor\")\n board_color = data.getConfig(\"boardColor\")\n\n def __init__(self):\n '''snake game'''\n self.snake = Snake(self.initial_snake_pos, self.square_color)\n self.food = Square(self.random_food_pos(), self.food_color)\n self.size = 0\n\n def get_inputs(self):\n return[self.get_snake_head_pos(), 
self.get_snake_dir(), self.get_food_pos()]\n #return[self.get_snake_head_pos(), self.get_snake_full_pos(), self.get_snake_dir()]\n\n def get_game_map_rows(self):\n '''return rows config'''\n return self.rows\n\n def get_snake_head_pos(self):\n '''return snake head pos [x, y]'''\n return self.snake.head.pos\n\n def get_snake_full_pos(self):\n '''return array for each of the snake body pos [x, y]'''\n return [body.pos for body in self.snake.body]\n\n def get_snake_dir(self):\n '''return snake dir [x, y]'''\n return self.snake.dir\n\n def get_food_pos(self):\n '''return food pos [x, y]'''\n return self.food.pos\n\n def get_score(self):\n '''return len of the snake body'''\n return len(self.snake.body)\n\n def draw_grid(self, surface):\n '''draw visual of the game grid'''\n size_between = self.width // self.rows\n grid_x = 0\n grid_y = 0\n for l in range(self.rows):\n grid_x = grid_x + size_between\n grid_y = grid_y + size_between\n pygame.draw.line(surface, self.line_color, (grid_x, 0), (grid_x, self.width))\n pygame.draw.line(surface, self.line_color, (0, grid_y), (self.width, grid_y))\n\n def redraw_window(self, surface):\n '''draw visual of the full game board'''\n surface.fill(self.board_color)\n self.snake.draw(surface)\n self.food.draw(surface)\n self.draw_grid(surface)\n pygame.display.update()\n\n def random_food_pos(self):\n '''return a valid random food position'''\n positions = self.snake.body\n while True:\n pos = [random.randrange(self.rows), random.randrange(self.rows)]\n if len(list(filter(lambda z: z.pos == pos, positions))) > 0:\n continue\n break\n return pos\n\n def move_snake_up(self):\n '''Trigger one input K_UP to the snake'''\n newevent = pygame.event.Event(pygame.KEYDOWN, unicode='', key=K_UP, mod=pygame.locals.KMOD_NONE, scancode=111, window=None)\n newevent2 = pygame.event.Event(pygame.KEYUP, key=K_UP, mod=pygame.locals.KMOD_NONE, scancode=111, window=None)\n pygame.event.post(newevent)\n pygame.event.post(newevent2)\n\n def move_snake_down(self):\n '''Trigger one input K_DOWN to the snake'''\n newevent = pygame.event.Event(pygame.KEYDOWN, unicode='', key=K_DOWN, mod=pygame.locals.KMOD_NONE, scancode=116, window=None)\n newevent2 = pygame.event.Event(pygame.KEYUP, key=K_DOWN, mod=pygame.locals.KMOD_NONE, scancode=116, window=None)\n pygame.event.post(newevent)\n pygame.event.post(newevent2)\n\n def move_snake_left(self):\n '''Trigger one input K_LEFT to the snake'''\n newevent = pygame.event.Event(pygame.KEYDOWN, unicode='', key=K_LEFT, mod=pygame.locals.KMOD_NONE, scancode=113, window=None)\n newevent2 = pygame.event.Event(pygame.KEYUP, key=K_LEFT, mod=pygame.locals.KMOD_NONE, scancode=113, window=None)\n pygame.event.post(newevent)\n pygame.event.post(newevent2)\n\n def move_snake_right(self):\n '''Trigger one input K_RIGHT to the snake'''\n newevent = pygame.event.Event(pygame.KEYDOWN, unicode='', key=K_RIGHT, mod=pygame.locals.KMOD_NONE, scancode=114, window=None)\n newevent2 = pygame.event.Event(pygame.KEYUP, key=K_RIGHT, mod=pygame.locals.KMOD_NONE, scancode=114, window=None)\n pygame.event.post(newevent)\n pygame.event.post(newevent2)\n\n def send_inputs(self,net):\n '''Send 200 inputs to our neural network.\n We start by building a 10x10 matrix filled with zeros\n and insert the current position of the snake's head into it.\n We do the same thing for the position of the food.\n We flatten both matrices into one-dimensional lists and append them together,\n then feed our 200 inputs to the network and receive 4 outputs between -1 and 1.\n Depending on the outputs, a given move is made.\n The commented-out part in the middle is a greedy method that was much more successful.\n '''\n matrixS = np.zeros((10, 10))\n snakePos = self.get_snake_head_pos()\n print(snakePos)\n matrixS[snakePos[0],snakePos[1]]=1\n inputS = np.squeeze(np.asarray(matrixS))\n\n matrixF = np.zeros((10, 10))\n foodPos = self.get_food_pos()\n matrixF[foodPos[0],foodPos[1]]=1\n inputF = np.squeeze(np.asarray(matrixF))\n\n input = np.append(inputS,inputF)\n\n output = net.activate(input)\n print(output)\n\n # greedy method\n # x = input[0]-input[4]\n # y = input[1]-input[5]\n #\n # if (x<0 and y<=0):\n # if (x=0 and y>=0):\n # if (x>y):\n # self.move_snake_left()\n # else:\n # self.move_snake_up()\n #\n # elif (x>=0 and y<=0):\n # if (x>abs(y)):\n # self.move_snake_left()\n # else:\n # self.move_snake_down()\n # elif (x<0 and y>0):\n # if (abs(x)>y):\n # self.move_snake_right()\n #\n # else:\n # self.move_snake_up()\n\n if (output[0]>=output[1] and output[0]>=output[2] and output[0]>=output[3]):\n self.move_snake_right()\n elif (output[1]>=output[0] and output[1]>=output[2] and output[1]>=output[3]):\n self.move_snake_left()\n elif (output[2]>=output[1] and output[2]>=output[0] and output[2]>=output[3]):\n self.move_snake_up()\n else:\n self.move_snake_down()\n\n def isCloser(self, snakeBefore, foodBefore):\n '''Compare the positions before and after a move and return the fitness increase:\n returns 10 if the snake found the food,\n returns 1 if the snake moved closer to the food,\n returns -2 if the snake moved further away from the food.'''\n snakeNow = self.get_snake_head_pos()\n foodNow = self.get_food_pos()\n distanceToFoodBefore = abs(snakeBefore[0]-foodBefore[0])+ abs(snakeBefore[1]-foodBefore[1])\n distanceToFoodNow = abs(snakeNow[0]-foodNow[0])+ abs(snakeNow[1]-foodNow[1])\n\n if (foodBefore[0]!=foodNow[0] or foodBefore[1]!=foodNow[1]):\n return 10\n elif(distanceToFoodNow < distanceToFoodBefore ):\n return 1\n else:\n return -2\n\n\n def start(self, net, genome):\n '''Main loop of the game'''\n self.size=0\n win = pygame.display.set_mode((self.width, self.width))\n clock = pygame.time.Clock()\n flag=True\n counter = 0\n while flag:\n counter+=1\n # if statement to limit the number of moves to 50\n if counter>50:\n self.snake.alive=False\n # pygame.time.delay(50)\n # clock.tick(10)\n\n # the state before a move\n snakeBefore = self.get_snake_head_pos()[:]\n food = self.get_food_pos()\n # call send_inputs, which feeds the inputs to our neural network and then makes a move\n self.send_inputs(net)\n # function that takes the movement events and applies them\n self.snake.move()\n # increase the fitness\n genome.fitness += self.isCloser(snakeBefore,food)\n\n # if the snake ate a piece of food\n if self.snake.body[0].pos == self.food.pos:\n self.snake.add_cube()\n self.food = Square(self.random_food_pos(), self.food_color)\n\n # if the snake dies\n if self.snake.alive == False:\n genome.fitness -= 10\n self.size = len(self.snake.body)\n print('Score:', self.size)\n self.snake.reset(data.getConfig(\"initialSnakePos\"))\n self.snake.alive = True\n flag=False\n self.redraw_window(win)\n","sub_path":"snakeNEAT/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"128423495","text":"#!/usr/bin/env python\n\n\nimport subprocess\nimport optparse\nimport re\n\n\ndef get_arguments():\n parser = 
optparse.OptionParser()\n parser.add_option(\"-i\", dest=\"interface\", help=\"Interface to change its MAC address\")\n parser.add_option(\"-m\", dest=\"new_mac\", help=\"New MAC address\")\n (options, arguments) = parser.parse_args()\n if not options.interface:\n parser.error(\"[-] Please specify an interface\")\n elif not options.new_mac:\n parser.error(\"[-] Please specify a new mac address\")\n return options\n\n\ndef mac_change(interface, new_mac):\n print(\"[+] Changing MAC address for \" + interface + \" to \" + new_mac)\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"down\"])\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"up\"])\n\n\ndef current_MAC(interface):\n result = subprocess.check_output([\"ifconfig\", interface])\n mac_result = re.search(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", result)\n\n if mac_result:\n return mac_result.group(0)\n else:\n print(\"[-] Could not read MAC address.\")\n\n\noptions = get_arguments()\n\ncurrent_mac = current_MAC(options.interface)\nprint(\"Current MAC address : \" + str(current_mac))\n\nmac_change(options.interface, options.new_mac)\n\ncurrent_mac = current_MAC(options.interface)\nif current_mac == options.new_mac:\n print(\"[+] MAC address was successfully changed.\")\nelse:\n print(\"[-] MAC address did not get changed.\")\n","sub_path":"mac_changer.py","file_name":"mac_changer.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"589385996","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 20 17:10:09 2018\r\n\r\n@author: c84109001\r\n\"\"\"\r\n\r\nimport socket\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# establish the connection\r\ns.connect(('127.0.0.1', 8999))\r\n# receive the welcome message\r\nprint(s.recv(1024).decode('utf-8'))\r\nfor data in [b'Michael', b'Tracy', b'Sarah']:\r\n # send data\r\n s.send(data)\r\n print(s.recv(1024).decode('utf-8'))\r\ns.send(b'exit')\r\ns.close()\r\n","sub_path":"learn16_TCP_2_Client.py","file_name":"learn16_TCP_2_Client.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"563188615","text":"import collections\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\nclass WideResNet(object):\n def __init__(self, is_training, num_classes,keep_prob,unit_num_in_block,width_k):\n self.num_classes = num_classes\n self._is_training = is_training\n self.keep_prob=keep_prob\n self.unit_num_in_block=unit_num_in_block\n self.width_k=width_k\n\n\n\n def preprocess(self, inputs):\n # ResNet does not need any input preprocessing for now\n # preprocessed_inputs = tf.to_float(inputs)\n # preprocessed_inputs = tf.subtract(preprocessed_inputs, 128.0)\n # preprocessed_inputs = tf.div(preprocessed_inputs, 128.0)\n return inputs\n\n def wide_residual_unit(self,inputs,unit_output_channels,is_downsamping,name):\n input_channel=inputs.get_shape().as_list()[-1]\n unit_stride=2 if is_downsamping else 1\n\n with tf.variable_scope(name+\"/residual_branch\"):\n residual_branch = slim.batch_norm(inputs,scope=\"bn1\")\n residual_branch = slim.convolution2d(residual_branch,num_outputs=unit_output_channels,\n kernel_size=3,stride=unit_stride,scope=\"conv1\")\n\n residual_branch = slim.dropout(residual_branch,keep_prob=self.keep_prob)\n\n residual_branch = slim.batch_norm(residual_branch, scope=\"bn2\")\n residual_branch = slim.convolution2d(residual_branch, num_outputs=unit_output_channels,\n kernel_size=3, stride=1, 
scope=\"conv2\")\n identity_branch=inputs\n with tf.variable_scope(name+\"/identity_branch\"):\n if unit_stride!=1 or input_channel!=unit_output_channels: # downsampling\n identity_branch=slim.batch_norm(identity_branch,scope=\"bn\")\n identity_branch=slim.convolution2d(identity_branch,num_outputs=unit_output_channels,\n kernel_size=1,stride=unit_stride)\n\n return residual_branch+identity_branch\n\n\n #inputs:[batch_size,32,32,3]\n def inference(self, inputs):\n with slim.arg_scope(self.WideResNet_arg_scope(is_training=self._is_training)):\n net=slim.convolution2d(inputs,num_outputs=16,kernel_size=3,stride=1,scope=\"conv1\")\n print(\"net:\", net)\n\n with tf.variable_scope(\"block1\"):\n for unit_id in range(self.unit_num_in_block):\n net=self.wide_residual_unit(net,unit_output_channels=16*self.width_k,\n is_downsamping=False,name=\"unit{}\".format(unit_id+1))\n print(\"block1_net:\",net)\n\n with tf.variable_scope(\"block2\"):\n for unit_id in range(self.unit_num_in_block):\n net=self.wide_residual_unit(net,unit_output_channels=32*self.width_k,\n is_downsamping = unit_id == 0,\n name=\"unit{}\".format(unit_id+1))\n print(\"block2_net:\", net)\n\n with tf.variable_scope(\"block3\"):\n for unit_id in range(self.unit_num_in_block):\n net = self.wide_residual_unit(net, unit_output_channels=64 * self.width_k,\n is_downsamping = unit_id == 0,\n name=\"unit{}\".format(unit_id + 1))\n print(\"block3_net:\", net)\n\n net=slim.batch_norm(net,scope=\"global_avg_pool/bn\")\n\n global_pool=tf.reduce_mean(net,axis=[1,2],keepdims=True,name=\"global_avg_pool\")\n\n logits=slim.convolution2d(global_pool,num_outputs=self.num_classes,kernel_size=1,\n stride=1,scope=\"fc\")\n print(\"logits:\",logits)\n\n logits=tf.squeeze(logits,axis=[1,2],name=\"squeeze\")\n\n return logits\n\n def postprocess(self,logits):\n softmax=tf.nn.softmax(logits)\n classes=tf.cast(tf.argmax(softmax,axis=1),tf.int32)\n return softmax,classes\n\n def loss(self,logits,labels):\n softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits+1e-8,labels=labels),name=\"softmax_loss\")\n tf.add_to_collection(\"Loss\",softmax_loss)\n loss_all=tf.add_n(tf.get_collection(\"Loss\"),name=\"total_loss\")\n return loss_all\n\n def WideResNet_arg_scope(self,is_training,weight_decay=0.0001,batch_norm_decay=0.90,\n batch_norm_epsilon=1e-5,batch_norm_scale=True):\n\n batch_norm_params={\n 'is_training':is_training,\n 'decay':batch_norm_decay,\n 'epsilon':batch_norm_epsilon,\n 'scale':batch_norm_scale,\n \"activation_fn\":tf.nn.relu\n # 'updates_collections:':tf.GraphKeys.UPDATE_OPS\n }\n\n # Like DenseNet, borrow the pre-activation scheme from ResNetV2: no bn/relu after the convolution\n with slim.arg_scope(\n [slim.convolution2d],\n weights_regularizer=slim.l2_regularizer(weight_decay),\n weights_initializer=slim.variance_scaling_initializer(),\n activation_fn=None):\n\n with slim.arg_scope([slim.batch_norm],**batch_norm_params) :\n with slim.arg_scope([slim.avg_pool2d],padding=\"SAME\") as arg_sc:\n return arg_sc\n\n # It is best to use the variant below, because the OpenVINO converter does not support dropout layers.\n # The arg_scope approach guarantees that the is_training parameter also applies to slim.dropout.\n # Models generated this way can be converted to OpenVINO directly, without calling freezing_graph.py first.\n # with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n # with slim.arg_scope([slim.dropout],is_training=is_training):\n # with slim.arg_scope([slim.avg_pool2d], padding=\"SAME\") as arg_sc:\n # return arg_sc","sub_path":"Classification/WideResNet/nets/WideResNet.py","file_name":"WideResNet.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
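# A minimal usage sketch for the WideResNet class in the record above, assuming
# TF1.x graph-mode semantics to match the tf.contrib.slim usage there. The
# placeholder tensors, the 10-class setting, and the WRN-28-10-style configuration
# (4 units per block, width_k=10) are illustrative assumptions, not part of the
# original file.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])  # CIFAR-sized images (assumed)
labels = tf.placeholder(tf.int32, [None])               # integer class ids for the sparse loss

model = WideResNet(is_training=True, num_classes=10, keep_prob=0.7,
                   unit_num_in_block=4, width_k=10)
logits = model.inference(model.preprocess(inputs))      # builds the three residual blocks
loss = model.loss(logits, labels)                       # softmax loss + collected regularizers
softmax, classes = model.postprocess(logits)            # per-class probabilities and argmax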
+{"seq_id":"431093592","text":"#!/usr/bin/python\n\n\"\"\"\nblockips.py\nThis takes a list of ips and adds cisco commands to them\nso we can unblock the heathens. After you paste hit return if there is not a newline,\nthen Control+D to send.\n\"\"\"\n\nimport sys\n\n\ndef main():\n with open('network_objects.txt', 'w') as outfile:\n print('Paste the IPs here followed by Ctrl D to add Cisco commands:')\n inputs = sys.stdin.read()\n candidate_list = inputs.split('\\n')\n\n for ip in candidate_list:\n if \" \" in ip:\n outfile.write('no network-object {}\\n'.format(ip))\n else:\n outfile.write('no network-object host {}\\n'.format(ip))\n\n with open('network_objects.txt') as output:\n print(output.read())\n\nif __name__ == '__main__':\n main()","sub_path":"blockips.py","file_name":"blockips.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"271272207","text":"#!/opt/stack/bin/python3\n#\n# @copyright@\n# Copyright (c) 2006 - 2018 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n\nimport os\nimport sys\nimport subprocess\nimport json\n\ndef run(home, away):\n\t#\n\t# get my index into the 'hosts' array\n\t#\n\tclient = away\n\ts = subprocess.Popen(['iperf3',\n\t\t'-J', '-t', '5', '-c', client],\n\t\tstdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\to, e = s.communicate()\n\n\tj = json.loads(o)\n\n\tfile = open('/tmp/nettest.debug', 'w')\n\tfile.write('j : %s\\n\\n' % j)\n\n\toutput = {}\n\toutput['home'] = home\n\toutput['away'] = client\n\n\tout = j['end']['streams'][0]\n\tfile.write('out : %s\\n\\n' % out)\n\n\ttry:\n\t\toutput['sent'] = out['sender']['bits_per_second']\n\texcept:\n\t\toutput['sent'] = '0.0'\n\n\ttry:\n\t\toutput['recv'] = out['receiver']['bits_per_second']\n\texcept:\n\t\toutput['recv'] = '0.0'\n\n\tfile.write('%s\\n' % output)\n\tfile.close()\n\n\treturn output\n\narray = sys.argv[1:]\nhome = array[0]\naway = array[1]\noutput = run(home,away)\nprint(output)\n","sub_path":"common/src/foundation/iperf/files/iperf3.py","file_name":"iperf3.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"100352574","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"kkyu GUI\")\nroot.geometry(\"640x480\")\n\nframe = Frame(root)\nframe.pack()\n\nscrollbar = Scrollbar(frame)\nscrollbar.pack(side=\"right\", fill=\"y\")\n\n# If you don't set \"set\", when scroll get down, but scroll get up.\nlistbox = Listbox(frame,\n selectmode=\"extended\",\n height=10,\n yscrollcommand=scrollbar.set)\nfor i in range(1, 32):\n listbox.insert(END, str(i) + \" Day\")\nlistbox.pack(side=\"left\")\n\nscrollbar.config(command=listbox.yview)\n\nroot.mainloop()\n","sub_path":"gui_basic/13_scrollbar.py","file_name":"13_scrollbar.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"419841217","text":"#!/usr/bin/env python3\nfrom lms_dm import package_manager\nimport os,sys\nimport subprocess\nfrom lms_dm import install_utils\n\nclass Package:\n#self.workingDir = \"\" #relative path, should be <.../dependency>\n#self.nameWithExtensions = \"\"#name with extension, for example lms:develop\n#self.name = \"\" #name of the package\n#self.url = \"\"#url to the \"parent, for example at 
github\n\n############################################################\n##### static functions\n @staticmethod\n def getPurePackageName(packageFull):\n return packageFull.split(\":\")[0]\n\n\n @staticmethod\n def isDirPackage(path):\n packageFile = path+'/lms_package.json'\n if not os.path.isfile(packageFile):\n return False\n return True\n\n @staticmethod\n def getPackageNameFromPath(path):\n packageFile = path+'/lms_package.json'\n if not os.path.isfile(packageFile):\n print('lms_package.json does not exist in: ' + package)\n return;\n packageData = parseJson(packageFile) #TODO error handling\n self.name = packageData['name'] \n\n @staticmethod\n def fromPath(path):\n if not isDirPackage(path):\n return #TODO error handling\n p = Package(getPackageNameFromPath(path))\n p.setPackageDir(path)\n return p\n\n#############################################################\n#####\n\n \n def __init__(self,nameWithExtensions,workingDir):\n self.nameWithExtensions = nameWithExtensions\n self.name = self.getPurePackageName(nameWithExtensions)\n self.workingDir = workingDir\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n return (self.name == other.name)\n\n def __ne__(self, other):\n # Not strictly necessary, but to avoid having both x==y and x!=y\n # True at the same time\n return not(self == other)\n\n def getDependencyDir(self):\n return os.path.join(self.workingDir,\"dependencies\")\n\n def getDir(self):\n return os.path.join(self.getDependencyDir(),self.name)\n\n def installedGlobally(self):\n if os.path.isdir(os.path.join(package_manager.getSrcDir(),self.name)):\n return True\n return False\n\n def downloadWithDependencies(self, ignoreGlobal=False):\n #install or update the current package\n if not self.download(ignoreGlobal):\n return\n\n #get all dependencies\n dependencies = self.getPackageDependencies()\n if dependencies is None:\n sys.exit(1)\n for dependency in dependencies:\n dependency.downloadWithDependencies(ignoreGlobal)\n\n\n def download(self, ignoreGlobal=False):\n #check if it is installed globally\n if not ignoreGlobal and self.installedGlobally():\n print(self.name + \" already installed globally, you might have to update it manually\")\n return False\n #set the url\n self.setUrl(package_manager.getPackageUrlFromName(self.name))\n #check if a url was set\n if self.url == None or len(self.url)==0:\n print('Package not found: '+self.name)\n sys.exit(1)\n\n ###get current source\n #create path\n myDir = self.getDir()\n os.makedirs(self.getDependencyDir(), exist_ok=True)\n dirAbs = os.path.abspath(myDir)\n if self.isGitUrl():\n #check if folder already exists\n print(\"mydir: \" + myDir)\n if os.path.isdir(myDir):\n #pull the dir\n p = subprocess.Popen(['git', 'pull'], cwd=myDir)\n output, err = p.communicate()\n if err is not None:\n print(output)\n print(\"pull failed\")\n sys.exit(1)\n print(\"pulled package\")\n else : \n p = subprocess.Popen(['git', 'clone', self.getUrl()], cwd=self.getDependencyDir())\n output, err = p.communicate()\n if err is not None:\n print(output)\n print(\"clone failed: \"+ myDir)\n sys.exit(1)\n print(\"cloned package\")\n\n packageNameParts= self.nameWithExtensions.split(\":\")\n print(\"LEN: {0}\".format(len(packageNameParts)))\n if len(packageNameParts) > 1:\n print(\"checking out: \"+packageNameParts[1])\n p = subprocess.Popen(['git', 'checkout',packageNameParts[1]], cwd=myDir)\n output, err = p.communicate()\n if err is not None:\n print(output)\n print(\"can't checkout: \"+packageNameParts[1])\n sys.exit(1)\n\n 
elif self.isLocalFolder() :\n print('handle local package: ' +packageName)\n if not os.path.isabs(self.getUrl()):\n self.setUrl(os.path.abspath(self.getUrl()));\n #create symlink\n #check if symlink already exists TODO check if valid\n if not os.path.exists(dirAbs):\n os.symlink(self.getUrl(), dirAbs, True)\n else:\n print(dirAbs + ' already exists')\n else :\n print(\"no valid url-type given\")\n sys.exit(1)\n\n return True\n \n \n\n \n\n def hasBinary(self):\n #TODO\n return False\n\n\n def hasSource(self):\n #TODO\n return True\n\n\n###############################################################\n#####Set functions\n \n def setUrl(self,url):\n self.url = url;\n \n def getUrl(self):\n return self.url\n\n def setPackageDir(self,packageDir):\n self.packageDir = packageDir;\n\n def isGitUrl(self):\n if 'github.com' in self.url: #TODO\n return True\n return False\n\n def isLocalFolder(self):\n if os.path.isdir(self.url):\n return True\n return False\n\n def isZipFile(self):\n return False #TODO\n\n def getPackageFilePath(self):\n return os.path.join(self.getDir(),'lms_package.json')\n\n def getPackageDependencies(self):\n res = list()\n packageFile = self.getPackageFilePath()\n if not os.path.isdir(self.getDir()):\n print('package does not exist: ' + self.getDir())\n return\n if not os.path.isfile(packageFile):\n print('lms_package.json does not exist in: ' + self.getDir())\n return\n packageData = install_utils.parseJson(packageFile) #TODO error handling\n if 'dependencies' in packageData:\n names = packageData['dependencies']\n for tmp in names:\n res.append(Package(tmp,self.workingDir))\n return res\n\n #returns a list with all binaries that have to be linked\n def getTargets(self):\n packageFilePath = self.getPackageFilePath()\n json = install_utils.parseJson(packageFilePath)\n if 'targets' in json:\n return json['targets']\n targets = list()\n targets.append(self.name)\n return targets\n\n\n def getPackageIncludes(self,absPath=True):\n packageFilePath = self.getPackageFilePath()\n json = install_utils.parseJson(packageFilePath)\n if 'includes' in json:\n includes = json['includes']\n else:\n includes = list()\n includes.add('include')\n \n result = list()\n for include in includes:\n if absPath:\n result.append(os.path.abspath(os.path.join(self.getDir(),include)))\n else:\n result.append(include)\n return result\n\n def getStringForPackageIncludes(self):\n #each package has one or more binary/target, we have to catch them all!\n targets = self.getTargets()\n dependencies = self.getPackageDependencies()\n #get includes for the dependencies\n includeList = list()\n for dependency in dependencies:\n for tmp in dependency.getPackageIncludes():\n includeList.append(tmp)\n if len(includeList) == 0:\n return \"\"\n res = \"\"\n for target in targets:\n res += 'target_include_directories({0} PUBLIC {1})'.format(target,' '.join(includeList)) + '\\n'\n return res\n\n def getStringForIncludeCopies(self,dest):\n includes = self.getPackageIncludes()\n if len(includes) == 0:\n return \"\"\n return \"\"\"file(GLOB HEADERS {0})\nfile(COPY ${{HEADERS}} DESTINATION {1})\n\"\"\".format('/* '.join(includes)+'/*',dest) + '\\n'\n\n\n def getPackageHierachyDict(self,d=None):\n if d is None:\n d = dict()\n d[self] = self.getPackageDependencies()\n for dep in d[self]:\n dep.getPackageHierachyDict(d)\n return d\n\n def getCMakeCallCompileDependencyMessage(self):\n return 'add_subdirectory({0})'.format(os.path.abspath(self.getDir()))\n \n\n def generateCMake(self):\n libPath = 
os.path.abspath(os.path.join(self.workingDir,\"lib\"))\n binPath = os.path.abspath(os.path.join(self.workingDir,\"bin\"))\n includePath = os.path.abspath(os.path.join(self.workingDir,\"includePath\"))\n\n #get all package-dependencies\n packageHierarchyList = self.getPackageHierachyDict()\n for p in packageHierarchyList:\n res = list()\n for dp in packageHierarchyList[p]:\n if not dp.installedGlobally():\n res.append(dp)\n packageHierarchyList[p] = res\n \n #generate hierarchy CMake\n cmakeFile = os.path.join(self.workingDir,'CMakeLists.txt')\n print(\"CMAKE FILE: \"+cmakeFile)\n #os.makedirs('lms_cmake',exist_ok=True) \n with open(cmakeFile,'w') as file:\n file.write(\"\"\"cmake_minimum_required(VERSION 2.8)\nproject({0})\nset(CMAKE_ARCHIVE_OUTPUT_DIRECTORY {1})\nset(CMAKE_LIBRARY_OUTPUT_DIRECTORY {1})\nset(CMAKE_RUNTIME_OUTPUT_DIRECTORY {2})\n\"\"\".format(\"NAME_OF_THE_PROJECT_TODO\",libPath,binPath))\n\n file.write('include(customCMake.txt) \\n')\n file.write('\\n \\n#package compile hierachy \\n')\n lastSize = 0\n while len(packageHierarchyList) > 0:\n if lastSize == len(packageHierarchyList):\n #TODO error handling if there is a closed loop :D\n print(\"Your dependencies have a closed loop! {0}\".format(packageHierarchyList))\n sys.exit(1)\n lastSize = len(packageHierarchyList)\n toRemove = list()\n for p in packageHierarchyList:\n if len(packageHierarchyList[p]) == 0:\n #write the dependency\n file.write(p.getCMakeCallCompileDependencyMessage()+\"\\n\")\n #remove it from others\n toRemove.append(p)\n for c in packageHierarchyList:\n if p in packageHierarchyList[c]:\n packageHierarchyList[c].remove(p)\n for p in toRemove:\n packageHierarchyList.pop(p)\n \n #set include paths\n file.write('\\n\\n#target include paths \\n')\n packageHierarchyList = self.getPackageHierachyDict()\n for package in list(packageHierarchyList):\n s = package.getStringForPackageIncludes()\n if len(s) != 0:\n file.write(s)\n #copy includes to lib\n for package in list(packageHierarchyList):\n s = package.getStringForIncludeCopies(includePath)\n if len(s) != 0:\n file.write(s)\n\n #generatre custom CMake file if it's missing\n customCmake = os.path.join(self.workingDir,'customCMake.txt')\n if not os.path.isfile(customCmake): \n with open(customCmake,'w') as file:\n file.write(\"#Add your cmake stuff here\")\n \n\n\n\n","sub_path":"lms_dm/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":11969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"540419165","text":"import pandas as pd\nimport pickle\nimport numpy as np\nimport math\n\nclass PreparedData:\n '''\n \n\n Parameters\n ----------\n data_name : str\n the name of desired dataset.\n aligned : bool, optional\n load the aligned data. 
The default is True.\n\n Raises\n ------\n ValueError\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n '''\n def __init__(self, data_name, aligned= True):\n self.data_name = data_name\n self.aligned = aligned\n base_path = \"/media/Work/Darkside/Graduation Project/Data/\"\n filename = ''\n if self.data_name == 'mosi':\n if self.aligned:\n filename = 'mosi_data.pkl'\n else:\n filename = 'mosi_data_noalign.pkl'\n elif self.data_name == 'mosei':\n if self.aligned:\n filename = 'mosei_senti_data.pkl'\n else:\n filename = 'mosei_senti_data_noalign.pkl'\n else:\n raise ValueError('There is no prepared data with that name')\n \n self.data = pd.read_pickle(base_path + filename)\n \n def load_data(self, splitted= False):\n '''\n \n\n Parameters\n ----------\n splitted : bool, optional\n Load training, Validation, and testing data separated. The default is False.\n\n '''\n X_tr_v = self.data['train']['vision']\n X_tr_t = self.data['train']['text']\n X_tr_a = self.data['train']['audio']\n X_tr_l = np.where(np.array(self.data['train']['labels']).astype(np.int32) > 0,\n int(1), int(0))\n \n X_te_v = self.data['test']['vision']\n X_te_t = self.data['test']['text']\n X_te_a = self.data['test']['audio']\n X_te_l = np.where(np.array(self.data['test']['labels']).astype(np.int32) > 0,\n int(1), int(0))\n \n X_val_v = self.data['valid']['vision']\n X_val_t = self.data['valid']['text']\n X_val_a = self.data['valid']['audio']\n X_val_l = np.where(np.array(self.data['valid']['labels']).astype(np.int32) > 0,\n int(1), int(0))\n \n if splitted:\n return ( X_tr_v, X_tr_a, X_tr_t, X_tr_l.reshape(-1)), \\\n (X_te_v, X_te_a, X_te_t, X_te_l.reshape(-1)), \\\n (X_val_v, X_val_a, X_val_t, X_val_l.reshape(-1))\n \n X_v = np.concatenate((X_tr_v, X_te_v, X_val_v)).astype(np.float32)\n X_a = np.concatenate((X_tr_a, X_te_a, X_val_a)).astype(np.float32)\n X_t = np.concatenate((X_tr_t, X_te_t, X_val_t)).astype(np.float32)\n Y = np.concatenate((X_tr_l, X_te_l, X_val_l)).reshape(-1).astype(np.int32)\n \n return X_v, X_a, X_t, Y\n \n def __get_Processed_train(self): \n for i in range(self.X_train_labels.shape[0]):\n if (self.X_train_labels[i]>0):\n self.X_train_labels[i] = int(1)\n else:\n self.X_train_labels[i] = int(0)\n \n for i in range(self.X_train_vision.shape[0]):\n for j in range(self.X_train_vision.shape[1]):\n for k in range(self.X_train_vision.shape[2]):\n if (np.isnan(self.X_train_vision[i][j][k]) or math.isinf(self.X_train_vision[i][j][k]) ):\n self.X_train_vision[i][j][k] = 0\n \n \n for i in range(self.X_train_audio.shape[0]):\n for j in range(self.X_train_audio.shape[1]):\n for k in range(self.X_train_audio.shape[2]):\n if (np.isnan(self.X_train_audio[i][j][k]) or math.isinf(self.X_train_audio[i][j][k]) ):\n self.X_train_audio[i][j][k] = 0\n \n for i in range(self.X_train_text.shape[0]):\n for j in range(self.X_train_text.shape[1]):\n for k in range(self.X_train_text.shape[2]):\n if (np.isnan(self.X_train_text[i][j][k]) or math.isinf(self.X_train_text[i][j][k]) ):\n self.X_train_text[i][j][k] = 0\n \n return self.X_train_vision, self.X_train_audio, self.X_train_text, self.X_train_labels\n \n \n def __get_Processed_test(self):\n \n for i in range(self.X_test_labels.shape[0]):\n if (self.X_test_labels[i]>0):\n self.X_test_labels[i] = int(1)\n else:\n self.X_test_labels[i] = int(0)\n \n for i in range(self.X_test_vision.shape[0]):\n for j in range(self.X_test_vision.shape[1]):\n for k in range(self.X_test_vision.shape[2]):\n if (np.isnan(self.X_test_vision[i][j][k]) or math.isinf(self.X_test_vision[i][j][k]) ):\n 
self.X_test_vision[i][j][k] = 0\n \n for i in range(self.X_test_audio.shape[0]):\n for j in range(self.X_test_audio.shape[1]):\n for k in range(self.X_test_audio.shape[2]):\n if (np.isnan(self.X_test_audio[i][j][k]) or math.isinf(self.X_test_audio[i][j][k]) ):\n self.X_test_audio[i][j][k] = 0\n \n for i in range(self.X_test_text.shape[0]):\n for j in range(self.X_test_text.shape[1]):\n for k in range(self.X_test_text.shape[2]):\n if (np.isnan(self.X_test_text[i][j][k]) or math.isinf(self.X_test_text[i][j][k]) ):\n self.X_test_text[i][j][k] = 0\n \n return self.X_test_vision, self.X_test_audio, self.X_test_text, self.X_test_labels \n \n def __get_Processed_valid(self):\n \n for i in range(self.X_valid_labels.shape[0]):\n if (self.X_valid_labels[i]>0):\n self.X_valid_labels[i] = int(1)\n else:\n self.X_valid_labels[i] = int(0)\n \n for i in range(self.X_valid_vision.shape[0]):\n for j in range(self.X_valid_vision.shape[1]):\n for k in range(self.X_valid_vision.shape[2]):\n if (np.isnan(self.X_valid_vision[i][j][k]) or math.isinf(self.X_valid_vision[i][j][k]) ):\n self.X_valid_vision[i][j][k] = 0\n \n for i in range(self.X_valid_audio.shape[0]):\n for j in range(self.X_valid_audio.shape[1]):\n for k in range(self.X_valid_audio.shape[2]):\n if (np.isnan(self.X_valid_audio[i][j][k]) or math.isinf(self.X_valid_audio[i][j][k]) ):\n self.X_valid_audio[i][j][k] = 0\n \n for i in range(self.X_valid_text.shape[0]):\n for j in range(self.X_valid_text.shape[1]):\n for k in range(self.X_valid_text.shape[2]):\n if (np.isnan(self.X_valid_text[i][j][k]) or math.isinf(self.X_valid_text[i][j][k]) ):\n self.X_valid_text[i][j][k] = 0\n \n return self.X_valid_vision, self.X_valid_audio, self.X_valid_text, self.X_valid_labels\n\n","sub_path":"MultiModalSent/MultiModalSentimentAnalysis-master/DataPreparation/prepared_data.py","file_name":"prepared_data.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"285231188","text":"# Reference: https://webnautes.tistory.com/1381\r\n\r\nimport socket\r\nfrom _thread import *\r\n\r\n\r\n# scoreboard dictionary that stores each client's score\r\nscore_dict = {}\r\n\r\n# This code runs in a thread.\r\n\r\n# A new thread is created for each connected client to handle its communication.\r\ndef threaded(client_socket, addr):\r\n print('Connected by :', addr[0], ':', addr[1])\r\n\r\n # Loop until the client disconnects.\r\n while True:\r\n\r\n try:\r\n\r\n # When data is received, send it back to the client (echo).\r\n data = client_socket.recv(1024)\r\n\r\n data_a = data.decode()\r\n data_a = data_a.split(',')\r\n # Record the received client's address and score.\r\n score_dict[data_a[0]] = int(data_a[1])\r\n\r\n # Convert the dictionary to a list so it can be sorted, then sort by value.\r\n items = list(score_dict.items())\r\n items.sort(key = lambda element : element[1],reverse=True)\r\n print(items[0])\r\n a=items.index(('{}'.format(data_a[0]), score_dict[data_a[0]]))\r\n if not data:\r\n print('Disconnected by ' + addr[0], ':', addr[1])\r\n break\r\n\r\n print('Received from ' + addr[0], ':', addr[1], data_a)\r\n winner = items[0][0]+','+str(items[0][1])+','+str(a)\r\n #winner=' '.join(['%d@%d' % (items[0][0], items[0][1])])\r\n print(winner)\r\n client_socket.send(winner.encode())\r\n\r\n except ConnectionResetError as e:\r\n\r\n print('Disconnected by ' + addr[0], ':', addr[1])\r\n break\r\n\r\n client_socket.close()\r\n\r\n######## Edit this part! ############\r\nHOST = '127.0.0.1'\r\n##################################\r\nPORT = 9998\r\n\r\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\nserver_socket.bind((HOST, PORT))\r\nserver_socket.listen()\r\n\r\nprint('server start')\r\n\r\n# When a client connects, the accept function returns a new socket.\r\n\r\n# A new thread then uses that socket for the communication.\r\nwhile True:\r\n print('wait')\r\n\r\n client_socket, addr = server_socket.accept()\r\n start_new_thread(threaded, (client_socket, addr))\r\n\r\nserver_socket.close()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"574524710","text":"# Time Complexity : O(n) where n is the number of elements in the list\n# Space Complexity : O(m) where m is the number of unique elements in the list\n# Did this code successfully run on LeetCode : yes\n# Any problem you faced while coding this : no\n\n\n\nclass Solution:\n def findPairs(self, nums: List[int], k: int) -> int:\n\n #when target is negative, we will return 0\n if k < 0:\n return 0\n\n hashMap = {}\n output = 0\n\n #we iterate through the list and keep track of the frequency of numbers\n for num in nums:\n\n if num in hashMap:\n\n hashMap[num] += 1\n else:\n\n hashMap[num] = 1\n\n if k == 0:\n\n #If target is zero we will return number of items that have frequency greater than 1\n for key, value in hashMap.items():\n\n if value > 1:\n\n output += 1\n\n return output\n\n else:\n\n #else we will return the number of (element + k) present in the hashmap\n for key in hashMap:\n\n if key + k in hashMap:\n\n output += 1\n\n return output\n\n","sub_path":"Kdiff.py","file_name":"Kdiff.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"253003902","text":"\n\nfrom xai.brain.wordbase.verbs._suspend import _SUSPEND\n\n#class header\nclass _SUSPENDING(_SUSPEND, ):\n\tdef __init__(self,): \n\t\t_SUSPEND.__init__(self)\n\t\tself.name = \"SUSPENDING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"suspend\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_suspending.py","file_name":"_suspending.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"138413565","text":"## Command line argument parser to easily read flags and their options 
##\nimport sys\n\nclass Arg:\n def __init__(self, argc=len(sys.argv), argv=sys.argv):\n self.len = 0\n self.flags = dict()\n marker = False\n\n for i in range(0,argc):\n self.len+=1\n\n if(argv[i]=='-' or argv[i]=='--'):\n continue\n\n pos = argv[i].find('--') if argv[i].find('--')>=0 else argv[i].find('-')\n\n if(pos == 0 and i < argc-1):\n self.flags[argv[i][pos+1:]] = argv[i+1]\n elif(pos == 0):\n self.flags[argv[i][pos+1:]] = None\n\n\n def flag(self,flag_char):\n return self.flags.get(flag_char,False)","sub_path":"arglib.py","file_name":"arglib.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"454735773","text":"# Copyright 2021 The NPLinker Authors\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport csv\n\nfrom .logconfig import LogConfig\nlogger = LogConfig.getLogger(__file__)\n\nGNPS_KEY = 'gnps'\nGNPS_URL_FORMAT = 'https://metabolomics-usi.ucsd.edu/{}/?usi=mzspec:GNPSLIBRARY:{}'\nGNPS_INDEX_COLUMN = '#Scan#'\nGNPS_DATA_COLUMNS = ['Compound_Name', 'Organism', 'MQScore', 'SpectrumID']\n\ndef headers_match_gnps(headers):\n for k in GNPS_DATA_COLUMNS:\n if k not in headers:\n return False\n return True\n\n# url_type: spectrum, png, svg, json\ndef gnps_url(id, url_type='spectrum'):\n return GNPS_URL_FORMAT.format(url_type, id)\n\ndef create_gnps_annotation(spec, gnps_anno):\n # also insert useful URLs\n for t in ['png', 'json', 'svg', 'spectrum']:\n gnps_anno['{}_url'.format(t)] = GNPS_URL_FORMAT.format(t, gnps_anno['SpectrumID'])\n\n if GNPS_KEY in spec.annotations:\n # TODO is this actually an error or can it happen normally?\n raise Exception('Multiple GNPS annotations for Spectrum {}!'.format(spec.spectrum_id))\n\n spec.set_annotations(GNPS_KEY, [gnps_anno])\n # shortcut, useful for rosetta code\n spec.gnps_id = gnps_anno['SpectrumID']\n\ndef load_annotations(root, config, spectra, spec_dict):\n if not os.path.exists(root):\n logger.debug('Annotation directory not found ({})'.format(root))\n return spectra\n\n ac = {}\n if os.path.exists(config):\n # parse annotations config file if it exists\n with open(config, 'r') as f:\n rdr = csv.reader(f, delimiter='\\t')\n for row in rdr:\n # expecting 3 columns: filename, index col name, data col name(s)\n if len(row) != 3:\n logger.warning('Malformed line in annotations configuration: {}'.format(row))\n continue\n # record the columns with filename as key\n data_cols = row[2].split(',')\n if len(data_cols) == 0:\n logger.warning('No data columns selected in annotation file {}, skipping it!'.format(row[0]))\n continue\n ac[row[0]] = (row[1], data_cols)\n\n logger.debug('Parsed {} annotations configuration entries'.format(len(ac)))\n\n annotation_files = []\n for f in os.listdir(root):\n if f == os.path.split(config)[1] or not f.endswith('.tsv'):\n continue\n annotation_files.append(os.path.join(root, f))\n\n logger.debug('Found {} annotations .tsv files in {}'.format(len(annotation_files), root))\n for af in 
annotation_files:\n with open(af, 'r') as f:\n rdr = csv.reader(f, delimiter='\\t')\n headers = next(rdr)\n filename = os.path.split(af)[1]\n\n if headers_match_gnps(headers):\n # assume this is our main annotations file\n logger.debug('Parsing GNPS annotations from {}'.format(af))\n\n scans_index = headers.index(GNPS_INDEX_COLUMN)\n\n # each line should be a different spec ID here\n for line in rdr:\n # read the scan ID column and get the corresponding Spectrum object\n scan_id = int(line[scans_index])\n if scan_id not in spec_dict:\n logger.warning('Unknown spectrum ID found in GNPS annotation file (ID={})'.format(scan_id))\n continue\n\n spec = spec_dict[scan_id]\n data_cols = set(GNPS_DATA_COLUMNS)\n # merge in any extra columns the user has provided\n if filename in ac:\n data_cols.update(ac[filename][1])\n\n data = {}\n for dc in data_cols:\n if dc not in headers:\n logger.warning('Column lookup failed for \"{}\"'.format(dc))\n continue\n data[dc] = line[headers.index(dc)]\n\n create_gnps_annotation(spec, data)\n else:\n logger.debug('Parsing general annotations from {}'.format(af))\n # this is a general annotations file, so rely purely on the user-provided columns\n if filename not in ac:\n logger.warning('Failed to parse annotations from \"{}\", no config info supplied in {}'.format(filename, config))\n continue\n\n index_col, data_cols = ac[filename]\n if index_col not in headers:\n raise Exception('Annotation index column \"{}\" not found in file \"{}\"!'.format(index_col, filename))\n\n spec_id_index = headers.index(index_col)\n\n # note that might have multiple lines for the same spec ID! \n spec_annotations = {}\n for line in rdr:\n scan_id = int(line[spec_id_index])\n if scan_id not in spec_dict:\n logger.warning('Unknown spectrum ID found in annotation file \"{}\", ID is \"{}\"'.format(filename, scan_id))\n continue\n\n spec = spec_dict[scan_id]\n if spec not in spec_annotations:\n spec_annotations[spec] = []\n\n data = {}\n for dc in data_cols:\n if dc not in headers:\n logger.warning('Column lookup failed for \"{}\"'.format(dc))\n continue\n data[dc] = line[headers.index(dc)]\n spec_annotations[spec].append(data)\n\n logger.debug('Adding annotation data to {} spectra'.format(len(spec_annotations)))\n for spec, anno in spec_annotations.items():\n spec.set_annotations(filename, anno)\n\n return spectra\n\n","sub_path":"prototype/nplinker/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":6381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"412181451","text":"import os\nimport numpy as np\n\n# user-defined modules\nimport clustering_sci as c\n\n\nclass Raster:\n\n def __init__(self, threshold, min_size):\n self.threshold = threshold\n self.min_size = min_size\n\n def fit(self, X, y=None, sample_weight=None):\n\n ## Step 1: Projection\n threshold = self.threshold\n precision = 0.9\n (tiles_dict, scalar) = c.mapToTiles_Tiles(X, precision, threshold)\n tiles = tiles_dict.keys()\n\n ## Step 2: Agglomeration\n min_size = self.min_size\n\n clusters = c.raster_clustering_tiles(tiles, min_size)\n print(\"Number of clusters: \", len(clusters))\n\n\n # key: unscaled input coordinate, value: cluster label\n full = dict()\n\n labels = []\n values = []\n\n output = []\n count = 0 # change for scikit-learn\n\n # assign a numerical label to each cluster\n for cluster in clusters:\n\n for (x, y) in cluster:\n # look up tile in tiles_dict\n full[(x, y)] = count\n\n count += 1\n\n # Initially, all samples 
are noise.\n labels = np.full(X.shape[0], -1, dtype=np.intp)\n\n # assign label to values; lookup X[i] in results_dict\n all_keys = full.keys()\n for i in range(len(X)):\n val = X[i]\n # scale and look-up\n a = int(val[0] * scalar)\n b = int(val[1] * scalar)\n\n if (a, b) not in all_keys:\n continue # leave existing valure at -1\n else:\n labels[i] = full[(a, b)]\n\n self.labels_ = labels\n","sub_path":"3_comparison_sklearn/raster_sci.py","file_name":"raster_sci.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"364487400","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__ = \"KARIS\"\n__date__ = \"$Jun 15, 2015 10:23:59 PM$\"\n\nimport random\nimport time\nimport scipy\nfrom scipy import stats\nimport math\nimport numpy\n\ndef getTimeValue(a):\n return a[0]\n\ndef getStartingRepair(a):\n b1=0\n b2=0\n for element in a:\n if(element=='starting'):\n b1=1\n if (element=='repair'):\n b2=1 \n if b1==1 and b2==1:\n return True\n else:\n return False\n \ndef getFinishRepair(a):\n b1=0\n b2=0\n for element in a:\n if(element=='finishing'):\n b1=1\n if (element=='repair'):\n b2=1 \n if b1==1 and b2==1:\n return True\n else:\n return False\n\ndef getMean(data):\n \"\"\"Return the sample arithmetic mean of data.\"\"\"\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n\n\ndef main():\n i=1\n timeMachineFailure=[]\n timeMachineWorking=[]\n timeStartingRepair=[]\n timeFinishingRepair=[]\n timeReparationTime=[]\n for i in f.readlines():\n a=i.strip().split(' ')\n if(getStartingRepair(a)==True):\n timeStartingRepair.append(float(getTimeValue(a)))\n elif(getFinishRepair(a)==True):\n timeFinishingRepair.append(float(getTimeValue(a)))\n j=0\n while j < len(timeStartingRepair)-1:\n timeReparationTime.append(timeFinishingRepair[j]-timeStartingRepair[j])\n j=j+1\n meanTimeReparationTime=getMean(timeReparationTime)\n print (meanTimeReparationTime)\n f.close()\n\nif __name__ == \"__main__\":\n f = open(\"resultados.txt\", \"r\") \n main()\n\n","sub_path":"eventos-discretos/scripts-means-time/reparationtime.py","file_name":"reparationtime.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"56986354","text":"\"\"\"geekshop URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nimport mainapp.views as mainapp\nimport authapp.views as authapp\nimport basketapp.views as basketapp\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('', mainapp.main, name='main'),\n path('product/', mainapp.product, name='product'),\n path('catalog/', mainapp.catalog, name='catalog'),\n path('products/', mainapp.products, name='products'),\n path('products/page//', mainapp.products, name='page'),\n path('contacts/', mainapp.contacts, name='contacts'),\n path('strollers/', mainapp.strollers, name='strollers'),\n path('chears/', mainapp.chears, name='chears'),\n path('carseats/', mainapp.carseats, name='carseats'),\n path('black_grey/', mainapp.black_grey, name='black_grey'),\n path('black_red/', mainapp.black_red, name='black_red'),\n path('black_stars/', mainapp.black_stars, name='black_stars'),\n path('admin/', admin.site.urls),\n path('login/', authapp.login, name='login'),\n path('logout/', authapp.logout, name='logout'),\n path('register/', authapp.register, name='register'),\n path('edit/', authapp.edit, name='edit'),\n path('basket/', basketapp.basket, name='basket'),\n path('add//', basketapp.basket_add, name='add'),\n path('remove//', basketapp.basket_remove, name='remove'),\n path('basket/edit///', basketapp.basket_edit, name='edit'),\n path('myadmin/', include('adminapp.urls', namespace='myadmin')),\n path('auth', include('authapp.urls', namespace='auth')),\n path('', include('social_django.urls', namespace='social')),\n path('order/', include('ordersapp.urls', namespace='order')),\n]\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"geekshop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"145602573","text":"\n\nfrom xai.brain.wordbase.verbs._thicken import _THICKEN\n\n#calss header\nclass _THICKENS(_THICKEN, ):\n\tdef __init__(self,): \n\t\t_THICKEN.__init__(self)\n\t\tself.name = \"THICKENS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"thicken\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_thickens.py","file_name":"_thickens.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"522755837","text":"from math import ceil\r\n\r\ndef wallpaper(l, w, h):\r\n number_words = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\",\r\n \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\r\n \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\r\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\", \"twenty\"]\r\n if not (l > 0.0 and w > 0.0 and h > 0.0):\r\n return number_words[0]\r\n index = int(ceil(((2.0 * w * h) + (2.0 * h * l)) * 1.15 / 5.2))\r\n return number_words[index]","sub_path":"7-kyu/easy-wallpaper/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"368261841","text":"# -*- coding: utf-8 -*-\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import gaussian_filter1d\n\n\ndef homing_nn(n_trials, n_steps, learning_rate, eps, gamma, decayparameter, l=0.0):\n ## Definition of 
the environment\n N = 10 #height of the gridworld ---> number of rows\n M = 10 #length of the gridworld ---> number of columns\n N_states = N * M #total number of states\n states_matrix = np.eye(N_states)\n N_actions = 4 #number of possible actions in each state: 1->N 2->E 3->S 4->W\n action_row_change = np.array([-1,0,+1,0]) #number of cell shifted in vertical as a function of the action\n action_col_change = np.array([0,+1,0,-1]) #number of cell shifted in horizontal as a function of the action\n End = np.array([9, 9]) #terminal state--->reward\n #End = np.array([np.random.randint(N),np.random.randint(M)]) \n s_end = np.ravel_multi_index(End,dims=(N,M),order='F') #terminal state. Conversion in single index\n\n ## Rewards\n R = 1 #only when the robot reaches the charger, sited in End state\n\n ## Variables\n weights = np.random.rand(N_actions,N_states)\n etracelist = np.zeros((N_actions,N_states))\n learning_curve = np.zeros((n_trials))\n \n #elig_old = np.zeros((N_actions, N_states))\n\n ## SARSA\n\n # Start trials\n for trial in range(n_trials):\n\n # Initialization\n Start = np.array([np.random.randint(N),np.random.randint(M)]) #random start\n s_start = np.ravel_multi_index(Start,dims=(N,M),order='F') #conversion in single index\n state = Start #set current state\n s_index = s_start #conversion in single index\n step = 0\n minmumstep = abs(Start[0] - End[0]) + abs(Start[1] - End[1])\n \n # Start steps\n while s_index != s_end and step <= n_steps:\n\n step += 1\n learning_curve[trial] = step - minmumstep\n\n input_vector = states_matrix[:,s_index].reshape(N_states,1) #convert the state into an input vector\n\n #compute Qvalues. Qvalue=logsig(weights*input). Qvalue is 2x1, one value for each output neuron\n Q = 1 / ( 1 + np.exp( - weights.dot(input_vector))) #Qvalue is 2x1 implementation of logsig\n \n # Note it is possible to remove the activation function and have a linear layer\n #Q = weights.dot(input_vector)\n\n #eps-greedy policy implementation\n greedy = (np.random.rand() > eps) #1--->greedy action 0--->non-greedy action\n if greedy:\n action = np.argmax(Q) #pick best action\n else:\n action = np.random.randint(N_actions) #pick random action\n\n\n state_new = np.array([0,0])\n #move into a new state\n state_new[0] = state[0] + action_row_change[action]\n state_new[1] = state[1] + action_col_change[action]\n\n #put the robot back in grid if it goes out. 
Consider also the option to give a negative reward\n if state_new[0] < 0:\n state_new[0] = 0\n if state_new[0] >= N:\n state_new[0] = N-1\n if state_new[1] < 0:\n state_new[1] = 0\n if state_new[1] >= M:\n state_new[1] = M-1\n\n s_index_new = np.ravel_multi_index(state_new,dims=(N,M),order='F') #conversion in a single index\n \n \n if step > 1: \n # use etrace method to update the weights\n etracelist = gamma * decayparameter * etracelist + output_old.dot(input_old.T)\n weights += learning_rate * (r_old - Q_old + gamma * Q[action]) * etracelist\n \n \n\n #store variables for sarsa computation in the next step\n output = np.zeros((N_actions,1))\n output[action] = 1\n\n #update variables\n input_old = input_vector\n output_old = output\n Q_old = Q[action]\n r_old = 0\n # To do: calculate eligibility trace\n # elig_old = \n #print(Q[action])\n state[0] = state_new[0]\n state[1] = state_new[1]\n s_index = s_index_new\n\n ## TODO: check if state is terminal and update the weights consequently\n if s_index == s_end:\n # Update weights\n dw = learning_rate * (R - Q_old) * output_old*input_old.T\n weights += dw\n pass\n \n\n\n return learning_curve, weights\n\n\n\n# Parameter setup\nnrepetitions = 10; # number of runs for the algorithm\nnTrials = 1000 # should be integer >0\nnSteps = 50; # maximum number of allowed steps\nlearningRate = 0.3; # should be real, Greater than 0\n#learningRate = [0.1,0.2,0.3,0.4,0.5]\nepsilon = 0.1; # should be real, Greater or Equal to 0; epsion=0 Greedy, otherwise epsilon-Greedy\n#epsilon = [0.1,0.2,0.3,0.4,0.5]\ngamma = 0.9; # should be real, positive, smaller than 1\n#gamma = [0.5,0.6,0.7,0.8,0.9]\ndecayparameter = 0.3\n#decayparameter = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n\nlearning_curve = np.zeros(( nTrials))\n#weight = np.zeros((4,100))\n#averaged_extra_step = np.zeros((len(gamma)))\nlearning_curve, weight = homing_nn(nTrials, nSteps, learningRate, epsilon, gamma, decayparameter)\n\nmove_top = np.array([0,1])\nmove_down = np.array([0,-1])\nmove_left = np.array([-1,0])\nmove_right = np.array([1,0])\n\ndirection_top = np.zeros((100,2))\ndirection_left = np.zeros((100,2))\ndirection_right = np.zeros((100,2))\ndirection_down = np.zeros((100,2))\nfor each_weight in range(100):\n direction_top[each_weight] = weight[0][each_weight] * move_top\n direction_down[each_weight] = weight[1][each_weight] * move_down\n direction_left[each_weight] = weight[2][each_weight] * move_left\n direction_right[each_weight] = weight[3][each_weight] * move_right\n\ntotal_direction = direction_top + direction_left + direction_right + direction_down \nprint(total_direction)\n#number of possible actions in each state: 1->N 2->E 3->S 4->W\n\nx = np.arange(0, 10, 1)\ny = np.arange(0, 10, 1)\nX, Y = np.meshgrid(x, y)\n\nfig, ax = plt.subplots()\nq = ax.quiver(X,Y, total_direction[:,0], total_direction[:,1])\n\nplt.show()\n\n\n\n\n\n\n\n","sub_path":"AI_assign/assignment2_q5.py","file_name":"assignment2_q5.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"270466375","text":"# NOTES \n# fix nav bar in tweaks\n# - search is nice at top see products\n# can have singular links on nav bar for models\n# foldaway side menu functional\n# lots of links for conecting apps etc\n# further options on e.g. 
horizontal tabs for inlines\n# its based on adminLTE3 maybe lots of bootswatch style code drop ins for expansion.\n\n\nJAZZMIN_SETTINGS = {\n # title of the window (Will default to current_admin_site.site_title if absent or None)\n \"site_title\": \"MollyDoo ERP 1\",\n\n # Title on the brand, and login screen (19 chars max) (defaults to current_admin_site.site_header if absent or None)\n \"site_header\": \"Mollydoo 2\",\n\n # square logo to use for your site, must be present in static files, used for favicon and brand on top left\n ##\"site_logo\": \"books/img/logo.png\",\n\n # Welcome text on the login screen\n ##\"welcome_sign\": \"Welcome to the library\",\n\n # Copyright on the footer\n ##\"copyright\": \"Acme Library Ltd\",\n\n # The model admin to search from the search bar, search bar omitted if excluded\n \"search_model\": \"auth.User\",\n\n # Field name on user model that contains avatar image\n \"user_avatar\": None,\n\n ############\n # Top Menu #\n ############\n\n # Links to put along the top menu\n # url = reverse(name_proxy +\":erp_order_view\" {'name':'Orders','url':'/erp/erp/order/'}, \n \"topmenu_links\": [\n\n # Url that gets reversed (Permissions can be added)\n ##{\"name\": \"Home\", \"url\": \"admin:index\", \"permissions\": [\"auth.view_user\"]},\n\n # external url that opens in a new window (Permissions can be added)\n ## {\"name\": \"Support\", \"url\": \"https://github.com/farridav/django-jazzmin/issues\", \"new_window\": True},\n\n # model admin to link to (Permissions checked against model)\n ##{\"model\": \"auth.User\"},\n {'Order':'auth.User'},\n\n # App with dropdown menu to all its models pages (Permissions checked against models)\n {\"app\": \"erp\"}, # format is 'app':'appname' \n {\"model\": \"erp.company\"}, # wow (guessed!)\n ],\n\n #############\n # User Menu #\n #############\n\n # Additional links to include in the user menu on the top right (\"app\" url type is not allowed)\n # \"usermenu_links\": [\n # {\"name\": \"Support\", \"url\": \"https://github.com/farridav/django-jazzmin/issues\", \"new_window\": True},\n # {\"model\": \"auth.user\"}\n # ],\n\n #############\n # Side Menu #\n #############\n\n # Whether to display the side menu\n \"show_sidebar\": True,\n\n # Whether to aut expand the menu\n \"navigation_expanded\": True,\n\n # Hide these apps when generating side menu e.g (auth)\n \"hide_apps\": [],\n\n # Hide these models when generating side menu (e.g auth.user)\n \"hide_models\": [],\n\n # List of apps (and/or models) to base side menu ordering off of (does not need to contain all apps/models)\n ##\"order_with_respect_to\": [\"auth\", \"books\", \"books.author\", \"books.book\"],\n\n # Custom links to append to app groups, keyed on app name (NC look at the end/bottom)\n \"custom_links\": {\n \"erp\": [{\n \"name\": \"Make Messages\", \n \"url\": \"make_messages\", \n \"icon\": \"fas fa-comments\",\n #\"permissions\": [\"books.view_book\"]\n }]\n },\n\n # Custom icons for side menu apps/models See https://fontawesome.com/icons?d=gallery&m=free\n # for a list of icon classes\n \"icons\": {\n \"auth\": \"fas fa-users-cog\",\n \"auth.user\": \"fas fa-user\",\n \"auth.Group\": \"fas fa-users\",\n },\n # Icons that are used when one is not manually specified\n \"default_icon_parents\": \"fas fa-chevron-circle-right\",\n \"default_icon_children\": \"fas fa-circle\",\n\n #################\n # Related Modal #\n #################\n # Use modals instead of popups\n \"related_modal_active\": False,\n\n #############\n # UI Tweaks #\n 
#############\n # Relative paths to custom CSS/JS scripts (must be present in static files)\n \"custom_css\": None,\n \"custom_js\": None,\n # Whether to show the UI customizer on the sidebar\n \"show_ui_builder\": True, # NC this is a 'windows' style icon at the top right hand side\n\n #added from tweaks example -> not working here ??\n #\"navbar_fixed\": True, #False,\n\n ###############\n # Change view #\n ###############\n # Render out the change view as a single form, or in tabs, current options are\n # - single\n # - horizontal_tabs (default)\n # - vertical_tabs\n # - collapsible\n # - carousel\n \"changeform_format\": \"horizontal_tabs\",\n # override change forms on a per modeladmin basis\n #\"changeform_format_overrides\": {\"auth.user\": \"collapsible\", \"auth.group\": \"vertical_tabs\"},\n # what about the standard expand/collapse icon?\n \"changeform_format_overrides\": { # NC\n \"erp.order\": \"collapsible\", \n \"erp.company\": \"vertical_tabs\",\n \"erp.batch\": \"single\"\n },\n # Add a language dropdown into the admin\n ## \"language_chooser\": True, # causes crash\n}\n\nJAZZMIN_UI_TWEAKS = {\n \"navbar_small_text\": False,\n \"footer_small_text\": False,\n \"body_small_text\": False,\n \"brand_small_text\": False,\n \"brand_colour\": False,\n \"accent\": \"accent-primary\",\n \"navbar\": \"navbar-white navbar-light\",\n \"no_navbar_border\": False,\n \"navbar_fixed\": True, #False, NC\n \"layout_boxed\": False,\n \"footer_fixed\": False,\n \"sidebar_fixed\": False,\n \"sidebar\": \"sidebar-dark-primary\",\n \"sidebar_nav_small_text\": False,\n \"sidebar_disable_expand\": False,\n \"sidebar_nav_child_indent\": False,\n \"sidebar_nav_compact_style\": False,\n \"sidebar_nav_legacy_style\": False,\n \"sidebar_nav_flat_style\": False,\n \"theme\": \"default\",\n \"dark_mode_theme\": None,\n \"button_classes\": {\n \"primary\": \"btn-outline-primary\",\n \"secondary\": \"btn-outline-secondary\",\n \"info\": \"btn-outline-info\",\n \"warning\": \"btn-outline-warning\",\n \"danger\": \"btn-outline-danger\",\n \"success\": \"btn-outline-success\",\n },\n}\n\n","sub_path":"mollydoo/jazzmin_settings.py","file_name":"jazzmin_settings.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"376635599","text":"import numpy as np\r\nimport pandas as pd\r\nimport xlwings as xw\r\nfrom geopy.geocoders import DataBC\r\nfrom geopy.extra.rate_limiter import RateLimiter\r\nimport geopy\r\nimport re\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Opens the file (must be deencrypted)\r\n modified = pd.read_excel('flagged_lists_manual_edits.xlsx', sheet_name='Flagged modified')\r\n corrfac = pd.read_excel('flagged_lists_manual_edits.xlsx', sheet_name='Flagged Corrections Facilities')\r\n hlbc = pd.read_excel('flagged_lists_manual_edits.xlsx', sheet_name='Flagged HLBC Clinic List')\r\n writer = pd.ExcelWriter('working_list.xlsx', engine='openpyxl')\r\n\r\n # Looks for Flags in all the lists and isolates them for geocoding \r\n modified_partial_1 = modified[(modified['FLAG_1']==1)]\r\n modified_partial_2 = modified[(modified['FLAG_2']==1)]\r\n modified_partial_1 = modified_partial_1.dropna(how = 'all')\r\n modified_partial_2 = modified_partial_2.dropna(how = 'all')\r\n corrfac_partial = corrfac.where(corrfac['FLAG']==1)\r\n corrfac_partial = corrfac_partial.dropna(how = 'all')\r\n hlbc_partial = hlbc.where(hlbc['FLAG']==1)\r\n hlbc_partial = hlbc_partial.dropna(how = 'all')\r\n\r\n # Geocoding\r\n 
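    # DataBC is geopy's client for the British Columbia government geocoder;
    # RateLimiter spaces the wrapped geocode calls at least 1/15 s apart
    # (at most ~15 requests/second) so the per-row .apply() loops below
    # don't hammer the service.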
geolocator = DataBC(user_agent=\"ISU_filter\")\r\n geocoded = RateLimiter(geolocator.geocode, min_delay_seconds=1/15)\r\n\r\n # Geocodes HLBC and Corrections Facilities\r\n lists_for_next_for = [corrfac_partial, hlbc_partial]\r\n for item in lists_for_next_for:\r\n item['GEO_LOCATION'] = item['ADDR_FOR_GEO'].astype(str).apply(lambda doot: geocoded(doot) if doot!= 'nan' else '')\r\n item['GEO_ADDRESS'] = item['GEO_LOCATION'].apply(lambda loc: loc.address if loc else \"\")\r\n item['GEO_RAW'] = item['GEO_LOCATION'].apply(lambda loc: loc.raw if loc else \"\")\r\n item['GEO_GPS'] = item['GEO_LOCATION'].apply(lambda loc: loc.point if loc else \"\")\r\n item['GEO_LATITUDE'] = item['GEO_LOCATION'].apply(lambda loc: loc.latitude if loc else \"\")\r\n item['GEO_LONGITUDE'] = item['GEO_LOCATION'].apply(lambda loc: loc.longitude if loc else \"\")\r\n\r\n # Geocodes Modified\r\n # FLAG 1\r\n modified_partial_1['GEO_LOCATION_1'] = modified_partial_1['ADDR_FOR_GEO_1'].astype(str).apply(lambda doot: geocoded(doot) if doot!= 'nan' else '')\r\n modified_partial_1['GEO_ADDRESS_1'] = modified_partial_1['GEO_LOCATION_1'].apply(lambda loc: loc.address if loc else \"\")\r\n modified_partial_1['GEO_RAW_1'] = modified_partial_1['GEO_LOCATION_1'].apply(lambda loc: loc.raw if loc else \"\")\r\n modified_partial_1['GEO_GPS_1'] = modified_partial_1['GEO_LOCATION_1'].apply(lambda loc: loc.point if loc else \"\")\r\n modified_partial_1['GEO_LATITUDE_1'] = modified_partial_1['GEO_LOCATION_1'].apply(lambda loc: loc.latitude if loc else \"\")\r\n modified_partial_1['GEO_LONGITUDE_1'] = modified_partial_1['GEO_LOCATION_1'].apply(lambda loc: loc.longitude if loc else \"\")\r\n # FLAG 2\r\n modified_partial_2['GEO_LOCATION_2'] = modified_partial_2['ADDR_FOR_GEO_2'].astype(str).apply(lambda doot: geocoded(doot) if doot!= 'nan' else '')\r\n modified_partial_2['GEO_ADDRESS_2'] = modified_partial_2['GEO_LOCATION_2'].apply(lambda loc: loc.address if loc else \"\")\r\n modified_partial_2['GEO_RAW_2'] = modified_partial_2['GEO_LOCATION_2'].apply(lambda loc: loc.raw if loc else \"\")\r\n modified_partial_2['GEO_GPS_2'] = modified_partial_2['GEO_LOCATION_2'].apply(lambda loc: loc.point if loc else \"\")\r\n modified_partial_2['GEO_LATITUDE_2'] = modified_partial_2['GEO_LOCATION_2'].apply(lambda loc: loc.latitude if loc else \"\")\r\n modified_partial_2['GEO_LONGITUDE_2'] = modified_partial_2['GEO_LOCATION_2'].apply(lambda loc: loc.longitude if loc else \"\")\r\n\r\n # Joins final lists and drops duplicates based on identifying factors through:\r\n # Provider name and MSC\r\n modified_final = pd.concat([modified, modified_partial_1, modified_partial_2])\r\n fullname = ['Last Name', 'Given Names', 'Msc']\r\n modified_final['fullname'] = modified_final[fullname].astype(str).apply(lambda x: ', '.join(x.dropna()), axis=1)\r\n modified_final = modified_final.drop_duplicates(subset='fullname', keep=\"last\")\r\n modified_final = modified_final.drop(columns=['fullname'])\r\n\r\n # Corrections Facility Name \r\n corrfac_final = pd.concat([corrfac, corrfac_partial])\r\n corrfac_final = corrfac_final.drop_duplicates(subset='GEO_LOCATION', keep=\"last\")\r\n\r\n # SL_Reference\r\n hlbc_final = pd.concat([hlbc, hlbc_partial])\r\n hlbc_final = hlbc_final.drop_duplicates(subset='SL_REFERENCE', keep=\"last\")\r\n\r\n # Exports excel file\r\n modified_final.to_excel(writer, sheet_name = 'Modified Final', index=False)\r\n corrfac_final.to_excel(writer, sheet_name = 'Corrections Facilities Final', index=False)\r\n hlbc_final.to_excel(writer, sheet_name 
= 'HLBC Final', index=False)\r\n writer.save()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n \r\n","sub_path":"V1/geocode_flagged.py","file_name":"geocode_flagged.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"302917713","text":"\n\nfrom xai.brain.wordbase.adjectives._shakedown import _SHAKEDOWN\n\n#calss header\nclass _SHAKEDOWNS(_SHAKEDOWN, ):\n\tdef __init__(self,): \n\t\t_SHAKEDOWN.__init__(self)\n\t\tself.name = \"SHAKEDOWNS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"shakedown\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_shakedowns.py","file_name":"_shakedowns.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"464357551","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule contains the classes used to model the velocities \nof galaxies within their halos. \n\"\"\"\nfrom __future__ import (\n division, print_function, absolute_import, unicode_literals)\n\nimport numpy as np \nfrom scipy.integrate import quad as quad_integration\nfrom scipy.special import spence \n\nfrom astropy.extern import six \nfrom abc import ABCMeta, abstractmethod\n\nfrom ...utils.array_utils import convert_to_ndarray\n\n__author__ = ['Andrew Hearin']\n\n__all__ = ['IsotropicJeansVelocity', 'NFWJeansVelocity']\n\n\n\n@six.add_metaclass(ABCMeta)\nclass IsotropicJeansVelocity(object):\n \"\"\" Orthogonal mix-in class used to transform a configuration \n space model for the 1-halo term into a phase space model \n by solving the Jeans equation of the underlying potential. \n \"\"\"\n\n def __init__(self, velocity_bias = False, **kwargs):\n \"\"\"\n Parameters \n -----------\n velocity_bias : bool, optional \n Boolean specifying whether the galaxy velocities are biased \n with respect to the halo velocities. If True, ``param_dict`` will have a \n parameter called ``velbias_satellites`` that multiplies the underlying \n Jeans solution for the halo radial velocity dispersion by an overall factor. \n Default is False. \n \"\"\"\n if velocity_bias is True:\n self.param_dict['velbias_satellites'] = 1.\n\n @abstractmethod\n def dimensionless_velocity_dispersion(self, x, *args):\n \"\"\"\n Method returns the radial velocity dispersion scaled by \n the virial velocity, as a function of the \n halo-centric distance scaled by the halo radius.\n\n Parameters \n ----------\n x : array_like \n Halo-centric distance scaled by the halo boundary, so that \n :math:`0 <= x <= 1`. Can be a scalar or numpy array\n\n args : sequence, optional \n Any additional parameters necessary to specify the shape of the radial profile, \n e.g., halo concentration. \n\n Returns \n -------\n result : array_like \n Radial velocity dispersion profile scaled by the virial velocity. \n The returned result has the same dimension as the input ``x``. \n\n \"\"\"\n pass\n\n\nclass NFWJeansVelocity(IsotropicJeansVelocity):\n \"\"\" Orthogonal mix-in class providing the solution to the Jeans equation \n for galaxies orbiting in an isotropic NFW profile with no spatial bias. \n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Parameters \n -----------\n velocity_bias : bool, optional \n Boolean specifying whether the galaxy velocities are biased \n with respect to the halo velocities. 
If True, ``param_dict`` will have a \n parameter called ``velbias_satellites`` that multiplies the underlying \n Jeans solution for the halo radial velocity dispersion by an overall factor. \n Default is False. \n \"\"\"\n IsotropicJeansVelocity.__init__(self, **kwargs)\n\n def _jeans_integrand_term1(self, y):\n \"\"\"\n \"\"\"\n return np.log(1+y)/(y**3*(1+y)**2)\n\n def _jeans_integrand_term2(self, y):\n \"\"\"\n \"\"\"\n return 1/(y**2*(1+y)**3)\n\n def dimensionless_velocity_dispersion(self, x, conc):\n \"\"\"\n Parameters \n -----------\n x : array_like \n Halo-centric distance scaled by the halo boundary, so that \n :math:`0 <= x <= 1`. Can be a scalar or numpy array\n\n conc : float \n Concentration of the halo.\n\n Returns \n -------\n result : array_like \n Radial velocity dispersion profile scaled by the virial velocity. \n The returned result has the same dimension as the input ``x``. \n \"\"\"\n x = convert_to_ndarray(x)\n x = x.astype(float)\n result = np.zeros_like(x)\n\n prefactor = conc*(conc*x)*(1. + conc*x)**2/self.g(conc)\n\n lower_limit = conc*x\n upper_limit = float(\"inf\")\n for i in range(len(x)):\n term1, _ = quad_integration(self._jeans_integrand_term1, \n lower_limit[i], upper_limit, epsrel=1e-5)\n term2, _ = quad_integration(self._jeans_integrand_term2, \n lower_limit[i], upper_limit, epsrel=1e-5)\n result[i] = term1 - term2 \n\n return np.sqrt(result*prefactor)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"halotools/empirical_models/phase_space_models/velocity_models.py","file_name":"velocity_models.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"199935027","text":"from writelog import write_log, close_log\nfrom datetime import datetime\n\ntrash_top = -1\ndeck_top = -1\n\n\nclass Node:\n def __init__(self, field=\"\", s=0, p=0, h=0, c=0):\n self.field = field\n self.hand_sheep = s\n self.hand_pig = p\n self.hand_horse = h\n self.hand_cow = c\n\n def __str__(self):\n return \"----- Player status -----\\nFIELD : \" + str(self.field) + \\\n \"\\nHAND : \\n------------------------\"\n\n def is_equal(self, node):\n boolean1 = (self.field == node.field)\n boolean2 = (self.hand_sheep == node.hand_sheep)\n boolean3 = (self.hand_pig == node.hand_pig)\n boolean4 = (self.hand_horse == node.hand_horse)\n boolean5 = (self.hand_cow == node.hand_cow)\n return boolean1 and boolean2 and boolean3 and boolean4 and boolean5\n # return False\n\n\nclass Field:\n def __init__(self, left, right, farm, deck, trash):\n self.left = left\n self.right = right\n self.farm = farm\n self.deck = deck\n self.trash = trash\n\n def __str__(self):\n out = \"----- Field status -----\\nLeft Field : \" + self.left + \"\\nRight Field : \" + self.right + \"\\n\"\n if len(self.farm) > 0:\n out += \"Farm : \" + self.farm + \"\\n\"\n out += \"Deck : \" + str(self.deck)[1:-1] + \"\\n\"\n out += \"Trash : \" + str(self.trash)[1:-1] + \"\\n------------------------\"\n return out\n\n\n# lf = left field\n# rf = right field\ndef searching(player_root, field_root):\n root_node = {\n \"ref_num\": -1,\n \"player\": player_root,\n \"field\": field_root,\n \"parent\": None,\n \"depth\": 0,\n \"action\": None,\n }\n\n # Breadth-First Search\n frontier = [root_node]\n explored = []\n\n solution = [[], 0]\n\n # For debug\n current_depth = 0\n\n count = 0\n count_dup = 0\n count_max_queue = 0\n\n while len(frontier) > 0:\n if count_max_queue < len(frontier):\n count_max_queue = len(frontier)\n\n count += 1\n\n this_node = 
frontier.pop(0)\n this_node[\"ref_num\"] = count\n\n explored.append(this_node)\n write_log(\"Exploring >> Node : \" + str(this_node[\"ref_num\"]))\n # write_log(\"Depth : \" + str(this_node[\"depth\"]))\n # write_log(str(this_node[\"player\"]))\n # write_log(str(this_node[\"field\"]))\n\n player = this_node[\"player\"]\n field = this_node[\"field\"]\n depth = this_node[\"depth\"]\n\n # Debug depth\n # if current_depth < depth:\n # write_log(\"\\t^^^ Depth : \" + str(current_depth))\n # # print(\"\\t^^^ Depth : \" + str(current_depth))\n # current_depth = depth\n\n ref_num = count\n\n # Final state\n if player.field == \"\" and player.hand_sheep + player.hand_pig + player.hand_horse + player.hand_cow == 0:\n goal_state = explored[-1]\n path = [goal_state]\n prev = goal_state['parent']\n while prev is not None:\n prev_node = explored[prev - 1]\n prev = prev_node['parent']\n path.append(prev_node)\n path.reverse()\n write_log(\"\\t>> Goal state\")\n write_log(\"\\t\\t>> Max Queue :\" + str(count_max_queue))\n return [path, depth]\n if player.hand_sheep + player.hand_pig + player.hand_horse + player.hand_cow == 0:\n write_log(\"\\t>> Hand out\")\n continue\n\n # # Calculate scoring\n # player_score = scoring(player.field)\n # left_score = scoring(field.left)\n # right_score = scoring(field.right)\n\n # if player_score <= left_score or player_score <= right_score:\n # print(count_dup)\n # goal_state = explored[-1]\n # path = [goal_state]\n # prev = goal_state['parent']\n # while prev is not None:\n # prev_node = explored[prev - 1]\n # prev = prev_node['parent']\n # path.append(prev_node)\n # path.reverse()\n # write_log(\"\\t>> Goal state\")\n # return [path, node_count]\n # else:\n # # write_log(\"\\t>> Hand out : \" + str(player_score) + \" \" + str(left_score) + \" \" + str(right_score))\n # print(\"\\t>> Hand out : \" + str(player_score) + \" \" + str(left_score) + \" \" + str(right_score))\n # continue\n elif len(field.deck) == 0:\n write_log(\"\\t>> Deck out\")\n continue\n else:\n if player.hand_sheep >= 2:\n out = discard_2S(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_2S\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_pig >= 2:\n out = discard_2P(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_2P\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_horse >= 2:\n out = discard_2H(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_2H\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_cow >= 2:\n out = discard_2C(player, field)\n next_node = {\n 
\"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_2C\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_sheep >= 1:\n out = discard_S(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_S\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_pig >= 1:\n out = discard_P(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_P\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_horse >= 1:\n out = discard_H(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_H\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if player.hand_cow >= 1:\n out = discard_C(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"discard_C\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if len(field.trash) > 0:\n out = draw_trash(player, field)\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"draw_trash\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n if len(field.deck) > 0:\n out = draw_deck(player, field, 'S')\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"draw_deck_S\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n out = draw_deck(player, field, 'P')\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"draw_deck_P\"\n }\n is_duplicate = False\n for node in explored + 
frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n out = draw_deck(player, field, 'H')\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"draw_deck_H\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n\n out = draw_deck(player, field, 'C')\n next_node = {\n \"ref_num\": -1,\n \"player\": out[\"player\"],\n \"field\": out[\"field\"],\n \"parent\": ref_num,\n \"depth\": depth + 1,\n \"action\": \"draw_deck_C\"\n }\n is_duplicate = False\n for node in explored + frontier:\n if next_node[\"player\"].is_equal(node[\"player\"]):\n is_duplicate = True\n write_log(\"\\t^^^ Duplicate Node\")\n count_dup += 1\n if not is_duplicate:\n frontier.append(next_node)\n return solution\n\n\ndef BFS(field, hand, left_field, right_field, farm, deck, trash):\n start_time = datetime.now()\n\n # left_field = player.left.field\n # right_field = player.right.field\n\n root_node = Node(field, hand['S'], hand['P'], hand['H'], hand['C'])\n # root_node = Node(player.field, player.hand['S'], player.hand['P'], player.hand['H'], player.hand['C'])\n root_field = Field(left_field, right_field, farm, deck, trash)\n\n out = searching(root_node, root_field)\n path = out.copy()[0]\n n_node = out.copy()[1]\n\n runtime = datetime.now().timestamp() - start_time.timestamp()\n write_log(\"\")\n write_log(get_str_action_path(path))\n write_log(\"Run-time : \" + str('%.4f' % runtime) + \" seconds\")\n write_log(\"Node(s) : \" + str(n_node))\n # logging(\"Space usage : \" + str(math.ceil(mem['used'] / (1024 ** 2))) + \" Mb\")\n\n solution = []\n for n in path:\n node = n[\"player\"]\n action = n[\"action\"]\n draw_state = 0\n if action is not None and (action[5:9] == \"deck\"):\n draw_state = 1\n solution.append(tuple([node.field, node.hand_sheep, node.hand_pig, node.hand_horse, node.hand_cow, draw_state]))\n solution = tuple(solution)\n write_log(\"\")\n write_log(str(solution))\n close_log()\n return solution\n\n\n# ( / )\ndef discard_2S(player, field):\n write_log(\"\\t >>> DO ACTION : Discard 2 Sheep cards\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_sheep -= 2\n field.trash.append('S')\n field.trash.append('S')\n\n return animal_run('S', player, field)\n\n\ndef discard_2P(player, field):\n write_log(\"\\t >>> DO ACTION : Discard 2 Pig cards\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_pig -= 2\n field.trash.append('P')\n field.trash.append('P')\n\n return animal_run('P', player, field)\n\n\ndef discard_2H(player, field):\n write_log(\"\\t >>> DO ACTION : Discard 2 Horse cards\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_horse -= 2\n field.trash.append('H')\n 
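    # Two matching cards appear to be spent per action, so the second copy of
    # the pair is discarded as well before animal_run() moves the 'H' animal
    # (this mirrors discard_2S/discard_2P/discard_2C).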
field.trash.append('H')\n\n return animal_run('H', player, field)\n\n\ndef discard_2C(player, field):\n write_log(\"\\t >>> DO ACTION : Discard 2 Cow cards\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_cow -= 2\n field.trash.append('C')\n field.trash.append('C')\n\n return animal_run('C', player, field)\n\n\n# ( / )\ndef discard_S(player, field):\n write_log(\"\\t >>> DO ACTION : Discard a Sheep card\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_sheep -= 1\n field.trash.append('S')\n\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# ( / )\ndef discard_P(player, field):\n write_log(\"\\t >>> DO ACTION : Discard a Pig card\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_pig -= 1\n field.trash.append('P')\n if len(field.deck) > 0:\n field.deck.pop(deck_top)\n\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# ( / )\ndef discard_H(player, field):\n write_log(\"\\t >>> DO ACTION : Discard a Horse card\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_horse -= 1\n field.trash.append('H')\n if len(field.deck) > 0:\n field.deck.pop(deck_top)\n\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# ( / )\ndef discard_C(player, field):\n write_log(\"\\t >>> DO ACTION : Discard a Cow card\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n player.hand_cow -= 1\n field.trash.append('C')\n if len(field.deck) > 0:\n field.deck.pop(deck_top)\n if len(field.deck) > 0:\n field.deck.pop(deck_top)\n\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# ( / )\ndef draw_trash(player, field):\n write_log(\"\\t >>> DO ACTION : Draw a card (Top) from trash and skip\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n card = field.trash.pop(trash_top)\n\n if card == 'S':\n player.hand_sheep += 1\n elif card == 'P':\n player.hand_pig += 1\n elif card == 'H':\n player.hand_horse += 1\n elif card == 'C':\n player.hand_cow += 1\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# ( / )\ndef draw_deck(player, field, card):\n write_log(\"\\t >>> DO ACTION : Draw a card (Random : \" + card + \" ) from deck and skip\")\n\n player = Node(player.field, player.hand_sheep, player.hand_pig, player.hand_horse, player.hand_cow)\n field = Field(field.left, field.right, field.farm, field.deck.copy(), field.trash.copy())\n\n field.deck.pop(deck_top)\n\n if card == 'S':\n player.hand_sheep += 1\n elif card == 'P':\n player.hand_pig += 1\n elif card == 'H':\n player.hand_horse += 1\n elif card == 'C':\n player.hand_cow += 1\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# ( / )\ndef get_str_action_path(solution):\n 
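    # Renders the solved path as a readable chain of actions, e.g.
    # "START -> draw_deck_S -> discard_2S -> GOAL" (the action names here are
    # illustrative; the root node's action is None, which prints as START).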
st = \"\"\n for n in solution:\n if n[\"action\"] is None:\n st += \"START\"\n else:\n st += n[\"action\"]\n st += \" -> \"\n st += \"GOAL\"\n return st\n\n\ndef animal_run(animal, player, field):\n if animal in player.field:\n player.field = player.field.replace(animal, \"\")\n field.left += animal\n elif animal in field.left:\n field.left = field.left.replace(animal, \"\")\n field.right += animal\n elif animal in field.right:\n field.right = field.right.replace(animal, \"\")\n player.field += animal\n else:\n field.farm = field.farm.replace(animal, \"\")\n field.left += animal\n return {\n \"player\": player,\n \"field\": field\n }\n\n\n# # ( / )\n# def scoring(field):\n# score = 0\n# for c in field:\n# if c == \"S\":\n# score += 1\n# if c == \"P\":\n# score += 2\n# if c == \"H\":\n# score += 3\n# if c == \"C\":\n# score += 4\n# return score\n\n\ns = BFS(\"\", {\"S\": 2, \"P\": 2, \"H\": 2, \"C\": 2}, \"\", \"SPHC\", \"\",\n ['S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S'], [])\n\nprint(s)\n","sub_path":"bfs_test.py","file_name":"bfs_test.py","file_ext":"py","file_size_in_byte":21875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"525640500","text":"from aiogram.dispatcher import FSMContext\nfrom aiogram.types import Message, Location, ReplyKeyboardRemove\nfrom aiogram.dispatcher.filters import Command\nfrom aiogram.utils.callback_data import CallbackData\n\nfrom telegram import CallbackQuery\n\nfrom keyboards.default.start.start_menu_keyboard import start_menu_keyboard\nfrom keyboards.inline.faq.keyboards import faq_start_keyboard\nfrom keyboards.inline.profile_settings.callback_datas import age_callback_data, earning_callback_data\nfrom keyboards.inline.profile_settings.keyboards import select_age_keyboard, get_location_keyboard, \\\n has_earning_keyboard\nfrom loader import dp\n\nfrom states.profile_settings_form import Profile_settings\n\n\n@dp.callback_query_handler(text=\"settings\", state=None)\nasync def form_age(query: CallbackQuery, state: FSMContext):\n await query.answer(cache_time=60)\n await query.message.edit_text(text=\"Выберите свой возраст:\",\n reply_markup=select_age_keyboard)\n await Profile_settings.first()\n\n\n@dp.callback_query_handler(age_callback_data.filter(), state=Profile_settings.age)\nasync def form_location(query: CallbackQuery, callback_data: dict, state: FSMContext):\n print(callback_data[\"age\"]) # обрабатываем и записываем в БД возраст\n await query.answer(cache_time=60)\n await query.message.edit_text(text=\"А на аватарке выглядите намного моложе!\", reply_markup=None)\n await query.message.answer(text=\"Выберите город в котором проживаете:\\n\"\n \"P.S. Данная информация нужна исключительно для подбора\"\n \"персонализированных заданий, за котоые вы сможете\"\n \"получать больше денег, чем за обычные\\n\"\n \"P.P.S. Данные параметры ты сможешь всегда изменить в\"\n \"личном профиле\",\n reply_markup=get_location_keyboard)\n await Profile_settings.next()\n\n\n@dp.message_handler(content_types=[\"location\"], state=Profile_settings.location)\nasync def form_earning(message: Message, state: FSMContext):\n location = message.location\n print(location.latitude, location.longitude) # обрабатываем и записываем геолокацию в БД\n await message.answer(text=\"Отлично! И последний вопрос.\\n\"\n \"Имеешь ли ты личный источник дохода? 
(работа, личное дело и тд.)\",\n reply_markup=has_earning_keyboard)\n await Profile_settings.next()\n\n\n@dp.callback_query_handler(earning_callback_data.filter(), state=Profile_settings.earning)\nasync def form_finish(query: CallbackQuery, callback_data: dict, state: FSMContext):\n print(callback_data[\"has_earning\"]) # обрабатываем значение и записываем в БД\n await query.answer(cache_time=60)\n await query.message.edit_text(text=\"Ты успешно присоединился к нам!\\n\"\n \"Добро пожаловать в семью Platforma.\\n\",\n reply_markup=None)\n await query.message.answer(text=\"Для начала, мы рекомендуем тебе почитать FAQ для \"\n \"лучшего понимания всех наших процессов и нюансов. \"\n \"Не беспокойся, он читается быстро и легко)\",\n reply_markup=faq_start_keyboard)\n await state.finish()\n","sub_path":"handlers/users/profile_settings_form.py","file_name":"profile_settings_form.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"493176907","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass DiscoveredSecuritySolution(Model):\n \"\"\"DiscoveredSecuritySolution.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Resource Id\n :vartype id: str\n :ivar name: Resource name\n :vartype name: str\n :ivar type: Resource type\n :vartype type: str\n :ivar location: Location where the resource is stored\n :vartype location: str\n :param security_family: Required. The security family of the discovered\n solution. Possible values include: 'Waf', 'Ngfw', 'SaasWaf', 'Va'\n :type security_family: str or ~azure.mgmt.security.models.SecurityFamily\n :param offer: Required. The security solutions' image offer\n :type offer: str\n :param publisher: Required. The security solutions' image publisher\n :type publisher: str\n :param sku: Required. 
The security solutions' image sku\n :type sku: str\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'location': {'readonly': True},\n 'security_family': {'required': True},\n 'offer': {'required': True},\n 'publisher': {'required': True},\n 'sku': {'required': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'location': {'key': 'location', 'type': 'str'},\n 'security_family': {'key': 'properties.securityFamily', 'type': 'str'},\n 'offer': {'key': 'properties.offer', 'type': 'str'},\n 'publisher': {'key': 'properties.publisher', 'type': 'str'},\n 'sku': {'key': 'properties.sku', 'type': 'str'},\n }\n\n def __init__(self, *, security_family, offer: str, publisher: str, sku: str, **kwargs) -> None:\n super(DiscoveredSecuritySolution, self).__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n self.location = None\n self.security_family = security_family\n self.offer = offer\n self.publisher = publisher\n self.sku = sku\n","sub_path":"azure-mgmt-security/azure/mgmt/security/models/discovered_security_solution_py3.py","file_name":"discovered_security_solution_py3.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"319688147","text":"#(2839) 설탕배달\n\nimport sys\n\nN = int(sys.stdin.readline())\ncache = [float(\"INF\")] * (N+1)\ncache[0] = 0\nsugar_list = [3,5]\n\nfor n in sugar_list:\n for m in range(n, N+1):\n if cache[m-n] != float(\"INF\"):\n cache[m] = min(cache[m], cache[m-n]+1)\nif cache[N] == float(\"INF\"): print(-1)\nelse: print(cache[N])\n","sub_path":"code/2839.py","file_name":"2839.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"212554672","text":"# !/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n:copyright (c) 2014 - 2018, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Department of Energy) and contributors. All rights reserved. # NOQA\n:author 'Piper Merriam ', Paul Munday\n\nUnit tests for seed/views/labels.py\n\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom seed.landing.models import SEEDUser as User\nfrom seed.models import (\n StatusLabel as Label,\n)\nfrom seed.test_helpers.fake import (\n mock_queryset_factory,\n)\nfrom seed.tests.util import DeleteModelsTestCase\nfrom seed.utils.organizations import create_organization\nfrom seed.views.labels import (\n UpdateInventoryLabelsAPIView,\n)\n\n\nclass TestLabelsViewSet(DeleteModelsTestCase):\n \"\"\"Test the label DRF viewset\"\"\"\n\n def test_results_are_not_actually_paginated(self):\n \"\"\"\n Ensure that labels are not actually paginated.\n \"\"\"\n user = User.objects.create_superuser(\n email='test_user@demo.com',\n username='test_user@demo.com',\n password='secret',\n )\n organization, _, _ = create_organization(user, \"test-organization\")\n\n # Create 101 labels. 
This should be pretty future proof against any\n # reasonable default pagination settings as well as realistic number of\n # labels.\n for i in range(101):\n Label.objects.create(\n color=\"red\",\n name=\"test_label-{0}\".format(i),\n super_organization=organization,\n )\n\n client = APIClient()\n client.login(username=user.username, password='secret')\n\n url = reverse('api:v2:labels-list')\n\n response = client.get(url, {'organization_id': organization.pk, 'inventory_type': 'property'})\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), organization.labels.count())\n\n results = response.data\n\n self.assertEqual(len(results), organization.labels.count())\n\n def test_organization_query_param_is_used(self):\n \"\"\"\n Ensure that when the organization_id query parameter is provided, that\n the endpoint returns the appropriate labels for that organization.\n \"\"\"\n user = User.objects.create_superuser(\n email='test_user@demo.com',\n username='test_user@demo.com',\n password='secret',\n )\n organization_a, _, _ = create_organization(user, \"test-organization-a\")\n organization_b, _, _ = create_organization(user, \"test-organization-b\")\n\n # Ensures that at least a single label exists to ensure that we are not\n # relying on auto-creation of labels for this test to pass.\n Label.objects.create(\n color=\"red\",\n name=\"test_label-a\",\n super_organization=organization_a,\n )\n\n Label.objects.create(\n color=\"red\",\n name=\"test_label-b\",\n super_organization=organization_b,\n )\n\n client = APIClient()\n client.login(username=user.username, password='secret')\n\n url = reverse('api:v2:labels-list')\n\n response_a = client.get(url, {'organization_id': organization_a.pk, 'inventory_type': 'property'})\n response_b = client.get(url, {'organization_id': organization_b.pk, 'inventory_type': 'property'})\n\n self.assertEqual(response_a.status_code, status.HTTP_200_OK)\n self.assertEqual(response_b.status_code, status.HTTP_200_OK)\n\n results_a = set(result['organization_id'] for result in response_a.data)\n results_b = set(result['organization_id'] for result in response_b.data)\n\n assert results_a == {organization_a.pk}\n assert results_b == {organization_b.pk}\n\n\nclass TestUpdateInventoryLabelsAPIView(DeleteModelsTestCase):\n\n def setUp(self):\n self.api_view = UpdateInventoryLabelsAPIView()\n\n # Models can't be imported directly hence self\n self.PropertyLabels = self.api_view.models['property']\n self.TaxlotLabels = self.api_view.models['taxlot']\n self.mock_property_queryset = mock_queryset_factory(\n self.PropertyLabels,\n flatten=True,\n property_id=range(1, 11),\n statuslabel_id=[1] * 3 + [2] * 3 + [3] * 2 + [4] * 2\n )\n self.user_details = {\n 'username': 'test_user@demo.com',\n 'password': 'test_pass',\n 'email': 'test_user@demo.com'\n }\n self.user = User.objects.create_superuser(**self.user_details)\n self.org, _, _ = create_organization(self.user)\n self.status_label = Label.objects.create(\n name='test', super_organization=self.org\n )\n self.client.login(**self.user_details)\n\n def test_get_label_desc(self):\n add_label_ids = [self.status_label.id]\n remove_label_ids = []\n result = self.api_view.get_label_desc(\n add_label_ids, remove_label_ids\n )[0]\n expected = {\n 'id': self.status_label.id,\n 'name': 'test',\n 'color': 'green'\n }\n self.assertEqual(result, expected)\n\n def test_get_inventory_id(self):\n result = self.api_view.get_inventory_id(\n self.mock_property_queryset[0], 'property'\n )\n 
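        # setUp seeded the mock queryset with property_id=range(1, 11), so the
        # first mocked row must report inventory id 1.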
self.assertEqual(result, 1)\n\n def test_exclude(self):\n result = self.api_view.exclude(\n self.mock_property_queryset, 'property', [3, 4]\n )\n expected = {3: [7, 8], 4: [9, 10]}\n self.assertEqual(result, expected)\n\n def test_label_factory(self):\n result = self.api_view.label_factory('property', 100, 100)\n self.assertEqual(\n result.__class__.__name__, self.PropertyLabels.__name__\n )\n self.assertEqual(result.property_id, 100)\n self.assertEqual(result.statuslabel_id, 100)\n\n def test_add_remove_labels(self):\n result = self.api_view.add_labels(\n self.mock_property_queryset, 'property',\n [1, 2, 3], [5, 6]\n )\n self.assertEqual(result, [1, 2, 3] * 2)\n qs = self.PropertyLabels.objects.all()\n self.assertEqual(len(qs), 6)\n self.assertEqual(qs[0].property_id, 1)\n self.assertEqual(qs[0].statuslabel_id, 5)\n\n result = self.api_view.remove_labels(qs, 'property', [5, 6])\n qs = self.PropertyLabels.objects.all()\n self.assertEqual(len(qs), 0)\n\n def test_put(self):\n client = APIClient()\n client.login(\n username=self.user_details['username'],\n password=self.user_details['password']\n )\n r = reverse('api:v2:property-labels')\n url = \"{}?organization_id={}\".format(\n r, self.org.id\n )\n\n post_params = {\n 'add_label_ids': [self.status_label.id],\n 'remove_label_ids': [],\n 'inventory_ids': [1, 2, 3],\n }\n response = client.put(\n url, post_params, format='json'\n )\n result = response.data\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(result['status'], 'success')\n self.assertEqual(result['num_updated'], 3)\n\n label = result['labels'][0]\n self.assertEqual(label['color'], self.status_label.color)\n self.assertEqual(label['id'], self.status_label.id)\n self.assertEqual(label['name'], self.status_label.name)\n\n post_params = {\n 'add_label_ids': [],\n 'remove_label_ids': [self.status_label.id],\n 'inventory_ids': [1, 2, 3],\n }\n response = client.put(\n url, post_params, format='json'\n )\n result = response.data\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(result['status'], 'success')\n self.assertEqual(result['num_updated'], 3)\n\n label = result['labels'][0]\n self.assertEqual(label['color'], self.status_label.color)\n self.assertEqual(label['id'], self.status_label.id)\n self.assertEqual(label['name'], self.status_label.name)\n","sub_path":"seed/tests/test_labels_api_views.py","file_name":"test_labels_api_views.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"559506982","text":"#! 
/usr/bin/env python\nimport sys\nimport os\nimport re\nimport datetime\nfrom argparse import ArgumentParser\nfrom log_it import *\n\n\n#############################################################################\n# Created on September 25, 2013 by Gail Schmidt, USGS/EROS\n# Created Python script to generate the configuration files for the boosted\n# regression modeling\n#\n# History:\n#\n# Usage: generate_boosted_regression_config.py --help prints the help message\n############################################################################\nclass BoostedRegressionConfig():\n \"\"\"Class to handle generating the configuration file for the boosted\n regression processing.\n \"\"\"\n\n def __init__(self):\n pass\n\n\n def runGenerateConfig (self, config_file=None, seasonal_sum_dir=None,\n input_base_file=None, input_mask_file=None, output_dir=None,\n model_file=None, logfile=None):\n \"\"\"Generates the configuration file.\n Description: runGenerateConfig will use the input parameters to\n generate the configuration file needed for running the boosted\n regression. If config file is None (i.e. not specified) then the\n command-line parameters will be parsed for the information. If a log\n file was specified, then the output from each application will be\n logged to that file.\n \n History:\n Updated on Dec. 3, 2013 by Gail Schmidt, USGS/EROS LSRD Project\n Modified to use argparser vs. optionparser, since optionparser\n is deprecated.\n Updated on March 26, 2014 by Gail Schmidt, USGS/EROS LSRD Project\n Modified to support ESPA internal file format as input and output.\n Updated on April 9, 2014 by Gail Schmidt, USGS/EROS LSRD Project\n Modified to support the use of a log file.\n\n Args:\n config_file - name of the configuration file to be created or\n overwritten\n seasonal_sum_dir - name of the directory where the seasonal\n summaries reside for this scene\n input_base_file - name of the base surface reflectance file to be\n processed\n input_mask_file - name of the mask file associated with the base\n surface reflectance file\n output_dir - location of burn probability product to be written\n model_file - name of the geographic model to be used\n logfile - name of the logfile for logging information; if None then\n the output will be written to stdout\n \n Returns:\n ERROR - error generating the configuration file\n SUCCESS - successful creation\n \"\"\"\n\n # if no parameters were passed then get the info from the command line\n if config_file is None:\n # get the command line argument for the reflectance file\n parser = ArgumentParser(description='Generate the configuration ' \\\n 'file for boosted regression')\n parser.add_argument ('-c', '--config_file', type=str,\n dest='config_file',\n help='name of configuration file', metavar='FILE')\n parser.add_argument ('-s', '--seasonal_sum_dir', type=str,\n dest='seasonal_sum_dir',\n help='directory location of the seasonal summaries for this ' \\\n 'scene', metavar='DIR')\n parser.add_argument ('-i', '--input_base_file', type=str,\n dest='input_base_file',\n help='name of the base input image file to be processed',\n metavar='FILE')\n parser.add_argument ('-k', '--input_mask_file', type=str,\n dest='input_mask_file',\n help='name of the mask file for the image to be processed',\n metavar='FILE')\n parser.add_argument ('-o', '--output_dir', type=str,\n dest='output_dir',\n help='location of burn probability product to be written',\n metavar='DIR')\n parser.add_argument ('-m', '--model_file', type=str,\n dest='model_file', help='name of the 
XML model to load',\n metavar='FILE')\n parser.add_argument ('-l', '--logfile', type=str, dest='logfile',\n help='name of optional log file', metavar='FILE')\n\n options = parser.parse_args()\n \n # validate the input info\n if options.config_file is None:\n parser.error ('missing configuration file command-line ' \\\n 'argument');\n return ERROR\n config_file = options.config_file\n\n if options.seasonal_sum_dir is None:\n parser.error ('missing seasonal summary directory ' \\\n 'command-line argument');\n return ERROR\n seasonal_sum_dir = options.seasonal_sum_dir\n\n if options.input_base_file is None:\n parser.error ('missing the input base image file ' \\\n 'command-line argument');\n return ERROR\n input_base_file = options.input_base_file\n\n if options.input_mask_file is None:\n parser.error ('missing the input mask file command-line ' \\\n 'argument');\n return ERROR\n input_mask_file = options.input_mask_file\n\n if options.output_dir is None:\n parser.error ('missing the output directory command-line ' \\\n 'argument');\n return ERROR\n output_dir = options.output_dir\n\n if options.model_file is None:\n parser.error ('missing the model file command-line argument');\n return ERROR\n model_file = options.model_file\n\n logfile = options.logfile\n\n # open the log file if it exists; use line buffering for the output\n log_handler = None\n if logfile is not None:\n log_handler = open (logfile, 'w', buffering=1)\n\n # make sure the seasonal summary directory exists\n if not os.path.exists(seasonal_sum_dir):\n msg = 'Error: seasonal summary directory does not exist or is ' \\\n 'not accessible: %s' % seasonal_sum_dir\n logIt (msg, log_handler)\n return ERROR\n\n # make sure the input band 1 image file exists, just as a minor\n # sanity check. It doesn't guarantee that all the bands will be\n # there though.\n if not os.path.exists(input_base_file + '_sr_band1.img'):\n msg = 'Error: input base image file does not exist or is not ' \\\n 'accessible: %s_sr_band1.img' % input_base_file\n logIt (msg, log_handler)\n return ERROR\n\n # make sure the mask file exists\n if not os.path.exists(input_mask_file):\n msg = 'Error: input mask file does not exist or is not ' \\\n 'accessible: %s' % input_mask_file\n logIt (msg, log_handler)\n return ERROR\n\n # make sure the model file exists\n if not os.path.exists(model_file):\n msg = 'Error: XML model file does not exist or is not ' \\\n 'accessible: %s' % model_file\n logIt (msg, log_handler)\n return ERROR\n\n # make sure the output directory exists\n if not os.path.exists(output_dir):\n msg = 'Error: output directory does not exist or is not ' \\\n 'accessible: %s' % output_dir\n logIt (msg, log_handler)\n return ERROR\n\n # determine the output filename using the input image filename; split\n # the input string into a list where the second element in the list\n # is the scene name for the file. 
Example input filename is\n # LT50350322002237LGS01.\n base_file = os.path.basename(input_base_file)\n output_file = '%s/%s_burn_probability.img' % (output_dir, base_file)\n\n # open the configuration file for writing\n config_handler = open (config_file, 'w')\n if config_handler is None:\n msg = 'Error opening/creating the configuration file for write'\n logIt (msg, log_handler)\n return ERROR\n\n # create the config file\n config_line = 'INPUT_BASE_FILE=%s' % input_base_file\n config_handler.write (config_line + '\\n')\n config_line = 'INPUT_MASK_FILE=%s' % input_mask_file\n config_handler.write (config_line + '\\n')\n config_line = 'INPUT_FILL_VALUE=-9999'\n config_handler.write (config_line + '\\n')\n config_line = 'SEASONAL_SUMMARIES_DIR=%s' % seasonal_sum_dir\n config_handler.write (config_line + '\\n')\n config_line = 'OUTPUT_IMG_FILE=%s' % output_file\n config_handler.write (config_line + '\\n')\n config_line = 'LOAD_MODEL_XML=%s' % model_file\n config_handler.write (config_line + '\\n')\n\n # successful completion\n config_handler.close()\n return SUCCESS\n\n######end of BoostedRegressionConfig class######\n\nif __name__ == \"__main__\":\n sys.exit (BoostedRegressionConfig().runGenerateConfig())\n","sub_path":"scripts/boosted_regression_tree/generate_boosted_regression_config.py","file_name":"generate_boosted_regression_config.py","file_ext":"py","file_size_in_byte":9186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"423063617","text":"import sys\nimport numpy as np\nfrom i3d_learner import I3dLearner\nfrom ts_learner import TsLearner\nfrom svm_learner import SvmLearner\nfrom util import *\n\n# Train the model\ndef main(argv):\n if len(argv) < 2:\n print(\"Usage: python train.py [method]\")\n print(\"Optional usage: python train.py [method] [model_path]\")\n return\n method = argv[1]\n if method is None:\n print(\"Usage: python train.py [method]\")\n print(\"Optional usage: python train.py [method] [model_path]\")\n return\n model_path = None\n if len(argv) > 2:\n model_path = argv[2]\n train(method=method, model_path=model_path)\n\ndef train(method=None, model_path=None):\n if method == \"i3d-rgb\":\n model = I3dLearner()\n frame_path = \"../data/rgb/\"\n if model_path is None:\n model_path = \"../data/pretrained_models/i3d_rgb_imagenet_kinetics.pt\"\n model.fit(mode=\"rgb\", p_frame=frame_path, p_model=model_path)\n elif method == \"i3d-flow\":\n model = I3dLearner()\n frame_path = \"../data/flow/\"\n if model_path is None:\n model_path = \"../data/pretrained_models/i3d_flow_imagenet_kinetics.pt\"\n model.fit(mode=\"flow\", p_frame=frame_path, p_model=model_path)\n elif method == \"ts-rgb\":\n model = TsLearner()\n model.fit(mode=\"rgb\")\n elif method == \"ts-flow\":\n model = TsLearner()\n model.fit(mode=\"flow\")\n elif method == \"svm\":\n model = SvmLearner()\n model.fit()\n else:\n print(\"Method not allowed\")\n return\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"back-end/www/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"335291054","text":"import argparse\nimport math\nimport os\nimport time\nfrom typing import List, Mapping, Optional, Tuple\n\nimport numpy as np\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport yaml\nfrom torch import nn\nfrom torch.nn.modules.loss import _Loss\nfrom torch.optim.lr_scheduler import 
_LRScheduler\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom archai.common import utils\nfrom archai.common.ordered_dict_logger import get_global_logger\nfrom archai.supergraph import models\n\nlogger = get_global_logger()\n\n\ndef train(\n epochs, train_dl, val_dal, net, device, crit, optim, sched, sched_on_epoch, half, quiet, grad_clip: float\n) -> List[Mapping]:\n train_acc, _ = 0.0, 0.0\n metrics = []\n for epoch in range(epochs):\n lr = optim.param_groups[0][\"lr\"]\n train_acc, loss = train_epoch(epoch, net, train_dl, device, crit, optim, sched, sched_on_epoch, half, grad_clip)\n\n val_acc = test(net, val_dal, device, half) if val_dal is not None else math.nan\n metrics.append({\"val_top1\": val_acc, \"train_top1\": train_acc, \"lr\": lr, \"epoch\": epoch, \"train_loss\": loss})\n if not quiet:\n logger.info(f\"train_epoch={epoch}, val_top1={val_acc},\" f\" train_top1={train_acc}, lr={lr:.4g}\")\n return metrics\n\n\ndef optim_sched_orig(net, epochs):\n lr, momentum, weight_decay = 0.1, 0.9, 1.0e-4\n optim = torch.optim.SGD(net.parameters(), lr, momentum=momentum, weight_decay=weight_decay)\n logger.info(f\"lr={lr}, momentum={momentum}, weight_decay={weight_decay}\")\n\n sched = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=[100, 150, 200, 400, 600]) # resnet original paper\n sched_on_epoch = True\n\n logger.info(f\"sched_on_epoch={sched_on_epoch}, sched={str(sched)}\")\n\n return optim, sched, sched_on_epoch\n\n\ndef optim_sched_cosine(net, epochs):\n lr, momentum, weight_decay = 0.025, 0.9, 1.0e-4\n optim = torch.optim.SGD(net.parameters(), lr, momentum=momentum, weight_decay=weight_decay)\n logger.info(f\"lr={lr}, momentum={momentum}, weight_decay={weight_decay}\")\n\n sched = torch.optim.lr_scheduler.CosineAnnealingLR(optim, epochs)\n sched_on_epoch = True\n\n logger.info(f\"sched_on_epoch={sched_on_epoch}, sched={str(sched)}\")\n\n return optim, sched, sched_on_epoch\n\n\ndef get_data(\n datadir: str,\n train_batch_size=128,\n test_batch_size=4096,\n cutout=0,\n train_num_workers=-1,\n test_num_workers=-1,\n val_percent=10.0,\n) -> Tuple[DataLoader, Optional[DataLoader], DataLoader]:\n if utils.is_debugging():\n train_num_workers = test_num_workers = 0\n logger.info(\"debugger=true, num_workers=0\")\n if train_num_workers <= -1:\n train_num_workers = torch.cuda.device_count() * 4\n if test_num_workers <= -1:\n test_num_workers = torch.cuda.device_count() * 4\n\n train_transform = cifar10_transform(aug=True, cutout=cutout)\n trainset = torchvision.datasets.CIFAR10(root=datadir, train=True, download=True, transform=train_transform)\n\n val_len = int(len(trainset) * val_percent / 100.0)\n train_len = len(trainset) - val_len\n\n valset = None\n if val_len:\n trainset, valset = torch.utils.data.random_split(trainset, [train_len, val_len])\n\n train_dl = torch.utils.data.DataLoader(\n trainset, batch_size=train_batch_size, shuffle=True, num_workers=train_num_workers, pin_memory=True\n )\n\n if valset is not None:\n val_dl = torch.utils.data.DataLoader(\n valset, batch_size=test_batch_size, shuffle=False, num_workers=test_num_workers, pin_memory=True\n )\n else:\n val_dl = None\n\n test_transform = cifar10_transform(aug=False, cutout=0)\n testset = torchvision.datasets.CIFAR10(root=datadir, train=False, download=True, transform=test_transform)\n test_dl = torch.utils.data.DataLoader(\n testset, batch_size=test_batch_size, shuffle=False, num_workers=test_num_workers, pin_memory=True\n )\n\n logger.info(f\"train_len={train_len}, 
val_len={val_len}, test_len={len(testset)}\")\n\n return train_dl, val_dl, test_dl\n\n\ndef train_epoch(\n epoch, net, train_dl, device, crit, optim, sched, sched_on_epoch, half, grad_clip: float\n) -> Tuple[float, float]:\n correct, total, loss_total = 0, 0, 0.0\n net.train()\n for batch_idx, (inputs, targets) in enumerate(train_dl):\n inputs = inputs.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if half:\n inputs = inputs.half()\n\n outputs, loss = train_step(net, crit, optim, sched, sched_on_epoch, inputs, targets, grad_clip)\n loss_total += loss\n\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n if sched and sched_on_epoch:\n sched.step()\n return 100.0 * correct / total, loss_total\n\n\ndef train_step(\n net: nn.Module,\n crit: _Loss,\n optim: Optimizer,\n sched: _LRScheduler,\n sched_on_epoch: bool,\n inputs: torch.Tensor,\n targets: torch.Tensor,\n grad_clip: float,\n) -> Tuple[torch.Tensor, float]:\n outputs = net(inputs)\n\n loss = crit(outputs, targets)\n optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(net.parameters(), grad_clip)\n\n optim.step()\n if sched and not sched_on_epoch:\n sched.step()\n return outputs, loss.item()\n\n\ndef test(net, test_dl, device, half) -> float:\n correct, total = 0, 0\n net.eval()\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(test_dl):\n inputs = inputs.to(device, non_blocking=False)\n targets = targets.to(device)\n\n if half:\n inputs = inputs.half()\n\n outputs = net(inputs)\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n return 100.0 * correct / total\n\n\ndef param_size(model: torch.nn.Module) -> int:\n \"\"\"count all parameters excluding auxiliary\"\"\"\n return sum(v.numel() for name, v in model.named_parameters() if \"auxiliary\" not in name)\n\n\ndef cifar10_transform(aug: bool, cutout=0):\n MEAN = [0.49139968, 0.48215827, 0.44653124]\n STD = [0.24703233, 0.24348505, 0.26158768]\n\n transf = [transforms.ToTensor(), transforms.Normalize(MEAN, STD)]\n\n if aug:\n aug_transf = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()]\n transf = aug_transf + transf\n\n if cutout > 0: # must be after normalization\n transf += [CutoutDefault(cutout)]\n\n return transforms.Compose(transf)\n\n\nclass CutoutDefault:\n \"\"\"\n Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py\n \"\"\"\n\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1:y2, x1:x2] = 0.0\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef log_metrics(expdir: str, filename: str, metrics, test_acc: float, args) -> None:\n print(\"filename:\", f\"test_acc: {test_acc}\", metrics[-1])\n results = [\n (\"test_acc\", test_acc),\n (\"val_acc\", metrics[-1][\"val_top1\"]),\n (\"epochs\", args.epochs),\n (\"train_batch_size\", args.train_batch_size),\n (\"test_batch_size\", args.test_batch_size),\n (\"model_name\", args.model_name),\n (\"exp_name\", args.experiment_name),\n (\"exp_desc\", args.experiment_description),\n (\"seed\", args.seed),\n (\"devices\", 
utils.cuda_device_names()),\n        (\"half\", args.half),\n        (\"cutout\", args.cutout),\n        (\"train_acc\", metrics[-1][\"train_top1\"]),\n        (\"loader_workers\", args.loader_workers),\n        (\"date\", str(time.time())),\n    ]\n    utils.append_csv_file(os.path.join(expdir, f\"{filename}.tsv\"), results)\n    with open(os.path.join(expdir, f\"{filename}.yaml\"), \"w\") as f:\n        yaml.dump(metrics, f)\n\n\ndef create_crit(device, half):\n    crit = nn.CrossEntropyLoss().to(device)\n    if half:\n        crit.half()\n    return crit\n\n\ndef create_model(model_name, device, half) -> nn.Module:\n    model_class = getattr(models, model_name)\n    net = model_class()\n    logger.info(f\"param_size_m={param_size(net):.1e}\")\n    net = net.to(device)\n    if half:\n        net.half()\n    return net\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"PyTorch cifar training\")\n    parser.add_argument(\"--experiment-name\", \"-n\", default=\"train_pytorch\")\n    parser.add_argument(\"--experiment-description\", \"-d\", default=\"Train cifar using pure PyTorch code\")\n    parser.add_argument(\"--epochs\", \"-e\", type=int, default=1)\n    parser.add_argument(\"--model-name\", \"-m\", default=\"resnet34\")\n    parser.add_argument(\"--device\", default=\"\", help='\"cuda\" or \"cpu\" or \"\" in which case use cuda if available')\n    parser.add_argument(\"--train-batch-size\", \"-b\", type=int, default=128)\n    parser.add_argument(\"--test-batch-size\", type=int, default=4096)\n    parser.add_argument(\"--seed\", \"-s\", type=float, default=42)\n    parser.add_argument(\"--half\", type=lambda x: x.lower() == \"true\", nargs=\"?\", const=True, default=False)\n    parser.add_argument(\"--cutout\", type=int, default=0)\n    parser.add_argument(\"--grad-clip\", type=float, default=5.0)\n\n    parser.add_argument(\"--datadir\", default=\"\", help=\"where to find dataset files, default is ~/torchvision_data_dir\")\n    parser.add_argument(\"--outdir\", default=\"\", help=\"where to put results, default is ~/logdir\")\n\n    parser.add_argument(\n        \"--loader-workers\", type=int, default=-1, help=\"number of threads/workers for data loader (-1 means auto)\"\n    )\n\n    args = parser.parse_args()\n\n    if not args.datadir:\n        args.datadir = os.environ.get(\"PT_DATA_DIR\", \"\") or \"~/dataroot\"\n    if not args.outdir:\n        args.outdir = os.environ.get(\"PT_OUTPUT_DIR\", \"\")\n    if not args.outdir:\n        args.outdir = os.path.join(\"~/logdir\", \"cifar_testbed\", args.experiment_name)\n\n    expdir = utils.full_path(args.outdir)\n    os.makedirs(expdir, exist_ok=True)\n\n    utils.setup_cuda(args.seed)\n    datadir = utils.full_path(args.datadir)\n    os.makedirs(datadir, exist_ok=True)\n\n    # log config for reference\n    logger.info(f'exp_name=\"{args.experiment_name}\", exp_desc=\"{args.experiment_description}\"')\n    logger.info(f'model_name=\"{args.model_name}\", seed={args.seed}, epochs={args.epochs}')\n    logger.info(f\"half={args.half}, cutout={args.cutout}\")\n    logger.info(f'datadir=\"{datadir}\"')\n    logger.info(f'expdir=\"{expdir}\"')\n    logger.info(f\"train_batch_size={args.train_batch_size}\")\n\n    if args.device:\n        device = torch.device(args.device)\n    else:\n        device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    # load data just before training starts so any errors so far are not delayed\n    train_dl, val_dl, test_dl = get_data(\n        datadir=datadir,\n        train_batch_size=args.train_batch_size,\n        test_batch_size=args.test_batch_size,\n        train_num_workers=args.loader_workers,\n        test_num_workers=args.loader_workers,\n        cutout=args.cutout,\n    )\n\n    epochs = args.epochs\n\n    net = create_model(args.model_name, device, 
args.half)\n crit = create_crit(device, args.half)\n optim, sched, sched_on_epoch = optim_sched_orig(net, epochs)\n\n train_metrics = train(\n epochs,\n train_dl,\n val_dl,\n net,\n device,\n crit,\n optim,\n sched,\n sched_on_epoch,\n args.half,\n False,\n grad_clip=args.grad_clip,\n )\n test_acc = test(net, test_dl, device, args.half)\n log_metrics(expdir, \"train_metrics\", train_metrics, test_acc, args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/supergraph/models/train_pytorch.py","file_name":"train_pytorch.py","file_ext":"py","file_size_in_byte":12021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"378101591","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport requests\nimport logging, json\n\nlogger = logging.getLogger('tradeex.apiclient')\n\nclass APIClient(object):\n def __init__(self, url, retry_policy=None):\n self.url = url\n self.retry_policy = retry_policy\n\n def send_json_request(self, pay_load, trackingId = '', response_format='json'):\n headers = {\"Content-Type\": \"application/json\",\n \"charset\": \"UTF-8\"}\n logger.info(\"apiclient.send_json_request(): {0} send request to {1}, with json:{2}\".format(\n '[trackId: {0}]'.format(trackingId) if trackingId else '',\n self.url, pay_load\n ))\n r = requests.post(self.url, json=pay_load, headers= headers, allow_redirects=True)\n\n logger.info(\"response is {0}\".format(r.text))\n return r.json() if response_format=='json' else r.text.encode('utf-8')\n","sub_path":"stakingsvc/tradeapi/apiclient.py","file_name":"apiclient.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"580050297","text":"\"\"\"\nThis mission is the first one of the series. Here you should find the length of the longest substring that consists of\nthe same letter. For example, line \"aaabbcaaaa\" contains four substrings with the same letters \"aaa\", \"bb\",\"c\"\nand \"aaaa\". The last substring is the longest one, which makes it the answer.\n\nInput: A string.\n\nOutput: An int.\n\"\"\"\n\n\ndef long_repeat(line: str) -> int:\n previous = ''\n count = 0\n max_count = 0\n for letter in line:\n if letter == previous:\n count += 1\n else:\n count = 1\n previous = letter\n if count > max_count:\n max_count = count\n return max_count\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert long_repeat('a') == 1, \"minus one\"\n assert long_repeat('aa') == 2, \"Zero\"\n assert long_repeat('sdsffffse') == 4, \"First\"\n assert long_repeat('ddvvrwwwrggg') == 3, \"Second\"\n assert long_repeat('abababaab') == 2, \"Third\"\n assert long_repeat('') == 0, \"Empty\"\n print('\"Run\" is good. How is \"Check\"?')\n","sub_path":"ice_base/task05_long_repeat_v2.py","file_name":"task05_long_repeat_v2.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"384704290","text":"from django.shortcuts import render, HttpResponse\n\n# Create your views here.\ndef index_func(request):\n # return HttpResponse(\"
Hello {{first_name}}!
\")\n context = {\n \"first_name\": \"Trinh\",\n \"users\": [\n \"Max Gliedt\",\n \"Cassidy Gliedt\",\n \"Micah Gliedt\"\n ]\n }\n return render(request, \"index.html\", context)\n","sub_path":"03_Python/2.4_Django/01_Fundamental/Lecture file/First_Django_Project/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190601073","text":"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom zhiqiang.utils import torch_utils\nfrom zhiqiang.agents import AbstractPQNet\n\nfrom pong_qnet import ConvModule\n\n#\nclass PongPNet(torch.nn.Module, AbstractPQNet):\n \"\"\" Pong input image: (28, 28, 6)\n \"\"\"\n def __init__(self, settings):\n \"\"\"\n \"\"\"\n super(PongPNet, self).__init__()\n self.check_necessary_elements(PongPNet)\n #\n self.settings = settings\n self.agent_settings = settings.agent_settings\n #\n # conv layers\n self.conv_module = ConvModule(self.settings)\n #\n self.num_actions = self.agent_settings[\"num_actions\"]\n conv_features = self.conv_module.num_conv_features\n num_features = 64\n #\n self.linear_0 = nn.Linear(conv_features, num_features) # \n self.linear_1 = nn.Linear(num_features, self.num_actions)\n #\n # optimizer\n params = [p for p in self.parameters() if p.requires_grad]\n self.optimizer = torch.optim.Adam(params, lr=self.agent_settings[\"lr\"],\n weight_decay=self.agent_settings[\"l2reg\"])\n #\n self.reset(seed=self.agent_settings[\"seed\"])\n #\n\n def reset(self, seed=100):\n \"\"\"\n \"\"\"\n torch_utils.set_random_seed(seed)\n torch_utils.reset_params(self)\n torch_utils.print_params(self)\n\n #\n def trans_list_observations(self, list_observation):\n \"\"\" trans list_observation to batch_std for model\n return: s_std, standardized batch of states\n \"\"\"\n list_s = [np.concatenate(item, -1) for item in list_observation]\n #\n obs_np = np.stack(list_s, axis=0)\n obs_tensor = torch.Tensor(obs_np).to(self.settings.device)\n return obs_tensor\n\n def infer(self, s_std):\n \"\"\" s_std: standardized batch of states\n \"\"\"\n conv_features = self.conv_module(s_std)\n #\n middle = F.relu(self.linear_0(conv_features)) # [B, M]\n action_values = self.linear_1(middle) # [B, NA]\n action_probs = torch.softmax(action_values, -1)\n return action_probs\n\n #\n def merge_weights_function(self):\n return torch_utils.merge_weights\n #\n\n\n","sub_path":"examples/Pong/pong_pnet.py","file_name":"pong_pnet.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"42801697","text":"from server import controllers\nimport model\nimport os\n\n# This is the default server configuration, in case the user will not provide one.\n# The Application is configured to run on local-host and port 9999\n# The brender.sqlite database will be created inside of the server folder\ncontrollers.app.config.update(\n DEBUG=False,\n HOST='localhost',\n PORT=9999,\n DATABASE=os.path.join(os.path.dirname(controllers.__file__), 'brender.sqlite')\n)\n\ndef serve(user_config=None):\n config = controllers.app.config\n\n if user_config:\n config.from_object(user_config)\n\n model.DATABASE = config['DATABASE']\n model.create_database()\n\n # Set SEVER_NAME value according to application configuration\n config.update(\n SERVER_NAME=\"%s:%d\" % (config['HOST'], config['PORT'])\n )\n\n #controllers.app.run(host='0.0.0.0')\n \n # Run 
application\n    controllers.app.run(\n        controllers.app.config['HOST'],\n        controllers.app.config['PORT'],\n    )\n","sub_path":"server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"170361848","text":"# -*- coding: utf8 -*-\n\"\"\"\nCreated on Tue Mar 21 03:30:36 2017\n\n@author: Katsuhiro\n\"\"\"\n\n## rawImage To Image\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.misc\n#from Image \n\nwidth = 640*2\nheight = 360*2\nwid_hei = width*height\n\nfor j in range(300):\n    i = j+1\n    filename = \"rawImage_s\"+str(i)+\".rawdata\"\n\n    data = np.fromfile(filename,np.dtype('u1'),wid_hei*4)\n    data = data.reshape(height,width,4)\n\n    scipy.misc.toimage(data).save('conv'+str(i)+'.png')","sub_path":"MD-GPU-Grid-SL/rawImages/imageConverter.py","file_name":"imageConverter.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"283717672","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport pika\n\ncredentials = pika.PlainCredentials('cqcrm', 'cqcrm')\n# connect to the RabbitMQ broker\nconnection = pika.BlockingConnection(\n    pika.ConnectionParameters('192.168.99.131', 5672, '/', credentials))\n# create the channel\nchannel = connection.channel()\n##channel.queue_delete(queue='hello')\nchannel.queue_declare(queue='hello')\n\n\ndef callback(ch, method, properties, body):\n    print(\" [x] Received %r\" % body)\n    import time\n    time.sleep(1)\n    print('ok')\n    #ch.basic_ack(delivery_tag=method.delivery_tag)  # mainly use this line\n\n\nchannel.basic_qos(prefetch_count=1)\nchannel.basic_consume(callback,\n                      queue='hello',\n                      no_ack=True)\n\nprint(' [*] Waiting for messages. 
To exit press CTRL+C')\nchannel.start_consuming()\n","sub_path":"code/mrabbitmq/src/main/python/rabbmitmq_consumer.py","file_name":"rabbmitmq_consumer.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"169965147","text":"import json\nfrom logging import getLogger\nfrom traceback import format_exc\n\nfrom bottle import (\n    Bottle, abort, request, response, run, static_file\n)\n\nfrom eden.commands import handle_command\nfrom eden.constants import STATIC_ROOT\nfrom eden.exceptions import NotFound, ValidationError\n\napp = Bottle()\nLOGGER = getLogger(__name__)\n\n\ndef build_response(result, status_code):\n    \"\"\"\n    Set up the response to the caller.\n\n    :param result:\n        A dictionary containing the result of the command.\n\n    :param status_code:\n        The HTTP status code to be used.\n\n    \"\"\"\n    response.content_type = 'application/json'\n    response.body = json.dumps(result)\n    response.status = status_code\n    return response\n\ndef handle_request(command, status_code=200, **extra_params):\n    \"\"\"\n    Handle a request.\n\n    This function extracts the request parameters from the\n    HTTP request, executes the command, and returns the\n    result as JSON.\n\n    :param command:\n        Name of the command.\n\n    :param status_code:\n        The status code to return (``200 OK`` by default).\n\n    :param extra_params:\n        Any additional arguments passed via the URL.\n\n    \"\"\"\n    if not request.headers.get('X-Requested-With') == 'XMLHttpRequest':\n        LOGGER.error('Rejecting non-AJAX request')\n        abort(403, 'Not an AJAX request')\n\n    if request.method == 'GET':\n        params = request.query.decode()\n    else:\n        params = request.forms.decode()\n\n    params.update(extra_params)\n    LOGGER.debug('Command: {0} params={1}'.format(command, params))\n    try:\n        result = handle_command(command, **params)\n        if result is None:\n            result = {}\n        return build_response(result, status_code)\n\n    except NotFound:\n        # This should never happen!\n        LOGGER.error('404')\n        abort(404, 'Record not found')\n\n    except ValidationError as error:\n        # Send a 400 response with the errors\n        errors = str(error).split('\\n')\n        result = {'errors': errors}\n        LOGGER.error(errors)\n        return build_response(result, 400)\n\n    except Exception as error:\n        # It's all gone badly wrong\n        LOGGER.exception('Unexpected error:')\n        abort(503, 'An internal error occurred:\\n{0}'.format(format_exc()))\n\n\n#####################\n#                   #\n#       VIEWS       #\n#                   #\n#####################\n\n\n@app.get('/hello')\ndef hello():\n    return \"Hello again! STATIC_ROOT = {0}\".format(STATIC_ROOT)\n\n@app.get('/')\ndef main():\n    return static_file('/templates/main.html', root=STATIC_ROOT)\n\n\n@app.get('/menu/