diff --git "a/2343.jsonl" "b/2343.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2343.jsonl"
@@ -0,0 +1,768 @@
+{"seq_id":"359882119","text":"#!/usr/bin/python3\n# London Academy of IT\n# Python Exercise 23\n'''\nWrite a program that gives information about James Bond Films. The program gives 4 chances to name an actor who has played\nBond and then say whether you are right or not. They should use the text below in the messages output (up to the names of the actors and so\nfilms given). Finally they should print how well you did giving a score out of 4.\n'''\n\nmovies = {\"Sean Connery\" : \"From Russia with Love\",\n \"Roger Moore\": \"Live and let Die\",\n \"Pierce Brosnan\": \"Die Another Day\",\n \"Daniel Craig\": \"Skyfall\"}\n\nprint(\"Try and name 4 actors who have played James Bond.\")\ni = 1\nb = 0\nwhile i <= 4:\n a = input(\"Attemp {} - Name an actor: \".format(i))\n if a in movies:\n print(\"Well Done! {}, was in {}\".format(a, movies.get(a)))\n b += 1\n else:\n print(\"Sorry {}, hasn't played any James Bond movies\".format(a))\n i += 1\nprint(\"You got {} out of 4\".format(b))\n","sub_path":"londonacademy_beginner/londonitacademy_exercise/23.0-Exercise.py","file_name":"23.0-Exercise.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"412478812","text":"\"\"\"\nEvaluator settings.\n\"\"\"\n\nimport os\n\nimport tensorflow as tf\nfrom obspy.core.utcdatetime import UTCDateTime\nfrom datetime import datetime\n\nfrom seisnn.core import Instance\nfrom seisnn.model.attention import TransformerBlockE, TransformerBlockD, \\\n MultiHeadSelfAttention, ResBlock\nfrom seisnn.plot import plot_error_distribution\nimport seisnn.example_proto\nimport seisnn.io\nimport seisnn.sql\nimport seisnn.utils\n\n\nclass BaseEvaluator:\n @staticmethod\n def get_dataset_length(database=None):\n count = None\n try:\n db = seisnn.sql.Client(database)\n count = len(db.get_waveform().all())\n except Exception as error:\n print(f'{type(error).__name__}: {error}')\n\n return count\n\n @staticmethod\n def get_model_dir(model_instance):\n config = seisnn.utils.Config()\n save_model_path = os.path.join(config.models, model_instance)\n return save_model_path\n\n @staticmethod\n def get_eval_dir(dir_name):\n config = seisnn.utils.Config()\n eval_path = os.path.join(config.eval, dir_name.split('.')[0])\n seisnn.utils.make_dirs(eval_path)\n\n return eval_path\n\n\nclass GeneratorEvaluator(BaseEvaluator):\n \"\"\"\n Trainer class.\n \"\"\"\n\n def __init__(self,\n database=None,\n model_name=None):\n \"\"\"\n Initialize the evaluator.\n\n :param database: SQL database.\n :param model_name: Saved model.\n \"\"\"\n self.database = database\n self.model_name = model_name\n self.model = None\n\n def eval(self, tfr_list, batch_size=100):\n \"\"\"\n Main eval loop.\n\n :param tfr_list: List of .tfrecord.\n :param str name: Output name.\n \"\"\"\n model_path = self.get_model_dir(self.model_name)\n eval_path = self.get_eval_dir(self.model_name)\n\n self.model = tf.keras.models.load_model(\n model_path,\n custom_objects={\n 'TransformerBlockE': TransformerBlockE,\n 'TransformerBlockD': TransformerBlockD,\n 'MultiHeadSelfAttention': MultiHeadSelfAttention,\n 'ResBlock': ResBlock\n })\n\n data_len = self.get_dataset_length(self.database)\n progbar = tf.keras.utils.Progbar(data_len)\n dataset = seisnn.io.read_dataset(tfr_list)\n n = 0\n for val in dataset.prefetch(100).batch(batch_size):\n progbar.add(batch_size)\n\n val['predict'] = self.model.predict(val['trace'])\n\n iterator = seisnn.example_proto.batch_iterator(val)\n for i in range(len(val['predict'])):\n title = f\"eval_{n:0>5}\"\n\n instance = Instance(next(iterator))\n instance.to_tfrecord(\n os.path.join(eval_path, title + '.tfrecord'))\n n += 1\n\n val['id'] = tf.convert_to_tensor(\n title.encode('utf-8'), dtype=tf.string)[tf.newaxis]\n\n example = next(seisnn.example_proto.batch_iterator(val))\n instance = Instance(example)\n instance.to_tfrecord(os.path.join(eval_path, title + '.tfrecord'))\n n += 1\n\n def score(self, delta=0.1, error_distribution=True):\n db = seisnn.sql.Client(self.database)\n for phase in ['P', 'S']:\n tp = 0\n error = []\n predict_pick = db.get_picks(phase=phase, tag='predict')\n label_pick = db.get_picks(phase=phase, tag='manual')\n total_predict = len(predict_pick.all())\n total_label = len(label_pick.all())\n print(f'{phase}_total_predict: {total_predict} '\n f'{phase}_total_label: {total_label}')\n\n for pick in predict_pick:\n from_time, to_time = get_from_time_to_time(pick, delta)\n label = db.get_picks(\n from_time=str(from_time),\n to_time=str(to_time),\n phase=phase,\n station=pick.station,\n tag='manual'\n )\n if label.all():\n tp = tp + 1\n if error_distribution:\n error.append(UTCDateTime(label[0].time) - UTCDateTime(\n pick.time))\n 
plot_error_distribution(error)\n print(\n f'{phase}: tp = {tp},fp = {total_predict - tp},fn = {total_label - tp}')\n precision, recall, f1 = seisnn.qc.precision_recall_f1_score(\n true_positive=tp, val_count=total_label,\n pred_count=total_predict)\n print(\n f'{phase}: precision = {precision},recall = {recall},f1 = {f1}')\n\n\ndef get_from_time_to_time(pick, delta=0.1):\n from_time = UTCDateTime(pick.time) - delta\n from_time = datetime.strptime(str(from_time), '%Y-%m-%dT%H:%M:%S.%fZ')\n to_time = UTCDateTime(pick.time) + delta\n to_time = datetime.strptime(str(to_time), '%Y-%m-%dT%H:%M:%S.%fZ')\n return from_time, to_time\n","sub_path":"seisnn/model/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"122820411","text":"def foo11():\n import sys\n sf = sys.argv[1]\n df = sys.argv[2]\n sf_obj = open(sf,'rb')\n df_obj = open(df,'wb')\n while True:\n data = sf_obj.read(4096)\n df_obj.write(data)\n if not data:\n break\n df_obj.close()\n sf_obj.close()\n\nfoo11()","sub_path":"STEP05/project/python/day03/cp4.py","file_name":"cp4.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"13410759","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# JewelCraft jewelry design toolkit for Blender.\n# Copyright (C) 2015-2019 Mikhail Rachinskiy\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\nimport os\n\nimport bpy.utils.previews\nfrom bpy.app.translations import pgettext_iface as _\n\nfrom .. import var\nfrom . import asset\n\n\n_cache = {}\n\n\ndef _iface_lang(context):\n view = context.preferences.view\n\n if view.use_international_fonts and view.use_translate_interface:\n return view.language\n\n return \"DEFAULT\"\n\n\ndef cuts(self, context):\n lang = _iface_lang(context)\n\n if _cache.get(\"cuts__lang\") == lang:\n return _cache[\"cuts__list\"]\n\n pcoll = var.preview_collections.get(\"cuts\")\n\n if not pcoll:\n pcoll = bpy.utils.previews.new()\n\n for entry in os.scandir(var.GEM_ASSET_DIR):\n if entry.name.endswith(\".png\"):\n name = os.path.splitext(entry.name)[0]\n pcoll.load(name, entry.path, \"IMAGE\")\n\n var.preview_collections[\"cuts\"] = pcoll\n\n list_ = (\n (\"ROUND\", _(\"Round\", \"JewelCraft\"), \"\", pcoll[\"round\"].icon_id, 0),\n (\"OVAL\", _(\"Oval\"), \"\", pcoll[\"oval\"].icon_id, 1),\n (\"CUSHION\", _(\"Cushion\"), \"\", pcoll[\"cushion\"].icon_id, 2),\n (\"PEAR\", _(\"Pear\"), \"\", pcoll[\"pear\"].icon_id, 3),\n (\"MARQUISE\", _(\"Marquise\"), \"\", pcoll[\"marquise\"].icon_id, 4),\n (\"PRINCESS\", _(\"Princess\"), \"\", pcoll[\"princess\"].icon_id, 5),\n (\"BAGUETTE\", _(\"Baguette\"), \"\", pcoll[\"baguette\"].icon_id, 6),\n (\"SQUARE\", _(\"Square\"), \"\", pcoll[\"square\"].icon_id, 7),\n (\"EMERALD\", _(\"Emerald\"), \"\", pcoll[\"emerald\"].icon_id, 8),\n (\"ASSCHER\", _(\"Asscher\"), \"\", pcoll[\"asscher\"].icon_id, 9),\n (\"RADIANT\", _(\"Radiant\"), \"\", pcoll[\"radiant\"].icon_id, 10),\n (\"FLANDERS\", _(\"Flanders\"), \"\", pcoll[\"flanders\"].icon_id, 11),\n (\"OCTAGON\", _(\"Octagon\"), \"\", pcoll[\"octagon\"].icon_id, 12),\n (\"HEART\", _(\"Heart\"), \"\", pcoll[\"heart\"].icon_id, 13),\n (\"TRILLION\", _(\"Trillion\"), \"\", pcoll[\"trillion\"].icon_id, 14),\n (\"TRILLIANT\", _(\"Trilliant\"), \"\", pcoll[\"trilliant\"].icon_id, 15),\n (\"TRIANGLE\", _(\"Triangle\"), \"\", pcoll[\"triangle\"].icon_id, 16),\n )\n\n _cache[\"cuts__list\"] = list_\n _cache[\"cuts__lang\"] = lang\n\n return list_\n\n\ndef stones(self, context):\n lang = _iface_lang(context)\n\n if _cache.get(\"stones__lang\") == lang:\n return _cache[\"stones__list\"]\n\n list_ = [\n (\"DIAMOND\", _(\"Diamond\"), \"\", 0),\n (\"ALEXANDRITE\", _(\"Alexandrite\"), \"\", 1),\n (\"AMETHYST\", _(\"Amethyst\"), \"\", 2),\n (\"AQUAMARINE\", _(\"Aquamarine\"), \"\", 3),\n (\"CITRINE\", _(\"Citrine\"), \"\", 4),\n (\"CUBIC_ZIRCONIA\", _(\"Cubic Zirconia\"), \"\", 5),\n (\"EMERALD\", _(\"Emerald\"), \"\", 6),\n (\"GARNET\", _(\"Garnet\"), \"\", 7),\n (\"MORGANITE\", _(\"Morganite\"), \"\", 8),\n (\"QUARTZ\", _(\"Quartz\"), \"\", 9),\n (\"PERIDOT\", 
_(\"Peridot\"), \"\", 10),\n (\"RUBY\", _(\"Ruby\"), \"\", 11),\n (\"SAPPHIRE\", _(\"Sapphire\"), \"\", 12),\n (\"SPINEL\", _(\"Spinel\"), \"\", 13),\n (\"TANZANITE\", _(\"Tanzanite\"), \"\", 14),\n (\"TOPAZ\", _(\"Topaz\"), \"\", 15),\n (\"TOURMALINE\", _(\"Tourmaline\"), \"\", 16),\n (\"ZIRCON\", _(\"Zircon\"), \"\", 17),\n ]\n\n list_.sort(key=lambda x: x[1])\n\n _cache[\"stones__list\"] = list_\n _cache[\"stones__lang\"] = lang\n\n return list_\n\n\n# Weighting\n# ---------------------------\n\n\ndef weighting_set(self, context):\n\n if \"weighting_set__list\" in _cache:\n return _cache[\"weighting_set__list\"]\n\n prefs = bpy.context.scene.jewelcraft_preset\n list_ = []\n\n if not prefs.weighting_hide_default_sets:\n list_ += [\n (\n \"JCASSET_PRECIOUS\",\n \"[JewelCraft] Precious\",\n \"Commonly used precious alloys, physical properties taken directly from suppliers\"\n ),\n (\n \"JCASSET_PRECIOUS_RU\",\n \"[JewelCraft] Precious RU (ГОСТ 30649-99)\",\n \"Set of precious alloys according to Russian regulations\"\n ),\n (\n \"JCASSET_BASE\",\n \"[JewelCraft] Base\",\n \"Set of base metal alloys, physical properties taken directly from suppliers\"\n ),\n ]\n\n folder = asset.user_asset_library_folder_weighting()\n\n if os.path.exists(folder):\n for entry in os.scandir(folder):\n if entry.is_file() and entry.name.endswith(\".json\"):\n id_ = entry.name\n name_ = os.path.splitext(entry.name)[0] + \" \" # Add trailing space so UI translation won't apply\n list_.append((id_, name_, \"\"))\n\n if not list_:\n list_ = [(\"\", \"\", \"\")]\n\n _cache[\"weighting_set__list\"] = list_\n\n return list_\n\n\ndef weighting_set_refresh(self=None, context=None):\n if \"weighting_set__list\" in _cache:\n del _cache[\"weighting_set__list\"]\n\n\n# Assets\n# ---------------------------\n\n\ndef asset_folders(self, context):\n\n if \"asset_folders__list\" in _cache:\n return _cache[\"asset_folders__list\"]\n\n folder = asset.user_asset_library_folder_object()\n\n if not os.path.exists(folder):\n _cache[\"asset_folders__list\"] = [(\"\", \"\", \"\")]\n return [(\"\", \"\", \"\")]\n\n list_ = []\n\n for entry in os.scandir(folder):\n\n if entry.is_dir() and not entry.name.startswith(\".\"):\n id_ = entry.name\n name_ = entry.name + \" \" # Add trailing space so UI translation won't apply\n list_.append((id_, name_, \"\"))\n\n if not list_:\n list_ = [(\"\", \"\", \"\")]\n\n _cache[\"asset_folders__list\"] = list_\n\n return list_\n\n\ndef assets(self, context):\n category = context.scene.jewelcraft.asset_folder\n\n if \"assets__list\" in _cache and category == _cache.get(\"assets__category\"):\n return _cache[\"assets__list\"]\n\n _cache[\"assets__category\"] = category\n folder = os.path.join(asset.user_asset_library_folder_object(), category)\n\n if not os.path.exists(folder):\n _cache[\"assets__list\"] = [(\"\", \"\", \"\")]\n return [(\"\", \"\", \"\")]\n\n pcoll = var.preview_collections.get(\"assets\")\n\n if not pcoll:\n pcoll = bpy.utils.previews.new()\n\n list_ = []\n i = 0\n no_preview = var.preview_collections[\"icons\"][\"NO_PREVIEW\"].icon_id\n\n for entry in os.scandir(folder):\n\n if entry.is_file() and entry.name.endswith(\".blend\"):\n filename = os.path.splitext(entry.name)[0]\n id_ = filename\n name_ = filename + \" \" # Add trailing space so UI translation won't apply\n\n preview_id = category + filename\n preview_path = os.path.splitext(entry.path)[0] + \".png\"\n\n if os.path.exists(preview_path):\n if preview_id not in pcoll:\n pcoll.load(preview_id, preview_path, \"IMAGE\")\n 
preview = pcoll[preview_id].icon_id\n else:\n preview = no_preview\n\n list_.append((id_, name_, \"\", preview, i))\n i += 1\n\n var.preview_collections[\"assets\"] = pcoll\n\n if not pcoll:\n bpy.utils.previews.remove(pcoll)\n del var.preview_collections[\"assets\"]\n\n if not list_:\n list_ = [(\"\", \"\", \"\")]\n\n _cache[\"assets__list\"] = list_\n\n return list_\n\n\ndef asset_folder_list_refresh():\n if \"asset_folders__list\" in _cache:\n del _cache[\"asset_folders__list\"]\n\n\ndef asset_list_refresh(preview_id=False, hard=False):\n pcoll = var.preview_collections.get(\"assets\")\n\n if pcoll:\n\n if preview_id and preview_id in pcoll:\n del pcoll[preview_id]\n\n if not pcoll:\n bpy.utils.previews.remove(pcoll)\n del var.preview_collections[\"assets\"]\n\n elif hard:\n bpy.utils.previews.remove(pcoll)\n del var.preview_collections[\"assets\"]\n\n if \"assets__list\" in _cache:\n del _cache[\"assets__list\"]\n","sub_path":"All_In_One/addons/learnbgame/jewelcraft/lib/dynamic_list.py","file_name":"dynamic_list.py","file_ext":"py","file_size_in_byte":9031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"49393515","text":"import pandas as pd\nimport math\nfrom scipy.stats import norm\nimport numpy as np\n\ntabulato = pd.read_csv(\"../dati_pre_eventi/data/tabulato_semplificato.csv\")\neventi3 = pd.read_csv(\"creazione_eventi/data/events/events3.csv\")\nparts = {}\nfinestra = 43200\nminT = tabulato[\"timestamp\"].min()\nmaxT = tabulato[\"timestamp\"].max()\n\n\ndef group():\n global eventi3\n eventi3[\"chiamate\"] = eventi3[\"esito_positivo\"] + eventi3[\"esito_negativo\"]\n eventi3 = eventi3.drop(columns=[\"esito_positivo\", \"esito_negativo\"])\n events3_df_grouped = eventi3.groupby(by=[\"timestamp\", \"X\", \"Y\", \"A\", \"Z\"], as_index=False).sum()\n eventi3 = events3_df_grouped\n\n\ndef get_part_a(x, y):\n eventsxy = eventi3[eventi3[\"X\"] == x]\n eventsxy = eventsxy[eventsxy[\"Y\"] == y]\n intervals = []\n\n if len(eventsxy) != 0:\n start = 0\n start_finestra = True\n prec = 0\n end = 0\n for i, row in eventsxy.iterrows():\n if start_finestra:\n start = row[\"timestamp\"]\n start_finestra = False\n if row[\"timestamp\"] > start + finestra:\n intervals.append((start, prec + finestra))\n start = row[\"timestamp\"]\n if i == len(eventsxy) - 1:\n end = row[\"timestamp\"]\n prec = row[\"timestamp\"]\n\n intervals.append((start, end + finestra))\n return intervals\n\n\ndef merge_parts(part1, part2):\n part1 = [item for sublist in part1 for item in sublist]\n part2 = [item for sublist in part2 for item in sublist]\n timestamps = part1 + part2\n timestamps.sort()\n part = []\n if len(timestamps) != 0:\n start = 0\n start_finestra = True\n prec = 0\n end = 0\n for i, item in enumerate(timestamps):\n if start_finestra:\n start = item\n start_finestra = False\n if item > start + finestra:\n part.append((start, prec + finestra))\n start = item\n if i == len(timestamps) - 1:\n end = item\n prec = item\n part.append((start, end + finestra))\n return part\n\n\ndef get_part_b(partA1, partA2):\n partA = merge_parts(partA1, partA2)\n intervals = []\n if len(partA) != 0:\n start = minT\n for item in partA:\n end = item[0] - 1\n intervals.append((start, end))\n start = item[1] + 1\n intervals.append((start, maxT))\n return intervals\n else:\n return [(minT, maxT)]\n\n\ndef get_p0(x, y, a, z):\n partB = get_part_b(parts[(x, y)][\"A\"], parts[(y, a)][\"A\"])\n tabulato_az = tabulato[tabulato[\"mittente\"] == a]\n tabulato_az = tabulato_az[tabulato_az[\"destinatario\"] == z]\n tabulato_az.reset_index(inplace=True, drop=True)\n c = 0\n for _, row in tabulato_az.iterrows():\n timestamp = row[\"timestamp\"]\n for item in partB:\n if item[0] <= timestamp <= item[1]:\n c += 1\n break\n if timestamp < item[0]:\n break\n return c / len(tabulato)\n\n\ndef get_p_cappuccio(x, y, a, z):\n events_xya = eventi3[eventi3[\"X\"] == x]\n events_xya = events_xya[events_xya[\"Y\"] == y]\n events_xya = events_xya[events_xya[\"A\"] == a]\n events_xya.reset_index(inplace=True, drop=True)\n events_xyaz = events_xya[events_xya[\"Z\"] == z]\n events_xyaz.reset_index(inplace=True, drop=True)\n return events_xyaz[\"chiamate\"].sum() / eventi3[\"chiamate\"].sum() # TODO? 
over all events\n\n\ndef calcolo_p0_p_hat():\n    global tabulato\n\n    tabulato_coppie = tabulato\n    tabulato_coppie = tabulato_coppie.drop(\n        columns=[\"timestamp\", \"durata\", \"is_mittente_intercettato\", \"mittente_cella_start\", \"mittente_cella_end\",\n                 \"is_destinatario_intercettato\", \"destinatario_cella_start\", \"destinatario_cella_end\", \"esito_chiamata\",\n                 \"tipo\"])\n    tabulato_coppie.drop_duplicates(inplace=True)\n    tabulato_coppie.reset_index(inplace=True, drop=True)\n    for i, row in tabulato_coppie.iterrows():\n        x = row[\"mittente\"]\n        y = row[\"destinatario\"]\n        parts[(x, y)] = {}\n        parts[(x, y)][\"A\"] = get_part_a(x, y)\n\n    p0 = []\n    p_cappuccio = []\n    for i, row in eventi3.iterrows():\n        x = row[\"X\"]\n        y = row[\"Y\"]\n        a = row[\"A\"]\n        z = row[\"Z\"]\n        p0.append(get_p0(x, y, a, z))\n        p_cappuccio.append(get_p_cappuccio(x, y, a, z))\n    eventi3[\"p0\"] = p0\n    eventi3[\"p_cappuccio\"] = p_cappuccio\n    eventi3.to_csv(\"data/events3_sign.csv\", index=False)\n\n\ndef n_chiamate():\n    global tabulato\n    tabulato_coppie = tabulato\n    tabulato_coppie = tabulato_coppie.drop(\n        columns=[\"timestamp\", \"durata\", \"is_mittente_intercettato\", \"mittente_cella_start\", \"mittente_cella_end\",\n                 \"is_destinatario_intercettato\", \"destinatario_cella_start\", \"destinatario_cella_end\", \"esito_chiamata\",\n                 \"tipo\"])\n    tabulatof = tabulato_coppie.groupby(tabulato_coppie.columns.tolist()).size().reset_index().rename(\n        columns={0: 'n_telefonate'})\n    for i, row in tabulatof.iterrows():\n        parts[(row[\"mittente\"], row[\"destinatario\"])][\"n_telefonate\"] = row['n_telefonate']\n    return\n\n\ndef calcolo_z():\n    eventi_p = pd.read_csv(\"data/events3_sign.csv\")\n    z = []\n    for i, row in eventi_p.iterrows():\n        p_hat = row[\"p_cappuccio\"]\n        p0 = row[\"p0\"]  # if row[\"p0\"] != 0 else zero_probability_factor\n        n = parts[(row[\"X\"], row[\"Y\"])][\"n_telefonate\"]\n        root = math.sqrt((p0 * (1 - p0)) / n)\n        z.append((p_hat - p0) / root)\n    eventi_p[\"z\"] = z\n    eventi_p.to_csv(\"data/events3_sign.csv\", index=False)\n\n    return\n\n\ndef calcolo_sign():\n    eventi_z = pd.read_csv(\"data/events3_sign.csv\")\n    sign = []\n    for i, row in eventi_z.iterrows():\n        p = 1 - norm.cdf(row[\"z\"])\n        sign.append(p)\n    eventi_z[\"significativo\"] = sign\n    eventi_z.to_csv(\"data/events3_sign.csv\", index=False)\n    return\n\n\nif __name__ == \"__main__\":\n    print(\"Start\")\n    group()\n    calcolo_p0_p_hat()\n    n_chiamate()\n    calcolo_z()\n    calcolo_sign()\n    print(\"End\")\n","sub_path":"window_43200/events3_sign.py","file_name":"events3_sign.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"336088774","text":"import sys\nimport time\n\nsys.path.insert(0, '/home/sshann/Documents/thesis/experiments/android-runner-configuration/')\n\nfrom scripts.interaction.python3.common import tap\nfrom scripts.interaction.python3.common import tap_phone_back\nfrom scripts.interaction.python3.common import write_username\nfrom scripts.interaction.python3.common import write_password\n\n\ndef login(device):\n print('\\tlogin')\n\n # click on username\n tap(device, 310, 602, 1)\n write_username(device)\n\n # click on password\n tap(device, 297, 858, 1)\n write_password(device)\n\n # click on login\n tap(device, 1210, 1056, 6)\n\n\ndef visit_global_stories(device):\n print('\\tvisit_global_stories')\n\n # click on global stories\n tap(device, 594, 330, 10)\n\n # click on card 1\n tap(device, 436, 453, 12)\n tap_phone_back(device)\n\n # click on card 2\n tap(device, 435, 858)\n tap_phone_back(device)\n\n # return to front page\n tap_phone_back(device)\n\n # click on global stories\n tap(device, 594, 330, 10)\n\n # click on card 1\n tap(device, 436, 453)\n tap_phone_back(device)\n\n # return to front page\n tap_phone_back(device)\n\n\ndef visit_all_shared_stories(device):\n print('\\tvisit_all_shared_stories')\n\n # click on all shared stories\n tap(device, 553, 442, 10)\n\n # click on card 1\n tap(device, 436, 453, 12)\n tap_phone_back(device)\n\n # click on card 2\n tap(device, 435, 858)\n tap_phone_back(device)\n\n # return to front page\n tap_phone_back(device)\n\n # click on all shared stories\n tap(device, 553, 442, 10)\n\n # click on card 1\n tap(device, 436, 453)\n tap_phone_back(device)\n\n # return to front page\n tap_phone_back(device)\n\n\n# noinspection PyUnusedLocal\ndef main(device, *args, **kwargs):\n if device.current_activity().find('com.newsblur') != -1:\n time.sleep(4)\n run_news_blur_interaction(device)\n else:\n print('\\tSkip file')\n\n\ndef run_news_blur_interaction(device):\n print('\\tRunning interaction for NewsBlur')\n # Login works, but there seems to be something wrong with Newblur deleting accounts,\n # so it is best to be logged in the entire experiment\n # login(device)\n visit_global_stories(device)\n visit_all_shared_stories(device)\n","sub_path":"android-runner-configuration/scripts/interaction/python3/com_newsblur.py","file_name":"com_newsblur.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"359260572","text":"'''\nCreated on 30 Mar 2018\n\n@author: vwltam\nmodified by ho yu hin on 18/5\n'''\nimport plotly.plotly as py\nimport pandas as pd\nfrom plotly.graph_objs import *\n\npy.sign_in('ms042087', 'Ytktzy7FUBCOGMoFSX0w')\n\nhb=pd.read_csv(\"test.csv\")\n\ndata = Data([\n Scatter(\n y=hb[\"y\"],\n mode='lines+markers',\n name=\"'linear'\",\n hoverinfo='name',\n line=dict(\n shape='linear'\n )\n )\n])\n\nlayout = Layout(\n title='HeartBeat [Line Chart]',\n xaxis = dict(title = 'data'),\n yaxis = dict(title = 'HeartBeat rate'),\n font=Font(\n family='Courier'\n )\n)\n\nfig = Figure(data=data, layout=layout)\nplot_url = py.plot(data,filename='HeartBeat [Line]')\npy.image.save_as(fig, 'HeartBeat.png')\n","sub_path":"Project/BusSafetySystem/crypt/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"42109151","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom dataset import *\n\nfrom Utils import *\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='Hyper Score')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--epochs', type=int, default=40, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--step-size', type=int, default=30)\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.001)')\n parser.add_argument('--combine-trainval', action='store_true',\n help=\"train and val sets together for training, val set alone for validation\")\n parser.add_argument('--momentum', type=float, default=0, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--train', action='store_true')\n parser.add_argument('--use_AM', action='store_true')\n parser.add_argument('--save_result', action='store_true')\n parser.add_argument('--resume', action='store_true')\n parser.add_argument('--data-path', type=str, default='1fps_train_IDE_40/',\n metavar='PATH')\n parser.add_argument('-L', type=str, default='L2', choices=['L1', 'L2'])\n # parser.add_argument('--tracklet', type=int, default=20, choices=[20, 40])\n parser.add_argument('--window', type=str, default='300', choices=['Inf','75', '150', '300', '600', '1200'])\n parser.add_argument('--log-dir', type=str, default='', metavar='PATH')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\n args = parser.parse_args()\n args.log_dir = 'logs/{}/appear_only/'.format(args.L, ) + args.data_path + args.log_dir\n args.data_path = os.path.expanduser('~/Data/DukeMTMC/ground_truth/') + args.data_path\n if args.combine_trainval:\n train_data_path = args.data_path + 'hyperGT_{}_trainval_{}.h5'.format(args.L, args.window)\n else:\n train_data_path = args.data_path + 'hyperGT_{}_train_{}.h5'.format(args.L, args.window)\n if args.save_result:\n test_data_path = args.data_path + 'hyperGT_{}_train_Inf.h5'.format(args.L)\n else:\n test_data_path = args.data_path + 'hyperGT_{}_val_Inf.h5'.format(args.L)\n torch.manual_seed(args.seed)\n if not os.path.isdir(args.log_dir):\n os.mkdir(args.log_dir)\n\n trainset = SiameseHyperFeat(HyperFeat(train_data_path))\n testset = SiameseHyperFeat(HyperFeat(test_data_path), train=True)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n num_workers=4, pin_memory=True, shuffle=True)\n\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n # sampler=HyperScoreSampler(testset, 1024),\n num_workers=4, pin_memory=True)\n\n metric_net = nn.DataParallel(MetricNet(num_class=2)).cuda()\n if args.resume:\n checkpoint = torch.load(args.log_dir + '/metric_net_{}_{}.pth.tar'.format(args.L, args.window))\n model_dict = checkpoint['state_dict']\n metric_net.module.load_state_dict(model_dict)\n\n appear_motion_net = nn.DataParallel(AppearMotionNet()).cuda()\n criterion = nn.CrossEntropyLoss().cuda()\n\n if args.train:\n # Draw Curve\n x_epoch = []\n train_loss_s = []\n train_prec_s = []\n test_loss_s = []\n test_prec_s = []\n optimizer = optim.SGD(metric_net.parameters(), lr=args.lr, 
momentum=args.momentum, weight_decay=0.005)\n        if not args.resume:\n            for epoch in range(1, args.epochs + 1):\n                train_loss, train_prec = train(args, metric_net, appear_motion_net, train_loader, optimizer, epoch,\n                                               criterion)\n                test_loss, test_prec = test(args, metric_net, appear_motion_net, test_loader, criterion)\n                x_epoch.append(epoch)\n                train_loss_s.append(train_loss)\n                train_prec_s.append(train_prec)\n                test_loss_s.append(test_loss)\n                test_prec_s.append(test_prec)\n                draw_curve(args, x_epoch, train_loss_s, train_prec_s, test_loss_s, test_prec_s)\n            torch.save({'state_dict': metric_net.module.state_dict(), }, args.log_dir + '/metric_net_{}_{}.pth.tar'.\n                       format(args.L, args.window))\n        else:\n            test(args, metric_net, appear_motion_net, test_loader, criterion, )\n\n    x_epoch = []\n    train_loss_s = []\n    train_prec_s = []\n    test_loss_s = []\n    test_prec_s = []\n    # train appear_motion_net (optimize its parameters, not the metric net's)\n    optimizer = optim.SGD(appear_motion_net.parameters(), lr=0.1 * args.lr, momentum=args.momentum)\n    if args.use_AM:\n        for epoch in range(1, args.epochs + 1):\n            train_loss, train_prec = train(args, metric_net, appear_motion_net, train_loader, optimizer, epoch,\n                                           criterion, train_motion=True)\n            test_loss, test_prec = test(args, metric_net, appear_motion_net, test_loader, criterion,\n                                        test_motion=True)\n            x_epoch.append(epoch)\n            train_loss_s.append(train_loss)\n            train_prec_s.append(train_prec)\n            test_loss_s.append(test_loss)\n            test_prec_s.append(test_prec)\n            draw_curve(args, x_epoch, train_loss_s, train_prec_s, test_loss_s, test_prec_s, train_motion=True)\n        torch.save({'state_dict': appear_motion_net.module.state_dict(), },\n                   args.log_dir + '/appear_motion_net_{}_{}.pth.tar'.format(args.L, args.window))\n    if args.use_AM:\n        save_model_as_mat(args, metric_net.module, appear_motion_net.module)\n    else:\n        save_model_as_mat(args, metric_net.module, [])\n\n    checkpoint = torch.load(args.log_dir + '/metric_net_{}_{}.pth.tar'.format(args.L, args.window))\n    model_dict = checkpoint['state_dict']\n    metric_net.module.load_state_dict(model_dict)\n    if args.use_AM:\n        checkpoint = torch.load(args.log_dir + '/appear_motion_net_{}_{}.pth.tar'.format(args.L, args.window))\n        model_dict = checkpoint['state_dict']\n        appear_motion_net.module.load_state_dict(model_dict)\n    test(args, metric_net, appear_motion_net, test_loader, criterion,\n         test_motion=args.use_AM, save_result=args.save_result, epoch_max=100)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/hyper_score/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"260048092","text":"from bson import ObjectId\nfrom django import forms\nfrom models import *\n\n\nclass HumanForm(forms.Form):\n amount = forms.IntegerField()\n manager = forms.CharField(max_length=20)\n pet_name = forms.CharField(max_length=20)\n pet_age = forms.IntegerField()\n pet_specie = forms.CharField(max_length=20)\n job_name = forms.CharField(max_length=20)\n salary = forms.IntegerField()\n human_name = forms.CharField(max_length=20)\n human_age = forms.IntegerField()\n dateCreating = forms.DateTimeField()\n\n def __init__(self, *args, **kwargs):\n self.instance = kwargs.pop('instance', None)\n super(HumanForm, self).__init__(*args, **kwargs)\n if self.instance:\n self.fields['amount'].initial = self.instance.amount\n self.fields['pet_specie'].initial = self.instance.pet_specie\n self.fields['manager'].initial = self.instance.manager\n self.fields['pet_name'].initial = self.instance.pet_name\n self.fields['pet_age'].initial = self.instance.pet_age\n self.fields['job_name'].initial = self.instance.job_name\n self.fields['salary'].initial = self.instance.salary\n self.fields['human_name'].initial = self.instance.human_name\n self.fields['human_age'].initial = self.instance.human_age\n self.fields['dateCreating'].initial = self.instance.dateCreating\n\n def save(self, commit=True):\n human = self.instance if self.instance else Human()\n human.amount = self.cleaned_data['amount']\n human.pet_name = self.cleaned_data['pet_name']\n human.manager = self.cleaned_data['manager']\n human.pet_specie = self.cleaned_data['pet_specie']\n human.pet_age = self.cleaned_data['pet_age']\n human.job_name = self.cleaned_data['job_name']\n human.salary = self.cleaned_data['salary']\n human.human_name = self.cleaned_data['human_name']\n human.human_age = self.cleaned_data['human_age']\n human.dateCreating = self.cleaned_data['dateCreating']\n if commit:\n human.save()\n\n return human\n","sub_path":"Laba3/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"607858564","text":"from decimal import Decimal\n\nimport stripe\n\nfrom flask import (\n request, json, abort, jsonify, render_template, redirect, url_for)\nfrom flask_login import login_required, fresh_login_required\n\nfrom . import app, tryton\nfrom . import util\nfrom .compat import HTTPStatus\n\nCustomer = tryton.pool.get('account.payment.stripe.customer')\nCurrency = tryton.pool.get('currency.currency')\nJournal = tryton.pool.get('account.payment.journal')\nPayment = tryton.pool.get('account.payment')\nPaymentGroup = tryton.pool.get('account.payment.group')\nSale = tryton.pool.get('sale.sale')\n\n\n@app.route('/payment/setup_intent', methods={'POST'})\n@tryton.transaction()\n@login_required\ndef payment_setup_intent():\n stripe_account = util.get_stripe_account()\n party = util.get_session_party()\n try:\n if party.stripe_customers:\n customer = party.stripe_customers[0]\n if customer.stripe_setup_intent_id:\n return customer.stripe_setup_intent.client_secret\n else:\n cu = stripe.Customer.create(\n api_key=stripe_account.secret_key,\n description=party.rec_name,\n email=party.email)\n customer = Customer(\n party=party,\n stripe_account=stripe_account,\n stripe_customer_id=cu.id)\n setup_intent = stripe.SetupIntent.create(\n api_key=stripe_account.secret_key,\n customer=customer.stripe_customer_id)\n except stripe.error.StripeError as e:\n return (\n jsonify(e.user_message), HTTPStatus.INTERNAL_SERVER_ERROR)\n customer.stripe_setup_intent_id = setup_intent.id\n customer.save()\n return setup_intent.client_secret\n\n\n@app.route('/payment/setup_intent/update', methods={'POST'})\n@tryton.transaction()\n@login_required\ndef payment_setup_intent_update():\n party = util.get_session_party()\n customer = party.stripe_customers[0]\n setup_intent = customer.stripe_setup_intent\n customer.stripe_intent_update()\n if setup_intent:\n update_customer_payment(customer, setup_intent.payment_method)\n return ('', HTTPStatus.NO_CONTENT)\n\n\n@app.route('/payment/confirm/',\n methods={'GET'})\n@app.route('//payment/confirm/',\n methods={'GET'})\n@tryton.transaction()\n@fresh_login_required\ndef payment_confirm(payment):\n party = util.get_session_party()\n if payment.party != party:\n abort(HTTPStatus.FORBIDDEN)\n payment_intent = payment.stripe_payment_intent\n if not payment_intent:\n abort(HTTPStatus.FORBIDDEN)\n if isinstance(payment.origin, Sale):\n end_url = url_for('checkout_sale_end', order=payment.origin)\n else:\n end_url = url_for('payment_confirm_end')\n if payment_intent.status == 'succeeded':\n return redirect(end_url)\n return render_template('payment_confirm.html',\n payment=payment,\n stripe_key=util.get_stripe_account().publishable_key,\n end_url=end_url)\n\n\n@app.route('//payment/confirm/end')\n@tryton.transaction()\ndef payment_confirm_end():\n return render_template('payment_confirm_end.html')\n\n\n@app.route('/payment/cancel/',\n methods={'POST'})\n@tryton.transaction()\n@fresh_login_required\ndef payment_cancel(payment):\n party = util.get_session_party()\n if payment.party != party:\n abort(HTTPStatus.FORBIDDEN)\n payment_intent = payment.stripe_payment_intent\n if not payment_intent:\n abort(HTTPStatus.FORBIDDEN)\n if payment_intent.status in {'processing', 'succeeded'}:\n abort(HTTPStatus.FORBIDDEN)\n if payment_intent.status != 'canceled':\n payment_intent.cancel()\n Payment.fail([payment])\n if isinstance(payment.origin, Sale):\n return redirect(url_for(\n 'checkout_sale', order=payment.origin, step='payment'))\n return 
redirect(url_for('index'))\n\n\n@app.route('/payment/source/register', methods={'POST'})\n@tryton.transaction()\ndef payment_source_register():\n stripe_account = util.get_stripe_account()\n source = json.loads(request.form['source'])\n source = stripe.Source.retrieve(\n source['id'], api_key=stripe_account.secret_key)\n party = util.get_session_party()\n if not party:\n abort(HTTPStatus.BAD_REQUEST)\n if source.type == 'sepa_debit' and not party.sepa_debit_allowed:\n abort(HTTPStatus.FORBIDDEN)\n try:\n if party.stripe_customers:\n customer = party.stripe_customers[0]\n cu = customer.retrieve()\n cu.sources.create(source=source['id'])\n cu.save()\n else:\n cu = stripe.Customer.create(\n api_key=stripe_account.secret_key,\n source=source['id'])\n customer = Customer(\n party=party,\n stripe_account=stripe_account,\n stripe_customer_id=cu.id)\n except stripe.error.StripeError as e:\n return (\n jsonify(e.user_message), HTTPStatus.INTERNAL_SERVER_ERROR)\n customer.save()\n Customer._sources_cache.clear()\n update_customer_payment(customer, source['id'])\n return ('', HTTPStatus.NO_CONTENT)\n\n\n@app.route('/payment/register', methods={'POST'})\n@tryton.transaction()\ndef payment_register():\n source = json.loads(request.form['source'])\n party = util.get_session_party()\n if not party:\n abort(HTTPStatus.BAD_REQUEST)\n currency, = Currency.search([\n ('code', '=', source['currency'].upper()),\n ])\n payment = Payment()\n payment.company = util.get_company()\n payment.kind = 'receivable'\n payment.party = party\n payment.origin = request.args.get('origin', None)\n if not payment.origin:\n payment.account = util.get_deposit_account()\n payment.amount = Decimal(source['amount']) / (10 ** currency.digits)\n payment.journal = util.get_stripe_journal()\n payment.stripe_token = source['id']\n payment.stripe_chargeable = source['status'] == 'chargeable'\n if isinstance(payment.origin, Sale):\n Sale.quote([payment.origin])\n payment.save()\n Payment.approve([payment])\n group = PaymentGroup(\n company=payment.company, journal=payment.journal, kind=payment.kind)\n group.save()\n Payment.process([payment], lambda: group)\n return ('', HTTPStatus.NO_CONTENT)\n\n\n@app.route('/payment/wait', methods={'GET'})\n@tryton.transaction()\ndef payment_wait():\n return render_template('payment_wait.html',\n stripe_key=util.get_stripe_account().publishable_key,\n source=request.args.get('source'),\n client_secret=request.args.get('client_secret'),\n next=util.get_redirect_target())\n\n\ndef update_customer_payment(customer, payment):\n sales = util.get_session_sales()\n current_sale = util.get_session_sale()\n for sale in sales:\n if not sale.stripe_customer:\n sale.stripe_customer = customer\n if (sale.stripe_customer == customer\n and (not sale.stripe_customer_payment\n or sale == current_sale)):\n sale.stripe_customer_payment = payment\n Sale.save(sales)\n subscription = util.get_session_subscription()\n if subscription:\n if not subscription.stripe_customer:\n subscription.stripe_customer = customer\n if not subscription.stripe_customer_payment:\n subscription.stripe_customer_payment = payment\n subscription.save()\n","sub_path":"fruit/webshop/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"334462962","text":"import re\n\nPATERN_VARIABLE = [\n r'[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{1,}\\s*',\n r'[A-Za-z0-9_]{1,}\\s{0,}<[A-Za-z0-9_]{1,}>\\s{0,}[A-Za-z0-9_]{1,}',\n r'[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{1,}',\n\n r'[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{1,}\\s{0,};',\n r'[A-Za-z0-9_]{1,}\\s*<[A-Za-z0-9_]{1,}>\\s*[A-Za-z0-9_]{0,};',\n r'[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{0,};',\n]\n\n\n\n\n\n\ndico_stemming = {\n \"def\": \"d\",\n \"while\": \"w\",\n \"for\": \"f\",\n \"elif\": \"ei\",\n \"else\": \"e\",\n \"<=\": \"c\",\n \">=\": \"c\",\n \"<\": \"c\",\n \">\": \"c\",\n \"!=\": \"!\",\n \"==\": \"-\",\n \"and\": \"&\",\n \"or\": \"|\",\n \"int\": \"i\",\n \"bool\": \"bo\",\n \"string\": \"s\",\n \"long \": \"l\",\n \"char\":\"ch\",\n \"float \": \"fl\",\n \"void\": \"v\",\n \"double\":\"db\",\n \"break\":\"b\",\n \"case\":\"cs\",\n \"try\":\"t\",\n \"catch\":\"ct\",\n \"class\":\"cl\",\n \"continue\":\"cn\",\n \"default\":\"df\",\n \"const\": \"cst\",\n \"delete\": \"dl\" ,\n \"enum\":\"en\",\n \"private\":\"pri\",\n \"protected\":\"pro\",\n \"public\":\"pub\",\n \"extern\": \"ex\",\n \"friend\": \"fr\",\n \"goto\": \"got\",\n \"inline\": \"inl\",\n \"new\": \"nw\",\n \"operator\": \"op\",\n \"register\": \"rg\",\n \"return\": \"rt\",\n \"short\": \"sh\",\n \"signed\": \"sg\",\n \"sizeof\": \"sz\",\n \"static\": \"stc\",\n \"struct\": \"str\",\n \"switch\": \"sw\",\n \"typedef\": \"td\",\n \"template\": \"tmp\",\n \"throw\": \"th\",\n \"union\": \"un\",\n \"unsigned\": \"uns\",\n \"virtual\": \"vr\",\n \"volatile\": \"vl\",\n\n \"alignas\": \"ag\",\n \"alignof\": \"agn\",\n \"and_eq\": \"&e\",\n \"auto\": \"au\",\n \"bitand\": \"btd\",\n \"bitor\": \"bt\",\n \"char16_t\": \"c16\",\n \"char32_t\": \"c32\",\n\n \"constexpr\": \"cx\",\n \"decltype\": \"dt\",\n \"const_cast\": \"ca\",\n \"export\": \"ep\",\n \"thread_local\": \"tl\",\n \"static_assert\": \"ss\",\n \"reinterpret_cast\": \"ra\",\n \"this\": \"ti\",\n \"mutable\": \"mt\",\n \"xor_eq\": \"xe\",\n \"using,\": \"us\",\n \"noexcept, \": \"nx\",\n \"not\": \"n\",\n \"not_eq\": \"!p\",\n \"nullptr\": \"nr\",\n \"or_eq\": \"|p\",\n \"true\": \"tr\",\n \"false\": \"fs\",\n \"typeid\": \"tp\",\n \"xor\": \"xr\",\n\n}\n\n\ndico_supression = {\n \" \": \"\",\n \"{\": \"\",\n \"}\": \"\",\n \"(\": \"\",\n \")\": \"\",\n \";\": \"\",\n \":\": \"\"\n}\n\n\ndef delete_string(ligne):\n delete = False\n list_chain_to_remove = []\n\n if ligne.count(\"\\\"\") > 0:\n\n i = 0\n chain_to_remove = \"\"\n\n while i < len(ligne):\n\n if ligne[i] == '\\\"':\n list_chain_to_remove.append(chain_to_remove)\n chain_to_remove = \"\"\n delete = not delete\n\n if delete and ligne[i] != '\"':\n chain_to_remove = chain_to_remove + ligne[i]\n\n i += 1\n\n for chain in list_chain_to_remove:\n ligne = ligne.replace(chain, \"\")\n\n ligne = ligne.replace(\"\\\"\\\"\", \"\\\"\")\n\n return ligne\n\n\ndef replace_by_new_content(ligne):\n for key in dico_stemming:\n ligne = ligne.replace(key, dico_stemming[key])\n return ligne\n\n\ndef delete_unuse_content(ligne):\n for key in dico_supression:\n ligne = ligne.replace(key, dico_supression[key])\n\n return ligne\n\n\ndef sanitize_content(ligne):\n filtered_content = \"\"\n ligne = delete_string(ligne)\n filtered_content = filtered_content + (replace_by_new_content(ligne))\n filtered_content = delete_unuse_content(filtered_content)\n return filtered_content\n\n\n\ndef findVariableInFuction(line):\n listVariable = []\n cpt = 0\n inFunction = False\n\n 
listContentinit = []\n    listVarInFunction = \"\"\n\n    for signe in line:\n\n        if signe == \"(\":\n            cpt += 1\n            inFunction = True\n\n        elif signe == \")\":\n            cpt -= 1\n\n        if inFunction:\n            if True and cpt > 0 and signe != \"(\":\n                listVarInFunction += signe\n\n            else:\n                listContentinit.append(listVarInFunction)\n                listVarInFunction = \"\"\n\n    listContentSplit = []\n    listContentSplit2 = []\n\n    for functionContent in listContentinit:\n        functionContent = functionContent.replace(',', ', ')\n        x = functionContent.split(\", \")\n\n        for content in x:\n            listContentSplit2.append(content)\n            if not set('~!@#$%^&*()+.{}\":;\\'+$').intersection(content):\n\n                if \"[\" in content:\n                    newContent = \"\"\n                    id = 0\n                    finishExtractVarFromTable = False\n\n                    while id < len(content) and not finishExtractVarFromTable:\n                        if content[id] == \"[\":\n                            finishExtractVarFromTable = True\n\n                        else:\n                            newContent += content[id]\n                        id += 1\n\n                    content = newContent\n\n                if re.search(PATERN_VARIABLE[0], content) != None or re.search(PATERN_VARIABLE[1], content) != None or re.search(PATERN_VARIABLE[2], content) != None:\n\n                    listContentSplit.append(content)\n\n    for variablePart in listContentSplit:\n\n        if \">\" in variablePart:\n            parts = variablePart.split(\">\")\n            parts[len(parts)-2] += \">\"\n        else:\n            parts = variablePart.split(\" \")\n\n        i = 0\n        type = \"\"\n\n        while i < len(parts) - 1:\n            type = type + parts[i]\n            i += 1\n\n        type = re.sub(' ', '', type)\n        variable = re.sub(' ', '', parts[len(parts)-1])\n        listVariable.append((type, variable))\n\n    return listVariable\n\n\ndef findVariableDeclare(ligne):\n    \"\"\"\n    :param ligne: a line of code\n    :return: the list of variables declared within a function\n    \"\"\"\n\n    listVariable = []\n    type = \"\"\n    list = []\n\n    if True:\n        i = 0\n        find = False\n\n        while i < len(ligne) and not find:\n\n            if ligne[i] == \"=\":\n\n                notFind = True\n                findSeparator = True\n                permissionParcourtWord = False\n\n                k = i\n                var = \"\"\n                typeVar = \"\"\n                listVariableTransition = []\n                inTab = False\n\n                while k > 0 and notFind:\n\n                    if ligne[k] == \" \":\n\n                        permissionParcourtWord = False\n\n                        if var != \"\":\n                            listVariableTransition.append(var)\n\n                        var = \"\"\n\n                    if ligne[k] == \",\":\n\n                        findSeparator = True\n                        permissionParcourtWord = False\n\n                        if var != \"\":\n                            listVariableTransition.append(var)\n                        var = \"\"\n\n                    if ligne[k] == \"]\":\n                        inTab = True\n\n                    if ligne[k] == \"/\":\n                        if len(ligne) > k+1:\n                            if ligne[k+1] == \"/\" or ligne[k+1] == \"*\":\n\n                                notFind = False\n\n                    # If we find a character after a separator was found, or while we are still walking through a word\n                    if ligne[k] != \",\" and ligne[k] != \" \" and k != i and not inTab:\n\n                        if findSeparator or permissionParcourtWord:\n                            findSeparator = False\n                            permissionParcourtWord = True\n                            var = ligne[k]+var\n\n                        else:\n                            notFind = False\n\n                    if ligne[k] == \"[\":\n                        inTab = False\n\n                    if not notFind:\n                        type = \"\"\n                        while k > 0 and ligne[k] != \" \":\n                            type = ligne[k]+type\n                            k -= 1\n\n                        for variable in listVariableTransition:\n                            if not set('[~!@#$%^&*()+.{}\":;\\']+$').intersection(type) and not set('[~!@#$%^&*()+.{}\":;\\']+$').intersection(variable):\n                                type = re.sub(' ', '', type)\n                                variable = re.sub(' ', '', variable)\n                                typeVar = (type, variable)\n\n                                if typeVar not in listVariable:\n                                    listVariable.append(typeVar)\n\n                    k -= 1\n\n            elif ligne[i] == \";\" and \"=\" not in ligne:\n                line = \"\"\n                line = re.findall(r'\\s{0,}[A-Za-z0-9_]{1,}\\s{1,}[A-Za-z0-9_]{1,};', ligne)\n\n                if line != [] and \"return\" not in line[0]:\n\n                    line = re.sub(';', '', line[0])\n                    ligneTab = 
line.split(\" \")\n\n u = 0\n type = \"\"\n var = \"\"\n while u\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,}<', type+\"<\", line)\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,}>', type+\">\", line)\n line = re.sub(r'>\\s{0,}'+variable+r'\\s{0,}>', \">\"+type+\">\", line)\n line = re.sub(r'<\\s{0,}'+variable+r'\\s{0,}<', \"<\"+type+\"<\", line)\n line = re.sub(r'>\\s{0,}'+variable+r'\\s{0,}<', \">\"+type+\"<\", line)\n line = re.sub(r'<\\s{0,}'+variable+r'\\s{0,}>', \"<\"+type+\">\", line)\n\n ###gestion du séparateur \",\"\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,},', type+\",\", line)\n line = re.sub(r',\\s{0,}'+variable+r'\\s{0,},', \",\"+type+\",\", line)\n line = re.sub(r',\\s{0,}'+variable+r'\\s{0,}=', \",\"+type+\"=\", line)\n line = re.sub(r',\\s{0,}'+variable+r'\\s{0,}\\)', \",\"+type+\")\", line)\n\n ###gestion des symbole ( )\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,}\\)', type+\")\", line)\n line = re.sub(r'\\(\\s{0,}'+variable+r'\\s{0,},', \"(\"+type+\",\", line)\n line = re.sub(r'\\(\\s{0,}'+variable+r'\\s{0,}\\)', \"(\"+type+\")\", line)\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,}!', type+\"!\", line)\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,}<', type+\"<\", line)\n line = re.sub(r'\\s{0,}'+variable+r'\\s{0,}>', type+\">\", line)\n\n i += 1\n\n return line\n\n\n\n\n\n\ndef sanitize_list(liste_variable):\n \"\"\"\n :param liste_variable: représente la liste des variables du code\n :return: retourne la liste des variables après avoir ajouté un espace après le type de la variable. Permet de différencier les types Matrice et collection\n \"\"\"\n\n id = 0\n for elt in liste_variable:\n cpt = elt[0].count('>')\n\n if cpt == 0:\n newElt = (elt[0] + \" \", elt[1])\n elt = newElt\n liste_variable[id] = newElt\n\n id += 1\n\n return liste_variable\n\n\n\ndef remove_comentary(lignes):\n \"\"\"\n :param liste_variable: représente la liste des variables du code\n :return: retourne la liste des variables après avoir ajouté un espace après le type de la variable. 
Permet de différencier les types Matrice et collection\n \"\"\"\n long_comment = False\n code_without_comentary = []\n\n for ligne in lignes:\n\n if \"//\" in ligne:\n tab_line = ligne.split(\"//\")\n code_without_comentary.append(tab_line[0])\n\n elif \"/*\" in ligne and \"*/\" in ligne:\n tab_line1 = ligne.split(\"/*\")\n tab_line2 = ligne.split(\"*/\")\n new_line = tab_line1[0] + tab_line2[len(tab_line2)-1]\n code_without_comentary.append(new_line)\n\n elif \"/*\" in ligne:\n tab_line = ligne.split(\"/*\")\n code_without_comentary.append(tab_line[0])\n long_comment = True\n\n elif \"*/\" in ligne:\n tab_line = ligne.split(\"*/\")\n code_without_comentary.append(tab_line[len(tab_line)-1])\n long_comment = False\n\n\n elif not long_comment:\n code_without_comentary.append(ligne)\n\n\n\n return code_without_comentary\n\n\ndef excecEvalRedondance(code):\n\n listFunction = []\n listVariableRename = []\n lastListVariableRename = []\n\n listVarBlock = []\n lignes = code.split(\"\\n\")\n\n scopeCodeUser = False\n firstInsert = False\n long_comment = False\n\n lignesCompacte = \"\"\n newBlock = []\n functionCode = \"\"\n\n lignes = remove_comentary(lignes)\n\n for ligne in lignes:\n\n if \"#include\" not in ligne and \"using namespace\" not in ligne:\n\n ligne = ligne.replace('\\n', '')\n\n listeVarInitFunction = findVariableInFuction(ligne)\n\n if listeVarInitFunction != []:\n\n if listVariableRename != []:\n\n listFunction.append(listVariableRename)\n functionCode= \"\"\n listVariableRename=[]\n listVarToRenameFunction = []\n lastListVariableRename = []\n\n i = 0\n while i < len(listeVarInitFunction):\n if listeVarInitFunction[i] not in listVariableRename and listeVarInitFunction[i] != \"\":\n listVariableRename.append(listeVarInitFunction[i])\n i += 1\n\n else:\n listVarToRenameFunction = listeVarInitFunction\n\n i = 0\n while i < len(listVarToRenameFunction):\n if listVarToRenameFunction[i] not in listVariableRename and listVarToRenameFunction[i] != \"\":\n listVariableRename.append(listVarToRenameFunction[i])\n i += 1\n\n\n else:\n listVarToRenameFunction = listeVarInitFunction + findVariableDeclare(ligne)\n\n i = 0\n while i < len(listVarToRenameFunction):\n if listVarToRenameFunction[i] not in listVariableRename and listVarToRenameFunction[i] != \"\":\n listVariableRename.append(listVarToRenameFunction[i])\n i += 1\n\n\n\n lignesCompacte +=ligne\n\n listFunction.append(listVariableRename)\n\n blockCodesWithRenameVariable = []\n listFunctionCode = find_function(lignesCompacte)\n\n\n for function in listFunctionCode:\n blockCodesWithRenameVariable.append(function)\n\n codeRename = \"\"\n i=0\n for elt in blockCodesWithRenameVariable:\n codeRename += rename_variable(elt, listFunction[i])\n i +=1\n\n blockCodes = find_block(codeRename)\n\n\n\n print(blockCodes)\n\n for bl in blockCodes:\n print(bl)\n\n\n cptRedondance = 0\n\n for block in blockCodes:\n if blockCodes[block] > 1:\n cptRedondance += blockCodes[block]-1\n\n return cptRedondance\n\n","sub_path":"evaluation_code/cpp/evalRedondance.py","file_name":"evalRedondance.py","file_ext":"py","file_size_in_byte":19177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"192185487","text":"\"\"\"\nImporting and using sqlite module within python program\n@author: outsider\n\"\"\"\n\nimport sqlite3\n\nsqliteFile = 'test.sqlite'\ntableName = 'table2'\nid = 'testID'\nsomeID = 123456\ncol2 = 'table2seconCol' #Name of the new column\ncol3 = 'table2thirdCol' #Name of the new column\n\n#Connecting to the database file\nconn = sqlite3.connect(sqliteFile)\nc = conn.cursor()\n\n#Contents of all columns for row that match the id number from column 1\nc.execute('SELECT * FROM {tn} WHERE {cn}=\"Hi Sylvain\"'\\\n .format(tn=tableName, cn=col2))\nallRows = c.fetchall()\nprint('1:', allRows)\n\n#Value of a particular column for rows that match a certain value in column 1\nc.execute('SELECT ({coi}) FROM {tn} WHERE {cn}=\"Hi Sylvain\" '\\\n .format(coi=col2, tn=tableName, cn=col2))\nallRows = c.fetchall()\nprint('2:', allRows)\n\n#Values of 2 particular columns for rows that match a certain value in column 1\nc.execute('SELECT {coi},{coi1} FROM {tn} WHERE {cn}=\"Hi Sylvain\" '\\\n .format(coi=col2, coi1=col3, tn=tableName, cn=col2))\nallRows = c.fetchall()\nprint('3:', allRows)\n\nc.execute('SELECT * FROM {tn} WHERE {cn}=\"Hi Sylvain\" LIMIT 10 '\\\n .format(coi=col2, tn=tableName, cn=col2))\ntenRows = c.fetchall()\nprint('4:', tenRows)\n\n#Check whether or not an ID exists and print all columns relative to that\nc.execute('SELECT * FROM {tn} WHERE {idf}={testID} '\\\n .format(tn=tableName, cn=col2, idf=id, testID=someID))\nidExists = c.fetchone()\nif(idExists):\n print('5: {}'.format(idExists))\nelse:\n print('5: {} does not exists'.format(id))\n\n\nconn.close()\n\n\n#This method is for cleanning the variables and prevent SQL's injections... \ndef cleanNames(someVariable):\n return ''.join(char for char in someVariable if char.isalnum())\n\n\n\n\n","sub_path":"select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"461474794","text":"class Cars:\n def minecolor(self,color):\n self.color=color\n print(self.color)\nclass BMW(Cars):\n def topspeed(self,speed):\n self.speed=speed\n print(self.speed)\nobjc=Cars()\nobjBMW=BMW()\nobjc.minecolor(\"red\")\n#objc.topspeed(100)\nobjBMW.minecolor(\"blue\")\nobjBMW.topspeed(50)","sub_path":"day6assignment-26july/26july2021-Kalaiarasi-day6assignment/inherit.py","file_name":"inherit.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"560989566","text":"import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\n\n\ndef random_shape(height, width):\n \"\"\"Generates specifications of a random shape that lies within\n the given height and width boundaries.\n Returns a tuple of three valus:\n * The shape name (square, circle, ...)\n * Shape color: a tuple of 3 values, RGB.\n * Shape dimensions: A tuple of values that define the shape size\n and location. Differs per shape type.\n \"\"\"\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)\n\n\ndef random_image():\n \"\"\"Creates random specifications of an image with multiple shapes.\n Returns the background color of the image and a list of shape\n specifications that can be used to draw the image.\n \"\"\"\n # Pick random background color\n bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n height = random.randint(300, 350)\n width = random.randint(350, 400)\n print(\"bg_color: \" + str(bg_color))\n print(\"height: \" + str(height))\n print(\"width: \" + str(width))\n\n image = np.ones([height, width, 3], dtype=np.uint8) * bg_color\n mask = np.zeros([height, width, 3], dtype=np.uint8)\n labels = []\n assert image.shape == mask.shape\n\n # Generate a few random shapes\n shapes = []\n boxes = []\n N = random.randint(1, 4)\n for _ in range(N):\n shape, color, dims = random_shape(height, width)\n shapes.append((shape, color, dims))\n x, y, s = dims\n boxes.append([y - s, x - s, y + s, x + s])\n # Apply non-max suppression wit 0.3 threshold to avoid\n # shapes covering each other\n keep_ixs = utils.non_max_suppression(\n np.array(boxes), np.arange(N), 0.1)\n shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n\n for shape, color, dims in shapes:\n x, y, s = dims\n if shape == 'square':\n image = cv2.rectangle(image, (x - s, y - s),\n (x + s, y + s), color, -1)\n mask = cv2.rectangle(mask, (x - s, y - s),\n (x + s, y + s), 1, -1)\n labels.append([1, x, y, s])\n elif shape == \"circle\":\n image = cv2.circle(image, (x, y), s, color, -1)\n mask = cv2.circle(mask, (x, y), s, 2, -1)\n labels.append([2, x, y, s])\n elif shape == \"triangle\":\n points = np.array([[(x, y - s),\n (x - s / math.sin(math.radians(60)), y + s),\n (x + s / math.sin(math.radians(60)), y + s),\n ]], dtype=np.int32)\n image = cv2.fillPoly(image, points, color)\n mask = cv2.fillPoly(mask, points, 3)\n labels.append([3, x, y, s])\n\n return image, mask, np.array(labels)\n\n\nif __name__ == \"__main__\":\n num_simple = 100\n simple_path = os.path.join(ROOT_DIR, \"samples/shapes/generated\")\n\n for i in range(num_simple):\n temp_dir = os.path.join(simple_path, \"simple\" + str(i))\n if os.path.exists(temp_dir):\n ls = os.listdir(temp_dir)\n for file in ls:\n os.remove(os.path.join(temp_dir, file))\n else:\n os.makedirs(temp_dir)\n\n image, mask, labels = random_image()\n cv2.imwrite(os.path.join(temp_dir, \"img.png\"), image)\n cv2.imwrite(os.path.join(temp_dir, \"label.png\"), mask)\n 
np.savetxt(os.path.join(temp_dir, \"labels.txt\"), labels)","sub_path":"samples/shapes/generate_shapes.py","file_name":"generate_shapes.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"593147232","text":"from django.shortcuts import render, redirect \r\nfrom django.contrib.auth.decorators import login_required \r\nfrom django.contrib import messages \r\nfrom .forms import ImageCreateForm\r\nfrom .models import Image\r\n\r\n\r\n@login_required \r\ndef image_create(request):\t\r\n\tif request.method == 'POST': \r\n\t\tform = ImageCreateForm(data=request.POST,files=request.FILES) \r\n\t\tif form.is_valid(): \r\n\t\t\tcd = form.cleaned_data\r\n\t\t\t \r\n\t\t\tnew_item = form.save(commit=False) \r\n\t\t\tnew_item.user = request.user \r\n\t\t\tnew_item.save() \r\n\t\t\tmessages.success(request, 'Image added successfully') \r\n\telse:\r\n\t\tform = ImageCreateForm(data=request.GET)\r\n\r\n\treturn render(request,'image/img/create.html',{'section': 'images','form': form})\r\n","sub_path":"image/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"604035713","text":"#!/bin/bash/env python\n# coding=UTF-8\n# by Tarcisio marinho\n# github.com/tarcisio-marinho\nimport os, subprocess, random, socket\n\ndef conexao(meuIP):\n # servidor\n f=open('private_key.txt','r')\n chave_privada=f.read()\n print(chave_privada)\n print('Servidor rodando')\n while True:\n porta=6064\n\n socket_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n socket_obj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # se der ctrl + c, ele para de escutar na porta\n socket_obj.bind((meuIP, porta))\n socket_obj.listen(1) # escuta apenas 1 \"vitma\"\n #os.system('clear')\n conexao,endereco=socket_obj.accept()\n\n # ao se conectar, envia a chave privada (S)\n conexao.send(str(chave_privada))\n # recebeu do cliente o ID\n\n print(recebido)\n\n\nconexao('127.0.0.1')\n","sub_path":"C&C/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"417184586","text":"import os\nimport shutil\nimport argparse\nfrom PIL import Image, ImageChops, ImageStat\n\nimport misli\nfrom .constants import RECORDING_EXTENSION, SNAP_EXTENSION\nfrom misli.gui import update_components_from_changes\nfrom misli.gui.qt_main_loop import QtMainLoop\n\nimport pamet\n\nfrom .gui_recorder import MisliGuiRecorder\nfrom .gui_replay import MisliGuiReplay\n\nINSPECTION_TEMPLATES_PATH = 'inspection_templates'\nAUTOMATIC_INSPECTION_OUTPUT_PATH = 'automatic_inspection_output'\nMANUAL_INSPECTION_OUTPUT_PATH = 'manually_verified_inspections'\nDIFF_FOLDER = 'tmp_last_visual_inspection_diffs'\n\n\ndef run_recording(file_for_replay, output_folder, replay_speed):\n misli.set_main_loop(QtMainLoop())\n misli_gui.set_reproducible_ids(True)\n\n recorder = MisliGuiRecorder('BrowserWindowView')\n misli.gui.on_action(recorder.handle_action_channel)\n\n replay = MisliGuiReplay(file_for_replay)\n replay.speed = replay_speed\n\n misli.on_change(update_components_from_changes)\n misli.gui.on_action(replay.queue_next_action)\n\n # desktop_app = misli.gui.create_view('DesktopApp', parent_id='')\n\n desktop_app_class = pamet.view_library.get_view_class('BrowserWindowView')\n desktop_app = desktop_app_class(parent_id='')\n\n replay.queue_next_action([])\n desktop_app.exec_()\n\n recorder.save_recording(output_folder, overwrite=True)\n\n\ndef compare_images(img1, img2):\n # Don't compare if images are of different modes or different sizes.\n if (img1.mode != img2.mode) \\\n or (img1.size != img2.size) \\\n or (img1.getbands() != img2.getbands()):\n return 100\n\n # Generate diff image in memory.\n diff_img = ImageChops.difference(img1, img2)\n # Calculate difference as a ratio.\n stat = ImageStat.Stat(diff_img)\n diff_ratio = sum(stat.mean) / (len(stat.mean) * 255)\n\n return diff_ratio * 100\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--manual-verification', action='store_true')\n parser.add_argument('--replay-speed', default=1)\n args = parser.parse_args()\n\n manual_verification = args.manual_verification\n replay_speed = args.replay_speed\n\n output_folder = AUTOMATIC_INSPECTION_OUTPUT_PATH\n if manual_verification:\n print('MANUAL MODE: The generated snapshots of the BrowserWindow will'\n ' be used for the automatic visual inspections, so you should'\n ' verify that the app is working correctly. You can adjust the'\n ' actions replay speed with the --replay-speed argument.')\n output_folder = MANUAL_INSPECTION_OUTPUT_PATH\n\n print('RUNNING AUTOMATED TESTS: press CTRL+C in the console to abort the '\n 'process and discard the last test. Press Enter to start.')\n\n for template in os.scandir(INSPECTION_TEMPLATES_PATH):\n if not template.name.endswith(RECORDING_EXTENSION):\n raise Exception('Unexpected file %s' % template)\n\n tname = template.name[:-len(RECORDING_EXTENSION)]\n\n results_folder = os.path.join(output_folder, tname)\n\n run_recording(template.path, results_folder, replay_speed)\n print('Done with template \"%s\".' % tname)\n\n if manual_verification:\n return\n\n print('Done with all inspections. Starting to compare with the manually '\n 'verified data')\n\n diffs_found = 0\n\n for recording in os.scandir(AUTOMATIC_INSPECTION_OUTPUT_PATH):\n snapshots_folder = os.path.join(recording.path, 'snapshots')\n good_snaps_folder = os.path.join(\n MANUAL_INSPECTION_OUTPUT_PATH,\n recording.name,\n 'snapshots')\n\n if not os.path.exists(good_snaps_folder):\n print('Manually verified images for %s missing.' 
% recording.name)\n continue\n\n for snap in os.scandir(snapshots_folder):\n good_snap_path = os.path.join(good_snaps_folder, snap.name)\n\n if not os.path.exists(good_snap_path):\n continue\n\n image = Image.open(snap.path)\n good_image = Image.open(good_snap_path)\n\n diff_value = compare_images(image, good_image)\n\n if diff_value > 0.01:\n diffs_found += 1\n snap_meta, ext = os.path.splitext(snap.name)\n print('DIFFERENCE %s (>0.001) for \"%s\" in recording \"%s\"' %\n (diff_value, snap_meta, recording.name))\n\n diff_folder = os.path.join(\n DIFF_FOLDER, recording.name, snap_meta)\n\n if os.path.exists(diff_folder):\n shutil.rmtree(diff_folder)\n\n os.makedirs(diff_folder, exist_ok=True)\n\n common_target_path = os.path.join(diff_folder, snap.name)\n target_path_good_img = (common_target_path + '_good' +\n SNAP_EXTENSION)\n\n shutil.copy(snap.path, common_target_path + SNAP_EXTENSION)\n shutil.copy(good_snap_path, target_path_good_img)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/visual/run_visual_inspection.py","file_name":"run_visual_inspection.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"83356037","text":"#!flask/bin/python\nfrom flask import Flask, jsonify\nfrom flask import abort\nimport array\nfrom flask import request\nfrom flask import make_response\nfrom flask import url_for\n\n\napp = Flask(__name__)\n\n\n#We use a temporary in memory json object to store the services\n#This of course should be on a DB in the long term.\t \nservices = [\n {\n 'id': 1,\n#\t\t'uniqueID' : u'0.0.1.1.epochtime',\n 'change_version':1, \n 'service': u'test',\n 'version': u'0.0.1', \n 'change': u'created'\n },\n {\n 'id': 2,\n#\t\t'uniqueID' : u'0.0.1.2.epochtime',\n 'change_version':1,\n 'service': u'test',\n\t 'version': u'0.0.1', \n 'change': u'created'\n },\n\t{\n 'id': 3,\n#\t\t'uniqueID' : u'0.0.2.1.epochtime',\n 'change_version':1,\n 'service': u'test',\n 'version': u'0.0.2', \n 'change': u'created'\n },\n {\n 'id': 4,\n#\t\t'uniqueID' : u'0.0.2.2.epochtime',\n 'change_version':1,\n 'service': u'test',\n\t 'version': u'0.0.2', \n 'change': u'created'\n },\n {\n 'id': 5,\n#\t\t'uniqueID' : u'0.0.2.1.epochtime',\n 'change_version':1,\n 'service': u'test2',\n\t 'version': u'0.0.2', \n 'change': u'created'\n },\n\t {\n 'id': 6,\n#\t\t'uniqueID' : u'0.0.2.2.epochtime',\n 'change_version':1,\n 'service': u'test2',\n\t 'version': u'0.0.2', \n 'change': u'created'\n }\n]\n\n#This is our get function at the high level. This will return a json object including all registered services.\n#We also return a URI refering to each service so that the users don't have to create that\n@app.route('/service_registry/api/v1.0/services', methods=['GET'])\ndef get_services():\n return jsonify({'services': services})\n#put the code below in once you get URIs working\n# return jsonify({'services': [make_public_service(service) for service in services]})\n\n#This get function returns a service by service id\n@app.route('/service_registry/api/v1.0/services/', methods=['GET'])\ndef get_service_id(service_id):\n service = [service for service in services if service['id'] == service_id]\n if len(service) == 0:\n abort(404)\n return jsonify({'service': service[0]})\n#This end point returns all instances and versions for a given service\n@app.route('/service_registry/api/v1.0/services//getAll', methods=['GET'])\ndef get_service_all(service_name_all):\n service = [service for service in services if service['service'] == service_name_all]\n if len(service) == 0:\n abort(404)\n return jsonify({'service': service})\n\t\n\n\n\n#This returns the count of a given service\n@app.route('/service_registry/api/v1.0/services/', methods=['GET'])\ndef get_service(service_name):\n service = [service for service in services if service['service'] == service_name]\n if len(service) == 0:\n return jsonify({'service': service_name,'count': 0 })\n count=len(service)\t\n return jsonify({'service': service_name,'count': count })\n#this returns the counts of a service and version\n@app.route('/service_registry/api/v1.0/services//', methods=['GET'])\ndef get_service_count(service_name,service_version):\n service = [service for service in services if service['service'] == service_name and service['version'] == service_version]\n if len(service) == 0:\n return jsonify({'service': service_name, 'version' : service_version ,'Not found':'True', 'count': 0 })\n count=len(service)\t\n #return jsonify({'service': service})\t\n return jsonify({'service': service_name, 'version' : service_version, 'count': count })\n\t\n\n#return a nice message if not found\n@app.errorhandler(404)\ndef not_found(error):\n return 
make_response(jsonify({'error': 'Not found'}), 404)\n\n\t#This is the post end point implementation\n@app.route('/service_registry/api/v1.0/services', methods=['POST'])\ndef create_service():\n if not request.json or not 'service' in request.json or not 'version' in request.json:\n abort(400)\n service = {\n 'id': services[-1]['id'] + 1,\n 'change_version':1,\n 'service': request.json['service'],\n 'version': request.json.get('version', \"\"),\n# 'uniqueID' : u'0.0.2.2.epochtime',\n 'change': u'created'\n }\n services.append(service)\n return jsonify({'service': service}), 201\n\n \n\n#This is the update service using the service id\n#Other types of update needs to be implemented\n@app.route('/service_registry/api/v1.0/services/', methods=['PUT'])\ndef update_service(service_id):\n service = [service for service in services if service['id'] == service_id]\n if len(service) == 0:\n abort(404)\n if not request.json:\n abort(400)\n if 'service' in request.json and type(request.json['service']) != str:\n abort(400)\n if 'version' in request.json and type(request.json['version']) != str:\n abort(400)\n if 'change' in request.json and type(request.json['change']) != str:\n abort(400)\n service[0]['service'] = request.json.get('service', service[0]['service'])\n service[0]['version'] = request.json.get('version', service[0]['version'])\n service[0]['change'] = u'changed'\n service[0]['change_version']=service[0]['change_version']+1\n return jsonify({'service': service[0]})\n\n#This is the put implementation for /services/service_name\n@app.route('/service_registry/api/v1.0/services/', methods=['PUT'])\ndef update_service_serviceName(service_name):\n service = [service for service in services if service['service'] == service_name]\n if len(service) == 0:\n abort(404)\n if not request.json:\n abort(400)\n if 'service' in request.json and type(request.json['service']) != str:\n abort(400)\n if 'version' in request.json and type(request.json['version']) != str:\n abort(400)\n if 'change' in request.json and type(request.json['change']) != str:\n abort(400)\n\n for i in range(0,len(service)):\n service[i]['service'] = request.json.get('service', service[i]['service'])\n service[i]['version'] = request.json.get('version', service[i]['version'])\n service[i]['change'] = u'changed'\n service[i]['change_version']=service[i]['change_version']+1\n\n return jsonify({'service': service})\n\n#This is the put implementation for /services/service_name/version_name\n@app.route('/service_registry/api/v1.0/services//', methods=['PUT'])\ndef update_service_serviceName_versionName(service_name,service_version):\n service = [service for service in services if service['service'] == service_name and service['version'] == service_version]\n if len(service) == 0:\n abort(404)\n if not request.json:\n abort(400)\n if 'service' in request.json and type(request.json['service']) != str:\n abort(400)\n if 'version' in request.json and type(request.json['version']) != str:\n abort(400)\n if 'change' in request.json and type(request.json['change']) != str:\n abort(400)\n\n for i in range(0,len(service)):\n service[i]['service'] = request.json.get('service', service[i]['service'])\n service[i]['version'] = request.json.get('version', service[i]['version'])\n service[i]['change'] = u'changed'\n service[i]['change_version']=service[i]['change_version']+1\n\n return jsonify({'service': service})\n\n#This is the delete end point\n@app.route('/service_registry/api/v1.0/services/', methods=['DELETE'])\ndef delete_service(service_id):\n 
service = [service for service in services if service['id'] == service_id]\n if len(service) == 0:\n abort(404)\n services.remove(service[0])\n return jsonify({'sesrvice': service[0]['service'],'id': service[0]['id'],'change': 'removed'})\n services.append(service)\n return jsonify({'service': service}), 201\n#This function is used to create a URI to our services. This helps the user interact with the service\n#We have removed it for now and can be added in the future to help users make calls to services\n#def make_public_service(service):\n# new_service = {}\n# for field in service:\n# if field == 'id':\n# new_service['uri'] = url_for('get_services', service_id=service['id'], _external=True)\n# else:\n# new_service[field] = service[field]\n # return new_service\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"service-registry-api/service_resigter.py","file_name":"service_resigter.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"189248962","text":"import time\nimport os\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nimport json\nimport requests\n\n\nclass Watcher:\n dir_path = os.path.dirname(os.path.realpath(__file__))\n DIRECTORY_TO_WATCH = dir_path + \"/data/\"\n\n def __init__(self):\n self.observer = Observer()\n\n def run(self):\n print(self.DIRECTORY_TO_WATCH)\n event_handler = Handler()\n self.observer.schedule(event_handler, self.DIRECTORY_TO_WATCH, recursive=True)\n self.observer.start()\n print(self.DIRECTORY_TO_WATCH)\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n print(\"Error\")\n\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n def on_any_event(self, event):\n if event.is_directory:\n print(event)\n return None\n\n elif event.event_type == 'created':\n # Take any action here when a file is first created.\n # print(\"Received created event - %s.\" % event.src_path);\n file = event.src_path\n json_data = open(file)\n test_data = json.load(json_data)\n for item in test_data:\n requests.post('http://localhost:8000/api/records/', data=item)\n\n\nif __name__ == '__main__':\n w = Watcher()\n w.run()\n","sub_path":"backend/records/watch_for_changes.py","file_name":"watch_for_changes.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"548280961","text":"from setuptools import setup\nfrom setuptools.command.install import install\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nclass CustomInstall(install):\n def run(self):\n install.run(self)\n # custom stuff here\n print(readme())\n\nsetup(name='theonering',\n version='0.1',\n description='One ring to rule them all, one ring to find them, one ring to bring them all and in the darkness bind them',\n long_description=readme(),\n url='http://github.com/unk/unk',\n author='unk',\n author_email='unk@unk.com',\n license='MIT',\n packages=['theonering'],\n install_requires=[\n 'virtualenv',\n 'virtualenvwrapper',\n 'ipython'\n ],\n scripts=['bin/sauron'],\n test_suite='nose.collector',\n tests_require=['nose'],\n include_package_data=True,\n zip_safe=False,\n cmdclass={'install': CustomInstall})\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"452349951","text":"import os.path\n\nfrom saml2 import BINDING_HTTP_REDIRECT\nfrom saml2.saml import NAME_FORMAT_BASIC\n\ntry:\n from saml2.sigver import get_xmlsec_binary\nexcept ImportError:\n get_xmlsec_binary = None\n\nif get_xmlsec_binary:\n xmlsec_path = get_xmlsec_binary([\"/opt/local/bin\"])\nelse:\n xmlsec_path = '/usr/bin/xmlsec1'\n\n\n#BASE = 'http://edem.microcomp.sk'\n#entityid' : 'http://edem.microcomp.sk',\nBASE = 'http://edem.microcomp.sk'\nCONFIG_PATH = os.path.dirname(__file__)\n\nUSER_MAPPING = {\n 'email': 'mail',\n 'fullname': 'field_display_name',\n}\n#'idp': ['urn:mace:umu.se:saml:ckan:idp'],\nCONFIG = {\n 'entityid' : 'http://edem.microcomp.sk',\n 'description': 'CKAN saml2 auth',\n 'service': {\n 'sp': {\n 'name' : 'CKAN SP',\n 'endpoints': {\n 'assertion_consumer_service': [BASE],\n 'single_logout_service' : [(BASE + '/slo',\n BINDING_HTTP_REDIRECT)],\n },\n 'required_attributes': [\n # 'sn',\n 'uid',\n # 'name',\n # 'mail',\n # 'status',\n # 'roles',\n # 'field_display_name',\n # 'realname',\n # 'groups',\n # 'givenname',\n # 'surname',\n # 'edupersonaffiliation',\n ],\n 'optional_attributes': [],\n \"authn_assertions_signed\": \"true\",\n \"authn_requests_signed\" : \"true\",\n \"want_assertions_signed\": \"true\",\n \"logout_requests_signed\": \"true\",\n }\n },\n 'debug': 1,\n 'key_file': CONFIG_PATH + '/pki/mod_key.pem',\n 'cert_file': CONFIG_PATH + '/pki/mod_cert.pem',\n 'attribute_map_dir': CONFIG_PATH + '/../attributemaps',\n 'metadata': {\n 'local': [CONFIG_PATH + '/idp.xml'],\n },\n # -- below used by make_metadata --\n# 'organization': {\n# 'name': 'Exempel AB',\n# 'display_name': [('Exempel AB','se'),('Example Co.','en')],\n# 'url':'http://www.example.com/ckan',\n# },\n# 'contact_person': [{\n# 'given_name':'John',\n# 'sur_name': 'Smith',\n# 'email_address': ['john.smith@example.com'],\n# 'contact_type': 'technical',\n# },\n# ],\n 'name_form': NAME_FORMAT_BASIC,\n \"xmlsec_binary\": '/usr/bin/xmlsec1',\n 'logger': {\n 'rotating': {\n 'filename': 'sp.log',\n 'maxBytes': 100000,\n 'backupCount': 5,\n },\n 'loglevel': 'debug',\n }\n}\n","sub_path":"ckanext/saml2/config/sp_config.py","file_name":"sp_config.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"566944155","text":"from rest_framework import serializers\n\nfrom aerpay_main.models import Category, Coupon, FMCGProduct, FMCGTransaction, Product, QuantityType, Store, SubscriptionPrice\n\nclass SubscriptionPriceGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = SubscriptionPrice\n fields = ('id', 'new_price',)\n\nclass QuantityTypesGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = QuantityType\n fields = ('id', 'type_label',)\n\nclass StoreRegisterSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Store\n fields = ('email', 'help_desk_email',\n 'name', 'phone_number', 'image', 'base64Image', 'imageType', \n 'description', 'address', 'address_latitude', 'address_longitude', \n 'refered_by_store_referal_code',)\n\nclass StoreProductsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('id', 'category', 'name', 'image', 'quantity_type', 'price', 'discounted_price', 'is_out_of_stock')\n\n def to_representation(self, instance):\n rep = super().to_representation(instance)\n rep['category'] = instance.category.name\n return rep\n\nclass StoreListSrlzr(serializers.ModelSerializer):\n products = serializers.SerializerMethodField()\n class Meta:\n model = Store\n fields = ('id','name', 'image', 'address', \n 'description', 'no_of_ratings', 'rating', 'products')\n \n def get_products(self, obj):\n selected_products = Product.objects.filter(\n category__store=obj).distinct()[0:5]\n return StoreProductsSerializer(selected_products, many=True).data\n\nclass StoreGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Store\n fields = ('id', 'email', 'help_desk_email',\n 'name', 'phone_number', 'image', 'base64Image', 'imageType', 'address', 'address_latitude', 'address_longitude', \n 'description', 'default_subscription_price', 'subscription_renewal_price', \n 'last_paid_subscription_date', 'subscription_renewal_date', \n 'refered_by_store_referal_code', 'own_referal_code', 'no_of_ratings', 'rating')\n\n\nclass StoreUpdateSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Store\n fields = ('email', 'help_desk_email',\n 'name', 'phone_number', 'image', 'base64Image', 'imageType', \n 'description', 'address', 'address_latitude', 'address_longitude',)\n\n\nclass CategoryMdlSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('id', 'name', 'image', 'base64Image', 'imageType')\n\nclass ProductCreateSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('category', 'name', 'image', 'base64Image', 'imageType', \n 'description', 'quantity_type', 'initial_quantity',\n 'price', 'discounted_price',)\n\nclass ProductGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('id', 'category', 'name', 'image', 'base64Image', 'imageType', 'description', 'quantity_type',\n 'initial_quantity', 'quantity_in_stock', 'price', 'discounted_price', \n 'is_out_of_stock')\n\n def to_representation(self, instance):\n rep = super().to_representation(instance)\n rep['category'] = CategoryMdlSrlzr(\n instance.category).data\n return rep\n\nclass ProductUpdateSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('category', 'name', 'image', 'base64Image', 'imageType', \n 'description', 'quantity_type',\n 'price', 'discounted_price',)\n\nclass RefillProductSrlzr(serializers.Serializer):\n new_quantity_in_stock = serializers.IntegerField()\n\nclass ProductDeleteSrlzr(serializers.Serializer):\n id = serializers.IntegerField()\n\nclass 
CategoryCreateSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('name', 'image', 'base64Image', 'imageType')\n\nclass CatProductsSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('id', 'name', 'image', 'description', 'quantity_type',\n 'quantity_in_stock', 'price', 'discounted_price', \n 'is_out_of_stock')\n\nclass CategoryGetSrlzr(serializers.ModelSerializer):\n products = CatProductsSrlzr(\n many=True, read_only=True)\n class Meta:\n model = Category\n fields = ('id', 'name', 'image', 'products')\n\nclass StoreUsrGetSrlzr(serializers.ModelSerializer):\n categories = CategoryGetSrlzr(\n many=True, read_only=True)\n class Meta:\n model = Store\n fields = ('id', 'email', 'help_desk_email',\n 'name', 'phone_number', 'image', 'address', 'address_latitude', 'address_longitude', \n 'description', 'no_of_ratings', 'rating', 'categories')\n\n\nclass CategoryUpdateSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('name', 'image', 'base64Image', 'imageType')\n\nclass CategoryDeleteSrlzr(serializers.Serializer):\n id = serializers.IntegerField()\n\nclass FMCGStrProductGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = FMCGProduct\n fields = ('id', 'name', 'image', 'description', 'quantity_type',\n 'quantity_in_stock', 'price', 'discounted_price', \n 'is_out_of_stock')\n\nclass FMCGTrnsctnCreateSrlzr(serializers.ModelSerializer):\n class Meta:\n model = FMCGTransaction\n fields = ('fmcg_product', \n 'quantity_taken_from_fmcg', 'total_cost_at_the_time')\n\nclass FMCGTrnsctnGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = FMCGTransaction\n fields = ('id', 'fmcg_product', 'transaction_occured_at', \n 'quantity_taken_from_fmcg', 'total_cost_at_the_time',)\n\n def to_representation(self, instance):\n rep = super().to_representation(instance)\n rep['fmcg_product'] = FMCGStrProductGetSrlzr(\n instance.fmcg_product).data\n return rep\n\nclass CouponGnrtSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Coupon\n fields = ('category', 'product',\n 'minimum_order_Rs', 'discount_type',\n 'discount_amount', 'uses_per_customer')\n\nclass CouponGetSrlzr(serializers.ModelSerializer):\n class Meta:\n model = Coupon\n fields = ('id', 'category', 'product', 'code',\n 'minimum_order_Rs', 'discount_type',\n 'discount_amount', 'uses_per_customer')\n \nclass CouponDeleteSrlzr(serializers.Serializer):\n id = serializers.IntegerField()\n\n","sub_path":"aerpay_main/store_serializers.py","file_name":"store_serializers.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"328816868","text":"import numpy as np\nfrom numpy.linalg import inv\n\nm_ = np.matmul\nmprod_ = m_\n\ndef _kfi(xp, uxp, m, um, h, f = None, q = None):\n #if (f is not None): print('---')\n #else: print ('xxx')\n size = len(xp)\n f = f if f is not None else np.identity(size)\n xi = mprod_(f, xp.T)\n #print('f xp ', xi)\n q = np.zeros(size * size).reshape(size, size) if q is None else q\n #print(' qi ', q)\n uxi = mprod_(f, mprod_(uxp, f.T)) + q\n ide = np.identity(size)\n #h2 = h2 if h2 is not None else 0. * h\n res = m - mprod_(h, xi.T) # - mprod_(h2, xi.T)\n #print('xi ', xi.T)\n #print('h ', h)\n #print('h xi ', mprod_(h, xi.T))\n #print('res ', res)\n k = np.matmul(mprod_(uxi, h.T), inv(mprod_(h, mprod_(uxi, h.T)) + um))\n #print('k ', k)\n x = xi + mprod_(k, res.T)\n #print(' k res ', mprod_(k, res.T))\n #print('x ', x)\n ux = mprod_((ide - mprod_(k, h)), uxi)\n #print('cov ', cov)\n return x, ux, res, k\n\ndef _kfs(ms, ums, hs, x0, ux0 = None, fs = None, qs = None):\n nsample, msize, ssize = len(ms), len(ms[0]), len(x0)\n ux0 = 1e4 * np.identity(ssize) if ux0 is None else ux0\n xs = [x0 for ii in range(nsample)]\n uxs = [np.identity(ssize) for ii in range(nsample)]\n res = [np.zeros(msize) for ii in range(nsample)]\n fs = [np.identity(ssize) for ii in range(nsample)] if fs is None else fs\n qs = [0.* fi for fi in fs] if qs is None else qs\n for i in range(nsample):\n xp, uxp = x0, ux0\n if (i >= 1):\n xp, uxp = xs[i-1], uxs[i-1]\n xi, uxi, resi, _ = _kfi(xp, uxp, ms[i], ums[i], hs[i], f = fs[i], q = qs[i])\n xs[i], uxs[i], res[i] = xi, uxi, resi\n return xs, uxs, res\n\n\ndef _kfs(xi, xi1, xi1p, ui, ui1p, ui1, fi = None):\n size = len(xs)\n fi = fi if fi is not None else np.identity(size)\n ak = mprod_(ui, mprod_(fi.T, inv(uip)))\n xis = xi + mprod_(ai, (xi1 - xi1p))\n uis = ui + mprod_(ai, ui1p - ui1p)\n print('Ak ', ak)\n print('xk ', xis)\n return xis,\n\nclass KFnode:\n\n names = ['xp', 'uxp', 'k', 'res']\n names += ['xm', 'uxm', 'a', 'xs', 'uxs']\n\n def __init__(self, m = None, um = None, h = None, f = None, q = None):\n for name in KFnode.names:\n setattr(self, name, None)\n self.m = m\n self.um = um\n self.h = h\n self.f = f\n self.q = q\n return\n\ndef kfilter(kfnodes, x0, ux0):\n\n nsize = len(kfnodes)\n msize = len(x0)\n\n _kfstep = _kfstep_m\n\n def _kfnode0(x0, ux0):\n k0 = KFnode()\n k0.xm = x0\n k0.uxm = ux0\n k0.f = np.identity(msize)\n k0.q = 0. 
* np.identity(msize)\n return k0\n\n k0 = _kfnode0(x0, ux0)\n kfnodes[0] = _kfstep(k0, kfnodes[0])\n for i in range(1, nsize):\n #print('i-node ', i)\n _kfstep(kfnodes[i-1], kfnodes[i])\n\n return kfnodes\n\ndef kfsmooth(kfnodes):\n nsize = len(kfnodes)\n\n def _klast(kn):\n kn.xs = kn.xm\n kn.uxs = kn.uxm\n return kn\n\n _klast(kfnodes[-1])\n\n for i in range(nsize-2, -1, -1):\n #print('back-step ', i)\n _kfback(kfnodes[i], kfnodes[i+1])\n\n return kfnodes\n\ndef _kfstep_m(kip, ki):\n\n #print('x0 ', kip.xm)\n #print('ux0 ', kip.uxm)\n #print('f0 ', kip.f)\n #print('q0 ', kip.q)\n\n ki.xp = m_(kip.f, kip.xm.T)\n #print('xp :', ki.xp)\n ki.uxp = m_(kip.f, m_(kip.uxm, kip.f.T)) + kip.q\n #print('uxp :', ki.uxp)\n\n ki.res = ki.m - m_(ki.h, ki.xp.T)\n #print('res ', ki.res)\n\n ki.uxm = inv(inv(ki.uxp) + m_(ki.h.T, m_(ki.um, ki.h)))\n #print('um : ', ki.um)\n #print('uxm : ', ki.uxm)\n ki.xm = m_(ki.uxm, m_(inv(ki.uxp), ki.xp) + m_(ki.h.T, m_(ki.um, ki.m.T)))\n #print('xm : ', ki.xm)\n\n return ki\n\ndef _kfstep_k(kip, ki):\n\n #print('x0 ', kip.xm)\n #print('ux0 ', kip.uxm)\n #print('f :', kip.f)\n #print('q :', kip.q)\n\n ki.xp = m_(kip.f, kip.xm.T)\n ki.uxp = m_(kip.f, m_(kip.uxm, kip.f.T)) + kip.q\n #print('xp : ', ki.xp)\n #print('uxp : ', ki.uxp)\n\n ki.res = ki.m - m_(ki.h, ki.xp.T)\n ki.k = m_( m_(ki.uxp, ki.h.T), inv(m_(ki.h, m_(ki.uxp, ki.h.T)) + ki.um))\n #print('res : ', ki.res)\n #print('k : ', ki.k)\n\n ide = np.identity(len(ki.xp))\n ki.xm = ki.xp + m_(ki.k, ki.res.T)\n ki.uxm = m_((ide - m_(ki.k, ki.h)), ki.uxp)\n #print('xm : ', ki.xm)\n #print('uxm : ', ki.uxm)\n\n return ki\n\n\ndef _kfback(kip, ki):\n #print('fT-1 ', kip.f.T)\n #print('Cp ', ki.uxp)\n #print('Cm-1 ', kip.uxm)\n kip.a = m_(kip.uxm, m_(kip.f.T, inv(ki.uxp)))\n #print('A ', kip.a)\n kip.xs = kip.xm + m_(kip.a, (ki.xs - ki.xp).T)\n kip.uxs = kip.uxm + m_(kip.a, m_(ki.uxs - ki.uxp, kip.a.T))\n return ki\n\n#------\n\ndef _delta(xs, type = float):\n #dxs[1:] = dxs[1:] - xs[:-1]\n #dxs = np.array(dxs, dtype = type)\n axs = np.array(xs)\n dxs = axs[1:] - axs[:-1]\n return dxs\n\ndef _delta_ms(cs, ufactor = 2., umin = 2.4):\n size = len(cs[0])\n ms = _delta(cs)\n ums = [np.identity(size) * ufactor * np.maximum(np.sqrt(np.abs(ci)), umin) for ci in cs]\n return ms, ums\n\ndef _hs(ts, cs, N, hi_):\n dts = _delta(ts)\n hs = [hi_(ci, dt, N) for dt, ci in zip(dts, cs[:-1])]\n return hs\n\ndef _delta_kfs(ts, cs, x0, N, hi_, full_output = False):\n ms, ums = _delta_ms(cs)\n hs = _hs(ts, cs, N, hi_)\n xs, uxs, res = _kfs(ms, ums, hs, x0)\n result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n return result\n\ndef _rvs(cs):\n nsample, size = len(cs), len(cs[0])\n n0 = np.random.poisson(np.abs(cs[0]))\n ns = [n0,]\n for i in range(1, nsample):\n dci = cs[i] - cs[i-1]\n nip = ns[i-1]\n sig = np.ones(size)\n sig[dci < 0 ] = -1.\n dni = sig * np.random.poisson(np.abs(dci))\n ni = nip + dni\n ns .append(ni)\n return ns\n\ndef _hrvs(N, x0, ci0, hi_, t0 = 0, dt = 0.5, nsamples = 200):\n size = len(ci0)\n ts = [t0 + i * dt for i in range(nsamples)]\n cis = [ci0,]\n for i in range(1, nsamples):\n cip = cis[i-1]\n # #print('cip', cip)\n hi = hi_(cip, dt, N)\n # #print('hi ', hi)\n dci = np.matmul(hi, x0.T)\n # #print('dci ', dci)\n ci = cip + dci\n cis .append(ci)\n nis = _rvs(cis)\n return ts, cis, nis\n\n\n#\n# Specific KF\n#\n\n\n#\n# Extende\n#-----\n\ndef sirm_fi(si, xi, dt, N):\n f = np.identity(4)\n s = (N - np.sum(si))/N\n ni, nid = si[0], xi[4]\n f = np.identity(4)\n f[4, 1], f[4, 4] = s * ni * dt, nid * dt\n 
return f\n\ndef sirm_hi(si, xi, dt, N):\n h = np.zeros( 3 * 5).reshape(3, 5)\n s = (N - np.sum(si))/N\n ni, nid = si[0], xi[4]\n h[0, 0 ], h[0, 2] = s * ni, nid - ni\n h[1, 2] = -nid + ni\n h[2, 3] = nid\n return dt * h\n\n\ndef sirm_hv(s0 = (1, 0, 0), x0 = (0.6, 0.06, 0.2, 0.2), dt = 1, N = 1e6, nsamples = 100):\n ts, xs, ss = [], [], []\n for i in range(nsamples):\n ti = i * dt\n sip, xip = s0, x0 if i == 0 else xs[i-1], ss[i-1]\n fi = sirm_fi(sip, xip, dt, N)\n xi = mprod_(fi, x0.T)\n hi = sirm_hi(sip, xip, dt, N)\n si = mprod_(hi, x0.T)\n ss.append(si); xs.append(xi)\n return ts, xs, ss\n\n\n#\n#\n#-------\n\ndef sirm_hi(ci, dt, N):\n size = 3\n ni, nr, nd = ci[0], ci[1], ci[2]\n h = np.zeros(size * size).reshape(size, size)\n si = N - np.sum(ci)\n h[0, 0], h[0, 1], h[0, 2] = si/N, -1., -1.\n h[1, 1] = 1.\n h[2, 2] = 1.\n h = h * dt * ni\n return h\n\ndef sirm_rvs(N, x0, s0 = np.array((1, 0, 0)), **kargs):\n return _hrvs(N, x0, s0, sirm_hi, **kargs)\n\ndef sirm_kf(ts, cs, x0, N, full_output = False, **kargs):\n return _delta_kfs(ts, cs, x0, N, sirm_hi, full_output, **kargs)\n #ms, ums = delta_ms(cs)\n #hs = hs_(ts, cs, N, sirm_hi)\n #xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n #result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n #return result\n\n#\n# SIM Model\n#\n\ndef sir_hi(ci, dt, N):\n size = 2\n ni, nr = ci[0], ci[1]\n h = np.zeros(size * size).reshape(size, size)\n si = N - np.sum(ci)\n h[0, 0], h[0, 1] = si/N, -1., -1\n h[1, 2] = 1.\n h = h * dt * ni\n return h\n\ndef sir_rvs(N, x0, s0 = np.array((1, 0, 0)), **kargs):\n return _hrvs(N, x0, s0, sirm_hi, **kargs)\n\ndef sir_kf(ts, cs, x0, N, full_output = False, **kargs):\n return _delta_kfs(ts, cs, x0, N, sir_hi, full_output, **kargs)\n# ms, ums = _delta_ms(cs)\n# hs = _hs(ts, cs, N, sir_hi)\n# xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n# result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n# return result\n\n#\n\n#\n# SIR Model\n#\n\n# def sir_hi(ci, dt, N):\n# size = 2\n# ni, nr = ci[0], ci[1]\n# h = np.zeros(size * size).reshape(size, size)\n# si = N - np.sum(ci)\n# h[0, 0], h[0, 1] = si/N, -1.\n# h[1, 1] = 1.\n# h = h * dt * ni\n# return h\n#\n# def sir_rvs(N, x0, **kargs):\n# ci = np.array((1, 0))\n# return _hrvs(N, x0, ci, sir_hi, **kargs)\n#\n# def sir_kf(ts, cs, x0, N, full_output = False, **kargs):\n# ms, ums = delta_ms(cs)\n# hs = hs_(ts, cs, N, sir_hi)\n# xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n# result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n# return result\n\n#\n# SEIR model\n#\n\ndef seir_hi(ci, dt, N):\n size = 3\n ne, ni, nr = ci[0], ci[1], ci[2]\n si = N - np.sum(ci)\n h = np.zeros(size * size).reshape(size, size)\n h[0, 0], h[0, 2] = ni * si/N, -ne\n h[1, 1], h[1, 2] = -ni , ne\n h[2, 1] = ni\n h = h * dt\n return h\n\ndef seir_rvs(N, x0, ci = (1, 1, 0), **kargs):\n ci = np.array(ci)\n return _hrvs(N, x0, ci, seir_hi, **kargs)\n\n\ndef seir_kf(ts, cs, x0, N, full_output = False, **kargs):\n ms, ums = delta_ms(cs)\n hs = hs_(ts, cs, N, seir_hi)\n xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n return result\n\n#\n# SEIR2 model\n#\n\ndef seir2_hi(ci, dt, N):\n ne, ni, nr, nd = ci[0], ci[1], ci[2], ci[3]\n si = N - np.sum(ci)\n h = np.zeros(4 * 5).reshape(4, 5)\n h[0, 0], h[0, 2] = ni * si/N, -ne\n h[1, 1], h[1, 2] = -ni , ne\n h[2, 1], h[2, 3] = ni , -ni\n h[3, 3], h[3, 4] = ni , -nd\n h = h * dt\n return h\n\n\ndef seir2_rvs(N, x0, ci = (1, 1, 0, 0), **kargs):\n ci 
= np.array(ci)\n return _hrvs(N, x0, ci, seir2_hi, **kargs)\n\n\ndef seir2_kf(ts, cs, x0, N, full_output = False, **kargs):\n ms, ums = delta_ms(cs)\n hs = hs_(ts, cs, N, seir2_hi)\n xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n return result\n\n\n\n#\n# SIR2 no-exposed model\n#\n\ndef sir2_hi(ci, dt, N):\n ni, nr, nd = ci[0], ci[1], ci[2]\n si = N - np.sum(ci)\n h = np.zeros(3 * 4).reshape(3, 4)\n h[0, 0], h[0, 1] = ni * si/N, -ni\n h[1, 1], h[1, 2] = ni , -ni\n h[2, 2], h[2, 3] = ni , -nd\n h = h * dt\n return h\n\n\ndef sir2_rvs(N, x0, ci = (1, 0, 0), **kargs):\n ci = np.array(ci)\n return _hrvs(N, x0, ci, sir2_hi, **kargs)\n\n\ndef sir2_kf(ts, cs, x0, N, full_output = False, **kargs):\n ms, ums = delta_ms(cs)\n hs = hs_(ts, cs, N, sir2_hi)\n xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n return result\n\n#\n# SIR2 completed with death\n#\n\ndef sir2c_hi(ci, dt, N):\n ni, nr, nd, nm = ci[0], ci[1], ci[2], ci[3]\n si = N - np.sum(ci)\n h = np.zeros(4 * 4).reshape(4, 4)\n h[0, 0], h[0, 1] = ni * si/N, -ni\n h[1, 1], h[1, 2] = ni , -ni\n h[2, 2], h[2, 3] = ni , -nd\n h[3, 3] = nd\n h = h * dt\n return h\n\n\ndef sir2c_rvs(N, x0, ci = (1, 0, 0, 0), **kargs):\n ci = np.array(ci)\n return _hrvs(N, x0, ci, sir2c_hi, **kargs)\n\n\ndef sir2c_kf(ts, cs, x0, N, full_output = False, **kargs):\n ms, ums = delta_ms(cs)\n hs = hs_(ts, cs, N, sir2c_hi)\n xs, uxs, res = _kfs(ms, ums, hs, x0, N, **kargs)\n result = (xs, uxs, res, ms, ums, hs) if full_output else (xs, uxs)\n return result\n\n#\n# Chainned KF\n#\n#\n\ndef sirm_xhi(sp, dt, sf):\n ni, nr, nm = sp[0], sp[1], sp[2]\n h = np.zeros(3 * 3).reshape(3, 3)\n h[0, 0], h[0, 1], h[0, 2] = ni * sf, -ni, -ni\n h[1, 1] = ni\n h[2, 2] = ni\n h = h * dt\n return h\n\ndef sirm_sfi(xi, dt, sf):\n beta, gamma, rho = xi[0], xi[1], xi[2]\n f = np.zeros(3 * 3).reshape(3, 3)\n f[0, 0] = sf * beta - gamma\n f[1, 0] = gamma - rho\n f[2, 0] = rho\n f = np.identity(3) + f * dt\n return f\n\ndef _extrapolate(ts, ss, xs, N, sfi_):\n\n sps = []\n for i in range(1, len(ss)):\n dt, si, xi = ts[i] - ts[i-1], ss[i-1], xs[i-1]\n sf = (N - np.sum(si))/N\n sfi = sfi_(xi, dt, sf)\n spi = mprod_(sfi, si.T)\n #print('si ', si)\n #print('dt ', dt, 'xi', xi)\n #print('fi ', sfi)\n #print('spi', spi)\n sps.append(spi)\n return sps\n\n\ndef _delta_ckfs(ts, cs, x0, s0, N, xhi_, sfi_, sh = None, sigma = 100, **kargs):\n csize, ssize, xsize = len(cs[0]), len(s0), len(x0)\n #ucs = np.random.poisson(np.sqrt(cs))\n ms, ums = _delta_ms(cs)\n ssize, xsize = len(s0), len(x0)\n shi = sh if sh is not None else np.identity(ssize)\n #hp = np.zeros(2 * 3).reshape(2, 3)\n #hp[0, 0], hp[1, 2] = 1, 1\n ux0 = sigma * sigma * np.identity(xsize)\n us0 = sigma * sigma * np.identity(ssize)\n xs, uxs, xrs = [x0,], [ux0,], []\n ss, uss, srs = [s0,], [us0,], []\n #for i in range(1, 20): # len(ms)):\n for i in range(1, len(ms)):\n ci, uci = cs[i] , np.identity(csize) * np.sqrt(np.maximum(1, cs[i]))\n mi, umi = ms[i] , ums[i-1]\n xp, uxp = xs[i-1], uxs[i-1]\n sp, usp = ss[i-1], uss[i-1]\n # Move first the samples\n dt = ts[i] - ts[i-1]\n sf = (N - np.sum(sp))/N\n sfi = sfi_(xp, dt, sf)\n usp = usp * (np.sqrt(2) ** 2)\n si, usi, sri, _ = _kfi(sp, usp, ci, uci, shi, sfi)\n si = np.maximum(si, 0.)\n si = np.minimum(si, N)\n # Now the state\n # Use the true values\n # sp = cp -\n dt = ts[i] - ts[i-1]\n sf = (N - np.sum(si))/N\n xhi = mprod_(shi, xhi_(si, dt, sf))\n xi, 
uxi, xri, _ = _kfi(xp, uxp, mi, umi, xhi)\n xi = np.maximum(xi, 0.)\n #print('ci ', ci)\n #print('sp ', sp)\n #print('si-p', mprod_(sfi, sp))\n #print('si ', si)\n xs.append(xi); uxs.append(uxi); xrs.append(xri)\n ss.append(si); uss.append(usi); srs.append(sri)\n return (xs, uxs, xrs), (ss, uss, srs)\n#\n#\n\n\n#\n# def model_h(ci, dt, N):\n# h\n# P * h, (1- P)* h\n# return h\n#\n# def model_rvs()\n#\n#\n# def _model_kfi(sp, xp, uxp, mi, umi):\n# h = model_hi(sp, dt, N):\n#\n# si = h * xp\n#\n#\n#\n# def model_kf(ts, cs, c0, N, full_output = False, **kargs):\n# nsample = len(cs)\n# ms, ums = delta_ms(cs)\n# for i in range(1, nsample):\n# mi, umi = ms[i], umi[i]\n# sp, xp, uxp = ss[i], xs[i], uxp[i]\n# si, xi, uxi, res = _model_kfi(xp, uxp, sp, mi, umi)\n#\n#\n# #\n# #\n# #\n#\n#\n# #---------\n#\n# #\n# # def kf_hssir_ms(ts, cs, N):\n# #\n#\n# def kf_kfilter(ts, nis, x0, N, mifun, hifun, sigma = 100., ufactor = 1., full_output = False):\n# nsample, size, msize = len(ts), len(x0), len(nis)\n# dt = ts[1] - ts[0]\n# ux0 = np.identity(size)*sigma*sigma\n# xs = [x0 for ii in range(nsample)]\n# uxs = [np.identity(size) for ii in range(nsample)]\n# res = [np.zeros(size) for ii in range(nsample)]\n# #hmatrix = hmatrix[model]\n# ms = _delta(nis)\n# ums = [np.identity(msize) * np.abs(ms) * ufactor]\n# hi_ = models[model]\n# hs = [hi_(ni, dt, N) for ni in nis]\n# for i in range(nsample):\n# xp, uxp = x0, ux0\n# if (i >= 1):\n# xp, uxp = xs[i-1], uxs[i-1]\n# xi, uxi, resi, _ = _kfilter(xp, uxp, ms[i], ums[i], hs[i])\n# xs[i], uxs[i], res[i] = xi, uxi, resi\n# result = (xs, uxs) if full_output is False else (xs, uxs, ms, ums, res)\n# return result\n#\n#\n# def kf_kfilter(ts, cs, x0, N, model = 'seir2', sigma = 100., ufactor = 2., full_output = False):\n# nsample, size = len(ts), len(x0)\n# ux0 = np.identity(size)*sigma*sigma\n# xs = [x0 for ii in range(nsample)]\n# uxs = [np.identity(size) for ii in range(nsample)]\n# res = [np.zeros(size) for ii in range(nsample)]\n# #hmatrix = hmatrix[model]\n# ms, ums = kf_measurements(ts, cs)\n# ms [-1] = cs[-1]\n# for i in range(nsample):\n# ums[-1][4, 4] = mp.sqrt(np.maximum(ms[-1][i], 2.4))\n# hs = kf_hmatrices_seir2(ts, cs, N)\n# for i in range(nsample):\n# xp, uxp = x0, ux0\n# if (i >= 1):\n# xp, uxp = xs[i-1], uxs[i-1]\n# xi, uxi, resi, _ = _kfilter(xp, uxp, ms[i], ums[i], hs[i])\n# xs[i], uxs[i], res[i] = xi, uxi, resi\n# result = (xs, uxs) if full_output is False else (xs, uxs, ms, ums, hs, res)\n# return result\n","sub_path":"c19/kfilter.py","file_name":"kfilter.py","file_ext":"py","file_size_in_byte":17428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"362689914","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom google.appengine.ext import ndb\nfrom gaecookie.decorator import no_csrf\nfrom gaeforms import base\nfrom gaeforms.base import Form\nfrom gaeforms.ndb.form import ModelForm\nfrom gaegraph.model import Node\nfrom gaepermission.decorator import login_not_required,login_required\nfrom config.template_middleware import TemplateResponse\nfrom tekton import router\nfrom tekton.gae.middleware.redirect import RedirectResponse\nfrom gaegraph.model import Arc\n\n\n# CRUD\n\n@login_required\n@no_csrf\ndef index():\n\n query = Noticias.query().order(Noticias.nome)\n noticia_lista = query.fetch()\n\n form = NoticiasForm()\n noticia_lista=[form.fill_with_model(noticias) for noticias in noticia_lista]\n\n editar_form_path = router.to_path(editar_form)\n\n delete_path = router.to_path(delete)\n\n for noticias in noticia_lista:\n noticias['edit_path']='%s/%s'%(editar_form_path,noticias['id'])\n noticias['delete_path'] = '%s/%s' % (delete_path,noticias['id'])\n contexto = {'noticias_lista':noticia_lista}\n\n return TemplateResponse(contexto)\n\n\n@no_csrf\n@login_required\ndef editar_form(categoria_id):\n categoria_id=int(categoria_id)\n categoria = Noticias.get_by_id(categoria_id)\n\n categoria_form = NoticiasForm()\n categoria_form.fill_with_model(categoria)\n\n # teste\n erros = categoria_form.validate()\n # teste\n #teste\n\n contexto = {'salvar_path': router.to_path(editar,categoria_id),\n 'erros': erros,\n 'noticias' : categoria_form}\n\n\n return TemplateResponse(contexto,router.to_path('/noticias/home.html'))\n\n\n@login_required\ndef delete(categoria_id):\n chave = ndb.Key(Noticias,int(categoria_id))\n chave.delete()\n return RedirectResponse(router.to_path(index))\n\n\n@login_required\n@no_csrf\ndef editar(categoria_id,**propriedades):\n categoria_id = int(categoria_id)\n categoria = Noticias.get_by_id(categoria_id)\n\n categoria_form = NoticiasForm(**propriedades)\n\n erros = categoria_form.validate()\n\n if erros:\n contexto = {'salvar_path': router.to_path(salvar),\n 'erros': erros,\n 'noticias' : categoria_form}\n return TemplateResponse(contexto,'/noticias/home.html')\n\n else:\n categoria_form.fill_model(categoria)\n\n categoria.put()\n return RedirectResponse(router.to_path(index))\n #feito para mostrar o que esta sendo salvo no banco\n\n\n##################\n@login_required\n@no_csrf\ndef salvar(**propriedades):\n categorias_form = NoticiasForm(**propriedades)\n erros = categorias_form.validate()\n if erros:\n contexto = {'salvar_path': router.to_path(index),\n 'erros': erros,\n 'noticias' : categorias_form}\n return TemplateResponse(contexto,'/noticias/home.html')\n\n else:\n categorias=categorias_form.fill_model()\n\n categorias.put()\n\n return RedirectResponse(router.to_path('/noticias_listar'))\n #feito para mostrar o que esta sendo salvo no banco\n#####################\n\n\n\n# Modelos\nclass Noticias(Node):\n nome = ndb.StringProperty(required=True)\n descricao = ndb.StringProperty()\n\nclass NoticiasForm(ModelForm):\n _model_class = Noticias\n _include = [Noticias.nome,Noticias.descricao]","sub_path":"backend/appengine/routes/noticias_listar/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"583557212","text":"from glob import iglob\nimport os\nimport requests\nimport pymongo\nimport os.path\nfrom pymongo import MongoClient\nimport json\nfrom py2neo import Graph,Path,authenticate,Node,Relationship\n\ndef commoncount(list1,list2):\n count = 0\n for element in list1:\n if element in list2:\n count +=1\n return count\n\nauthenticate(\"localhost:7474\",\"neo4j\",\"haha\")\ngraph = Graph(\"http://localhost:7474/db/data/\")\narray = []\ni=0\nfor fname in iglob(os.path.expanduser('test/*.json')):\n with open(fname) as fin:\n videos = json.load(fin)\n array.append(videos)\n stats = videos['videoInfo']['statistics']\n node = Node(\"Video\",id = videos['videoInfo']['id'], commentCount = stats['commentCount'], viewCount = stats['viewCount'], favoriteCount = stats['favoriteCount'], likeCount = int(stats['likeCount']) , dislikeCount = stats['dislikeCount'])\n graph.create(node)\nprint(\"Nodes Finished\")\n\nfor i in range(len(array)):\n temp = array[i]['videoInfo']\n for j in range(i-1,-1,-1):\n dup = array[j]['videoInfo']\n a = graph.find_one(\"Video\",property_key = 'id', property_value = temp['id'])\n b = graph.find_one(\"Video\",property_key = 'id', property_value = dup['id'])\n if temp['snippet']['channelId'] == dup['snippet']['channelId']:\n crelation = Relationship(a,\"samechannel\",b, weight=5)\n graph.create(crelation)\n\n dcount = 1.5*commoncount(temp['snippet']['description'].split(),dup['snippet']['description'].split())\n if dcount != 0:\n drelation = Relationship(a , \"similardescription\" , b , weight = dcount)\n graph.create(drelation)\n\n if 'tags' in temp['snippet'] and 'tags' in dup['snippet']:\n tcount = 4*commoncount(temp['snippet']['tags'],dup['snippet']['tags'])\n if tcount != 0:\n trelation = Relationship(a, \"similartags\", b, weight = tcount)\n graph.create(trelation)\n\n ccount=10*commoncount(temp['snippet']['title'].split(),dup['snippet']['title'].split())\n if ccount != 0:\n crelation = Relationship(a , \"similardescription\" , b , weight = ccount)\n graph.create(crelation)\n print(i)\n\n\nprint(\"Neo4j...Finish\")\n\n# match (n)-[r]-(m) where n.id='5zG6AagUQBY' return m.id,r.weight order by r.weight desc limit 5;\n\nconnection = MongoClient()\ndb = connection.videos\nvideos = db.videos\n\nfor fname in iglob(os.path.expanduser('test/*.json')):\n with open(fname) as fin:\n videos = json.load(fin)\n title= videos['videoInfo']['snippet']['title']\n desc=videos['videoInfo']['snippet']['description']\n videos['title']=title\n videos['desc']=desc\n likeCount=videos['videoInfo']['statistics']['likeCount']\n videos['likeCount']=int(likeCount)\n del videos['videoInfo']['statistics']['likeCount']\n del videos['videoInfo']['snippet']['title']\n del videos['videoInfo']['snippet']['localized']['title']\n del videos['videoInfo']['snippet']['localized']['description']\n del videos['videoInfo']['snippet']['description']\n if 'tags' in videos['videoInfo']['snippet']:\n blah=videos['videoInfo']['snippet']['tags']\n videos['tags']=blah\n del videos['videoInfo']['snippet']['tags']\n ids=db.videos.insert(videos)\n#db.videos.createIndex( { title: \"text\", desc: \"text\", tags:\"text\" }, { weights: { title: 20, tags:4, desc: 2 }, name: \"TextIndex\" } )\n","sub_path":"myDbShit/NOSQL.py","file_name":"NOSQL.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"23940596","text":"def bellman_ford(vertices, edges, start, limit):\n distances = [float('inf')] * vertices\n distances[start] = 0\n for i in range(limit):\n tmp = list(distances)\n for (vertex_from, vertex_to, weight) in edges:\n if distances[vertex_to] > tmp[vertex_from] + weight:\n distances[vertex_to] = tmp[vertex_from] + weight\n return distances\n\n\nif __name__ == '__main__':\n with open('input.txt', 'r') as file:\n vertices, start, finish, limit = map(int, file.readline().split())\n adj_matrix = []\n for _ in range(vertices):\n adj_matrix.append(list(map(int, file.readline().split())))\n edges = []\n for i in range(vertices):\n for j in range(vertices):\n if adj_matrix[i][j] != -1:\n edges.append((i, j, adj_matrix[i][j]))\n distances = bellman_ford(vertices, edges, start, limit)\n print(distances[finish] if distances[finish] != float('inf') else -1)\n\n","sub_path":"bellman_ford.py","file_name":"bellman_ford.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"382288487","text":"# -*- coding: utf-8 -*-\nimport timeit\nimport sys\nimport io\nimport uuid\nimport random\n\nfrom ctools import *\n\n\ndef humanize(t):\n if t > 1:\n return \"%.3f s\" % t\n t *= 1000\n if t > 1:\n return \"%.3f ms\" % t\n t *= 1000\n if t > 1:\n return \"%.3f µs\" % t\n t *= 1000\n return \"%.3f ns\" % t\n\n\ndef mean_std(seq):\n mean = sum(seq) / len(seq)\n return mean, sum((x - mean) ** 2 for x in seq) ** 0.5\n\n\nbuffer = io.StringIO()\n\n\ndef run_str(run, title, loop=1000000, repeat=10, **kwargs):\n t_arr = timeit.repeat(run, globals=globals(), number=loop, repeat=repeat, **kwargs)\n t_arr = [t / loop for t in t_arr]\n mean, std = mean_std(t_arr)\n print(\n title, \",\\t\", humanize(mean), \" ± \", humanize(std),\n \"each ({:,} runs, {:,} loops)\".format(repeat, loop),\n sep=\"\", flush=True, file=sys.stderr\n )\n\n\nrun_str(\"int8_to_datetime(20170101)\", 'int8_to_datetime')\n\nrad = random.randint(0, 0xffffffff)\n\nrun_str(\n \"jump_consistent_hash(65535, 1024)\",\n \"jump_consistent_hash\",\n setup=\"rad = random.randint(0, 0xffffffff)\"\n)\n\nstring = str(uuid.uuid1())\nrun_str(\"strhash(string)\", \"strhash default\", setup=\"string = str(uuid.uuid1())\")\nrun_str(\"strhash(string, 'fnv1a')\", \"strhash fnv1a\", setup=\"string = str(uuid.uuid1())\")\nrun_str(\"strhash(string, 'fnv1')\", \"strhash fnv1\", setup=\"string = str(uuid.uuid1())\")\nrun_str(\"strhash(string, 'djb2')\", \"strhash djb2\", setup=\"string = str(uuid.uuid1())\")\nrun_str(\"strhash(string, 'murmur')\", \"strhash murmur\", setup=\"string = str(uuid.uuid1())\")\n","sub_path":"benchmarks/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"44609669","text":"from setuptools import setup, find_packages\n\nfrom helm_upgrade import __version__\n\n# Source dependencies from requirements.txt file.\nwith open(\"requirements.txt\", \"r\") as f:\n lines = f.readlines()\n install_packages = [line.strip() for line in lines]\n\nsetup(\n name=\"helm_upgrade\",\n version=__version__,\n install_requires=install_packages,\n include_package_data=True,\n python_requires=\">=3.7\",\n author=\"Sarah Gibson\",\n author_email=\"drsarahlgibson@gmail.com\",\n url=\"https://sgibson91.github.io/\",\n # this should be a whitespace separated string of keywords, not a list\n keywords=\"development helm dependencies\",\n description=\"Update the dependencies of a helm chart to the latest published versions.\", # noqa: E501\n long_description=open(\"./README.md\", \"r\").read(),\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n packages=find_packages(),\n use_package_data=True,\n entry_points={\"console_scripts\": [\"helm-upgrade = helm_upgrade.cli:main\"]},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"117684988","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import Point_type, Point\n\n\nclass PointAdmin(admin.ModelAdmin):\n \"\"\"\n Settings for Point section in admin.\n \"\"\"\n list_display = ('name', 'code', 'point_type', 'address')\n list_display_links = ('name', 'address')\n list_filter = ('point_type',)\n search_fields = ('name', 'code')\n\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return ['code']\n else:\n return []\n\n\nclass PointtypeAdmin(admin.ModelAdmin):\n \"\"\"\n Settings for Point type section in admin.\n \"\"\"\n list_display = ('name', 'description', 'points_count')\n search_fields = ('name',)\n\n def points_count(self, obj):\n return obj.points.count()\n points_count.short_description = _('points count')\n\n\nadmin.site.register(Point_type, PointtypeAdmin)\nadmin.site.register(Point, PointAdmin)\n","sub_path":"points/pointsapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"89645784","text":"\nimport logging\nfrom copy import deepcopy\nimport random\n\nimport new_utility_functions\nimport main\nimport players\nimport games\nimport clot\n\n\n\ndef createGames_RoundRobin(tourney_id, tourney_clotconfig):\n\t\"\"\"This is called periodically to check for new games that need to be created.\n\tthe roundrobin part is that we want everyone to play everyone else.\n\tso the players not currently in games are just paired up with each other,\n\tso long as they have not yet played each other.\n\t\"\"\"\n\tlogging.info('')\n\tlogging.info('in createGames_RoundRobin()')\n\n\tif main.hasTourneyFinished(tourney_id, tourney_clotconfig):\n\t\tlogging.info('round robin tourney has finished')\n\t\treturn\n\n\t#Retrieve all games that are ongoing\n\tactiveGames = list(games.Game.all().filter(\"winner =\", None).filter(\"tourney_id =\", tourney_id)) ###.run(batch_size=1000))\n\tactiveGameIDs = dict([[g.key().id(), g] for g in activeGames])\n\t#logging.info(\"Active games: \" + str(activeGameIDs))\n\n\t#Throw all of the player IDs that are in these ongoing games into a dictionary\n\tplayerIDsInGames = dict([[gp.playerID, gp] for gp in games.GamePlayer.all().filter(\"tourney_id =\", tourney_id) if gp.gameID in activeGameIDs]) ##.run(batch_size=1000) \n\n\t#Find all players who aren't in the dictionary (and therefore aren't in any games) and also have not left the CLOT (isParticipating is true)\n\tallPlayers = players.Player.all().filter(\"tourney_id =\", tourney_id)#.run(batch_size=1000)\n\t\n\tall_players_vec = [p for p in allPlayers]\n\t#logging.info(\"all_players_vec: \")\n\t#logging.info(all_players_vec)\n\tall_players_keys_ids_vec = [p.key().id() for p in allPlayers]\n\t#logging.info(\"all_players_keys_ids_vec: \" + str(all_players_keys_ids_vec))\n\tplayer_ids_in_games_vec = [p for p in playerIDsInGames]\n\t#logging.info(\"player_ids_in_games_vec: \" + str(player_ids_in_games_vec))\n\t\n\tplayersNotInGames = [p for p in allPlayers if p.isParticipating and p.key().id() not in playerIDsInGames]\n\t#logging.info(\"Players not in games: \")\n\t#logging.info(playersNotInGames)\n\n\t#------------------------\n\t#now pair up players who are not in games. 
IF they have not played each other yet.\n\n\t#get the head-to-head matrix, so we can see who has played who\n\thead_to_head_biggermat, head_to_head_2d = new_utility_functions.getHeadToHeadTable(tourney_id)\n\t##logging.info('head_to_head_2d:')\n\t##logging.info(head_to_head_2d)\n\n\t#\n\tthe_ids = deepcopy(head_to_head_biggermat[0][1:])\n\t#logging.info('the_ids:')\n\t#logging.info(the_ids)\n\n\t#Randomize the order\n\trandom.shuffle(playersNotInGames)\n\n\t#loop over all possible pairs, and pair IF they have not played each other yet\n\tpaired_yet = [False]*len(playersNotInGames)\n\tlist_for_pairing = []\n\tfor i in range(0,len(playersNotInGames)-1):\n\t\tif not paired_yet[i]:\n\t\t\tpi = playersNotInGames[i]\n\t\t\tpi_id = int(pi.player_id)\n\t\t\tpi_index = the_ids.index(pi_id) #find where in the head-to-head matrix this player is.\n\t\t\t\n\t\t\t#logging.info('pi:')\n\t\t\t#logging.info(pi)\n\t\t\t#logging.info(pi_id)\n\t\t\t#logging.info(pi_index)\n\t\t\t\n\t\t\tfor j in range(i+1,len(playersNotInGames)):\n\t\t\t\tif (not paired_yet[j]) and (not paired_yet[i]):\n\t\t\t\t\tpj = playersNotInGames[j]\n\t\t\t\t\tpj_id = int(pj.player_id)\n\t\t\t\t\tpj_index = the_ids.index(pj_id) #find where in the head-to-head matrix this player is.\n\t\t\t\t\t\n\t\t\t\t\t#logging.info('pj:')\n\t\t\t\t\t#logging.info(pj)\n\t\t\t\t\t#logging.info(pj_id)\n\t\t\t\t\t#logging.info(pj_index)\n\t\t\t\n\t\t\t\t\tif (head_to_head_2d[pi_index][pj_index][0]==0) and (head_to_head_2d[pj_index][pi_index][0]==0): \n\t\t\t\t\t\t#they have not played each other.\n\t\t\t\t\t\t#so match them.\n\t\t\t\t\t\tpaired_yet[i] = True\n\t\t\t\t\t\tpaired_yet[j] = True\n\t\t\t\t\t\tlist_for_pairing.append(pi)\n\t\t\t\t\t\tlist_for_pairing.append(pj)\n\t\t\t\t\t\t#logging.info('paired '+str(pi)+' '+str(pj))\n\n\t##debug\n\t#logging.info(\"new player order is: \")\n\t#logging.info(list_for_pairing)\n\t#for pair in clot.pairs(list_for_pairing):\n\t#\tlogging.info(pair)\n\t##end of debug\n\n\t#The template ID defines the settings used when the game is created. You can create your own template on warlight.net and enter its ID here\n\ttemplateID = main.getTemplateID(tourney_id, tourney_clotconfig)\n\n\t#Create a game for everyone not in a game.\n\tgamesCreated = [games.createGame(pair, templateID, tourney_id) for pair in clot.pairs(list_for_pairing)]\n\tlogging.info(\"Created games \" + str(gamesCreated))\n\t\n\tif (len(activeGames)==0) and (len(list_for_pairing)==0):\n\t\tif main.isTourneyInPlay(tourney_id, tourney_clotconfig):\n\t\t\t#tourney is in play, but no games are going on, and we found no games we could create.\n\t\t\t#so the tourney is over\n\t\t\tmain.endTourney(tourney_id, tourney_clotconfig)\n\t\t\tlogging.info('')\n\t\t\tlogging.info('all games have been played, so TOURNAMENT IS OVER !!!!!!!!!!!!!!')\n\t\t\tlogging.info('')\n\n\n\n","sub_path":"tournament_roundrobin.py","file_name":"tournament_roundrobin.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"410525499","text":"import time \nimport sys\nimport logging\nimport socket\n\nlogger = logging.getLogger(\"job\")\n\nTASK_STARTING = 0\nTASK_RUNNING = 1\nTASK_FINISHED = 2\nTASK_FAILED = 3\nTASK_KILLED = 4\nTASK_LOST = 5\n\nclass Job:\n def __init__(self):\n self.id = self.newJobId()\n self.start = time.time()\n\n def slaveOffer(self, s, availableCpus):\n raise NotImplementedError\n\n def statusUpdate(self, t):\n raise NotImplementedError\n\n def error(self, code, message):\n raise NotImplementedError\n \n nextJobId = 0\n @classmethod\n def newJobId(cls):\n cls.nextJobId += 1\n return cls.nextJobId\n\nLOCALITY_WAIT = 5\nMAX_TASK_FAILURES = 4\nCPUS_PER_TASK = 1\n\n# A Job that runs a set of tasks with no interdependencies.\nclass SimpleJob(Job):\n\n def __init__(self, sched, tasks):\n Job.__init__(self)\n self.sched = sched\n self.tasks = tasks\n\n self.launched = [False] * len(tasks)\n self.finished = [False] * len(tasks)\n self.numFailures = [0] * len(tasks)\n self.blacklist = [[] for i in xrange(len(tasks))]\n self.tidToIndex = {}\n self.numTasks = len(tasks)\n self.tasksLaunched = 0\n self.tasksFinished = 0\n self.total_used = 0\n\n self.lastPreferredLaunchTime = time.time()\n\n self.pendingTasksForHost = {}\n self.pendingTasksWithNoPrefs = []\n self.allPendingTasks = []\n\n self.failed = False\n self.causeOfFailure = \"\"\n\n for i in range(len(tasks)):\n self.addPendingTask(i)\n\n @property\n def taskEverageTime(self):\n return self.total_used / self.tasksFinished\n\n def addPendingTask(self, i):\n loc = self.tasks[i].preferredLocations()\n if not loc:\n self.pendingTasksWithNoPrefs.append(i)\n else:\n for host in loc:\n self.pendingTasksForHost.setdefault(host, []).append(i)\n self.allPendingTasks.append(i)\n\n def getPendingTasksForHost(self, host):\n try:\n h, hs, ips = socket.gethostbyname_ex(host)\n except Exception:\n h, hs, ips = host, [], []\n return sum((self.pendingTasksForHost.setdefault(h, []) \n for h in [h] + hs + ips), [])\n\n def findTaskFromList(self, l, host):\n for i in l:\n if not self.launched[i] and not self.finished[i] and host not in self.blacklist[i]:\n self.blacklist[i].append(host)\n return i\n\n def findTask(self, host, localOnly):\n localTask = self.findTaskFromList(self.getPendingTasksForHost(host), host)\n if localTask is not None:\n return localTask, True\n noPrefTask = self.findTaskFromList(self.pendingTasksWithNoPrefs, host)\n if noPrefTask is not None:\n return noPrefTask, True\n if not localOnly:\n return self.findTaskFromList(self.allPendingTasks, host), False\n# else:\n# print repr(host), self.pendingTasksForHost\n return None, False\n\n # Respond to an offer of a single slave from the scheduler by finding a task\n def slaveOffer(self, host, availableCpus): \n if self.tasksLaunched >= self.numTasks:\n if (self.tasksFinished < self.numTasks \n and self.tasksFinished > self.numTasks *.75):\n # re-submit timeout task\n avg = self.taskEverageTime\n now = time.time()\n task = sorted((task.start, task) \n for i,task in enumerate(self.tasks) \n if not self.finished[i])[0][1]\n used = time.time() - task.start\n if used > avg * 2 and used > 10:\n if task.tried <= MAX_TASK_FAILURES:\n logger.warning(\"re-submit task %s for timeout %s\",\n task.id, used)\n task.start = time.time()\n task.tried += 1\n return task\n else:\n logger.error(\"tast %s timeout, aborting job %s\",\n task, self.id)\n self.abort(\"task %s timeout\" % task)\n return\n\n now = time.time()\n localOnly = (now - self.lastPreferredLaunchTime < LOCALITY_WAIT)\n i, preferred = 
self.findTask(host, localOnly)\n if i is not None:\n task = self.tasks[i]\n task.start = now\n task.tried = 0\n prefStr = preferred and \"preferred\" or \"non-preferred\"\n logger.debug(\"Starting task %d:%d as TID %s on slave %s (%s)\", \n self.id, i, task, host, prefStr)\n self.tidToIndex[task.id] = i\n self.launched[i] = True\n self.tasksLaunched += 1\n if preferred:\n self.lastPreferredLaunchTime = now\n return task\n logger.debug(\"no task found %s\", localOnly)\n\n def statusUpdate(self, tid, status, reason=None, result=None, update=None):\n logger.debug(\"job status update %s %s %s\", tid, status, reason)\n if status == TASK_FINISHED:\n self.taskFinished(tid, result, update)\n elif status in (TASK_LOST, \n TASK_FAILED, TASK_KILLED):\n self.taskLost(tid, status, reason)\n\n def taskFinished(self, tid, result, update):\n i = self.tidToIndex[tid]\n if not self.finished[i]:\n self.finished[i] = True\n self.tasksFinished += 1\n task = self.tasks[i]\n task.used = time.time() - task.start\n self.total_used += task.used\n logger.info(\"Task %s finished in %.2fs (%d/%d)\",\n tid, task.used, self.tasksFinished, self.numTasks)\n from schedule import Success\n self.sched.taskEnded(task, Success(), result, update)\n if self.tasksFinished == self.numTasks:\n ts = [t.used for t in self.tasks]\n tried = [t.tried for t in self.tasks]\n logger.info(\"Job %d finished in %ss: min=%s, avg=%s, max=%s, maxtry=%s\",\n self.id, time.time()-self.start, \n min(ts), sum(ts)/len(ts), max(ts), max(tried))\n self.sched.jobFinished(self)\n else:\n logger.info(\"Ignoring task-finished event for TID %d \"\n + \"because task %d is already finished\", tid, i)\n\n def taskLost(self, tid, status, reason):\n index = self.tidToIndex[tid]\n if not self.finished[index]:\n logger.warning(\"Lost TID %s (task %d:%d) %s\", tid, self.id, index, reason)\n self.launched[index] = False\n self.tasksLaunched -= 1\n\n from schedule import FetchFailed\n if isinstance(reason, FetchFailed):\n logger.warning(\"Loss was due to fetch failure from %s\",\n reason.serverUri)\n self.sched.taskEnded(self.tasks[index], reason, None, None)\n self.finished[index] = True\n self.tasksFinished += 1\n if self.tasksFinished == self.numTasks:\n self.sched.jobFinished(self)\n return\n logger.warning(\"re-enqueue the task as pending for a max number of retries\")\n if status == TASK_FAILED:\n logger.warning(\"task %s failed with: %s\", \n self.tasks[index], reason and reason.message)\n self.addPendingTask(index)\n self.sched.requestMoreResources()\n if status in (TASK_FAILED, TASK_LOST):\n self.numFailures[index] += 1\n if self.numFailures[index] > MAX_TASK_FAILURES:\n logger.error(\"Task %d failed more than %d times; aborting job\", index, MAX_TASK_FAILURES)\n self.abort(\"Task %d failed more than %d times\" \n % (index, MAX_TASK_FAILURES))\n\n else:\n logger.warning(\"Ignoring task-lost event for TID %d \"\n +\"because task %d is already finished\")\n\n def abort(self, message):\n logger.error(\"abort the job: %s\", message)\n self.failed = True\n self.causeOfFailure = message\n self.sched.jobFinished(self)\n self.sched.shutdown()\n","sub_path":"dpark/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"129264902","text":"# Copyright 2015 Janos Czentye \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nImplements the platform and POX dependent logic for the Service Adaptation\nSublayer.\n\"\"\"\nimport httplib\nimport os\nfrom subprocess import Popen\n\nfrom escape.nffg_lib.nffg import NFFG, NFFGToolBox\nfrom escape.orchest.ros_API import InstantiationFinishedEvent, \\\n BasicUnifyRequestHandler\nfrom escape.service import LAYER_NAME, log as log # Service layer logger\nfrom escape.service.element_mgmt import ClickManager\nfrom escape.service.sas_orchestration import ServiceOrchestrator\nfrom escape.util.api import AbstractAPI, RESTServer, AbstractRequestHandler, \\\n RequestStatus, RequestScheduler\nfrom escape.util.config import CONFIG\nfrom escape.util.conversion import NFFGConverter\nfrom escape.util.domain import BaseResultEvent\nfrom escape.util.mapping import PreMapEvent, PostMapEvent, ProcessorError\nfrom escape.util.misc import schedule_delayed_as_coop_task, \\\n schedule_as_coop_task, VERBOSE, quit_with_ok, \\\n get_global_parameter, quit_with_error\nfrom escape.util.stat import stats\nfrom pox.lib.revent.revent import Event\n\nSCHEDULED_SERVICE_REQUEST_DELAY = CONFIG.get_sas_request_delay()\n\n\nclass InstantiateNFFGEvent(Event):\n \"\"\"\n Event for passing NFFG (mapped SG) to Orchestration layer.\n \"\"\"\n\n def __init__ (self, nffg, resource_nffg):\n \"\"\"\n Init.\n\n :param nffg: NF-FG need to be initiated\n :type nffg: :class:`NFFG`\n :return: None\n \"\"\"\n super(InstantiateNFFGEvent, self).__init__()\n self.nffg = nffg\n self.resource_nffg = resource_nffg\n stats.add_measurement_end_entry(type=stats.TYPE_SERVICE, info=LAYER_NAME)\n\n\nclass GetVirtResInfoEvent(Event):\n \"\"\"\n Event for requesting virtual resource info from Orchestration layer.\n \"\"\"\n\n def __init__ (self, sid):\n \"\"\"\n Init.\n\n :param sid: Service layer ID\n :type sid: int\n :return: None\n \"\"\"\n super(GetVirtResInfoEvent, self).__init__()\n # service layer ID\n self.sid = sid\n\n\nclass ServiceRequestHandler(BasicUnifyRequestHandler):\n \"\"\"\n Request Handler for Service Adaptation SubLayer.\n\n .. warning::\n This class is out of the context of the recoco's co-operative thread\n context! While you don't need to worry much about synchronization between\n recoco tasks, you do need to think about synchronization between recoco task\n and normal threads. 
Synchronisation must be handled manually: use the\n    relevant helper functions of the core object: `callLater`/`raiseLater`, or use\n    the `schedule_as_coop_task` decorator defined in util.misc on the called\n    function.\n  \"\"\"\n  # Bind HTTP verbs to UNIFY's API functions\n  request_perm = {\n    'GET': ('ping', 'version', 'operations', 'topology', 'status'),\n    'POST': ('ping', 'sg', 'topology'),\n    # 'DELETE': ('sg',),\n    'PUT': ('sg',)\n  }\n  \"\"\"Bind HTTP verbs to UNIFY's API functions\"\"\"\n  # Statically defined layer component to which this handler is bounded\n  # Need to be set by container class\n  bounded_layer = 'service'\n  \"\"\"Statically defined layer component to which this handler is bounded\"\"\"\n  static_prefix = \"escape\"\n  # Logger name\n  LOGGER_NAME = \"U-Sl\"\n  \"\"\"Logger name\"\"\"\n  log = log.getChild(\"[%s]\" % LOGGER_NAME)\n  # Use Virtualizer format\n  virtualizer_format_enabled = False\n  \"\"\"Use Virtualizer format\"\"\"\n  # Default communication approach\n  DEFAULT_DIFF = True\n  \"\"\"Default communication approach\"\"\"\n  # Bound function\n  API_CALL_RESOURCE = 'api_sas_get_topology'\n  API_CALL_REQUEST = 'api_sas_sg_request'\n\n  def __init__ (self, request, client_address, server):\n    \"\"\"\n    Init.\n\n    :param request: request type\n    :type request: str\n    :param client_address: client address\n    :type client_address: str\n    :param server: server object\n    :type server: :any:`BaseHTTPServer.HTTPServer`\n    :return: None\n    \"\"\"\n    AbstractRequestHandler.__init__(self, request, client_address, server)\n\n  def status (self, params):\n    \"\"\"\n    Return status of the given request.\n\n    :param params:\n    :return:\n    \"\"\"\n    message_id = params.get('message-id')\n    if not message_id:\n      self.send_error(code=httplib.BAD_REQUEST, message=\"message-id is missing\")\n      return\n    code, result = self._proceed_API_call('api_sas_status', message_id)\n    if not result:\n      self.send_acknowledge(code=code, message_id=message_id)\n      self.log.debug(\"Responded status code: %s\" % code)\n    else:\n      self.send_json_response(code=code, data=result)\n      self.log.debug(\"Responded status code: %s, data: %s\" % (code, result))\n\n  def topology (self, params):\n    \"\"\"\n    Provide internal topology description\n\n    Same functionality as \"get-config\" in UNIFY interface.\n\n    :return: None\n    \"\"\"\n    self.log.debug(\"Call %s function: topology\" % self.LOGGER_NAME)\n    # Forward call to main layer class\n    resource = self._proceed_API_call(self.API_CALL_RESOURCE)\n    self._topology_view_responder(resource_nffg=resource,\n                                  message_id=params.get(self.MESSAGE_ID_NAME))\n    self.log.debug(\"%s function: topology ended!\" % self.LOGGER_NAME)\n\n  def sg (self, params):\n    \"\"\"\n    Main API function for Service Graph initiation.\n\n    Same functionality as \"edit-config\" in UNIFY interface.\n\n    Bounded to POST HTTP verb.\n\n    :return: None\n    \"\"\"\n    self.log.debug(\"Call %s function: sg\" % self.LOGGER_NAME)\n    nffg = self._service_request_parser()\n    if nffg:\n      if nffg.service_id is None:\n        nffg.service_id = nffg.id\n      nffg.id = params[self.MESSAGE_ID_NAME]\n      self.log.debug(\"Set NFFG id: %s\" % nffg.id)\n      nffg.metadata['params'] = params\n      self.server.scheduler.schedule_request(id=nffg.id,\n                                             layer=self.bounded_layer,\n                                             function=self.API_CALL_REQUEST,\n                                             service_nffg=nffg, params=params)\n      self.send_acknowledge(message_id=params[self.MESSAGE_ID_NAME])\n    self.log.debug(\"%s function: sg ended!\" % self.LOGGER_NAME)\n\n\nclass ServiceLayerAPI(AbstractAPI):\n  \"\"\"\n  Entry point for Service Adaptation Sublayer.\n\n  Maintain contact with other 
UNIFY layers.\n\n Implement the U - Sl reference point.\n \"\"\"\n # Defined specific name for core object as pox.core.<_core_name>\n _core_name = LAYER_NAME\n \"\"\"Defined specific name for core object \"\"\"\n # Layer id constant\n LAYER_ID = \"ESCAPE-\" + LAYER_NAME\n \"\"\"Layer id constant\"\"\"\n # Events raised by this class\n _eventMixin_events = {InstantiateNFFGEvent, GetVirtResInfoEvent, PreMapEvent,\n PostMapEvent}\n \"\"\"Events raised by this class\"\"\"\n # Dependencies\n dependencies = ('orchestration',)\n \"\"\"Layer dependencies\"\"\"\n\n def __init__ (self, standalone=False, **kwargs):\n \"\"\"\n .. seealso::\n :func:`AbstractAPI.__init__() `\n \"\"\"\n log.info(\"Starting Service Layer...\")\n # Mandatory super() call\n self.last_sg = NFFG(id=0, name='empty')\n # Set element manager\n self.__sid = None\n self.elementManager = None\n self.service_orchestrator = None\n \"\"\":type ServiceOrchestrator\"\"\"\n self.gui_proc = None\n super(ServiceLayerAPI, self).__init__(standalone, **kwargs)\n\n def initialize (self):\n \"\"\"\n .. seealso::\n :func:`AbstractAPI.initialize() `\n \"\"\"\n log.debug(\"Initializing Service Layer...\")\n self.__sid = CONFIG.get_service_layer_id()\n if self.__sid is not None:\n log.debug(\"Setup ID for Service Layer: %s\" % self.__sid)\n else:\n self.__sid = self.LAYER_ID\n log.error(\n \"Missing ID of Service Layer from config. Using default value: %s\" %\n self.__sid)\n # Set element manager\n self.elementManager = ClickManager()\n # Init central object of Service layer\n self.service_orchestrator = ServiceOrchestrator(self)\n # Read input from file if it's given and initiate SG\n if self._sg_file:\n try:\n stats.init_request_measurement(request_id=self._sg_file)\n service_request = self._read_data_from_file(self._sg_file)\n log.info(\"Graph representation is loaded successfully!\")\n if service_request.startswith('{'):\n log.debug(\"Detected format: JSON - Parsing from NFFG format...\")\n nffg = NFFG.parse(raw_data=service_request)\n elif service_request.startswith('<'):\n log.debug(\"Detected format: XML - Parsing from Virtualizer format...\")\n converter = NFFGConverter(domain=\"INTERNAL\", logger=log,\n unique_bb_id=False,\n unique_nf_id=CONFIG.ensure_unique_vnf_id())\n nffg = converter.parse_from_Virtualizer(vdata=service_request)\n else:\n log.warning(\"Detected unexpected format...\")\n return\n if nffg.mode is not None:\n log.info('Detected mapping mode in NFFG: %s' % nffg.mode)\n else:\n nffg.mode = NFFG.MODE_ADD\n log.info(\"No mapping mode has been detected in NFFG! 
\"\n \"Set default mode: %s\" % nffg.mode)\n log.info(\"Schedule service request delayed by %d seconds...\"\n % SCHEDULED_SERVICE_REQUEST_DELAY)\n stats.set_request_id(request_id=nffg.id)\n self.api_sas_sg_request_delayed(service_nffg=nffg)\n except (ValueError, IOError, TypeError) as e:\n log.error(\n \"Can't load service request from file because of: \" + str(e))\n quit_with_error(msg=str(e), logger=log)\n else:\n # Init REST-API if no input file is given\n self._initiate_rest_api()\n # Init GUI\n if self._gui:\n self._initiate_gui()\n log.info(\"Service Layer has been initialized!\")\n\n def post_up_hook (self, event):\n \"\"\"\n Perform tasks after ESCAPE is up.\n\n :param event: event object\n :type event: :class:`UpEvent`\n :return: None\n \"\"\"\n log.debug(\"Call post Up event hook for layer: %s\" % self._core_name)\n if not self._sg_file:\n self.rest_api.ping_response_code = self.rest_api.POST_UP_PING_CODE\n log.debug(\"Setup 'ping' response code: %s for REST-API: %s\"\n % (self.rest_api.ping_response_code, self.rest_api.api_id))\n\n def shutdown (self, event):\n \"\"\"\n .. seealso::\n :func:`AbstractAPI.shutdown() `\n\n :param event: event object\n \"\"\"\n log.info(\"Service Layer is going down...\")\n if hasattr(self, 'rest_api') and self.rest_api:\n log.debug(\"REST-API: %s is shutting down...\" % self.rest_api.api_id)\n # self.rest_api.stop()\n if self.gui_proc:\n log.debug(\"Shut down GUI process - PID: %s\" % self.gui_proc.pid)\n self.gui_proc.terminate()\n\n def _initiate_rest_api (self):\n \"\"\"\n Initialize and set up REST API in a different thread.\n\n :return: None\n \"\"\"\n # set bounded layer name here to avoid circular dependency problem\n handler = CONFIG.get_sas_api_class()\n handler.bounded_layer = self._core_name\n params = CONFIG.get_sas_agent_params()\n # can override from global config\n if 'prefix' in params:\n handler.prefix = params['prefix']\n if 'unify_interface' in params:\n handler.virtualizer_format_enabled = params['unify_interface']\n address = (params.get('address'), params.get('port'))\n self.rest_api = RESTServer(handler, *address)\n self.rest_api.api_id = handler.LOGGER_NAME = \"U-Sl\"\n handler.log.info(\"Init REST-API for %s on %s:%s!\" % (\n self.rest_api.api_id, address[0], address[1]))\n self.rest_api.virtualizer_params = params.get('virtualizer_params', {})\n self.rest_api.start()\n handler.log.debug(\"Enforced configuration for %s: interface: %s\" % (\n self.rest_api.api_id,\n \"UNIFY\" if handler.virtualizer_format_enabled else \"Internal-NFFG\"))\n\n def _initiate_gui (self):\n \"\"\"\n Initiate and set up GUI.\n\n :return: None\n \"\"\"\n # TODO - set up and initiate MiniEdit here???\n devnull = open(os.devnull, 'r+')\n gui_path = os.path.abspath(os.getcwd() + \"/gui/gui.py\")\n self.gui_proc = Popen(gui_path, stdin=devnull, stdout=devnull,\n stderr=devnull, close_fds=True)\n log.info(\"GUI has been initiated!\")\n\n def _handle_SGMappingFinishedEvent (self, event):\n \"\"\"\n Handle SGMappingFinishedEvent and proceed with :class:`NFFG\n ` instantiation.\n\n :param event: event object\n :type event: :any:`SGMappingFinishedEvent`\n :return: None\n \"\"\"\n self._proceed_to_instantiate_NFFG(event.nffg)\n\n ##############################################################################\n # UNIFY U - Sl API functions starts here\n ##############################################################################\n\n # noinspection PyUnusedLocal\n @schedule_as_coop_task\n def api_sas_sg_request (self, service_nffg, *args, **kwargs):\n 
\"\"\"\n Initiate service graph in a cooperative micro-task.\n\n :param service_nffg: service graph instance\n :type service_nffg: :class:`NFFG`\n :return: None\n \"\"\"\n self.__proceed_sg_request(service_nffg=service_nffg)\n\n # noinspection PyUnusedLocal\n @schedule_delayed_as_coop_task(delay=SCHEDULED_SERVICE_REQUEST_DELAY)\n def api_sas_sg_request_delayed (self, service_nffg, *args, **kwargs):\n \"\"\"\n Initiate service graph in a cooperative micro-task.\n\n :param service_nffg: service graph instance\n :type service_nffg: :class:`NFFG`\n :return: None\n \"\"\"\n return self.__proceed_sg_request(service_nffg=service_nffg)\n\n def __proceed_sg_request (self, service_nffg):\n \"\"\"\n Initiate a Service Graph (UNIFY U-Sl API).\n\n :param service_nffg: service graph instance\n :type service_nffg: :class:`NFFG`\n :return: None\n \"\"\"\n log.getChild('API').info(\"Invoke request_service on %s with SG: %s \" %\n (self.__class__.__name__, service_nffg))\n stats.add_measurement_start_entry(type=stats.TYPE_SERVICE, info=LAYER_NAME)\n # Check if mapping mode is set globally in CONFIG\n mapper_params = CONFIG.get_mapping_config(layer=LAYER_NAME)\n if 'mode' in mapper_params and mapper_params['mode'] is not None:\n mapping_mode = mapper_params['mode']\n log.info(\"Detected mapping mode from configuration: %s\" % mapping_mode)\n elif service_nffg.mode is not None:\n mapping_mode = service_nffg.mode\n log.info(\"Detected mapping mode from NFFG: %s\" % mapping_mode)\n else:\n mapping_mode = None\n log.info(\"No mapping mode was detected!\")\n self.__sg_preprocessing(nffg=service_nffg)\n # Store request if it is received on REST-API\n if hasattr(self, 'rest_api') and self.rest_api:\n log.getChild('API').debug(\"Store received NFFG request info...\")\n msg_id = self.rest_api.request_cache.cache_request_by_nffg(\n nffg=service_nffg)\n if msg_id is not None:\n self.rest_api.request_cache.set_in_progress(id=msg_id)\n log.getChild('API').debug(\"Request is stored with id: %s\" % msg_id)\n else:\n log.getChild('API').debug(\"No request info detected.\")\n try:\n if CONFIG.get_mapping_enabled(layer=LAYER_NAME):\n # Initiate service request mapping\n mapped_nffg = self.service_orchestrator.initiate_service_graph(\n service_nffg)\n else:\n log.warning(\"Mapping is disabled! Skip instantiation step...\")\n mapped_nffg = service_nffg\n mapped_nffg.status = NFFG.MAP_STATUS_SKIPPED\n log.debug(\"Mark NFFG status: %s!\" % mapped_nffg.status)\n # Rewrite REMAP mode for backward compatibility\n if mapped_nffg is not None and mapping_mode == NFFG.MODE_REMAP:\n mapped_nffg.mode = mapping_mode\n log.debug(\"Rewrite mapping mode: %s into mapped NFFG...\" %\n mapped_nffg.mode)\n else:\n log.debug(\n \"Skip mapping mode rewriting! 
Mode remained: %s\" % mapping_mode)\n log.getChild('API').debug(\"Invoked request_service on %s is finished\" %\n self.__class__.__name__)\n # If mapping is not threaded and finished with OK\n if mapped_nffg is not None and not \\\n self.service_orchestrator.mapper.threaded:\n self._proceed_to_instantiate_NFFG(mapped_nffg)\n self.last_sg = mapped_nffg\n else:\n log.warning(\"Something went wrong in service request initiation: \"\n \"mapped service data is missing!\")\n self.__handle_mapping_result(nffg_id=service_nffg.id, fail=True)\n self._handle_InstantiationFinishedEvent(\n event=InstantiationFinishedEvent(\n id=service_nffg.id,\n result=InstantiationFinishedEvent.MAPPING_ERROR))\n except ProcessorError as e:\n self.__handle_mapping_result(nffg_id=service_nffg.id, fail=True)\n self._handle_InstantiationFinishedEvent(\n event=InstantiationFinishedEvent(\n id=service_nffg.id,\n result=InstantiationFinishedEvent.REFUSED_BY_VERIFICATION,\n error=e))\n\n @staticmethod\n def __sg_preprocessing (nffg):\n \"\"\"\n Preprocess given :class:`NFFG` based on request mode.\n\n :param nffg: received service request\n :type nffg: :class:`NFFG`\n :return: modified request\n :rtype: :class:`NFFG`\n \"\"\"\n if nffg.mode == NFFG.MODE_DEL:\n log.debug(\"Explicitly mark NF nodes in DELETE request...\")\n for nf in nffg.nfs:\n nf.operation = NFFG.OP_DELETE\n log.debug(\"%s --> %s\" % (nf.id, nf.operation))\n return nffg\n\n def __handle_mapping_result (self, nffg_id, fail):\n \"\"\"\n Perform necessary task for callback and cache functionality based on mapping\n result.\n\n :param nffg_id: request ID\n :type nffg_id: str or int\n :param fail: mapping result\n :type fail: bool\n :return: None\n \"\"\"\n if not (hasattr(self, 'rest_api') and self.rest_api):\n return\n log.getChild('API').debug(\"Cache request status...\")\n req_status = self.rest_api.request_cache.get_request_by_nffg_id(nffg_id)\n if req_status is None:\n log.getChild('API').debug(\"Request status is missing for NFFG: %s! 
\"\n \"Skip result processing...\" % nffg_id)\n return\n log.getChild('API').debug(\"Process mapping result...\")\n message_id = req_status.message_id\n if message_id is not None:\n if fail:\n self.rest_api.request_cache.set_error_result(id=message_id)\n else:\n self.rest_api.request_cache.set_success_result(id=message_id)\n ret = self.rest_api.invoke_callback(message_id=message_id)\n if ret is None:\n log.getChild('API').debug(\"No callback was defined!\")\n else:\n log.getChild('API').debug(\n \"Callback: %s has invoked with return value: %s\" % (\n req_status.get_callback(), ret))\n RequestScheduler().set_orchestration_finished(id=nffg_id)\n\n def __get_sas_resource_view (self):\n \"\"\"\n Return with the resource view of SAS layer.\n\n :return: resource view\n :rtype: :any:`AbstractVirtualizer`\n \"\"\"\n return self.service_orchestrator.virtResManager.virtual_view\n\n def api_sas_get_topology (self):\n \"\"\"\n Return with the topology description.\n\n :return: topology description requested from the layer's Virtualizer\n :rtype: :class:`NFFG`\n \"\"\"\n log.getChild('[U-Sl]').debug(\"Requesting Virtualizer for REST-API...\")\n # Get or if not available then request the layer's Virtualizer\n sas_virt = self.__get_sas_resource_view()\n if sas_virt is not None:\n log.getChild('[U-Sl]').debug(\"Generate topo description...\")\n # return with the virtual view as an NFFG\n return sas_virt.get_resource_info()\n else:\n log.getChild('[U-Sl]').error(\n \"Virtualizer(id=%s) assigned to REST-API is not found!\" %\n self.rest_api.api_id)\n\n def api_sas_status (self, message_id):\n \"\"\"\n Return the state of a request given by ``message_id``.\n\n Function is not invoked in coop-microtask, only write-type operations\n must not be used.\n\n :param message_id: request id\n :type message_id: str or int\n :return: state\n :rtype: str\n \"\"\"\n status = self.rest_api.request_cache.get_domain_status(id=message_id)\n if status == RequestStatus.SUCCESS:\n return 200, None\n elif status == RequestStatus.UNKNOWN:\n return 404, None\n elif status == RequestStatus.ERROR:\n return 500, status\n else:\n # PROCESSING or INITIATED\n return 202, None\n\n def _proceed_to_instantiate_NFFG (self, mapped_nffg):\n \"\"\"\n Send NFFG to Resource Orchestration Sublayer in an implementation-specific\n way.\n\n General function which is used from microtask and Python thread also.\n\n This function contains the last steps before the mapped NFFG will be sent\n to the next layer.\n\n :param mapped_nffg: mapped Service Graph\n :type mapped_nffg: :class:`NFFG`\n :return: None\n \"\"\"\n # Rebind requirement link fragments for lower layer mapping\n mapped_nffg = NFFGToolBox.rebind_e2e_req_links(nffg=mapped_nffg, log=log)\n # Log verbose mapping result in unified way (threaded/non-threaded)\n log.log(VERBOSE,\n \"Mapping result of Service Layer:\\n%s\" % mapped_nffg.dump())\n # Sending mapped SG / NF-FG to Orchestration layer as an Event\n # Exceptions in event handlers are caught by default in a non-blocking way\n sas_res = self.__get_sas_resource_view().get_resource_info()\n self.raiseEventNoErrors(InstantiateNFFGEvent, mapped_nffg, sas_res)\n log.getChild('API').info(\n \"Generated NF-FG: %s has been sent to Orchestration...\" % mapped_nffg)\n\n ##############################################################################\n # UNIFY Sl - Or API functions starts here\n ##############################################################################\n\n # noinspection PyUnusedLocal\n def _handle_MissingVirtualViewEvent (self, 
event):\n \"\"\"\n Request virtual resource info from Orchestration layer (UNIFY Sl - Or API).\n\n Invoked when a :class:`MissingVirtualViewEvent` raised.\n\n Service layer is identified with the sid value automatically.\n\n :param event: event object\n :type event: :any:`MissingVirtualViewEvent`\n :return: None\n \"\"\"\n log.getChild('API').debug(\n \"Send request(with layer ID: %s) to Orchestration \"\n \"layer...\" % self.__sid)\n self.raiseEventNoErrors(GetVirtResInfoEvent, self.__sid)\n\n def _handle_VirtResInfoEvent (self, event):\n \"\"\"\n Save requested virtual resource info as an :class:`AbstractVirtualizer\n `.\n\n :param event: event object\n :type event: :any:`VirtResInfoEvent`\n :return: None\n \"\"\"\n log.getChild('API').debug(\"Received : %s from %s layer\" % (\n event.virtualizer, str(event.source._core_name).title()))\n self.service_orchestrator.virtResManager.virtual_view = event.virtualizer\n\n def _handle_InstantiationFinishedEvent (self, event):\n \"\"\"\n Receive the result of the instantiated NFFG and save it.\n\n :param event: event object\n :type event: :any:`InstantiationFinishedEvent`\n :return: None\n \"\"\"\n if not BaseResultEvent.is_error(event.result):\n log.getChild('API').info(\n \"Service request(id=%s) has been finished successfully with result: %s!\"\n % (event.id, event.result))\n else:\n log.getChild('API').error(\n \"Service request(id=%s) has been finished with error result: %s!\" %\n (event.id, event.result))\n if not event.is_pending(event.result):\n self.__handle_mapping_result(nffg_id=event.id,\n fail=event.is_error(event.result))\n # Quit ESCAPE if test mode is active\n if get_global_parameter(name=\"QUIT_AFTER_PROCESS\"):\n stats.finish_request_measurement()\n quit_with_ok(\"Detected QUIT mode! Exiting ESCAPE...\")\n","sub_path":"escape/escape/service/sas_API.py","file_name":"sas_API.py","file_ext":"py","file_size_in_byte":24272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"364392627","text":"#2019/9/7\nimport sys\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n\n def insert(self,node,x):\n if not node:\n head = ListNode(x)\n head.next = head\n return head\n\n cur,prev = node,None\n while True:\n prev = cur\n cur = cur.next\n if x >= prev.val and x <= cur.val:\n break\n\n if (prev.val>cur.val) and (x>prev.val or x 4:\n break\n\n","sub_path":"lintcode/第六层/599_向循环有序链表插入节点.py","file_name":"599_向循环有序链表插入节点.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"419415385","text":"from ctypes import CDLL, byref, get_errno, c_long, c_ulong\nfrom os import fork, execl, WIFSTOPPED, waitpid, kill, strerror\nfrom .__c_defs import *\n\n\ndef get_reg_64(regs: user_regs_struct, name: str) -> int:\n reg_name, mask = reg_mask_map[name]\n reg = getattr(regs, reg_name)\n reg_val = reg & mask\n if not mask & 0xff:\n reg_val <<= 8\n\n return reg_val\n\n\ndef set_reg_64(regs: user_regs_struct, name: str, value: int)\\\n -> user_regs_struct:\n reg_name, mask = reg_mask_map[name]\n if not mask & 0xff:\n value <<= 8\n\n reg_old = getattr(regs, reg_name)\n reg_new = (reg_old & (0xffffffffffffffff ^ mask)) | value\n setattr(regs, reg_name, reg_new)\n return regs\n\n\nclass Breakpoint:\n def __init__(self, dbg, addr: int, permanent: bool = False):\n self.__address = addr\n self.__patched_byte = dbg.read_from_addr(addr, 1)\n self.__permanent = permanent\n dbg.write_to_addr(addr, b\"\\xcc\")\n\n @property\n def patched_byte(self) -> bytes:\n return self.__patched_byte\n\n @property\n def address(self) -> int:\n return self.__address\n\n @property\n def permanent(self) -> bool:\n return self.__permanent\n\n @permanent.setter\n def permanent(self, value: bool) -> None:\n if not (value is True or value is False):\n raise TypeError(\"type bool expected\")\n else:\n self.__permanent = value\n\n\nclass Hook(Breakpoint):\n def __init__(self, dbg, addr: int, func, permanent: bool = True,\n silent: bool = False):\n self.__name = func.__name__\n self.__procedure = func\n self.__silent = silent\n super().__init__(dbg, addr, permanent=permanent)\n\n @property\n def name(self):\n return self.__name\n\n @property\n def silent(self):\n return self.__silent\n\n @silent.setter\n def silent(self, silent: bool):\n self.__silent = silent\n\n @property\n def procedure(self):\n return self.__procedure\n\n @procedure.setter\n def procedure(self, func):\n if not callable(func):\n raise TypeError(\"{0} is not callable\".format(type(func)))\n else:\n self.__procedure = func\n self.__name = func.__name__\n\n\nclass DebuggerException(Exception):\n def __init__(self, msg: str = \"\"):\n self.__str__ = msg\n super().__init__()\n\n\nclass BreakpointHit(Exception):\n def __init__(self, bp):\n self.__str__ = \"Breakpoint hit\"\n self.__bp = bp\n super().__init__()\n\n @property\n def breakpoint(self):\n return self.__bp\n\n\nclass DebuggerBase:\n def __init__(self, ignore_ptrace_errors: bool = True):\n self.__libc_ptrace = CDLL(\"libc.so.6\", use_errno=True).ptrace\n self.__breakpoints = list()\n self.__child_alive = False\n self.__regs = user_regs_struct()\n self.__ignore_ptrace_errors = ignore_ptrace_errors\n self.__restore = None\n self.__child_pid = None\n\n def __del__(self):\n if self.__child_alive:\n kill(self.__child_pid, 9)\n\n def __ptrace(self, *args) -> c_long:\n result = self.__libc_ptrace(*args)\n errno = get_errno()\n\n if errno != 0 and not self.__ignore_ptrace_errors:\n raise OSError(errno, strerror(errno))\n return result\n\n def __debugger_handover(self):\n # restore permanent breakpoints\n if self.__restore:\n self.add_breakpoint(self.__restore)\n self.__restore = None\n\n # wait for the child to finish, refresh regs and check for\n # breakpoint\n _, s = waitpid(self.__child_pid, 0)\n\n if WIFSTOPPED(s):\n self.__regs = self.__get_regs()\n restore = self.__ignore_ptrace_errors\n self.__ignore_ptrace_errors = True\n if self.__regs.rip-1 in self.breakpoints:\n self.__breakpoint_hook()\n self.__ignore_ptrace_errors = restore\n else:\n self.__child_alive = False\n\n def 
__breakpoint_hook(self):\n rip_reset = self.__regs.rip - 1\n bp = self.breakpoints[rip_reset]\n self.__breakpoints.remove(bp)\n\n self.write_to_addr(rip_reset, bp.patched_byte)\n self.set_reg(\"rip\", rip_reset)\n\n if bp.permanent:\n self.__restore = bp\n else:\n self.__restore = None\n\n raise BreakpointHit(bp)\n\n def load(self, path: str, *args):\n if self.__child_alive:\n raise ChildProcessError(\"Already tracing a program\")\n\n pid = fork()\n if not pid:\n self.__ptrace(PTRACE_TRACEME, 0, 0, 0)\n args = [path] if not args else args\n\n # turn off address space randomisation for child process\n pers = CDLL(\"libc.so.6\").personality(c_ulong(0xffffffff))\n CDLL(\"libc.so.6\").personality(pers | ADDR_NO_RANDOMIZE)\n errno = get_errno()\n if errno != 0:\n print(strerror(errno))\n\n execl(path, *args)\n else:\n self.__child_pid = pid\n self.__child_alive = True\n waitpid(self.__child_pid, 0)\n self.__regs = self.__get_regs()\n\n def step(self):\n if not self.__child_alive:\n raise ChildProcessError(\"Child not running\")\n self.__ptrace(PTRACE_SINGLESTEP, self.__child_pid, 0, 0)\n self.__debugger_handover()\n\n def continue_(self):\n if not self.__child_alive:\n raise ChildProcessError(\"Child not running\")\n self.__ptrace(PTRACE_CONT, self.__child_pid, 0, 0)\n self.__debugger_handover()\n\n def __get_regs(self):\n self.__ptrace(PTRACE_GETREGS, self.__child_pid, 0, byref(self.__regs))\n return self.__regs\n\n def get_reg(self, name: str):\n return get_reg_64(self.__regs, name)\n\n def set_reg(self, name: str, value: int):\n self.__regs = set_reg_64(self.__regs, name, value)\n\n self.__ptrace(PTRACE_SETREGS, self.__child_pid, 0,\n byref(self.__regs))\n\n def read_from_addr(self, addr: int, length: int) -> bytes:\n data = b''\n while len(data) < length:\n le_bytes = self.__ptrace(PTRACE_PEEKTEXT, self.__child_pid,\n addr, 0)\n data += bytes([0x000000ff & (le_bytes >> i*8) for i in range(4)])\n addr += 4\n return data[:length]\n\n # for some reason, PTRACE_PEEKTEXT reads 4 bytes while PTRACE_POKETEXT\n # writes 8 bytes. 
This makes things a little more complicated.\n    def write_to_addr(self, addr: int, data: bytes):\n        length = len(data)\n        # calculate number of bytes that must be read and appended to data\n        # such that len(data) is a multiple of 8\n        overlap = 8 - (length % 8) if length % 8 > 0 else 0\n\n        # pad data with the bytes that already follow it in memory, so the\n        # trailing part of the last word is written back unchanged\n        if overlap != 0:\n            padding = self.read_from_addr(addr + length, overlap)\n            data = data + padding\n\n        # write the data word by word\n        for i in range(0, len(data), 8):\n            self.__ptrace(PTRACE_POKETEXT, self.__child_pid,\n                          addr + i,\n                          c_long(int.from_bytes(data[i:i+8], \"little\")))\n\n    def new_breakpoint(self, address: int, permanent: bool = False):\n        bp = Breakpoint(self, address, permanent=permanent)\n        self.add_breakpoint(bp)\n\n    def add_breakpoint(self, breakpoint):\n        if breakpoint.address not in self.breakpoints:\n            self.__breakpoints.append(breakpoint)\n        else:\n            msg = \"Breakpoint at 0x{0:x} already set\"\n            raise DebuggerException(msg.format(breakpoint.address))\n\n    @property\n    def child_running(self):\n        return self.__child_alive\n\n    @property\n    def breakpoints(self):\n        return {bp.address: bp for bp in self.__breakpoints}\n\n    @property\n    def child_pid(self):\n        return self.__child_pid\n\n    @property\n    def ignore_ptrace_errors(self):\n        return self.__ignore_ptrace_errors\n\n    @ignore_ptrace_errors.setter\n    def ignore_ptrace_errors(self, v):\n        if v:\n            self.__ignore_ptrace_errors = True\n        else:\n            self.__ignore_ptrace_errors = False\n\n\nclass DebuggerExtended(DebuggerBase):\n    def __handle_breakpoint(self, bp):\n        if hasattr(bp, \"procedure\"):\n            bp.procedure(self)\n\n        if not hasattr(bp, \"procedure\") or not bp.silent:\n            raise BreakpointHit(bp)\n\n    def continue_(self):\n        try:\n            super().continue_()\n        except BreakpointHit as hit:\n            self.__handle_breakpoint(hit.breakpoint)\n\n            # if no BreakpointHit exception was thrown, continue\n            self.continue_()\n\n    def step(self):\n        try:\n            super().step()\n        except BreakpointHit as hit:\n            self.__handle_breakpoint(hit.breakpoint)\n\n    def hook(self, addr, permanent: bool = True, silent: bool = False):\n        def _hook(func):\n            hook = Hook(self, addr, func, permanent=permanent, silent=silent)\n            self.add_breakpoint(hook)\n            return hook\n        return _hook\n\n    def stepping(self):\n        while self.child_running:\n            self.step()\n            yield self.get_reg(\"rip\")\n","sub_path":"debugger/__Debugger.py","file_name":"__Debugger.py","file_ext":"py","file_size_in_byte":9266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"140434392","text":"\"\"\"\nPurpose: Detect Feature using Harris operator\nAuthor: Mohamed Hosny Ahmed\nDate: 10 / 4 / 2016\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport myopencv\nimport cvarthmetics\n\n\nclass HarrisDetector:\n\n def __init__(self, image):\n self.img = image\n self.rows, self.cols, self.channels = img.shape\n\n # initialize data\n self.tmpImg = np.zeros((self.rows, self.cols), np.float32)\n self.Ix2 = np.zeros((self.rows, self.cols), np.float32)\n self.Iy2 = np.zeros((self.rows, self.cols), np.float32)\n self.Ixy2 = np.zeros((self.rows, self.cols), np.float32)\n self.H = np.zeros((2, 2), np.float32)\n self.myCV = 0\n self.R = 0\n self.flag = 1\n self.calc_harris_operator()\n\n def calc_harris_operator(self):\n # check if we are in this method\n print ('>> calc_harris_operator')\n\n # 1- Compute gradient at x, y\n self.myCV = myopencv.cvUtiliy(self.img)\n self.Ix2 = self.myCV.calc_changes('x', 3)\n self.Iy2 = self.myCV.calc_changes('y', 3)\n\n # 2- Compute Ix^2, Iy^2, Ixy^2\n self.Ix2 = np.multiply(self.Ix2, self.Ix2)\n self.Iy2 = np.multiply(self.Iy2, self.Iy2)\n self.Ixy2 = np.multiply(self.Ix2, self.Iy2)\n\n # 3- Sum all pixels by bluring image by one to get summation with window 3x3\n self.Ix2 = self.myCV.summ_pixel_window(self.Ix2, 3)\n self.Iy2 = self.myCV.summ_pixel_window(self.Iy2, 3)\n self.Ixy2 = self.myCV.summ_pixel_window(self.Ixy2, 3)\n\n # 4- Define H(x,y) matrix\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n self.H[0][0] = self.Ix2[row][col]\n self.H[0][1] = self.Ixy2[row][col]\n self.H[1][0] = self.Ixy2[row][col]\n self.H[1][1] = self.Iy2[row][col]\n\n # 5- Compute R = det(H(wx,y)) - K*Trace(H(x,y))^2\n self.R = self.harris_response(self.H)\n if self.R < -10000:\n self.tmpImg[row, col] = float(self.R)\n else:\n self.tmpImg[row, col] = 0.0\n\n self.tmpImg = np.array(self.tmpImg, np.float32)\n self.tmpImg = cvarthmetics.Arthmetics.get_local_maxima(self.tmpImg)\n\n return self.tmpImg\n\n # get harris response\n def harris_response(self, H):\n # check if we are in this method\n if self.flag == 1:\n print('>> harris_response')\n self.flag = 0\n\n trace = cv2.trace(H)\n response = cv2.determinant(H) - 0.04 * np.power(trace[0], 2)\n return response\n\n # return harris index's\n def get_harris(self):\n # check if we are in this method\n print('>> get_harris')\n x = self.tmpImg * self.img\n return self.tmpImg * self.img\n\n\nif __name__ == '__main__':\n\n x=0\n img = cv2.imread('/home/prof/Work_Space/CV/Assignment_3_CV/grad.png')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n harris = HarrisDetector(image=gray)\n c = harris.get_harris()\n\n # print c.max()\n xx = c > 0.004*c.max()\n img[xx] = [0, 0, 255]\n\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"harris_detector.py","file_name":"harris_detector.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"154578762","text":"\"\"\"\nThis library process the etherpad logs.\nAuthor: Pankaj\nDate: 9/09/2019\n\n\"\"\"\n\n# import package\nimport pandas as pd\nimport datetime\n\n\n\nclass etherLogAnalyzer(object):\n \"\"\"\n init function initialize the etherLogAnalyzer object.\n it takes one parameter the file name as\n \"\"\"\n def __init__(self, file_name):\n self.file_name = file_name\n\n try:\n self.file = pd.read_csv(file_name,names=['timestamp','ip','action','oldlen','newlen','changeset','charbank','noadd','noremove'])\n print('File loaded successfully....')\n\n self.file['timestamp'] = pd.to_datetime(self.file['timestamp'])\n #print('Setting timestamp as index...')\n self.file = self.file.set_index(pd.DatetimeIndex(self.file['timestamp']))\n\n except Exception as e:\n print('Error occured while opening the file',str(e))\n\n def getDuration(self):\n #self.file['timestamp'] = pd.to_datetime(self.file['timestamp'],format=\"%Y-%m-%d %I:%M:%S\")\n self.file['timestamp'] = pd.to_datetime(self.file['timestamp'])\n df1=self.file\n return str(df1.timestamp[df1.shape[0]-1]-df1.timestamp[0])\n\n\n # return all the text generated in Etherpad\n\n def getAllText(self):\n return self.file.charbank.tolist()\n\n \"\"\"\n This function will return the number of total ip recorded in the log file.\n \"\"\"\n\n def getAuthorCount(self):\n return len(self.file.ip.unique())\n\n \"\"\"\n This function returns the list of Author's IP recorded in the log file.\n \"\"\"\n\n def getAuthorIP(self):\n return self.file.ip.unique()\n\n \"\"\"\n Logs are recorded in same file for all the pads, therefore this function will seperate the log file\n on the basis of group. It reuires parameter e.g. group name and group ips.\n @params: group_name (String): name of group\n group_ips (List): list containing ips belong to that group\n Return type: Pandas dataframe.\n \"\"\"\n\n def getLogForGroup(self,group_ips):\n temp_df = self.file[self.file.ip.isin(group_ips)]\n return temp_df\n\n \"\"\"\n This function will generate statistics for each author in the group. 
These statistics are in form of number of addition\n and deletion along with time.\n @params:\n ip (String): ip address for which you want to see the stats\n timescale (String): it specify the time window for aggregating statistics.\n Possible values: Alias Description\n B business day frequency\n C custom business day frequency (experimental)\n D calendar day frequency\n W weekly frequency\n M month end frequency\n BM business month end frequency\n CBM custom business month end frequency\n MS month start frequency\n BMS business month start frequency\n CBMS custom business month start frequency\n Q quarter end frequency\n BQ business quarter endfrequency\n QS quarter start frequency\n BQS business quarter start frequency\n A year end frequency\n BA business year end frequency\n AS year start frequency\n BAS business year start frequency\n BH business hour frequency\n H hourly frequency\n T, min minutely frequency\n S secondly frequency\n L, ms milliseonds\n U, us microseconds\n N nanoseconds\n plot(Boolean): Specify True if you want to plot the graph\n\n\n Return type: Dataframe\n\n\n \"\"\"\n\n def generateWindowWiseStats(self,window_size='30S',ips=[]):\n\n # Computer number of char added or deleted\n #print(\"IP\",ips)\n tempdf = self.file.copy()\n #print(tempdf.shape)\n temp = tempdf.loc[tempdf['ip'].isin(ips)]\n temp['addition'] = temp['newlen']-temp['oldlen']\n temp['deletion'] = temp['oldlen']-temp['newlen']\n mask = temp['addition']<0\n mask2 = temp['deletion']<0\n temp.loc[mask,'addition']=0\n temp.loc[mask2,'deletion']=0\n\n\n\n #self.file['timestamp'] = pd.to_datetime(self.file['timestamp'],format=\"%Y-%m-%d %I:%M:%S\")\n\n\n\n df1=temp.copy()\n\n # Computing timedelta\n time_delta = pd.to_timedelta(window_size)\n\n # Creating empty dataframe\n final = pd.DataFrame(columns=['timestamp','u1_add','u1_del','u1_text','u2_add','u2_del','u2_text','u3_add','u3_del','u3_text','u4_add','u4_del','u4_text'])\n\n if df1.shape[0] != 0:\n cur_ts = df1.timestamp[0]\n else:\n print(\"Empty frame\")\n return final\n\n\n while cur_ts < df1.timestamp[df1.shape[0]-1]:\n\n next_ts = cur_ts + time_delta\n\n temp_log_df = df1.between_time(datetime.datetime.time(cur_ts),datetime.datetime.time(next_ts),include_start=True,include_end=False)\n\n\n entry = self.extractFeatures(cur_ts,temp_log_df,ips)\n\n #final = final.append({'timestamp':entry['timestamp'],'u1_add':entry['u1_add'],'u1_del':entry['u1_del'],'u1_text':entry['u1_text'],'u2_add':entry['u2_add'],'u2_del':entry['u2_del'],'u2_text':entry['u2_text'],'u3_add':entry['u3_add'],'u3_del':entry['u3_del'],'u3_text':entry['u3_text'],'u4_add':entry['u4_add'],'u4_del':entry['u4_del'],'u4_text':entry['u4_text'],'u1_speak':entry['u1_speak'],'u2_speak':entry['u2_speak'],'u3_speak':entry['u3_speak'],'u4_speak':entry['u4_speak'],'speak_sequence':entry['speak_sequence']},ignore_index=True)\n final = final.append(entry,ignore_index=True)\n\n cur_ts = next_ts\n final.to_csv('Final.csv',index=False)\n return final\n\n\n def extractFeatures(self,timestamp,log_df, no_ip):\n # features\n user1_addition = 0\n user1_deletion = 0\n user1_text = \"\"\n\n user2_addition = 0\n user2_deletion = 0\n user2_text = \"\"\n\n user3_addition = 0\n user3_deletion = 0\n user3_text = \"\"\n\n user4_addition = 0\n user4_deletion = 0\n user4_text = \"\"\n\n\n def concatenate_list_data(list):\n\n result= ''\n for element in list:\n if str(element) != 'nan':\n result += str(element)\n return result\n\n\n u1 = log_df.loc[log_df['ip']==no_ip[0],:]\n u2 = 
log_df.loc[log_df['ip']==no_ip[1],:]\n u3 = log_df.loc[log_df['ip']==no_ip[2],:]\n u4 = log_df.loc[log_df['ip']==no_ip[3],:]\n\n\n user1_addition = u1.addition.sum()\n user1_deletion = u1.deletion.sum()\n user1_text = concatenate_list_data(u1.charbank.tolist())\n\n user2_addition = u2.addition.sum()\n user2_deletion = u2.deletion.sum()\n user2_text = concatenate_list_data(u2.charbank.tolist())\n\n user3_addition = u3.addition.sum()\n user3_deletion = u3.deletion.sum()\n user3_text = concatenate_list_data(u3.charbank.tolist())\n\n user4_addition = u4.addition.sum()\n user4_deletion = u4.deletion.sum()\n user4_text = concatenate_list_data(u4.charbank.tolist())\n\n\n\n\n return {'timestamp':timestamp,'u1_add':user1_addition,'u1_del':user1_deletion,'u1_text':user1_text,'u2_add':user2_addition,'u2_del':user2_deletion,'u2_text':user2_text,'u3_add':user3_addition,'u3_del':user3_deletion,'u3_text':user3_text,'u4_add':user4_addition,'u4_del':user4_deletion,'u4_text':user4_text}\n\n\n\n\n def generateStatsForAuthor(self,ip,plot=False,timescale='30S'):\n temp = self.file.copy()\n temp = temp.loc[temp['ip']==ip,:]\n temp['addition'] = temp['newlen']-temp['oldlen']\n temp['deletion'] = temp['oldlen']-temp['newlen']\n mask = temp['addition']<0\n mask2 = temp['deletion']<0\n temp.loc[mask,'addition']=0\n temp.loc[mask2,'deletion']=0\n stat = temp.groupby(pd.Grouper(freq=timescale)).sum()\n if plot:\n stat[['addition','deletion']].plot(kind='bar')\n plt.title('Stats for User:'+ip)\n return stat\n","sub_path":"etherLogAnalyzer.py","file_name":"etherLogAnalyzer.py","file_ext":"py","file_size_in_byte":8390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"415684319","text":"\n#Code to run automatic fish feeder\nimport RPi.GPIO as GPIO\nimport time\n\nimport datetime\n\n#twitter config\nfrom twython import Twython\n\nfrom auth import (\n consumer_key,\n consumer_secret,\n access_token,\n access_token_secret\n)\n\ntwitter = Twython(\n consumer_key,\n consumer_secret,\n access_token,\n access_token_secret\n)\n\n\n\n\n#auto feeder\n\nfor i in range(3):\n\n time.sleep(5)\n GPIO.setmode(GPIO.BOARD)\n\n control_pins=[7,11,13,15]\n\n for pin in control_pins:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin,0)\n\n halfstep_seq = [\n [1,0,0,0],\n [1,1,0,0],\n [0,1,0,0],\n [0,1,1,0],\n [0,0,1,0],\n [0,0,1,1],\n [0,0,0,1],\n [1,0,0,1]\n ]\n\n\n for j in range(512):\n for halfstep in range(8):\n for pin in range(4):\n GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])\n time.sleep(0.001)\n\n GPIO.cleanup()\n\n now = str(datetime.datetime.now())\n message= \"Hello Chum! %s %d\" % (now,i+1)\n\n twitter.update_status(status=message)\n\n print(\"Tweeted: %s %s %d\" % (message, now, i+1))\n\n\n","sub_path":"ChumBucketAuto.py","file_name":"ChumBucketAuto.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"104312889","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nimport csv\nfrom django.conf import settings\nfrom .filters import PhageFilter\nfrom io import StringIO\nfrom io import TextIOWrapper\nfrom PhageBank.core.forms import Add_ResearchForm, AForm, AIForm, Edit_Phage_DataForm, Edit_ResearcherForm, Edit_ResearchForm, Edit_IsolationDataForm, Edit_Experiment_Form\nfrom PhageBank.core.forms import SignUpForm, UploadFileForm, LinkForm, LoginForm, Add_Phage_DataForm, Add_ResearcherForm, Add_Experiment_Form,Isolation_Form\nfrom PhageBank.core.models import PhageData, PreData, ExperimentData, IsolationData\nfrom django.forms.formsets import BaseFormSet\nfrom django.forms.formsets import formset_factory\nfrom django.forms import inlineformset_factory\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.template.loader import render_to_string\nfrom django.http import JsonResponse\nfrom django.template import RequestContext\nfrom django.contrib.messages import get_messages\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib import messages\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\n\nimport json\nimport os\nfrom csvvalidator import *\nimport datetime\nimport sqlite3\nimport pandas as pd\n\ndef count(dest_dir):\n count = 0;\n for filename in os.listdir(dest_dir):\n if filename.endswith(\".png\") or filename.endswith(\".jpg\") or filename.endswith(\".jpeg\"):\n count=count+1;\n continue\n return count\n\ndef list_path(dest_dir):\n list_path = [];\n for filename in os.listdir(dest_dir):\n if filename.endswith(\".png\") or filename.endswith(\".jpg\") or filename.endswith(\".jpeg\"):\n list_path.append(filename)\n continue\n return list_path\n\ndef logged_in_index(request):\n last_three = PhageData.objects.all().order_by('-id')[:3]\n dest_dir1=dest_dir2=dest_dir3= name1=name2=name3=\"\"\n count1 = count2 = count3 = -1\n try:\n name1 = last_three[0].phage_name\n dest_dir1 = list_path(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[0].phage_name))\n count1 = count(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[0].phage_name))\n except:\n pass\n\n try:\n name2 = last_three[1].phage_name\n dest_dir2 = list_path(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[1].phage_name))\n count2 = count(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[1].phage_name))\n\n except:\n pass\n\n try:\n name3 = last_three[2].phage_name\n dest_dir3 = list_path(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[2].phage_name))\n count3 = count(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[2].phage_name))\n\n except:\n pass\n return render(request, 'logged_in_index.html',{'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n 'phage1': name1,\n 'phage2': name2,\n 'phage3': name3,\n 'dest_dir1': dest_dir1,\n 'dest_dir2': dest_dir2,\n 'dest_dir3': dest_dir3,\n 'count1': count1,\n 'count2': count2,\n 'count3': count3\n })\n\ndef mylogout(request):\n logout(request)\n last_three = PhageData.objects.all().order_by('-id')[:3]\n dest_dir1 = dest_dir2 = dest_dir3 = name1 = name2 = name3 = \"\"\n count1 = count2 = count3 = -1\n try:\n name1 = 
last_three[0].phage_name\n dest_dir1 = list_path(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[0].phage_name))\n count1 = count(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[0].phage_name))\n except:\n pass\n\n try:\n name2 = last_three[1].phage_name\n dest_dir2 = list_path(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[1].phage_name))\n count2 = count(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[1].phage_name))\n\n except:\n pass\n\n try:\n name3 = last_three[2].phage_name\n dest_dir3 = list_path(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[2].phage_name))\n count3 = count(os.path.join(settings.MEDIA_ROOT, \"images\", last_three[2].phage_name))\n\n except:\n pass\n messages.success(request, 'You have successfully logged out.', extra_tags='alert')\n return render(request, 'logged_in_index.html',{'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n 'phage1': name1,\n 'phage2': name2,\n 'phage3': name3,\n 'dest_dir1': dest_dir1,\n 'dest_dir2': dest_dir2,\n 'dest_dir3': dest_dir3,\n 'count1': count1,\n 'count2': count2,\n 'count3': count3\n })\ndef signup(request):\n data = dict()\n\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n data['form_is_valid'] = True\n else:\n data['form_is_valid'] = False\n else:\n form = SignUpForm()\n\n context = {'form': form}\n data['html_form'] = render_to_string('partial_signup.html',\n context,\n request=request\n )\n return JsonResponse(data)\n\ndef mylogin(request):\n msg = dict()\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n print (form.errors)\n username = request.POST['username']\n password = request.POST['password']\n if form.is_valid():\n msg['form_is_valid'] = True\n else:\n form.add_error('password', 'Please enter a correct username and password. 
Note that both fields are case-sensitive.')\n msg['form_is_valid'] = False\n\n if username and password:\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n msg['form_is_valid'] = True\n else:\n msg['form_is_valid'] = False\n else:\n form = AuthenticationForm()\n context = {'form': form}\n msg['html_form'] = render_to_string('partial_login.html',\n context,\n request=request\n )\n return JsonResponse(msg)\n\n\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Your password was successfully updated!')\n return redirect('logout')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'change_password.html', {'form': form,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n\n\ndef handle_uploaded_file(f, dest):\n with open(dest, 'wb') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n\n#Fill the model object in similar fashion\ndef fillExpObject(expform, phage):\n exp = ExperimentData.objects.create(expkey=phage)\n exp.expkey = phage\n exp.owner = expform.cleaned_data.get('owner')\n exp.timestamp = expform.cleaned_data.get('TimeStamp')\n exp.category = expform.cleaned_data.get('category')\n exp.short_name = expform.cleaned_data.get('short_name')\n exp.full_name = expform.cleaned_data.get('full_name')\n exp.methods = expform.cleaned_data.get('methods')\n exp.results = expform.cleaned_data.get('results')\n exp.save()\n\ndef fillExpObjectedit(expform, exp):\n exp.owner = expform.cleaned_data.get('owner')\n exp.timestamp = expform.cleaned_data.get('TimeStamp')\n exp.category = expform.cleaned_data.get('category')\n exp.short_name = expform.cleaned_data.get('short_name')\n exp.full_name = expform.cleaned_data.get('full_name')\n exp.methods = expform.cleaned_data.get('methods')\n exp.results = expform.cleaned_data.get('results')\n exp.save()\n\ndef fillIsoltionObject(isoform, phage):\n iso = IsolationData.objects.create(isokey=phage)\n iso.isokey = phage\n iso.owner_name = isoform.cleaned_data.get('owner_name')\n iso.location = isoform.cleaned_data.get('location')\n iso.type1 = isoform.cleaned_data.get('type1')\n iso.TimeStamp = isoform.cleaned_data.get('timestamp')\n iso.save()\n\ndef fillIsoltionObjectedit(isoform, iso):\n iso.owner_name = isoform.cleaned_data.get('owner_name')\n iso.location = isoform.cleaned_data.get('location')\n iso.type1 = isoform.cleaned_data.get('type')\n iso.TimeStamp = isoform.cleaned_data.get('TimeStamp')\n iso.save()\n\ndef validate_latest_phage(query_results):\n if(query_results.count()>0):\n latest = query_results.latest('id')\n return latest.phage_name\n else:\n return \"\"\n\n@login_required\ndef add_phage(request):\n if request.user.is_authenticated:\n if request.method == 'POST':\n pform = Add_Phage_DataForm(request.POST) #phage_name\n rrform = Add_ResearcherForm(request.POST)\n rform = Add_ResearchForm(request.POST) #CPT ID\n expform = Add_Experiment_Form(request.POST)\n isoform = Isolation_Form(request.POST)\n aform = AForm(request.POST, request.FILES)\n aiform = AIForm(request.POST)\n\n if pform.is_valid() and rrform.is_valid() and rform.is_valid() and expform.is_valid() and isoform.is_valid() \\\n and aform.is_valid() and aiform.is_valid():\n phagename = pform.cleaned_data.get('phage_name')\n CPTid = 
rform.cleaned_data.get('phage_CPT_id')\n\n #approvePhage = 1 if no duplicates in phage_name. 0 otherwise\n #approveCPTid = 1 if no duplicates in CPT id\n #duplicatePhagesPhages : list of phages due to duplicates in phage names\n #duplicatePhagesCPTid : list of CPT ids due to duplicates in phage names\n #duplicateCPTidPhages : list of phages due to duplicates in CPT ids\n #duplicateCPTidCPTid : list of duplicate CPT ids\n\n #chkDuplicatesFlag = 0\n chkDuplicatesFlag = int(request.POST['flag'])\n #chkDuplicatesFlag = 1\n\n msg = dict()\n\n if chkDuplicatesFlag==1:\n approvePhage, approveCPTid, duplicatePhagesPhages, duplicatePhagesCPTid, duplicateCPTidPhages\\\n , duplicateCPTidCPTid = checkDuplicatesInAddPhage(phagename, CPTid)\n\n print(approvePhage, approveCPTid)\n\n msg['approvePhage']=approvePhage\n msg['approveCPTid']=approveCPTid\n\n if (approvePhage==0 or approveCPTid==0):\n msg['duplicatePhagesPhages']=json.dumps(duplicatePhagesPhages)\n msg['duplicatePhagesCPTid']=json.dumps(duplicatePhagesCPTid)\n msg['duplicateCPTidPhages']=json.dumps(duplicateCPTidPhages)\n msg['duplicateCPTidCPTid']=json.dumps(duplicateCPTidCPTid)\n\n return JsonResponse(msg)\n\n pform.save()\n\n phage = PhageData.objects.get(phage_name=phagename)\n phage.phage_CPT_id = rform.cleaned_data.get('phage_CPT_id')\n phage.phage_isolator_loc = rform.cleaned_data.get('phage_isolator_loc')\n phage.phage_all_links = aiform.cleaned_data.get('link')\n phage.phage_isolator_name = rrform.cleaned_data.get('phage_isolator_name')\n phage.phage_experimenter_name = rrform.cleaned_data.get('phage_experimenter_name')\n phage.phage_submitted_user = request.user.username\n phage.phage_lab = rrform.cleaned_data.get('phage_lab')\n\n phage.save()\n fillIsoltionObject(isoform, phage)\n\n fillExpObject(expform, phage)\n # print(phage.phage_submitted_user)\n phagedoc = aform.cleaned_data.get('doc')\n phageimage = aform.cleaned_data.get('image')\n dest_dir = os.path.join(settings.MEDIA_ROOT, \"images\", phagename)\n docs_dest_dir = os.path.join(settings.MEDIA_ROOT, \"docs\", phagename)\n try:\n os.mkdir(dest_dir)\n os.mkdir(docs_dest_dir)\n except:\n pass\n dest = os.path.join(dest_dir, str(phageimage))\n docsdest = os.path.join(docs_dest_dir, str(phagedoc))\n if phageimage is None:\n pass\n else:\n handle_uploaded_file(phageimage, dest)\n if phagedoc is None:\n pass\n else:\n handle_uploaded_file(phagedoc, docsdest)\n\n # query_results = PhageData.objects.all()\n\n return JsonResponse(msg)\n\n #if the data is valid\n #render(request, 'view_phages.html', {'add_status':'true','query_results':query_results} )\n #render(request, 'view_phages.html', {'add_status':'true','query_results':query_results ,\n # 'login_status': request.user.is_authenticated,\n # 'username': request.user.username})\n\n else:\n pform.add_error(\"phage_name\",\"This field is required.\")\n rform.add_error(\"phage_CPT_id\",\"This field is required.\")\n return render(request, 'add_phage.html', {'pform': pform,\n 'rrform': rrform,\n 'rform': rform,\n 'expform': expform,\n 'isoform': isoform,\n 'aform': aform,\n 'aiform': aiform,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n else:\n pform = Add_Phage_DataForm()\n rrform = Add_ResearcherForm()\n rform = Add_ResearchForm()\n expform = Add_Experiment_Form()\n isoform = Isolation_Form()\n aform = AForm()\n aiform = AIForm()\n return render(request, 'add_phage.html', {'pform': pform,\n 'rrform': rrform,\n 'rform': rform,\n 'expform': expform,\n 'isoform':isoform,\n 'aform': aform,\n 
'aiform': aiform,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n#this form show the phages per user\ndef my_phages(request):\n query_results = PhageData.objects.filter(phage_submitted_user=request.user.username)\n name = validate_latest_phage(query_results)\n return render(request, 'view_phages.html', {'query_results': query_results,\n 'edit_status':'false','add_status':'false',\n 'delete_status':'false','latest':name,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n })\n#this form shows all the phages\ndef view_phages(request):\n query_results = PhageData.objects.all()\n name = validate_latest_phage(query_results)\n return render(request, 'view_phages.html', {'query_results': query_results,\n 'edit_status': 'false', 'add_status': 'false',\n 'delete_status': 'false', 'latest': name,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n })\n \n \ndef about_us(request):\n query_results = PhageData.objects.all()\n name = validate_latest_phage(query_results)\n return render(request, 'about_us.html', {'query_results': query_results,\n 'edit_status': 'false', 'add_status': 'false',\n 'delete_status': 'false', 'latest': name,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n })\n \n\ndef peoples(request):\n query_results = PhageData.objects.all()\n name = validate_latest_phage(query_results)\n return render(request, 'peoples.html', {'query_results': query_results,\n 'edit_status': 'false', 'add_status': 'false',\n 'delete_status': 'false', 'latest': name,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n })\n \n\ndef articles(request):\n query_results = PhageData.objects.all()\n name = validate_latest_phage(query_results)\n return render(request, 'articles.html', {'query_results': query_results,\n 'edit_status': 'false', 'add_status': 'false',\n 'delete_status': 'false', 'latest': name,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n }) \n \n \ndef contact_us(request):\n query_results = PhageData.objects.all()\n name = validate_latest_phage(query_results)\n return render(request, 'contact_us.html', {'query_results': query_results,\n 'edit_status': 'false', 'add_status': 'false',\n 'delete_status': 'false', 'latest': name,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n }) \n \n \n@user_passes_test(lambda u: u.is_superuser, login_url='/admin/')\ndef delele_all_phages(request):\n phage = PhageData.objects.all().delete()\n query_results = PhageData.objects.all()\n return render(request, 'view_phages.html', {'query_results': query_results,\n 'edit_status':'false','add_status':'false',\n 'delete_status':'false',\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n })\n#this form shows a particular phage\ndef view_phage(request):\n phageName = request.GET.get('name')\n phage = PhageData.objects.get(phage_name=phageName)\n previous_names = phage.PhageName.all()\n expdata = phage.PName.all()\n isodata = phage.iso_phageName.all()\n dest_dir = os.path.join(settings.MEDIA_ROOT, \"images\", phageName)\n list_path=[]\n count = 0;\n try:\n for filename in os.listdir(dest_dir):\n if filename.endswith(\".png\") or filename.endswith(\".jpg\") or filename.endswith(\".jpeg\"):\n list_path.append(filename)\n count=count+1;\n continue\n else:\n continue\n except:\n pass\n return render(request, 'view_phage.html', 
{'item': phage,'previous_names':previous_names,'expdata':expdata,'isodata':isodata,\n 'login_status': request.user.is_authenticated,'dest_dir':list_path,'count':count,\n 'username': request.user.username\n })\n\n@login_required\ndef deletephages(request):\n if request.user.is_authenticated:\n x = request.GET.get('name')\n dest_dir = os.path.join(settings.MEDIA_ROOT, \"images\", x)\n docs_dest_dir = os.path.join(settings.MEDIA_ROOT, \"docs\", x)\n try:\n # os.rmdir only removes empty directories; remove the uploaded media too\n import shutil\n shutil.rmtree(dest_dir)\n shutil.rmtree(docs_dest_dir)\n except OSError:\n pass\n PhageData.objects.get(phage_name=x).delete()\n query_results = PhageData.objects.all()\n name = validate_latest_phage(query_results)\n return render(request, 'view_phages.html', {'query_results': query_results,'delete_status':'true',\n 'login_status': request.user.is_authenticated,'latest':name,\n 'username': request.user.username\n })\n else:\n #messages.error(request,'Login or signup first!')\n return render(request,'login.html',\n {'login_status': request.user.is_authenticated\n })\n\n\ndef search_phage(request):\n phage_list = PhageData.objects.all()\n request.GET._mutable = True\n if request.GET.get('submitted_year_gt'):\n #print(request.GET['submitted_year_gt'])\n if int(request.GET.get('submitted_year_gt')) < 0:\n messages.error(request, 'Invalid value for \"Year Submitted After\" entered. Setting it to 1')\n request.GET['submitted_year_gt'] = 1\n #print(request.GET['submitted_year_gt'])\n if request.GET.get('submitted_year_lt'):\n if int(request.GET.get('submitted_year_lt')) < 0:\n messages.error(request, 'Invalid value for \"Year Submitted Before\" entered. Setting it to 1')\n request.GET['submitted_year_lt'] = 1\n if request.GET.get('submitted_month_gt'):\n if int(request.GET.get('submitted_month_gt')) < 0:\n messages.error(request, 'Invalid value for \"Month Submitted After\" entered. Setting it to 1')\n request.GET['submitted_month_gt'] = 1\n if request.GET.get('submitted_month_lt'):\n if int(request.GET.get('submitted_month_lt')) < 0:\n messages.error(request, 'Invalid value for \"Month Submitted Before\" entered. 
Setting it to 1')\n request.GET['submitted_month_lt'] = 1\n\n phage_filter = PhageFilter(request.GET, queryset=phage_list)\n return render(request, 'search_phage.html', {'filter': phage_filter,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n\ndef check_entry(name):\n print(PhageData.objects.filter(phage_name=name).count())\n if (PhageData.objects.filter(phage_name=name).count() == 0 and PreData.objects.filter(phagename=name).count()==0 ):\n return False\n else:\n return True\n\n@login_required\ndef editPhage(request):\n if request.user.is_authenticated:\n name = request.GET.get('name')\n phage = PhageData.objects.get(phage_name = name)\n isodata = IsolationData.objects.filter(isokey = phage)\n expdata = ExperimentData.objects.filter(expkey = phage)\n last = isodata.latest('id')\n last_exp = expdata.latest('id')\n pform = Edit_Phage_DataForm(request.POST, instance=phage, initial = {'phage_name':phage.phage_name })\n rrform = Edit_ResearcherForm(request.POST, instance=phage)\n rform = Edit_ResearchForm(request.POST, instance=phage)\n isoform = Edit_IsolationDataForm(request.POST)\n expform = Edit_Experiment_Form(request.POST)\n aform = AForm(request.POST, request.FILES)\n aiform = AIForm(request.POST)\n if request.method==\"POST\":\n if pform.is_valid() and rrform.is_valid() and rform.is_valid() and aform.is_valid() and aiform.is_valid()\\\n and isoform.is_valid() and expform.is_valid():\n curr_phage = pform.cleaned_data.get('phage_name')\n if(check_entry(curr_phage) and curr_phage!=name):\n return render(request, 'EditPhage.html', {'item': phage,\n 'pform': pform,\n 'rrform': rrform,'expform':expform,\n 'rform': rform,\n 'aform': aform,\n 'aiform': aiform,'duplicate':'true',\n 'isoform':isoform,'iso':last,'exp':last_exp,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n phage.phage_name = curr_phage\n if(name!=phage.phage_name and PreData.objects.filter(phagename = name).count()==0):\n obj = PreData.objects.create(testkey=phage)\n obj.testkey = phage\n obj.phagename = name\n print (obj.phagename)\n obj.save()\n print (phage.PhageName.all().values())\n #phage = PhageData.objects.get(phage_name=phagename)\n phage.phage_isolator_name = rrform.cleaned_data.get('phage_isolator_name')\n phage.phage_experimenter_name = rrform.cleaned_data.get('phage_experimenter_name')\n phage.phage_CPT_id = rform.cleaned_data.get('phage_CPT_id')\n phage.phage_isolator_loc = rform.cleaned_data.get('phage_isolator_loc')\n phage.phage_all_links = aiform.cleaned_data.get('link')\n phage.phage_lab = rrform.cleaned_data.get('phage_lab')\n #isolator_data = phage.iso_phageName.objects.latest(iso_phageName)\n pform.save()\n phage.save()\n #last.delete()\n fillExpObjectedit(expform, last_exp)\n fillIsoltionObjectedit(isoform, last)\n phagedoc = aform.cleaned_data.get('doc')\n phageimage = aform.cleaned_data.get('image')\n dest_dir_old = os.path.join(settings.MEDIA_ROOT, \"images\", name)\n docs_dest_dir_old = os.path.join(settings.MEDIA_ROOT, \"docs\", name)\n dest_dir = os.path.join(settings.MEDIA_ROOT, \"images\", phage.phage_name)\n docs_dest_dir = os.path.join(settings.MEDIA_ROOT, \"docs\", phage.phage_name)\n try:\n os.rename(dest_dir_old,dest_dir)\n os.rename(docs_dest_dir_old,docs_dest_dir)\n except:\n pass\n dest = os.path.join(dest_dir, str(phageimage))\n docsdest = os.path.join(docs_dest_dir, str(phagedoc))\n if phageimage is None:\n pass\n else:\n handle_uploaded_file(phageimage, dest)\n if phagedoc is None:\n pass\n else:\n 
handle_uploaded_file(phagedoc, docsdest)\n query_results = PhageData.objects.all()\n lname = validate_latest_phage(query_results)\n return render(request, 'view_phages.html', {'edit_status':'true','query_results':query_results,'latest':lname,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username} )\n else:\n phage = PhageData.objects.get(phage_name=name)\n phage.save()\n return render(request, 'EditPhage.html', {'item': phage,\n 'pform': pform,\n 'rrform': rrform,'expform':expform,\n 'rform': rform,\n 'aform': aform,\n 'aiform': aiform,\n 'isoform':isoform,'iso':last,'exp':last_exp,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n else:\n pform = Edit_Phage_DataForm(request.POST, instance=phage)\n rrform = Edit_ResearcherForm(request.POST, instance=phage)\n rform = Edit_ResearchForm(request.POST, instance=phage)\n isoform = Edit_IsolationDataForm(request.POST)\n expform = Edit_Experiment_Form(request.POST)\n aform = AForm()\n aiform = AIForm()\n return render(request, 'EditPhage.html', {'item': phage,\n 'pform': pform,\n 'rrform': rrform,\n 'rform': rform,\n 'aform': aform,\n 'aiform': aiform, 'isoform' : isoform,'expform':expform,\n 'iso':last,\n 'exp': last_exp,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username,\n })\n else:\n return render(request,'Login.html',\n {'login_status': request.user.is_authenticated\n })\n\n\ndef func(phagename):\n dest_dir = os.path.join(settings.MEDIA_ROOT, \"images\", phagename)\n docs_dest_dir = os.path.join(settings.MEDIA_ROOT, \"docs\", phagename)\n try:\n os.mkdir(dest_dir)\n os.mkdir(docs_dest_dir)\n except:\n pass\n\n\ndef populate(reader, request):\n fields = reader.fieldnames\n for row in reader:\n flag = 0\n obj = PhageData.objects.create()\n iso = IsolationData.objects.create(isokey = obj)\n exp = ExperimentData.objects.create(expkey = obj)\n if 'phage_name' in fields:\n name = row['phage_name']\n if not name:\n flag = 0\n elif(PhageData.objects.filter(phage_name=name).count() == 0 and PreData.objects.filter(phagename=name).count() == 0):\n obj.phage_name = name\n else:\n obj.delete()\n exp.delete()\n iso.delete()\n if (PhageData.objects.filter(phage_name=name).count() > 0):\n obj = PhageData.objects.get(phage_name=name)\n else:\n obj1 = PreData.objects.get(phagename=name)\n obj = obj1.testkey\n\n isodata = IsolationData.objects.filter(isokey=obj)\n expdata = ExperimentData.objects.filter(expkey=obj)\n iso = isodata.latest('id')\n exp = expdata.latest('id')\n flag = 1\n if 'phage_host_name' in fields:\n obj.phage_host_name = row['phage_host_name']\n if 'phage_isolator_name' in fields:\n obj.phage_isolator_name = row['phage_isolator_name']\n if 'phage_experimenter_name' in fields:\n obj.phage_experimenter_name = row['phage_experimenter_name']\n if 'phage_CPT_id' in fields:\n obj.phage_CPT_id = row['phage_CPT_id']\n if 'phage_isolator_loc' in fields:\n obj.phage_isolator_loc = row['phage_isolator_loc']\n if 'owner_name' in fields:\n iso.owner_name = row['owner_name']\n if 'location' in fields:\n iso.location = row['location']\n if 'type' in fields:\n iso.type = row['type']\n if 'TimeStamp' in fields:\n iso.TimeStamp = row['TimeStamp']\n if 'owner' in fields:\n exp.owner = row['owner']\n if 'timestamp' in fields:\n exp.timestamp = row['timestamp']\n if 'methods' in fields:\n exp.methods = row['methods']\n if 'results' in fields:\n exp.results = row['results']\n if 'short_name' in fields:\n exp.short_name = row['short_name']\n if 'full_name' 
in fields:\n exp.full_name = row['full_name']\n if 'category' in fields:\n exp.category = row['category']\n obj.phage_submitted_user = request.user.username\n\n if flag == 0:\n obj.delete()\n exp.delete()\n iso.delete()\n else:\n obj.save()\n func(obj.phage_name)\n iso.save()\n exp.save()\n\n\n\n@user_passes_test(lambda u: u.is_superuser, login_url='/admin/')\ndef model_form_upload(request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n paramFile = TextIOWrapper(request.FILES['file'].file, encoding=request.encoding)\n reader = csv.DictReader(paramFile,delimiter=';',skipinitialspace=True,)\n populate(reader, request)\n query_results = PhageData.objects.all()\n lname = validate_latest_phage(query_results)\n return render(request, 'view_phages.html', {'query_results': query_results,\n 'edit_status': 'false', 'add_status': 'false',\n 'delete_status': 'false','latest':lname,\n 'login_status': request.user.is_authenticated,\n 'username': request.user.username\n })\n else:\n form = UploadFileForm()\n return render(request, 'model_form_upload.html', {'form': form,'login_status': request.user.is_authenticated,\n 'username': request.user.username})\n\ndef checkDuplicatesInAddPhage(phage_name, phage_CPT_id):\n #db=sqlite3.connect('db.sqlite3')\n #params={'phage_name':phage_name, 'phage_CPT_id':phage_CPT_id}\n #q1=\"SELECT phage_name, phage_CPT_id FROM core_phagedata WHERE phage_name='{phage_name}'\"\n #rowsPhage = pd.read_sql_query(q1.format(**params), db)\n\n rowsPhage = PhageData.objects.filter(phage_name = phage_name).values('phage_name','phage_CPT_id')\n #.values() is a list of dict\n\n rowsCPTid = PhageData.objects.filter(phage_CPT_id = phage_CPT_id).values('phage_name','phage_CPT_id')\n\n duplicatePhagesPhages = [d['phage_name'] for d in rowsPhage] # rowsPhage[\"phage_name\"].values.tolist()\n #print(duplicatePhagesPhages)\n\n duplicatePhagesCPTid = [d['phage_CPT_id'] for d in rowsPhage]\n\n duplicateCPTidPhages = [d['phage_name'] for d in rowsCPTid]\n duplicateCPTidCPTid = [d['phage_CPT_id'] for d in rowsCPTid]\n\n approvePhage=1\n approveCPTid=1\n if len(rowsPhage)>0:\n approvePhage=0\n\n if len(rowsCPTid)>0:\n approveCPTid=0\n\n return approvePhage, approveCPTid, duplicatePhagesPhages, duplicatePhagesCPTid, duplicateCPTidPhages\\\n , duplicateCPTidCPTid\n\n#def checkDuplicatesInFile(decoded_file):\n# db=sqlite3.connect('db.sqlite3')\n# io_string = io.StringIO(decoded_file)\n# \n# phage_names=[]\n# \n# for line in csv.reader(io_string, delimiter=','):\n# phage_names.append(line[0])\n# \n# df=pd.read_sql_query('SELECT phage_name FROM core_phagedata',db)\n# \n# stored_phages = df[\"phage_name\"].values.tolist()\n# \n# common_phages = set(phage_names).intersection(stored_phages)\n# \n# approveFlag=1\n# if len(common_phages)>0:\n# approveFlag=0\n# \n# #print(common_phages)\n# return common_phages, approveFlag\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PhageBank/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":39284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
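The handle_uploaded_file helper in the record above writes uploads chunk by chunk rather than reading the whole file into memory, which is the standard Django pattern for large media files. A minimal standalone sketch of the same pattern, with a plain file object standing in for Django's UploadedFile (whose chunks() method yields blocks of bytes):

    import io

    def write_chunked(src, dest_path, chunk_size=64 * 1024):
        # Stream src to dest_path without holding the whole file in memory.
        with open(dest_path, 'wb') as destination:
            while True:
                chunk = src.read(chunk_size)
                if not chunk:
                    break
                destination.write(chunk)

    write_chunked(io.BytesIO(b'x' * 1_000_000), 'upload.bin')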
+{"seq_id":"635601146","text":"# built-in libraries\nimport datetime\n\n# external libraries\nimport pytz\n\n# internal libraries\nfrom ouroboros import Type, Image, Node\nfrom ouroboros.config import MILLI, UNIX_EPOCH\n\n# exports\n__all__= (\"dt\", \"td\",\n \"at\", \"after\", \"every\",\n \"relate\", \"iso8601\")\n\n# constants\n# ...\n\n# datetime.datetime <-> JSON\ndt = Type(\"!clock/dt\", datetime.datetime,\n lambda x: int((x - UNIX_EPOCH).total_seconds() / MILLI),\n lambda x: UNIX_EPOCH + datetime.timedelta(seconds=x * MILLI))\n\n# datetime.timedelta <-> JSON\ntd = Type(\"!clock/td\", datetime.timedelta,\n lambda x: int(x.total_seconds() / MILLI),\n lambda x: datetime.timedelta(seconds=x * MILLI))\n\n\n@Image(\".clock@at\",\n sys=Node(evs=(\"tick\",), args=(),\n ins=(), reqs=(\"t\",),\n outs=(), pros=()),\n usr=Node(evs=(), args=(),\n ins=(), reqs=(),\n outs=(\"tock\",), pros=()))\ndef at(sys, usr):\n \"\"\"at\"\"\" \n yield\n while True:\n sys_t, = sys.reqs\n yield (usr.outs((sys_t,)),)\n\n\n@Image(\".clock@after\",\n env=Node(evs=(), args=(),\n ins=(), reqs=(\"t\",),\n outs=(), pros=()),\n sys=Node(evs=(\"tick\",), args=(),\n ins=(), reqs=(\"delta_t\",),\n outs=(), pros=()),\n usr=Node(evs=(), args=(),\n ins=(), reqs=(),\n outs=(\"tock\",), pros=()))\ndef after(env, sys, usr):\n \"\"\"after\"\"\"\n yield\n while True:\n env_t, = env.reqs\n delta_t, = sys.reqs\n yield (usr.outs((env_t + delta_t,)),)\n\n\n@Image(\".clock@every\",\n env=Node(evs=(\"tick\",), args=(\"t\",),\n ins=(), reqs=(),\n outs=(), pros=()),\n sys=Node(evs=(\"tick\",), args=(\"delta_t\",),\n ins=(), reqs=(),\n outs=(\"tick\",), pros=()),\n kw=Node(evs=(), args=(),\n ins=(), reqs=(),\n outs=(\"tock\",), pros=()))\ndef every(env, sys, **kw):\n \"\"\"every\"\"\"\n env_t, = env.args\n delta_t, = sys.args\n \n yield\n while True:\n env_t += delta_t\n yield ((sys.outs((env_t,)),) +\n tuple(usr.outs((True,))\n for usr in kw.values()))\n\n\n@Image(\".clock@relate\",\n sys=Node(evs=(\"tock\",), args=(),\n ins=(), reqs=(\"t\",),\n outs=(), pros=()),\n usr=Node(evs=(), args=(),\n ins=(), reqs=(\"t\",),\n outs=(\"lt\", \"eq\", \"gt\"), pros=()))\ndef relate(sys, usr):\n \"\"\"relate\"\"\"\n evs = yield\n while True:\n sys_t, = env.reqs\n usr_t, = usr.reqs\n yield (usr.outs((sys_t < usr_t,\n sys_t == usr_t,\n sys_t > usr_t)),)\n\n\n@Image(\".clock@iso8601\",\n sys=Node(evs=(), args=(),\n ins=(), reqs=(\"t\",),\n outs=(), pros=()),\n usr=Node(evs=(\"tock\",), args=(),\n ins=(\"tock\", 8601,), reqs=(),\n outs=(8601,), pros=(\"t_dt\",)))\ndef iso8601(sys, usr):\n evs = yield\n while True:\n sys_t, = sys.reqs\n clk_e, usr_e = usr.ins()\n flag = usr_e not in evs\n if flag:\n usr.pros = (datetime.datetime.fromtimestamp\n (sys_t, tz=pytz.utc),)\n yield (usr.outs((flag or None,)),)\n","sub_path":"ob-time/ob-time/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"159243354","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nfrom typing import List\n\n#from eco_kg.transform_utils.eol_hierarchy.eol_hierarchy import EOLheirarchyTransform\nfrom eco_kg.transform_utils.ontology import OntologyTransform\nfrom eco_kg.transform_utils.ontology.ontology_transform import ONTOLOGIES\nfrom eco_kg.transform_utils.eol_traits.eol_traits import EOLTraitsTransform\nfrom eco_kg.transform_utils.planteome.planteome import PlanteomeTransform\nfrom eco_kg.transform_utils.gene_expression_atlas.gene_expression_atlas import GeneExpressionAtlasTransform\n\n\nDATA_SOURCES = {\n #'EOLheirarchyTransform': EOLheirarchyTransform,\n 'GoTransform': OntologyTransform,\n #'HpTransform': OntologyTransform,\n 'NCBITransform': OntologyTransform,\n #'EnvoTransform' : OntologyTransform,\n 'ToTransform' : OntologyTransform,\n 'PoTransform' : OntologyTransform,\n #'PecoTransform' : OntologyTransform,\n 'EOLTraitsTransform': EOLTraitsTransform,\n 'PlanteomeTransform': PlanteomeTransform,\n 'GeneExpressionAtlasTransform':GeneExpressionAtlasTransform\n}\n\n\ndef transform(input_dir: str, output_dir: str, sources: List[str] = None) -> None:\n \"\"\"Call scripts in eco_kg/transform/[source name]/ to transform each source into a graph format that\n KGX can ingest directly, in either TSV or JSON format:\n https://github.com/NCATS-Tangerine/kgx/blob/master/data-preparation.md\n Args:\n input_dir: A string pointing to the directory to import data from.\n output_dir: A string pointing to the directory to output data to.\n sources: A list of sources to transform.\n Returns:\n None.\n \"\"\"\n if not sources:\n # run all sources\n sources = list(DATA_SOURCES.keys())\n\n for source in sources:\n if source in DATA_SOURCES:\n logging.info(f\"Parsing {source}\")\n t = DATA_SOURCES[source](input_dir, output_dir)\n if source in ONTOLOGIES.keys():\n t.run(ONTOLOGIES[source])\n else:\n t.run()","sub_path":"eco_kg/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"51779132","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os, sys, yaml, re, uuid\nimport datetime\nimport jinja2\n\nclass AsiaTokyoTimezone(datetime.tzinfo):\n def utcoffset(self, dt):\n return datetime.timedelta(hours=9)\n \n def dst(self, dt):\n return datetime.timedelta(hours=9)\n \n def tzname(self, dt):\n return 'Asia/Tokyo'\n\nTZ = AsiaTokyoTimezone()\n\nBASE_DIR = os.path.abspath(os.getcwd())\nDEFAULT_DIR = os.path.join(BASE_DIR, '_posts')\nDEFAULT_CONF_FILE = os.path.join(BASE_DIR, '_config.yml')\nTEMPLATE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', 'jekyll-template.md')\n\nimport argparse\n\ndef parse_args(*args):\n parser = argparse.ArgumentParser(description=\"Process some integers.\")\n parser.add_argument('title', action=\"store\")\n parser.add_argument('-t', '--timestamp', default=False, action=\"store_true\")\n parser.add_argument('-c', '--config', default=DEFAULT_CONF_FILE, action=\"store\")\n parser.add_argument('-o', '--directory', default=DEFAULT_DIR, action=\"store\")\n args = parser.parse_args(args)\n return args\n\ndef write(dst, data):\n if not os.path.exists(dst):\n with open(dst, 'w') as fp:\n fp.write(data.encode('utf8'))\n else:\n print('ERROR: File already exists.', file=sys.stderr)\n\ndef main(*args):\n args = parse_args(*args)\n timestamp = args.timestamp\n time_now = datetime.datetime.now(tz=TZ)\n time_date = time_now.strftime('%Y-%m-%d')\n title_lower = args.title.lower()\n title = re.sub(r' ', '-', title_lower)\n if timestamp:\n file_name = time_date + '-' + title + '.md'\n else:\n file_name = title + '.md'\n #dst_dir = os.path.dirname(os.path.abspath(__file__))\n dst = os.path.join(args.directory, file_name)\n with open(DEFAULT_CONF_FILE, 'r') as fp:\n config_data = fp.read()\n config_obj = yaml.load(config_data)\n with open(TEMPLATE_FILE, 'r') as fp:\n template_data = fp.read()\n template_context = {}\n template_context.update({'page': {'title': args.title,\n 'current_time': time_now,\n 'uuid': str(uuid.uuid4())}})\n template_context.update({'site': config_obj})\n template_result = jinja2.Template(template_data).render(template_context)\n print(dst)\n write(dst, template_result)\n return 0\n\nif __name__ == '__main__':\n sys.exit(main(*sys.argv[1:]))\n\n","sub_path":"jekyll_create_file.py","file_name":"jekyll_create_file.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"536601489","text":"class Cat:\r\n def __init__(self, name):\r\n self.name = name\r\n self.fed = False\r\n self.sleepy = False\r\n self.size = 0\r\n\r\n def eat(self):\r\n if self.fed:\r\n raise Exception(\"Already fed.\")\r\n self.fed = True\r\n self.sleepy = True\r\n self.size += 1\r\n\r\n def sleep(self):\r\n if not self.fed:\r\n raise Exception(\"Cannot sleep while hungry\")\r\n self.sleepy = False\r\n\r\n\r\nimport unittest\r\n\r\n\r\nclass TestCase(unittest.TestCase):\r\n def test_cat_if_size_increase_after_eating(self):\r\n cat = Cat('Akira')\r\n cat.eat()\r\n self.assertEqual(1, cat.size)\r\n\r\n def test_cat_if_fed_after_eating(self):\r\n cat = Cat('Akira')\r\n cat.eat()\r\n self.assertTrue(cat.fed)\r\n\r\n def test_cat_already_fet(self):\r\n cat = Cat('Akira')\r\n self.assertFalse(cat.fed)\r\n cat.eat()\r\n self.assertTrue(cat.fed)\r\n with self.assertRaises(Exception) as ex:\r\n cat.eat()\r\n self.assertEqual(\"Already fed.\", str(ex.exception))\r\n\r\n def test_cat_sleep_if_not_fed(self):\r\n cat = Cat('Akira')\r\n self.assertFalse(cat.fed)\r\n with self.assertRaises(Exception) as ex:\r\n cat.sleep()\r\n self.assertEqual(\"Cannot sleep while hungry\", str(ex.exception))\r\n\r\n def test_cat_is_not_sleepy_after_sleeping(self):\r\n cat = Cat('Akira')\r\n cat.eat()\r\n cat.sleep()\r\n self.assertFalse(cat.sleepy)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"9.Testing/Lab/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"594724967","text":"from typing import Callable, List\n\nimport torch\nimport torch.nn as nn\n\nfrom torecsys.layers import InnerProductNetworkLayer, OuterProductNetworkLayer, DNNLayer\nfrom torecsys.utils.decorator import no_jit_experimental_by_namedtensor\nfrom torecsys.utils.operations import combination\nfrom . import _CtrModel\n\n\nclass ProductNeuralNetworkModel(_CtrModel):\n r\"\"\"Model class of Product Neural Network (PNN).\n\n Product Neural Network is a model using inner-product or outer-product to extract high \n dimensional non-linear relationship from interactions of feature tensors instead, where \n the process is handled by factorization machine part in Factorization-machine supported \n Neural Network (FNN).\n\n :Reference:\n\n #. `Yanru QU, 2016. Product-based Neural Networks for User Response Prediction `_.\n\n \"\"\"\n\n @no_jit_experimental_by_namedtensor\n def __init__(self,\n embed_size: int,\n num_fields: int,\n deep_layer_sizes: List[int],\n output_size: int = 1,\n prod_method: str = \"inner\",\n deep_dropout_p: List[float] = None,\n deep_activation: Callable[[torch.Tensor], torch.Tensor] = nn.ReLU(),\n **kwargs):\n r\"\"\"Initialize ProductNeuralNetworkModel\n \n Args:\n embed_size (int): Size of embedding tensor\n num_fields (int): Number of inputs' fields\n deep_layer_sizes (List[int]): Layer sizes of dense network\n output_size (int): Output size of model\n i.e. output size of dense network. \n Defaults to 1.\n prod_method (str): Method of product neural network. \n Allow: [inner, outer].\n Defaults to inner.\n deep_dropout_p (List[float], optional): Probability of Dropout in dense network. \n Defaults to None.\n deep_activation (Callable[[T], T], optional): Activation function of dense network. \n Defaults to nn.ReLU().\n \n Arguments:\n kernel_type (str): Type of kernel to compress outer-product.\n \n Attributes:\n pnn (nn.Module): Module of product neural network.\n deep (nn.Module): Module of dense layer.\n bias (nn.Parameter): Parameter of bias of field-aware factorization machine.\n\n Raises:\n ValueError: when prod_method is not in [inner, outer].\n \"\"\"\n # Refer to parent class\n super(ProductNeuralNetworkModel, self).__init__()\n\n # Initialize product network\n if prod_method == \"inner\":\n self.pnn = InnerProductNetworkLayer(num_fields=num_fields)\n elif prod_method == \"outer\":\n self.pnn = OuterProductNetworkLayer(embed_size=embed_size,\n num_fields=num_fields,\n kernel_type=kwargs.get(\"kernel_type\", \"mat\"))\n else:\n raise ValueError(\"'%s' is not allowed in prod_method. 
Please use ['inner', 'outer'].\" % prod_method)\n\n # Calculate size of inputs of dense layer\n cat_size = combination(num_fields, 2) + num_fields + 1\n\n # Initialize dense layer\n self.deep = DNNLayer(\n output_size=output_size,\n layer_sizes=deep_layer_sizes,\n inputs_size=cat_size,\n dropout_p=deep_dropout_p,\n activation=deep_activation\n )\n\n # Initialize bias parameter\n self.bias = nn.Parameter(torch.zeros((1, 1), names=(\"B\", \"O\")))\n nn.init.uniform_(self.bias.data)\n\n def forward(self, feat_inputs: torch.Tensor, emb_inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward calculation of ProductNeuralNetworkModel\n \n Args:\n feat_inputs (T), shape = (B, N, E = 1), dtype = torch.float: Features tensors.\n emb_inputs (T), shape = (B, N, E), dtype = torch.float: Embedded features tensors.\n \n Returns:\n T, shape = (B, O), dtype = torch.float: Output of ProductNeuralNetworkModel\n \"\"\"\n # Get batch size from emb_inputs\n b = emb_inputs.size(\"B\")\n\n # Flatten feat_inputs over dimensions N and E into a single dimension O\n # inputs: feat_inputs, shape = (B, N, E = 1)\n # output: pnn_first, shape = (B, O = N)\n pnn_first = feat_inputs.flatten([\"N\", \"E\"], \"O\")\n\n # Calculate product cross features by pnn layer \n # inputs: emb_inputs, shape = (B, N, E)\n # with output's shape = (B, NC2)\n pnn_second = self.pnn(emb_inputs)\n\n # Concat pnn_second, pnn_first and bias on dimension O\n # inputs: pnn_second, shape = (B, O = NC2)\n # inputs: pnn_first, shape = (B, O = N)\n # inputs: bias, shape = (B, O = 1)\n # output: outputs, shape = (B, O = NC2 + N + 1)\n outputs = torch.cat([pnn_second, pnn_first, self.bias], dim=\"O\")\n\n # Calculate with deep layer forwardly\n # inputs: outputs, shape = (B, O = (NC2 + N + 1))\n # output: outputs, shape = (B, O)\n outputs = self.deep(outputs)\n\n # Drop names of outputs, since autograd doesn't support NamedTensor yet.\n outputs = outputs.rename(None)\n\n return outputs\n","sub_path":"torecsys/models/ctr/product_neural_network.py","file_name":"product_neural_network.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
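The dense layer's input width in the PNN record is combination(num_fields, 2) + num_fields + 1: one inner (or outer) product per pair of fields, the num_fields first-order terms, and one bias slot. A quick check of that arithmetic without torecsys, for num_fields = 4 (6 pairs + 4 + 1 = 11):

    from math import comb  # Python 3.8+

    num_fields = 4
    cat_size = comb(num_fields, 2) + num_fields + 1
    assert cat_size == 11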
+{"seq_id":"395074465","text":"from __future__ import absolute_import\nimport os\nfrom celery import Celery\nfrom django.conf import settings\nfrom client.client import Job\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'collins.settings.dev')\napp = Celery('api')\n\n# Using a string here means the worker will not have to\n# pickle the object when using Windows.\napp.config_from_object('django.conf:settings')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n@app.task(bind=True)\ndef ping(self):\n return \"Pong\"\n\n@app.task(bind=True)\ndef add(self, x, y):\n return x + y\n\n@app.task(bind=True, ignore_result=False)\ndef execute_job(self, job):\n print(\"Executing Job {}\".format(job['id']))\n job = Job(json=job)\n result = job.execute()\n print(result)\n print(\"Job results are ready\")\n return result.json()\n","sub_path":"api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"608195289","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport xlrd\nimport requests\n\n# өгөгдлүүдээ http://college.cengage.com/mathematics/brase/understandable_statistics/7e/students/datasets/slr/frames/slr05.html \n# хаяг дээрхи Чикаго хотын гал болон гэмт хэргийн гаралтын хамаарлын \n# судалгааг агуулсан excel файлаас татаж авч уншаад numpy массив болгож хадгална\ndata_url = \"http://college.cengage.com/mathematics/brase/understandable_statistics/7e/students/datasets/slr/excel/slr05.xls\"\nu = requests.get(data_url)\nbook = xlrd.open_workbook(file_contents=u.content, encoding_override=\"utf-8\")\nsheet = book.sheet_by_index(0)\ndata = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])\nn_samples = sheet.nrows-1\n\n# гал гаралтын тоо тэмжээг илтгэх оролтын X placeholder\nX = tf.placeholder(tf.float32, name=\"X\")\n# гэмт хэргийн гаралтын тоо хэмжээг илтгэх гаралтын Y placeholder\nY = tf.placeholder(tf.float32, name=\"Y\")\n\n# 0 утгаар цэнэглэсэн жин болон биас утгууд\nw = tf.Variable(0., name=\"weights\")\nb = tf.Variable(0., name=\"bias\")\n\n# гал гаралтын тооноос гэмт хэргийн гаралтыг тооцон олох\n# шугам регрессийн модел\nY_predicted = X*w+b\n\n# алдааны квадрат утгыг төлөөлөх loss функц\nloss = tf.square(Y-Y_predicted, name='loss')\n\n# gradient descent алгоритм хэрэглэн 0.01 хэмжээтэйгээр суралцуулж\n# loss утгыг минимумчилна\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)\n\n# регрессийн цэгүүдийг зурж харуулах\nplt.xlabel('Гал гаралтын тоо хэмжээ')\nplt.ylabel('Гэмт хэргийн гаралтын тоо')\nplt.scatter(data[:, 0], data[:, 1])\nx_plot = np.linspace(0, 50, 100)\nplt.ion() # графикийг тусдаа процесстой цонх болгон харуулах\n\nwith tf.Session() as sess:\n # шаардлагатай хувьсагчуудыг цэнэглэн зарлах, энэ тохиолдолд w болон b \n sess.run(tf.global_variables_initializer())\n # моделийг сургах\n for i in range(100): # 100 epoch\n for x, y in data:\n # loss-ийг минимумчлах train_op\n sess.run(optimizer, feed_dict={X: x, Y: y})\n # w болон b утгууд нь одоо ямар болсныг авах\n w_value, b_value = sess.run([w, b])\n print(\"weight=\", w_value, \", bias=\", b_value)\n\n # регрессийн шулууныг w болон b утгуудыг ашиглан зурах\n plt.xlabel('Гал гаралтын тоо хэмжээ')\n plt.ylabel('Гэмт хэргийн гаралтын тоо')\n plt.scatter(data[:, 0], data[:, 1])\n plt.plot(x_plot, x_plot*w_value + b_value)\n plt.show()\n plt.pause(0.01)\n plt.gcf().clear()\n\n\n","sub_path":"regression/regression_from_url.py","file_name":"regression_from_url.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"230716002","text":"#캠핑\n\nN=0\nans = []\nwhile True:\n L, P, V = map(int,input().split())\n # 연속하는 P일중 L일만 사용가능\n\n if (L == 0 and P == 0 and V == 0) or (L>=P or P>=V or L>=V) :\n break\n N +=1\n ans.append((V // P) * L + min(V%P, L))\n # print(\"Case \"+ str(i) + \": \" + str(ans))\n # print(\"Case {}: {}\".format(i,ans))\n\n\nfor i in range(N):\n print(f'Case {i+1}: {ans[i]}')\n","sub_path":"greedy/4796.py","file_name":"4796.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"392900235","text":"from PyQt5.QtWidgets import QWidget, QPushButton, QLineEdit,QInputDialog, QApplication\n\nclass ChooseNickname(QWidget):\n def __init__(self):\n super().__init__(self)\n self.initUI()\n\n def initUI(self):\n input_box = QLineEdit()\n\napp = QApplication([])\ndialog = QInputDialog()\ndialog.show()\napp.exec_()","sub_path":"ICubE-/gui2.py","file_name":"gui2.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"433722501","text":"#!/usr/bin/env python\n\"\"\"\n================================================================================\n:mod:`photon_range` -- Estimate of electron range\n================================================================================\n\n.. module:: photon_range\n :synopsis: Estimate of electron range\n\n.. inheritance-diagram:: pymontecarlo.util.photon_range\n\n\"\"\"\n\n# Script information for the file.\n__author__ = \"Philippe T. Pinard\"\n__email__ = \"philippe.pinard@gmail.com\"\n__version__ = \"0.1\"\n__copyright__ = \"Copyright (c) 2014 Philippe T. Pinard\"\n__license__ = \"GPL v3\"\n\n# Standard library modules.\n\n# Third party modules.\n\n# Local modules.\n\n# Globals and constants variables.\n\ndef photon_range(e0, material, transition):\n \"\"\"\n This function returns the generated photon range in *material* at\n incident electron energy *e0* for a characteristic x ray line *transition*.\n\n Reference:\n Hovington, P., Drouin, D., Gauvin, R. & Joy, D.C. (1997).\n Parameterization of the range of electrons at low energy using\n the CASINO Monte Carlo program. Microsc Microanal 3(suppl.2),\n 885–886.\n \n :arg e0: incident electron energy (in eV)\n :arg material: material\n :arg transition: x-ray line transition\n \n :return: photon range (in meters)\n \"\"\"\n if transition.z not in material.composition:\n raise ValueError('%s is not in material' % transition.symbol)\n if transition.energy_eV > e0:\n return 0.0\n\n z = transition.z\n ck = 43.04 + 1.5 * z + 5.4e-3 * z ** 2\n cn = 1.755 - 7.4e-3 * z + 3.0e-5 * z ** 2\n density = material.density_g_cm3\n\n e0 = e0 / 1e3\n ec = transition.energy_eV / 1e3\n\n return ck / density * (e0 ** cn - ec ** cn) * 1e-9\n","sub_path":"pymontecarlo/util/photon_range.py","file_name":"photon_range.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"625661917","text":"import sys\n#sys.stdin=open(\"input.txt\", \"rt\")\n\ns=input()\nn=0\n\nfor i in range(len(s)):\n if ord(s[i])>=48 and ord(s[i])<=57:\n n*=10\n n+=(ord(s[i])-48)\n\ncnt=0\nfor i in range(1,n+1):\n if n%i==0:\n cnt+=1\n\nprint(n)\nprint(cnt)","sub_path":"section3/2. 숫자만 추출.py","file_name":"2. 숫자만 추출.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"302962549","text":"# Globals #\n\n\nimport sys\n\nsys.path.insert(0, \"modules\")\n\n# Project modules\nfrom model import Model\nimport preprocessing as pp\nimport analysis\nimport scraper\n\n# System modules\nimport os.path\nimport pandas as pd\n\ntry:\n if sys.argv[1] == \"-o\":\n OPTIMIZE = sys.argv[2] == \"True\"\n else:\n print(\"Invalid arguments.\")\n sys.exit()\nexcept IndexError:\n print(\"Missing arguments.\")\n sys.exit()\n\n\n# Data Bus #\n\n\nprint(\"Fetching data\")\n\nprice_data = scraper.fetch_data(os.path.dirname(os.getcwd()) + \"/data/price_data.csv\")\nblockchain_data = scraper.fetch_data(os.path.dirname(os.getcwd()) + \"/data/blockchain_data.csv\")\n#coindesk_headlines = pd.read_csv(os.path.dirname(os.getcwd()) + \"/data/test_scores.csv\", sep=\",\")\n\n# Preprocessing #\n\n\nprint(\"Preprocessing\")\n\ndata = (\n price_data.pipe(pp.calculate_indicators)\n .pipe(pp.merge_datasets, other_sets=[blockchain_data]) # [blockchain_data, coindesk_headlines]\n .pipe(pp.binarize_labels)\n .pipe(pp.fix_null_vals)\n .pipe(pp.add_lag_variables, lag=3)\n .pipe(pp.power_transform)\n )\nx_train, x_test, y_train, y_test = pp.split(data, test_size=.2, balanced=True)\n\n\n# Exploratory Analysis #\n\n\nprint(\"Analyzing features\")\n\n#print(data.describe())\nanalysis.plot_corr_matrix(data)\n\n\n# Fitting Models #\n\n\nprint(\"Fitting models\")\n\nlog_reg = Model(\n estimator=\"LogisticRegression\",\n train_set=(x_train, y_train),\n test_set=(x_test, y_test),\n select_features=\"RecursiveFE\",\n optimize=OPTIMIZE\n )\nrand_forest = Model(\n estimator=\"RandomForest\",\n train_set=(x_train, y_train),\n test_set=(x_test, y_test),\n select_features=\"RecursiveFE\",\n optimize=OPTIMIZE\n )\ngrad_boost = Model(\n estimator=\"GradientBoosting\",\n train_set=(x_train, y_train),\n test_set=(x_test, y_test),\n select_features=\"RecursiveFE\",\n optimize=OPTIMIZE\n )\n\n\n# Evaluation #\n\n\nprint(\"Evaluating\")\n\n# Logistic Regression\nprint(\"\\tLogistic Regression Estimator\")\nlog_reg.plot_cnf_matrix()\nlog_reg.cross_validate(method=\"Holdout\")\nlog_reg.cross_validate(\n method=\"RollingWindow\",\n data=data,\n window_size=.9,\n test_size=.1\n )\n\n# Random Forest\nprint(\"\\tRandom Forest Classifier\")\nrand_forest.plot_cnf_matrix()\nrand_forest.cross_validate(method=\"holdout\")\nrand_forest.cross_validate(\n method=\"RollingWindow\",\n data=data,\n window_size=.9,\n test_size=.1\n )\n\n# Gradient Boosting\nprint(\"\\tGradient Boosting Classifier\")\ngrad_boost.plot_cnf_matrix()\ngrad_boost.cross_validate(method=\"holdout\")\ngrad_boost.cross_validate(\n method=\"RollingWindow\",\n data=data,\n window_size=.9,\n test_size=.1\n )\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"141940352","text":"import logging\n\nfrom flask import Flask, render_template\nfrom flask_restful import Api\nfrom flask_socketio import SocketIO\n\nfrom .api import *\nfrom ..modules.db import session_manager, unpack_declarative\nfrom ..modules.comm_interface import client_interface\nfrom ..modules.nodes import connected_nodes\nfrom ..models.db_dataschemes import *\nfrom ..models.observe import Observer\n\n# If the default logger are on, the logging library doesn't work properly\n# with the logging made by moody\n\nlogging.getLogger(\"werkzeug\").setLevel(logging.ERROR)\nlogging.getLogger(\"engineio\").setLevel(logging.ERROR)\nlogging.getLogger(\"socketio\").setLevel(logging.ERROR)\n\n\n# Observer/socketio/restful stuff\n#\n# Define the update behaviour for the default observers\n# so that it forwards the changes to the socketio interface\n\ninterface = Flask(__name__)\nsocketio = SocketIO(interface)\napi = Api(interface)\n\napi.add_resource(SituationListApi, \"/situation\")\napi.add_resource(SituationApi, \"/situation/\")\n\napi.add_resource(ActuatorGroupApi, \"/actuatorgroup\")\napi.add_resource(ActionListApi, \"/actuatorgroup/\")\napi.add_resource(ActionApi, \"/action/\")\n\napi.add_resource(SituationActionMappingsApi, \"/mappings\")\napi.add_resource(SituationActionMappingApi, \"/mapping\")\n\napi.add_resource(NeuralDatasetsApi, \"/neuraldatasets\")\napi.add_resource(NeuralDatasetApi, \"/neuraldataset/\")\napi.add_resource(NeuralCollectApi, \"/neuralcollect\")\napi.add_resource(NeuralPredictApi, \"/neuralpredict\")\n\n\ndef _update(**kwargs):\n global socketio\n # Check for the correctness of the Observer pattern usage\n if \"nodes\" not in kwargs:\n raise ValueError(\"Nodes should be passed through the notify_observer/update functions.\")\n socketio.emit(\"changed\", {'data': kwargs[\"nodes\"]._asdict()}, namespace=\"/listen\")\n\n\nconnected_nodes_observer = Observer()\nconnected_nodes_observer.update = _update\nconnected_nodes.add_observer(connected_nodes_observer)\n\n\n# Flask routing\n@interface.route(\"/\")\ndef adm():\n return render_template(\"admin.html\", title=\"Moody Administration Panel\")\n\n\n@interface.route(\"/discover\")\ndef discover():\n # Delete the nodes currently saved in the session\n connected_nodes.clear()\n client_interface().discover()\n return \"\", 200\n\n\n@interface.route(\"/settings\")\ndef settings():\n return render_template(\"settings.html\")\n\n\n@interface.route(\"/situations\")\ndef situations():\n with session_manager() as session:\n situation_data = session.query(Situation.situation_id, Situation.situation_name).all()\n situaction_data = session.query(SituationSetting, Situation.situation_name, Action.action_name)\\\n .join(Situation, SituationSetting.situation_id == Situation.situation_id)\\\n .join(Action, SituationSetting.action_id == Action.action_id).all()\n return render_template(\"situations.html\", situations=situation_data, situactions=situaction_data)\n\n\n@interface.route(\"/actions\")\ndef actions():\n with session_manager() as session:\n grps = session.query(NodeMeta).join(Node). \\\n filter(Node.node_metadata == NodeMeta.nodemeta_id). 
\\\n filter(Node.node_type == \"actuator\").all()\n group_data = unpack_declarative(grps)\n return render_template(\"actions.html\", groups=group_data)\n\n\n# TODO only show and work with datasets including the types connected at the moment\n@interface.route(\"/neuralinf\")\ndef neuralinf():\n with session_manager() as session:\n neuralmeta_data = session.query(NeuralMeta).all()\n return render_template(\"neuralinf.html\", datasets=neuralmeta_data)\n\n\n@interface.route(\"/linker/<linker_id>\")\ndef linker(linker_id):\n try:\n int(linker_id)\n except ValueError:\n return \"\", 404\n\n with session_manager() as session:\n situation_data = session.query(Situation.situation_id, Situation.situation_name).all()\n grp = session.query(NodeMeta).get(linker_id)\n res = session.query(Action.action_id, Action.action_name, Action.action_value) \\\n .join(NodeMeta).filter(Action.action_metadata == linker_id).all()\n return render_template(\"linker.html\", group=grp, actions=res, situations=situation_data)\n\n\n@interface.route(\"/nodes\", methods=[\"GET\"])\ndef get_connected():\n try:\n elements = [elem._asdict() for elem in connected_nodes.nodes]\n except KeyError:\n return \"[]\", 200\n else:\n return json.dumps(elements), 200\n","sub_path":"moodysg/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"618909259","text":"import argparse\n\ndef fasta_reader(filename):\n\n\tinfile = open(filename)\n\tseqs = {}\n\n\tfor line in infile:\n\t\tname = line.strip()[1:] # remove leading '>'\n\t\tseq = next(infile).strip()\n\t\tseqs[name] = seq\n\n\treturn seqs\n\ndef tab_reader(filename):\n\tinfile = open(filename)\n\tseqs = {}\n\n\tfor line in infile:\n\t\tname, seq = line.strip().split('\\t')\n\t\tseqs[name] = seq\n\t\tseqs[name] = seq\n\n\treturn seqs\n\ndef csv_reader(filename):\n\tinfile = open(filename)\n\tseqs = {}\n\n\tfor line in infile:\n\t\tname, seq = line.strip().split(',')\n\t\tseqs[name] = seq\n\n\treturn seqs\n\ndef fasta_writer(filename, seqs):\n\toutfile = open(filename, 'w')\n\t\n\tfor x in seqs:\n\t\toutfile.write('>'+x+'\\n'+seqs[x]+'\\n')\n\n\toutfile.close()\n\ndef tab_writer(filename, seqs):\n\toutfile = open(filename, 'w')\n\t\n\tfor x in seqs:\n\t\toutfile.write(x+'\\t'+seqs[x]+'\\n')\n\n\toutfile.close()\n\ndef csv_writer(filename, seqs):\n\toutfile = open(filename, 'w')\n\t\n\tfor x in seqs:\n\t\toutfile.write(x+','+seqs[x]+'\\n')\n\n\toutfile.close()\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Switch between either csv,tab or fasta formats')\n\tparser.add_argument('filename', help='Name of input file')\n\tparser.add_argument('input_type', help='Type of input file')\n\tparser.add_argument('output_type', help='Type of output file')\n\tparser.add_argument('output_name', help='Name of output file')\n\n\targs = parser.parse_args()\n\n\tinput_filename = args.filename\n\toutput_type = args.output_type\n\tinput_type = args.input_type\n\toutput_name = args.output_name\n\n\tif input_type == 'fasta':\n\t\tseqs = fasta_reader(input_filename)\n\telif input_type == 'csv':\n\t\tseqs = csv_reader(input_filename)\n\telif input_type == 'tab':\n\t\tseqs = tab_reader(input_filename)\n\telse:\n\t\traise ValueError('Input type must be csv, tab or fasta')\n\n\tif output_type == 'fasta':\n\t\tfasta_writer(output_name, seqs)\n\telif output_type == 'csv':\n\t\tcsv_writer(output_name, seqs)\n\telif output_type == 'tab':\n\t\ttab_writer(output_name, seqs)\n\telse:\n\t\traise ValueError('Output type must be csv, tab or fasta')\n\n","sub_path":"format_converter.py","file_name":"format_converter.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"218544491","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 22 17:19:30 2019\r\nThis workshop largely focuses on classification techniques that are used in sentiment analysis. \r\nRemember sentiment analysis consists of 3 main steps.\r\n The first is the sourcing and collection of textual data. \r\n The second step is the training and classification of the textual data to obtain the sentiment scores. \r\n The 3rd step which will be covered on Day 4 is the visualisation and communication of the sentiment scores.\r\n@author: isswan\r\n\"\"\"\r\n###############################\r\n#Data Prepration \r\n# Set your working directory\r\nfrom os import getcwd, chdir\r\nimport pandas as pd\r\nimport pickle as pk\r\nfpath = getcwd()\r\nprint (fpath)\r\n# Change your path here\r\nchdir(fpath) \r\n\r\n\r\n#######################################################################\r\n#reading in another set of data\r\nposlines = open(fpath+\"\\\\Data\\\\rt-polarity-utf8.pos\",'r').read().splitlines()\r\nneglines = open(fpath+\"\\\\Data\\\\rt-polarity-utf8.neg\",'r').read().splitlines()\r\n\r\nprint (\"No of positive reviews \" + str(len(poslines)))\r\nprint (\"No of negative reviews \" + str(len(neglines)))\r\n\r\n\r\n# There is a total of 5331 positives and negatives\r\n# Lets take the first N as training set, and leave the rest for validation\r\nN=4800\r\nposlinesTrain = poslines[:N]\r\nneglinesTrain = neglines[:N]\r\nposlinesTest = poslines[N:]\r\nneglinesTest = neglines[N:]\r\n\r\n# Create the train set and the test set by attaching labels to text to form a \r\n# list of tupes (sentence, label). Labels are 1 for positive, -1 for negatives\r\ntrainset = [(x,1) for x in poslinesTrain] + [(x,-1) for x in neglinesTrain]\r\ntestset = [(x,1) for x in poslinesTest] + [(x,-1) for x in neglinesTest]\r\n#######################################################################\r\n#build a Lexicon Classifier (I)\r\n# We use it for a Lexicon classifier which is essentially count good and bad words from the reviews. 
\r\n# The model learns the frequency of each word appearing in the Pos/Neg groups\r\n\r\nposwords = {} # this dictionary will store counts for every word in positives\r\nnegwords = {} # and negatives\r\nfor line, label in trainset: # for every sentence and its label\r\n\tfor word in line.split(): # for every word in the sentence\r\n\t\t# increment the counts for this word based on the label\r\n # Finally we have the dictionary:\r\n # for each word, how many times it appears in the Pos/Neg group \r\n\t\tif label == 1: poswords[word] = poswords.get(word,0) + 1\r\n\t\telse: negwords[word] = negwords.get(word, 0) + 1\r\n \r\nprint (\"No of negative words in trainset \" + str(len(negwords)))\r\nprint (\"No of positive words in trainset \" + str(len(poswords)))\r\n\r\nposFreqs = {k: poswords[k] for k in list(poswords)[:5]}\r\nprint (posFreqs)\r\nnegFreqs = {k: negwords[k] for k in list(negwords)[:5]}\r\nprint (negFreqs)\r\n\r\n#######################################################################\r\n# The classifier makes its judgement based on posFreq(w)/(posFreq(w)+negFreq(w))\r\n# Apply the classifier to the test dataset\r\n\r\nwrong = 0 # will store the number of misclassifications\r\npred_list = []\r\nactual = []\r\nfor line, label in testset:\r\n\ttotpos, totneg = 0.0,0.0\r\n\tfor word in line.split():\r\n\t\t# Get the (+1 smooth'd) number of counts this word occurs in each class\r\n\t\t#smoothing is done in case this word isn't in train set, so that there\r\n\t\t# is no danger in dividing by 0 later when we do a/(a+b)\r\n\t\ta = poswords.get(word, 0.0) + 1.0\r\n\t\tb = negwords.get(word, 0.0) + 1.0\r\n\t\t#increment our score counter for each class, based on this word\r\n\t\ttotpos+=a/(a+b)\r\n\t\ttotneg+=b/(a+b)\r\n\t#create prediction based on counter values\r\n\tprediction=1\r\n\tif totneg>totpos: prediction = -1\r\n\tpred_list.append(prediction)\r\n\tactual.append(label)\r\n\tif prediction!=label:\r\n\t\twrong+=1\r\n\t\tprint ('ERROR: %s posscore = %.2f negscore=%.2f' % (line, totpos, totneg))\r\n\telse:\r\n\t\tpass\r\n #print 'CORRECT: %s posscore=%.2f negscore=%.2f' % (line, totpos, totneg)\r\n \r\nprint ('error rate is %f' % (1.0*wrong/len(testset),))\r\nprint ('No of wrongs ' + str(wrong))\r\nprint ('Size of test set ' + str(len(testset)))\r\n\r\n\"\"\"\r\nerror rate is 0.209981\r\nNo of wrongs 223\r\nSize of test set 1062\r\n\r\nNote down the error rate. The 'correct' rate is already quite high for a simple lexicon classifier. \r\nRead through some of the wrongly classified ones: can you understand why they were misclassified?\r\n What do you think a high score should be? \r\nERROR: it's a big idea , but the film itself is small and shriveled . posscore = 7.01 negscore=6.99\r\nERROR: the story alone could force you to scratch a hole in your head . posscore = 7.34 negscore=6.66\r\n\"\"\"\r\n######################################################################\r\n#NLTK Classifier\r\n#In this section, we build another lexicon classifier. \r\n#This time round, we use as the training set a labelled corpus \"Sentiwordnet.txt\" from NLTK. \r\n#The NLTK is a popular open-source text mining tool in Python. 
\r\n#Other popular tools include the Stanford NLP and OpenNLP.\r\nimport nltk\r\n#nltk.download('wordnet')\r\n#nltk.download('sentiwordnet')\r\n#nltk.download('punkt')\r\n\r\nfrom nltk.corpus import sentiwordnet as swn\r\nfrom numpy import mean # mean lives in numpy; older scipy versions merely re-exported it\r\nfrom nltk.tokenize import word_tokenize as wt\r\n\r\n\r\nsynsets = swn.senti_synsets('fast')\r\n \r\nfor syn in swn.senti_synsets('fast'):\r\n print (str(syn))\r\n\r\n## Calculate the overall polarity of a given word \r\n ## Mean: average the polarity of all the synonyms \r\n ## Max: select the maximum polarity\r\ndef get_pos_neg_score(word, metric):\r\n posi=[0.0]\r\n negi=[0.0]\r\n synsets = swn.senti_synsets(word)\r\n for syn in synsets:\r\n posi.append(syn.pos_score())\r\n negi.append(syn.neg_score())\r\n if metric == \"Mean\":\r\n pos = mean(posi)\r\n neg = mean(negi)\r\n else:\r\n pos = max(posi)\r\n neg = max(negi)\r\n return pos, neg\r\n\r\nget_pos_neg_score('fast','Mean')\r\n\r\n##############################################################################################\r\n# using NLTK's SentiWordNet instead of a training corpus to build the classifier: posScore(sent) vs negScore(sent)\r\n\r\npred = []\r\nactual = []\r\nfor line, label in testset:\r\n pos_rev = neg_rev = 0 \r\n for word in wt(line):\r\n pos, neg = get_pos_neg_score(word, \"Mean\")\r\n pos_rev+=pos\r\n neg_rev+=neg\r\n if pos_rev>neg_rev:\r\n lab=1\r\n else:\r\n lab=-1\r\n pred.append(lab)\r\n actual.append(label)\r\n \r\nactuals = pd.Series(actual)\r\npredicted = pd.Series(pred)\r\nprint (actuals)\r\nprint (predicted)\r\n\r\n# Confusion Matrix\r\ncm1=pd.crosstab(actuals, predicted, rownames=['Actuals'], colnames=['Predicted'], margins=True)\r\ncm1\r\n\r\n#precision score and recall scores\r\nfrom sklearn.metrics import precision_score, recall_score\r\n\r\n# Accuracy (79%) is lower than the first lexicon classifier. Why?\r\nmat1 = cm1.to_numpy()\r\naccuracy = float(mat1[0,0]+mat1[1,1])/mat1[2,2]\r\nprint('Accuracy: {0:0.3f}'.format(accuracy))\r\n\r\nprecision = precision_score(actuals, predicted)\r\nprint('Precision score: {0:0.3f}'.format(precision))\r\nrecall = recall_score(actuals, predicted)\r\nprint('Recall score: {0:0.3f}'.format(recall))\r\n\r\n#This workshop is based on some simple Lexicon classifiers. \r\n#Can you explain the pros and cons of this simple method now?\r\n\r\n\r\n","sub_path":"Lectures/Day 2/workshop/selected-downloads (26)/LexiconBasedClassifiers.py","file_name":"LexiconBasedClassifiers.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
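The per-word vote in the first classifier is the add-one-smoothed ratio a/(a+b). For a word seen 9 times in positive training reviews and once in negative ones, the positive vote is (9+1)/((9+1)+(1+1)) = 10/12, about 0.83, while a word never seen in training contributes a neutral 0.5 to both totals:

    a = 9 + 1.0           # positive count, +1 smoothing
    b = 1 + 1.0           # negative count, +1 smoothing
    print(a / (a + b))    # 0.8333...
    print(1.0 / 2.0)      # unseen word: 0.5 to each side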
+{"seq_id":"638400479","text":"#! python3\n# openURLs.py - opens several preset URLs by examining a text file of URLs\n# Usage - openURLs.py [presetName]\n # -l, --list\n # -h, --help \n # --setdefault [presetName]\n\n\n\n# Functions\n # open a list of presets based on group/\"playlist\" name\n # opens a text file in the default directory for presets \n # save a preset of URL links, with a name\n # option to assign a preset as the default\n # make it print the list and ask which one to save \n # list all preset names and their links\n\nimport webbrowser, sys, os, shelve\n\n# make a directory where the script is located to hold preset text files \nos.makedirs(os.path.join(sys.path[0], \"URL Presets\"), exist_ok=True)\nos.chdir(os.path.join(sys.path[0], \"URL Presets\"))\n\nflags = [\"-l\", \"--list\", \"--setdefault\", \"-h\", \"--help\"]\nallPresets = []\n\ndef openPresets(preset):\n if not os.path.isfile(preset + \".txt\"):\n print(\"Preset not found. Use --setdefault to reconfigure.\")\n os.system(\"pause\") \n sys.exit(1)\n listOfURLs = open(preset + \".txt\")\n for URL in listOfURLs.readlines():\n try: \n webbrowser.open(URL)\n except Exception as exc:\n print(\"Failed to open \" + URL + \"\\nError: \" + str(exc)) \n\ndef listPresets():\n if len(allPresets) == 0:\n print(\"No presets found in \" + os.getcwd())\n os.system(\"pause\")\n sys.exit(1)\n print(\"List of Available Presets:\")\n for i in range(len(allPresets)):\n print(str(i + 1) + \". \" + allPresets[i])\n\ndef askToSelectPreset():\n while(True):\n print(\"\\nSelect a preset or type 'q' to quit: \", end = \"\")\n choice = input()\n if choice == 'q':\n sys.exit() \n if choice.isdigit():\n choice = int(choice) - 1\n if choice < 0 or choice >= len(allPresets):\n print(\"Please type a valid preset name or number.\")\n continue\n return allPresets[choice]\n else:\n if choice not in allPresets:\n print(\"Please type a valid preset name or number.\")\n continue\n else:\n return choice\n\ndef setDefaultPreset():\n listPresets()\n default = askToSelectPreset()\n shelf = shelve.open(\"default_preset\")\n shelf['default'] = default\n print(\"Set \" + default + \" as the default preset.\\n\")\n shelf.close()\n os.system(\"pause\")\n sys.exit()\n \n\ndef scanForPresets():\n for file in os.listdir(\".\"):\n if file.endswith(\".txt\"):\n preset = file.rstrip(\".txt\")\n allPresets.append(preset)\n\n \ndef printUsage():\n print(\"\"\"Usage - openURLs.py [presetName]\nOpens multiple URLs by reading a text file.\nText files consist of URLs separated by a new line and are\nfound in the 'URL Presets' directory. \n\n--setdefault (presetName)\n-l, --list\n-h, --help\n\"\"\")\n\n# Find all presets \nscanForPresets()\n\n# load default preset, if available \nif len(sys.argv) == 1:\n if os.path.isfile(\"default_preset.bak\") and os.path.isfile(\"default_preset.dat\") and os.path.isfile(\"default_preset.dir\"):\n shelf = shelve.open(\"default_preset\")\n preset = shelf[\"default\"]\n openPresets(preset)\n shelf.close()\n else:\n printUsage()\n print(\"\\nNo default preset currently found. Add default preset? 
(Y/N) \", end =\"\")\n if input().lower() == 'y':\n setDefaultPreset()\n \n sys.exit()\n\n#--help or invalid response \nif sys.argv[1] not in (flags + allPresets) or sys.argv[1] == \"-h\" or sys.argv[1] == \"--help\":\n printUsage()\n os.system(\"pause\")\n sys.exit(1)\n\nif sys.argv[1] in allPresets:\n openPresets(sys.argv[1])\n\n# Set the default preset\nif sys.argv[1] == '--setdefault':\n setDefaultPreset()\n \n \n# List all presets and prompt which one to open \nif sys.argv[1] == \"-l\" or sys.argv[1] == \"--list\":\n listPresets()\n preset = askToSelectPreset()\n openPresets(preset)\n","sub_path":"URL Presets/openURLs.py","file_name":"openURLs.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"84489430","text":"# http://judge.mipt.ru/mipt_cs_on_python3_2016/labs/lab5.html\r\n\r\n# Упражнение №2. Задачи посложнее\r\n# =================================\r\n\r\n# Переставьте соседние элементы в списке. Задача решается в три строки.\r\ns = \"1 2\"\r\ns = \"1\"\r\ns = \"1 2 3 4 5 6 7\"\r\ns = \"\"\r\ns = \"1 2 3 4 5 6\"\r\nL = s.split()\r\n\r\nlast = len(L) - 1 if len(L) % 2 else len(L)\r\nL[:last:2], L[1:last:2] = L[1:last:2], L[:last:2]\r\nprint(*L)\r\n\r\n\r\n# Выполните циклический сдвиг элементов списка вправо.\r\ns = \"1 2 3 4 5\"\r\nL = s.split()\r\nprint(*(L[-1:] + L[:-1]))\r\n\r\n# Выведите элементы, которые встречаются в списке только один раз.\r\n# Элементы нужно выводить в том порядке, в котором они встречаются в списке.\r\ns = \"1 2 2 3 3 3 5 6 6\"\r\nL = s.split()\r\nprint(*[el for el in L if L.count(el) == 1])\r\n\r\n# Определите, какое число в этом списке встречается чаще всего. Если таких\r\n# чисел несколько, выведите любое из них.\r\ns = \"1 2 2 3 3 4 4 5 6 6 3 4\"\r\nL = s.split()\r\nprint(max([L.count(el) for el in L]))\r\n","sub_path":"4_arithmetics_and_lists/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"80492867","text":"## This file is part of Invenio.\n## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n__revision__ = \"$Id$\"\n\n ## Description: function Send_APP_Mail\n ## This function send an email informing the original\n ## submitter of a document that the referee has approved/\n ## rejected the document. The email is also sent to the\n ## referee for checking.\n ## Author: T.Baron\n ## PARAMETERS:\n ## newrnin: name of the file containing the 2nd reference\n ## addressesAPP: email addresses to which the email will\n ## be sent (additionally to the author)\n ## categformatAPP: variable needed to derive the addresses\n ## mentioned above\n\nimport os\nimport re\n\nfrom invenio.config import CFG_SITE_NAME, \\\n CFG_SITE_URL, \\\n CFG_SITE_SUPPORT_EMAIL, \\\n CFG_CERN_SITE, \\\n CFG_SITE_RECORD\nfrom invenio.access_control_admin import acc_get_role_users, acc_get_role_id\nfrom invenio.dbquery import run_sql\nfrom invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN\nfrom invenio.mailutils import send_email\nfrom invenio.errorlib import register_exception\nfrom invenio.search_engine import print_record\nfrom invenio.websubmit_functions.JOBSUBMIT_Mail_Submitter import CFG_WEBSUBMIT_JOBS_SUPPORT_EMAIL, \\\n CFG_WEBSUBMIT_JOBS_FROMADDR, \\\n email_footer\n\nCFG_WEBSUBMIT_RECORD_OWNER_EMAIL = \"270__m\"\n\ndef JOBSUBMIT_Send_APP_Mail(parameters, curdir, form, user_info=None):\n \"\"\"\n This function send an email informing the original submitter of a\n document that the referee has approved/ rejected the document.\n\n Parameters:\n\n * addressesAPP: email addresses of the people who will receive\n this email (comma separated list). this parameter may contain\n the string. 
\n * emailFile: Name of the file containing the email of the\n submitter of the document\n\n * newrnin: Name of the file containing the 2nd reference of the\n document (if any).\n\n * decision_file: Name of the file containing the decision of the\n document.\n\n * comments_file: Name of the file containing the comments of the\n document.\n\n * edsrn: Name of the file containing the reference of the\n document.\n \"\"\"\n global titlevalue,authorvalue,sysno,rn\n doctype = form['doctype']\n titlevalue = titlevalue.replace(\"\\n\",\" \")\n authorvalue = authorvalue.replace(\"\\n\",\"; \")\n # variables declaration\n categformat = parameters['categformatAPP']\n otheraddresses = parameters['addressesAPP']\n newrnpath = parameters['newrnin']\n ## Get the name of the decision file:\n try:\n decision_filename = parameters['decision_file']\n except KeyError:\n decision_filename = \"\"\n ## Get the name of the comments file:\n try:\n comments_filename = parameters['comments_file']\n except KeyError:\n comments_filename = \"\"\n\n ## Now try to read the comments from the comments_filename:\n if comments_filename in (None, \"\", \"NULL\"):\n ## We don't have a name for the comments file.\n ## For backward compatibility reasons, try to read the comments from\n ## a file called 'COM' in curdir:\n if os.path.exists(\"%s/COM\" % curdir):\n try:\n fh_comments = open(\"%s/COM\" % curdir)\n comment = fh_comments.read()\n fh_comments.close()\n except IOError:\n ## Unable to open the comments file\n exception_prefix = \"Error in WebSubmit function \" \\\n \"Send_APP_Mail. Tried to open \" \\\n \"comments file [%s/COM] but was \" \\\n \"unable to.\" % curdir\n register_exception(prefix=exception_prefix)\n comment = \"\"\n else:\n comment = comment.strip()\n else:\n comment = \"\"\n else:\n ## Try to read the comments from the comments file:\n if os.path.exists(\"%s/%s\" % (curdir, comments_filename)):\n try:\n fh_comments = open(\"%s/%s\" % (curdir, comments_filename))\n comment = fh_comments.read()\n fh_comments.close()\n except IOError:\n ## Oops, unable to open the comments file.\n comment = \"\"\n exception_prefix = \"Error in WebSubmit function \" \\\n \"Send_APP_Mail. Tried to open comments \" \\\n \"file [%s/%s] but was unable to.\" \\\n % (curdir, comments_filename)\n register_exception(prefix=exception_prefix)\n else:\n comment = comment.strip()\n else:\n comment = \"\"\n\n ## Now try to read the decision from the decision_filename:\n if decision_filename in (None, \"\", \"NULL\"):\n ## We don't have a name for the decision file.\n ## For backward compatibility reasons, try to read the decision from\n ## a file called 'decision' in curdir:\n if os.path.exists(\"%s/decision\" % curdir):\n try:\n fh_decision = open(\"%s/decision\" % curdir)\n decision = fh_decision.read()\n fh_decision.close()\n except IOError:\n ## Unable to open the decision file\n exception_prefix = \"Error in WebSubmit function \" \\\n \"Send_APP_Mail. 
Tried to open \" \\\n \"decision file [%s/decision] but was \" \\\n \"unable to.\" % curdir\n register_exception(prefix=exception_prefix)\n decision = \"\"\n else:\n decision = decision.strip()\n else:\n decision = \"\"\n else:\n ## Try to read the decision from the decision file:\n try:\n fh_decision = open(\"%s/%s\" % (curdir, decision_filename))\n decision = fh_decision.read()\n fh_decision.close()\n except IOError:\n ## Oops, unable to open the decision file.\n decision = \"\"\n exception_prefix = \"Error in WebSubmit function \" \\\n \"Send_APP_Mail. Tried to open decision \" \\\n \"file [%s/%s] but was unable to.\" \\\n % (curdir, decision_filename)\n register_exception(prefix=exception_prefix)\n else:\n decision = decision.strip()\n\n if os.path.exists(\"%s/%s\" % (curdir,newrnpath)):\n fp = open(\"%s/%s\" % (curdir,newrnpath))\n newrn = fp.read()\n fp.close()\n else:\n newrn = \"\"\n # Document name\n res = run_sql(\"SELECT ldocname FROM sbmDOCTYPE WHERE sdocname=%s\", (doctype,))\n docname = res[0][0]\n # retrieve category\n categformat = categformat.replace(\"\", \"([^-]*)\")\n m_categ_search = re.match(categformat, rn)\n if m_categ_search is not None:\n if len(m_categ_search.groups()) > 0:\n ## Found a match for the category of this document. Get it:\n category = m_categ_search.group(1)\n else:\n ## This document has no category.\n category = \"unknown\"\n else:\n category = \"unknown\"\n\n # Creation of the mail for the referee\n otheraddresses = otheraddresses.replace(\"\",category)\n addresses = \"\"\n if otheraddresses != \"\":\n addresses += otheraddresses\n else:\n addresses = re.sub(\",$\",\"\",addresses)\n\n ## Add the record's submitter(s) into the list of recipients:\n # The submitters email address is read from the file specified by 'emailFile'\n try:\n fp = open(\"%s/%s\" % (curdir,parameters['emailFile']))\n addresses += fp.read().replace (\"\\n\",\" \")\n fp.close()\n except:\n pass\n\n if decision == \"approve\":\n mailtitle = \"%s has been approved\" % rn\n mailbody = \"The submitted job listing with reference number %s has been fully approved.\" % (rn,)\n mailbody += \"\\n\\nIt will soon become visible in the INSPIRE-HEP Jobs database - <%s/Jobs>\" % (CFG_SITE_URL,)\n else:\n mailtitle = \"%s has been rejected\" % rn\n mailbody = \"The %s %s has been rejected.\" % (docname,rn)\n if rn != newrn and decision == \"approve\" and newrn != \"\":\n mailbody += \"\\n\\nIts new reference number is: %s\" % newrn\n mailbody += \"\\n\\nTitle: %s\\n\\nAuthor(s): %s\\n\\n\" % (titlevalue,authorvalue)\n if comment != \"\":\n mailbody += \"Comments from the referee:\\n%s\\n\" % comment\n # Send mail to referee\n send_email(fromaddr=CFG_WEBSUBMIT_JOBS_FROMADDR, toaddr=addresses, subject=mailtitle, \\\n content=mailbody, footer=email_footer(support_email=CFG_WEBSUBMIT_JOBS_SUPPORT_EMAIL), copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)\n return \"\"\n","sub_path":"websubmit/JOBSUBMIT_Send_APP_Mail.py","file_name":"JOBSUBMIT_Send_APP_Mail.py","file_ext":"py","file_size_in_byte":10305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"307481470","text":"from api.exceptions import OmdbApiResponseException\nfrom api.externalapihandler.external_api_handler import ExternalApiHandler\n\n\nclass OmdbApiHandler(ExternalApiHandler):\n\n def __init__(self, api_key):\n self.base_url = 'http://www.omdbapi.com/'\n self.poster_url = 'http://img.omdbapi.com/'\n super().__init__(api_key)\n\n def get_movie(self, title):\n parameters = {'apikey': self.api_key, 't': title}\n result = self._get(self.base_url, parameters)\n if result.get('Error'):\n raise OmdbApiResponseException(\n 'Error encountered when getting movie from OMDb: {}'.format(result.get('Error')))\n return result\n","sub_path":"api/externalapihandler/omdb_api_handler.py","file_name":"omdb_api_handler.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"384307558","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 09:09:07 2020\n\n@author: marie\n\"\"\"\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nimport setup.preprocessing as preprocessing\n\nimport time\nimport pandas as pd\nimport os.path\n\nfrom ast import literal_eval\n\n\n\n#%%\ndef set_model_parameters(model, typ, epochs, change_architecture, adaptive_pooling,\n X, Y, featuresize = None, data_dir = r\"/home/fr/fr_fr/fr_mw263\"):\n \n if (typ == 4):\n results = pd.read_csv(os.path.join(data_dir, f\"output/grid_search/grid_search_results_{model}4.csv\"))\n elif (typ == 2):\n if adaptive_pooling:\n results = pd.read_csv(os.path.join(data_dir, f\"output/grid_search/adaptive_pooling/grid_search_results_{model}2.csv\"))\n featuresize = results[\"featuresize\"]\n else:\n results = pd.read_csv(os.path.join(data_dir, f\"output/grid_search/grid_search_results_{model}2.csv\"))\n else:\n results = pd.read_csv(os.path.join(data_dir, f\"output/grid_search/grid_search_results_{model}2.csv\"))\n \n best_model = results.iloc[results['mae_val'].idxmin()].to_dict()\n \n if change_architecture == True:\n best_model[\"activation\"] == nn.Sigmoid\n \n hidden_dims = literal_eval(best_model[\"hiddensize\"])\n \n if featuresize is None:\n dimensions = [X.shape[1]]\n else:\n dimensions = []\n for hdim in hidden_dims:\n dimensions.append(hdim)\n dimensions.append(Y.shape[1])\n \n hparams = {\"batchsize\": int(best_model[\"batchsize\"]), \n \"epochs\":epochs, \n \"history\": int(best_model[\"history\"]), \n \"hiddensize\":hidden_dims,\n \"learningrate\":best_model[\"learningrate\"]}\n \n model_design = {\"dimensions\": dimensions,\n \"activation\": eval(best_model[\"activation\"][8:-2]),\n \"featuresize\":featuresize}\n \n return hparams, model_design\n\n \n#%% Train the model with hyperparameters selected after random grid search:\n \ndef train_network(model,typ, site, epochs, q, adaptive_pooling, dropout_prob = 0.0, change_architecture = False, \n traindata_perc = None, featuresize = None, \n save = True, data_dir = r\"/home/fr/fr_fr/fr_mw263\"):\n \n \"\"\"\n Takes the best found model parameters and trains a MLP with it.\n \n Args:\n X, Y (numpy array): Feature and Target data. \\n\n model_params (dict): dictionary containing all required model parameters. \\n\n epochs (int): epochs to train the model. \\n\n splits (int): How many splits will be used in the CV. \\n\n eval_set (numpy array): if provided, used for model evaluation. 
\n \n#%% Train the model with hyperparameters selected after random grid search:\n \ndef train_network(model,typ, site, epochs, q, adaptive_pooling, dropout_prob = 0.0, change_architecture = False, \n traindata_perc = None, featuresize = None, \n save = True, data_dir = r\"/home/fr/fr_fr/fr_mw263\"):\n \n \"\"\"\n Takes the best found model parameters and trains an MLP with them.\n \n Args:\n X, Y (numpy array): Feature and Target data. \\n\n model_params (dict): dictionary containing all required model parameters. \\n\n epochs (int): epochs to train the model. \\n\n splits (int): How many splits will be used in the CV. \\n\n eval_set (numpy array): if provided, used for model evaluation. Defaults to None.\n \n Returns:\n running_losses: epoch-wise training and validation errors (rmse and mae) per split.\\n\n y_tests: Target test set on which the model was evaluated per split.\\n\n y_preds: Network predictions per split.\\n\n performance (pd.DataFrame): Data frame of model parameters and final training and validation errors.\\n\n \"\"\"\n X, Y = preprocessing.get_splits(sites = [site],\n years = [2001,2002,2003,2004,2005,2006, 2007],\n datadir = os.path.join(data_dir, \"scripts/data\"), \n dataset = \"profound\",\n simulations = None)\n\n X_test, Y_test = preprocessing.get_splits(sites = [site],\n years = [2008],\n datadir = os.path.join(data_dir, \"scripts/data\"), \n dataset = \"profound\",\n simulations = None)\n \n eval_set = {\"X_test\":X_test, \"Y_test\":Y_test}\n \n hparams, model_design = set_model_parameters(model, typ, epochs, change_architecture, adaptive_pooling, X, Y)\n \n start = time.time()\n \n data_dir = os.path.join(data_dir, f\"output/models/{model}{typ}\")\n \n if adaptive_pooling:\n data_dir = os.path.join(data_dir, r\"adaptive_pooling\")\n\n if dropout_prob == 0.0:\n data_dir = os.path.join(data_dir, r\"nodropout\")\n else:\n data_dir = os.path.join(data_dir, r\"dropout\")\n \n if traindata_perc is not None:\n data_dir = os.path.join(data_dir, f\"data{traindata_perc}perc\")\n \n if change_architecture:\n data_dir = os.path.join(data_dir, \"sigmoidActivation\")\n \n dev = __import__(f\"setup.dev_{model}\", fromlist=[\"selected\"])\n \n running_losses,performance, y_tests, y_preds = dev.train_model_CV(hparams, model_design, \n X, Y, \n eval_set, dropout_prob,\n data_dir, save)\n end = time.time()\n \n # performance returns: rmse_train, rmse_test, mae_train, mae_test in this order.\n performance = np.mean(np.array(performance), axis=0)\n rets = [(end-start), \n hparams[\"hiddensize\"], hparams[\"batchsize\"], hparams[\"learningrate\"], hparams[\"history\"], model_design[\"activation\"], \n performance[0], performance[1], performance[2], performance[3]]\n results = pd.DataFrame([rets], \n columns=[\"execution_time\", \"hiddensize\", \"batchsize\", \"learningrate\", \"history\", \"activation\", \"rmse_train\", \"rmse_val\", \"mae_train\", \"mae_val\"])\n results.to_csv(os.path.join(data_dir, r\"selected_results.csv\"), index = False)\n \n # Save: Running losses, ytests and ypreds.\n np.save(os.path.join(data_dir, \"running_losses.npy\"), running_losses)\n np.save(os.path.join(data_dir, \"y_tests.npy\"), y_tests)\n np.save(os.path.join(data_dir, \"y_preds.npy\"), y_preds)\n \n #return(running_losses, y_tests, y_preds)\n ","sub_path":"python/setup/wrapper_training.py","file_name":"wrapper_training.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"425927349","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser, UserManager as AbstractUserManager\nfrom django.conf import settings\nfrom django.core.signing import Signer\nfrom django.template import engines, Context\nfrom django.urls import reverse\nfrom django.db.models import Count\n\nsigner = Signer()\ndt_engine = engines['django'].engine\n\n# Данная sмодель заменит модель пользователя User, используемую по умолчанию.\n# Данная замена должна быть отражена в настройках проекта: AUTH_USER_MODEL = 'user.models.AdvUser'.\n# Замен производится с целью расширения стандартной модели с помощью дополнительных методов и атрибутов.\n'''\nclass AdvUserManager(AbstractUserManager):\n #pass\n\n def normalize_email(self, email):\n if email.strip() == '':\n return None\n return email.lower()\n'''\nclass AdvUser(AbstractUser):\n #objects = AdvUserManager()\n\n class Meta(AbstractUser.Meta):\n verbose_name = 'Пользователь'\n verbose_name_plural = 'Пользователи (расширенная модель)'\n ordering = ['-date_joined']\n unique_together = ['email']\n\n def __str__(self):\n return self.username\n\n def confirm(self):\n self.is_active = True\n self.save()\n\n def get_email_context(self):\n host = settings.HOST_NAME\n sign = signer.sign(self.username)\n link = host + reverse('user:registration_confirmed', kwargs={'sign': sign})\n return Context({'confirmation_link': link})\n\n def send_confirmation_email(self):\n context = self.get_email_context()\n text_body = dt_engine.get_template('emails/registration_confirmation.txt').render(context=context)\n html_body = dt_engine.get_template('emails/registration_confirmation.html').render(context=context)\n self.email_user(subject='Mailer: Подтверждение регистрации', message=text_body, html_message=html_body)\n","sub_path":"django/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"291739368","text":"import unittest\r\nfrom main import upload_file\r\ntoken = input(\"Введите токен\")\r\nclass TestFails(unittest.TestCase):\r\n def test_wrong_upload_file(self):\r\n import requests\r\n upload_file(\"Fight\", token)\r\n response = requests.get(\"https://cloud-api.yandex.net/v1/disk/resources/files\", params={\"limit\": 1000},\r\n headers={\"Authorization\": token})\r\n status_code = response.status_code\r\n self.assertNotEqual(status_code, 200)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n\r\n\r\n","sub_path":"TestFails.py","file_name":"TestFails.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"353694067","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 14 16:42:06 2018\n\n@author: AshwinAmbal\n\nDescription: The code below is used to extract required data from the file \ndevset_textTermsPerPOI.txt and write the required information in a database type\nformat into a csv file. The column header in the written file would signify the\nuser id's of the various users and the row headers would be annotations.\nEg:\n [Annotations LocID1 LocID2 LocID3]\n [\"wicked\" ID1 ID2 ID3]\n [...... .... .... ...]\n\"\"\"\n\nimport csv\n\n# Opening file to read from\nfile1 = open(\"C:\\\\MWDB Project\\\\devset\\\\desctxt\\\\devset_textTermsPerPOI.txt\",\"r\", encoding=\"utf8\")\nlist_of_lines = file1.readlines()\nids = list()\n\n#Reading all image id's into 'ids'\nfor row in list_of_lines:\n loc_id = \"\"\n words = row.split()\n for word in words:\n if '\"' not in word:\n loc_id += word + \" \"\n else:\n loc_id = \"_\".join(loc_id.split())\n break\n ids.append(loc_id)\n\n# Converting existing file to one in which the first set of words before the \n# annotations are replaced with _ between those words for uniform access \n# devset_textTermsPerPOIEdited.csv ===> devset_textTermsPerPOI.csv\nnew_lines = list()\nfor i, row in enumerate(list_of_lines):\n w = 0\n words = row.split()\n new_line = list()\n for word in words:\n if '\"' in word:\n break\n w += 1\n new_line.append(ids[i])\n for j in range(w, len(words)):\n new_line.append(words[j])\n new_lines.append(new_line)\n\n\nwith open(\"C:\\\\MWDB Project\\\\Code\\\\CSV\\\\Task_3\\\\devset_textTermsPerPOIEdited.csv\", 'w', encoding = 'utf8', newline='') as outcsv: \n #configure writer to write standard csv file\n writer = csv.writer(outcsv, delimiter = ',', quotechar = \"'\")\n for item in new_lines:\n #Write item to outcsv\n writer.writerow(item)\n\nlist_of_lines = list()\ncsvfile = open(\"C:\\\\MWDB Project\\\\Code\\\\CSV\\\\Task_3\\\\devset_textTermsPerPOIEdited.csv\",\"r\", encoding=\"utf8\")\nreader = csv.reader(csvfile, delimiter=',', quotechar = \"'\") \nfor row in reader:\n list_of_lines.append(row)\n\n# Reading all annotations into 'annotation'\nannotation = list()\nfor line in list_of_lines:\n for i in range(1, len(line), 4):\n line[i] = line[i].replace(\"\\'\", \" \")\n line[i] = line[i].replace('\"', '')\n annotation.append(line[i])\n\nannotation = list(set(annotation))\n\n# Making a dictionary of annotation mapping to the index in which it occurs in the\n# list 'annotation'\ndict_annot = dict()\nfor i, annot in enumerate(annotation):\n dict_annot[annot] = i\n\n# Reading line by line from the file and writing the UserID and their corresponding\n# tf, df and tf-idf values as rows and finally taking a transpose of the matrix\n# formed to get the required database schema mentioned above.\nannot_values_tf = [[\"Annotations\"] + annotation]\nannot_values_df = [[\"Annotations\"] + annotation]\nannot_values_idf = [[\"Annotations\"] + annotation]\nfor line in list_of_lines:\n flag = 0\n values_tf = [0] * len(annotation)\n values_df = [0] * len(annotation)\n values_idf = [0] * len(annotation)\n for i in range(1, len(line), 4):\n line[i] = line[i].replace(\"\\'\", \" \")\n line[i] = line[i].replace('\"', '')\n values_tf[dict_annot[line[i]]] = (int(line[i + 1]))\n values_df[dict_annot[line[i]]] = (int(line[i + 2]))\n values_idf[dict_annot[line[i]]] = (float(line[i + 3]))\n annot_values_tf.append([line[0]] + values_tf)\n annot_values_df.append([line[0]] + values_df)\n annot_values_idf.append([line[0]] + 
\nfinal_annot_tf = list(map(list, zip(*annot_values_tf)))\nfinal_annot_df = list(map(list, zip(*annot_values_df)))\nfinal_annot_idf = list(map(list, zip(*annot_values_idf)))\n\n# Writing the schema into csv files for future access\nwith open(\"C:\\\\MWDB Project\\\\Code\\\\CSV\\\\Task_3\\\\TF_Loc.csv\", 'w', encoding = 'utf8', newline='') as outcsv: \n #configure writer to write standard csv file\n writer = csv.writer(outcsv, delimiter=',')\n for item in final_annot_tf:\n #Write item to outcsv\n writer.writerow(item)\n \nwith open(\"C:\\\\MWDB Project\\\\Code\\\\CSV\\\\Task_3\\\\DF_Loc.csv\", 'w', encoding = 'utf8', newline='') as outcsv: \n #configure writer to write standard csv file\n writer = csv.writer(outcsv, delimiter=',')\n for item in final_annot_df:\n #Write item to outcsv\n writer.writerow(item)\n \nwith open(\"C:\\\\MWDB Project\\\\Code\\\\CSV\\\\Task_3\\\\TF_IDF_Loc.csv\", 'w', encoding = 'utf8', newline='') as outcsv: \n #configure writer to write standard csv file\n writer = csv.writer(outcsv, delimiter=',')\n for item in final_annot_idf:\n #Write item to outcsv\n writer.writerow(item)","sub_path":"Code/Tasks/CSV_Writer_DB_Locations.py","file_name":"CSV_Writer_DB_Locations.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"348632524","text":"from django.shortcuts import render, redirect, reverse\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework.status import (\n HTTP_400_BAD_REQUEST,\n HTTP_404_NOT_FOUND,\n HTTP_200_OK,\n HTTP_403_FORBIDDEN,\n HTTP_500_INTERNAL_SERVER_ERROR,\n HTTP_401_UNAUTHORIZED,\n HTTP_201_CREATED\n)\nfrom rest_framework import generics\nfrom core.models import *\nfrom customer.api.serializers import *\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport datetime\n# Create your views here.\n\ntoday = datetime.date.today()\n\n@api_view([\"POST\"])\ndef addToCart(request):\n return Response({\"response\": \"Rating successfully\"}, status=200)\n #if request.method == 'POST':\n # cart = Cart()\n # print('hiiiiiiiiiiiiiiiiii')\n # cart.qr_code = QRCode.objects.get(qr_code_id=\"hjws5tbp\")\n # cart.menu_item = MenuItem.objects.get(id=request.POST['menu'])\n # cart.save()\n # return redirect(reverse('customer:index'))\n \nclass myCart(APIView):\n def post(self, request):\n if request.data:\n serializer = AddToCartSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n \n try:\n qr_code = QRCode.objects.get(qr_code_id=request.data['qr_code'])\n menuitem = MenuItem.objects.get(id=request.data['menuitem'])\n if Cart.objects.filter(qr_code = qr_code, menu_item = menuitem, is_active=True).exists():\n cart = Cart.objects.get(qr_code=qr_code, menu_item = menuitem, is_active=True)\n qty = cart.quantity\n cart.quantity = qty + int(request.data['quantity'])\n cart.date_updated = today\n cart.save()\n \n else:\n serializer.save(qr_code=qr_code, menu_item=menuitem)\n \n return Response({'status':HTTP_201_CREATED,\"data\":[],'message':'Added to bag'})\n except:\n return Response({'status':HTTP_400_BAD_REQUEST,'data':[],\"message\":\"Incorrect QR Code or Menu\"})\n else:\n return Response({'status':HTTP_500_INTERNAL_SERVER_ERROR,'data':[],\"message\":\"Something went wrong. Please try again later\"})\n\n def get(self, request, qrcode_id=None):\n if qrcode_id == None:\n return Response({'status':HTTP_403_FORBIDDEN,'data':[],\"message\": \"QR Code id not present\"})\n\n elif len(qrcode_id) <= 0:\n return Response({'status':HTTP_401_UNAUTHORIZED,'data':[],\"message\": \"Invalid QR Code\"})\n\n elif len(qrcode_id) >= 0:\n try:\n qr_object = QRCode.objects.get(qr_code_id=qrcode_id)\n cart = Cart.objects.filter(is_active=True,qr_code=qr_object).order_by('id')\n if cart.exists():\n serializer = ListCartSerializer(cart, many=True, context={\"request\": request})\n return Response({'status':HTTP_200_OK,\"data\": serializer.data,'message':'OK'})\n else:\n return Response({'status':HTTP_404_NOT_FOUND,'data':[],\"message\": \"No data\"}, )\n except:\n return Response({'status':HTTP_400_BAD_REQUEST,'data':[],\"message\":\"Incorrect QR Code \"})\n\n else:\n return Response({'status':HTTP_500_INTERNAL_SERVER_ERROR,'data':[],\"message\":\"Something went wrong. Please try again later\"})","sub_path":"emenudsp/customer/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"454998267","text":"from gibson.envs.env_modalities import CameraRobotEnv, BaseRobotEnv\nfrom gibson.envs.env_bases import *\nfrom gibson.core.physics.robot_locomotors import VirtualCamera\nfrom transforms3d import quaternions\nimport os\nimport numpy as np\nimport sys\nimport pybullet as p\nfrom gibson.core.physics.scene_stadium import SinglePlayerStadiumScene\nimport pybullet_data\nimport cv2\n\nCALC_OBSTACLE_PENALTY = 1\n\ntracking_camera = {\n 'yaw': 20,\n 'z_offset': 0.5,\n 'distance': 1,\n 'pitch': -20\n}\n\ntracking_camera_top = {\n 'yaw': 20, # demo: living room, stairs\n 'z_offset': 0.5,\n 'distance': 1,\n 'pitch': -20\n}\n\nclass VirtualCameraEnv(CameraRobotEnv):\n \"\"\"Specfy navigation reward\n \"\"\"\n def __init__(self, config, gpu_idx=0):\n #self.config = self.parse_config(config)\n self.config = config\n print(self.config[\"envname\"])\n assert(self.config[\"envname\"] == self.__class__.__name__ or self.config[\"envname\"] == \"TestEnv\")\n CameraRobotEnv.__init__(self, self.config, gpu_idx,\n scene_type=\"building\",\n tracking_camera=tracking_camera)\n\n self.robot_introduce(VirtualCamera(self.config, env=self))\n self.scene_introduce()\n self.gui = self.config[\"mode\"] == \"gui\"\n self.total_reward = 0\n self.total_frame = 0\n assert(self.config[\"envname\"] == self.__class__.__name__ or self.config[\"envname\"] == \"TestEnv\")\n\n def add_text(self, img):\n font = cv2.FONT_HERSHEY_SIMPLEX\n x,y,z = self.robot.body_xyz\n r,p,ya = self.robot.body_rpy\n cv2.putText(img, 'x:{0:.4f} y:{1:.4f} z:{2:.4f}'.format(x,y,z), (10, 20), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, 'ro:{0:.4f} pth:{1:.4f} ya:{2:.4f}'.format(r,p,ya), (10, 40), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, 'potential:{0:.4f}'.format(self.potential), (10, 60), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, 'fps:{0:.4f}'.format(self.fps), (10, 80), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n return img\n\n def _rewards(self, action=None, debugmode=False):\n return [0]\n\n def _termination(self, debugmode=False):\n return False\n\n def _reset(self):\n self.total_frame = 0\n self.total_reward = 0\n obs = CameraRobotEnv._reset(self)\n return obs\n","sub_path":"gibson/envs/camera_env.py","file_name":"camera_env.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"537071029","text":"from bs4 import BeautifulSoup as bs\nimport requests\nimport psycopg2\n\nglobus_page = requests.get(\n url = 'https://globus-online.kg/catalog/ovoshchi_frukty_orekhi_zelen/'\n).text\n\ndata = bs(globus_page, 'html.parser')\n\nconnection = psycopg2.connect(\n dbname = 'globus',\n user = 'postgres',\n password = '0709045683kgg',\n host = 'localhost'\n)\ncursor = connection.cursor()\n\n# create = '''CREATE TABLE vegetables (\n# user_id SERIAL PRIMARY KEY,\n# image_link VARCHAR(300) NOT NULL,\n# product_name VARCHAR(300) NOT NULL,\n# price VARCHAR(300) NOT NULL\n# );'''\n# cursor.execute(create)\n# cursor.connection.commit()\n\n\nview_showcase = data.find('div', attrs={ 'id': 'view-showcase'})\n\nall_card = view_showcase.find_all('div', class_= 'list-showcase__part-main')\n\nfor card in all_card:\n image = card.find('div', class_='list-showcase__picture').a.img.get('src')\n name_of_product = card.find('div', class_='list-showcase__name-rating').a.text\n price = card.find('div', class_='list-showcase__prices').find('span', class_='c-prices__value js-prices_pdv_ГЛОБУС Розничная').text\n print(name_of_product)\n\n\n a = f'''INSERT INTO vegetables (image_link, product_name, price)\n VALUES (\\'{image}\\', \\'{name_of_product}\\', \\'{price}\\');'''\n \n cursor.execute(a)\n connection.commit()","sub_path":"veget_parsers.py","file_name":"veget_parsers.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"74868589","text":"import requests\nfrom pyquery import PyQuery\nfrom mymodule import stats_word\n\n\n# 通过url获取文本并分析\ndef stats (url) :\n response = requests.get(url)\n # 提取微信公众号正文\n document = PyQuery (response.text)\n content = document ('#js_content').text() \n # 统计前100词频\n statList = stats_word.stats_text(content,100)\n statString = ''.join(str(i) for i in statList)\n\n return statString\n","sub_path":"19100101/Shawn/d12_training1.py","file_name":"d12_training1.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"477933273","text":"__author__ = \"Kamil Markowiak\"\n__copyright__ = \"Copyright 2018, 4imp Kamil Markowiak\"\n__license__ = \"GPL\"\n__email__ = \"kamil.markowiak@protonmail.com\"\n\nfrom variables import folder_tmp, wpisy, result_no_blank_lines, line_numbers\nimport re\n\n\ndef NumeryLiniiDoPodzialu():\n Wpisy_file = folder_tmp+wpisy\n Output_file = folder_tmp+result_no_blank_lines\n test1 = open(Wpisy_file, 'w')\n licznik = re.compile('^[0-9]+\\s?\\.\\s?[Ww]')\n with open(Output_file, 'r') as plik:\n numery_linii_do_podzialu = []\n with open(folder_tmp+line_numbers, 'w') as output:\n for line_i, line in enumerate(plik, 1):\n if licznik.search(line):\n output.write(\"%d\\n\" % line_i)\n test1.write(\"%s\\n\" % line)\n numery_linii_do_podzialu.append(line_i)\n return numery_linii_do_podzialu\n test1.close()\n","sub_path":"csv_record_separator.py","file_name":"csv_record_separator.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"232682488","text":"#!/usr/bin/env python\n# coding: utf-8\nimport pandas as pd \nimport numpy as np \nfrom scipy.special import comb \nimport math\nfrom operator import mul\nimport neal \nimport dimod \nimport random \nimport matplotlib.pyplot as plt \nimport timeit\nimport time\nfrom itertools import combinations\n\n\ndef calc_marginals(df): \n return np.array([ \n sum(df['Y']), \n np.dot(df['Y'], df['SEX']), \n np.dot(df['Y'], df['AOP']), \n ]) \n\n\ndef make_Hamiltonian(df):\n t_list = calc_marginals(df)\n \n N=len(df)\n dup_list = [(i, i) for i in range(N)]\n comb_list = [(i, j) for i in range(N) for j in range(i+1, N)]\n \n lin_Y = [1-2*t_list[0] for (i, _) in dup_list] #同じy同士\n quad_Y = [2 for (i, j) in comb_list] #異なるy同士\n num_Y = t_list[0]**2 #数字の二乗\n \n SEX = df['SEX'].iloc\n lin_SEX = [(SEX[i] - 2 * t_list[1]) * SEX[i] for (i, _) in dup_list]\n quad_SEX = [2*SEX[i] * SEX[j] for (i, j) in comb_list]\n num_SEX = t_list[1]**2\n \n AOP = df['AOP'].iloc\n lin_AOP = [(AOP[i] - 2 * t_list[2]) * AOP[i] for (i, _) in dup_list]\n quad_AOP = [2*AOP[i] * AOP[j] for (i, j) in comb_list]\n num_AOP = t_list[2]**2\n \n lin_list = [sum(lin) for lin in zip(lin_Y, lin_SEX, lin_AOP)]\n lin = {i: lin_list[i] for (i, _) in dup_list}\n \n #quad\n quad_values = [sum(quad) for quad in zip(quad_Y, quad_SEX, quad_AOP)]\n quad = {ij: quad_values[n] for (n, ij) in enumerate(comb_list)}\n \n #num\n num = num_Y + num_SEX + num_AOP\n \n return dimod.BinaryQuadraticModel(lin, quad, num, dimod.Vartype.BINARY)#dic, dic, num\n\n\ndef make_res_data(df, num_reads):\n sa_sampler = neal.sampler.SimulatedAnnealingSampler()\n initial_states = df['Y'].values.tolist()\n bqm = make_Hamiltonian(df)\n res = sa_sampler.sample(\n bqm, num_reads=num_reads, \n initial_states=initial_states, \n initial_states_generator='tile'\n ) \n return res\n\ndef find_valid_y(res): \n valid_y_list= [] \n valid_y_num= 0\n occurrence_list = []\n this_time_y_list = []\n for y_info in list(res.record):\n if y_info[1]==0.:\n this_time_y = list(y_info[0])\n if all([this_time_y != p for p in valid_y_list]): \n valid_y_list.append(this_time_y)\n valid_y_num += 1\n occurrence_list.append(1)\n this_time_y_list.append(this_time_y)\n else:\n i = this_time_y_list.index(this_time_y)\n occurrence_list[i] += 1\n return valid_y_list, valid_y_num, occurrence_list\n\n\ndef y_num_hist(df, valid_y_list, path):\n LI = list(df['LI'])\n hist_dic = {}\n for valid_y in valid_y_list:\n t1 = int(np.dot(LI, valid_y))\n if t1 in hist_dic.keys():\n hist_dic[t1] += 1\n else:\n hist_dic[t1] = 1\n \n \n x = [i for i in list(hist_dic.keys())]\n plt.xlabel('value of t1')\n plt.ylabel('number of samples')\n plt.bar(x, list(hist_dic.values()))\n plt.xticks(x, x)\n plt.savefig(path)\n plt.show()\n return hist_dic\n\ndef occurence_hist(occurrence_list, plot_path):\n x = [i for i in range(len(occurrence_list))]\n plt.xlabel('each sample')\n plt.ylabel('number of the occurrence')\n plt.bar(x, occurrence_list)\n ax = plt.gca()\n ax.axes.xaxis.set_visible(False)\n plt.savefig(plot_path)\n return plt.show()\n \ndef time_num_y(df, num_reads, path):\n time_list = []\n time_0 = time.time() \n sa_sampler = neal.sampler.SimulatedAnnealingSampler()\n \n initial_states = df['Y'].values.tolist()\n t_list = calc_marginals(df)\n \n valid_y_list= [] \n valid_y_num= 0\n bqm = make_Hamiltonian(df)\n res = sa_sampler.sample(\n bqm, num_reads=num_reads, \n initial_states=initial_states, \n initial_states_generator='tile'\n ) \n for y_info in list(res.record):\n if y_info[1]==0.:\n if 
len(valid_y_list)==0:\n valid_y_list.append(list(y_info[0]))\n valid_y_num += 1\n time_1 = time.time()\n elapsed_time = time_1 - time_0\n time_list.append(elapsed_time)\n \n elif all(list(y_info[0]) != p for p in valid_y_list): \n valid_y_list.append(list(y_info[0]))\n valid_y_num += 1\n time_1 = time.time()\n elapsed_time = time_1 - time_0\n time_list.append(elapsed_time)\n \n valid_y_num_list = [i for i in range(1, valid_y_num+1)]\n \n plt.xlabel('time')\n plt.ylabel('number of hits')\n plt.plot(time_list, valid_y_num_list)\n plt.savefig(path)\n plt.show()\n return valid_y_list, valid_y_num_list, time_list\n\n\ndef p_value_transition(df, num_reads, output_path) :\n sa_sampler = neal.sampler.SimulatedAnnealingSampler()\n \n initial_states = df['Y'].values.tolist()\n t_list = calc_marginals(df)\n t1 = int(np.dot(df['Y'], df['LI']))\n t1_y = 0\n p_dic = {}\n \n valid_y_num= 0\n valid_y_list = []\n bqm = make_Hamiltonian(df)\n res = sa_sampler.sample(\n bqm, num_reads=num_reads, \n initial_states=initial_states, \n initial_states_generator='tile'\n )\n \n \n for y_info in list(res.record):\n if y_info[1]==0.:\n valid_y = list(y_info[0]) \n if all(valid_y != p for p in valid_y_list):\n valid_y_num += 1\n valid_y_list.append(valid_y)\n if int(np.dot(valid_y, list(df['LI'])))==t1:\n t1_y += 1\n p_dic[valid_y_num] = t1_y/valid_y_num\n \n plt.xlabel('number of hits')\n plt.ylabel('p value')\n plt.plot(list(p_dic.keys()), list(p_dic.values()))\n plt.savefig(output_path)\n plt.show()\n \n return valid_y_num, valid_y_list, p_dic\n\n\ndef test_find_valid_y():\n df = pd.read_csv('../../input/ost20.csv', sep=',', index_col=0)\n # find_valid_y expects a sampler result, not the raw data frame: build it first\n res = make_res_data(df, num_reads=10)\n valid_y_list, valid_y_num, occurrence_list = find_valid_y(res)\n return valid_y_list, valid_y_num\n\n\n\ndef test_validity(candidate_list):\n df1 = pd.read_csv('../../input/ost20.csv', sep=',',index_col=0)\n df2 = pd.read_csv('../../input/ost20.csv', sep=',',index_col=0)\n new_y = np.array(candidate_list)\n df2['Y'] = new_y\n t_list1 = calc_marginals(df1)\n t_list2 = calc_marginals(df2)\n print(t_list1)\n print(t_list2)\n assert np.all(t_list1[[0,2]] == t_list2[[0,2]])\n\n","sub_path":"202011/scripts/functions/Neal_exact_test_functions.py","file_name":"Neal_exact_test_functions.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"327906164","text":"# MediaPipePose\nimport cv2\nimport mediapipe as mp\nimport math\nimport os\nimport requests\n\nclass Point:\n x = 0\n y = 0\n\n# 각도가 성공 기준에 들어갈 경우에는, len 값을 올려주어 이 len 값을 기준으로 score를 매길 예정.\nready_success_len = swing_success_len = finish_success_len = 0\n# 피드백 문구로 넣을 예정.\nready_feedback = swing_feedback = finish_feedback = \"\"\n# 측정하지 못했을 경우에는 1을 return하는 변수\nisErrorSwing = isErrorReady = isErrorFinish = 0 # true or false\n\n#p1, p2, p3 세 점을 입력받아 p2 중심으로 각도를 return하는 함수.\ndef Angle(P1, P2, P3):\n a = math.sqrt(math.pow(P1.x - P2.x, 2) + math.pow(P1.y - P2.y, 2))\n b = math.sqrt(math.pow(P2.x - P3.x, 2) + math.pow(P2.y - P3.y, 2))\n c = math.sqrt(math.pow(P1.x - P3.x, 2) + math.pow(P1.y - P3.y, 2))\n angle = math.acos((a * a + b * b - c * c) / (2 * a * b))\n return (angle * 180) / PI\n\n#p11, p12의 중심점, p2, p3의 각도를 return하는 함수.\ndef Angle2(P11, P12, P2, P3):\n P1 = Point()\n P1.x = (P11.x + P12.x) / 2\n P1.y = (P11.y + P12.y) / 2\n a = math.sqrt(math.pow(P1.x - P2.x, 2) + math.pow(P1.y - P2.y, 2))\n b = math.sqrt(math.pow(P2.x - P3.x, 2) + math.pow(P2.y - P3.y, 2))\n c = math.sqrt(math.pow(P1.x - P3.x, 2) + math.pow(P1.y - P3.y, 2))\n angle = math.acos((a * a + b * b - c * c) / (2 * a * b))\n return (angle * 180) / PI\n\n#피타코라스 활용 거리 return 함수\ndef Distance(P1,P2):\n distance = math.sqrt(pow(P1.x - P2.x, 2)+pow(P1.y - P2.y, 2))\n return distance\n\n# 각 각도에 대하여 ox 판별하는 함수. (eval_ready, eval_swing, eval_finish)\n# rs : 해당 각도\n# skeleton : 각 각도에 대한 한글명\n# minNum, maxNum : 스켈레톤에 최솟값, 최댓값\ndef eval_ready(rs, skeleton, minNum, maxNum):\n global ready_success_len, ready_feedback, isErrorReady\n if skeleton == \"keyCheck\":\n if rs < 110:\n isErrorReady = 1\n ready_success_len = 0\n return\n if minNum < rs < maxNum:\n ready_success_len += 1\n else:\n if ready_feedback == \"\":\n ready_feedback = skeleton\n else:\n ready_feedback = ready_feedback + \", \" + skeleton\n\ndef eval_swing(rs, skeleton, minNum, maxNum):\n global swing_success_len, swing_feedback, isErrorSwing\n if skeleton == \"keyCheck\":\n if rs < 110:\n isErrorSwing = 1\n swing_success_len = 0\n return\n if minNum < rs < maxNum:\n swing_success_len += 1\n else:\n if swing_feedback == \"\":\n swing_feedback = skeleton\n else:\n swing_feedback = swing_feedback + \", \" + skeleton\n\ndef eval_finish(rs, skeleton, minNum, maxNum):\n global finish_success_len, finish_feedback, isErrorFinish\n if skeleton == \"keyCheck\":\n if rs < 110:\n isErrorFinish = 1\n finish_success_len = 0\n return\n if minNum < rs < maxNum:\n finish_success_len += 1\n else:\n if finish_feedback == \"\":\n finish_feedback = skeleton\n else:\n finish_feedback = finish_feedback + \", \" + skeleton\n\nmp_drawing = mp.solutions.drawing_utils\nmp_pose = mp.solutions.pose\nPI = 3.1415926535\nnum = -1\n\n#프로 자세기반 기준 점수.\nBASESCORE_READY = [72.53, 69.72, 72.47, 85.34, 172.4, 173.95]\nBASESCORE_SWING = [156.0, 170.63, 169.8, 131.81, 138.6]\nBASESCORE_FINISH = [144.31, 162.99]\n\nDATA_DIR = os.getcwd() + \"/images\"\nIMAGE_FILES = os.listdir(DATA_DIR)\nIMAGE_FILES.sort()\nprint(IMAGE_FILES)\nlength1 = length4 = 0\nlength2 = length5 = 0\nlength3 = length6 = 0\n\nwith mp_pose.Pose(\n static_image_mode=True,\n model_complexity=2,\n min_detection_confidence=0.5) as pose:\n for idx, file in enumerate(IMAGE_FILES):\n if (file.find('drive') == -1): continue\n else: num += 1\n file = DATA_DIR + '/' + file\n image = cv2.imread(file)\n image_height, image_width, _ = image.shape\n\n # Convert the BGR image to RGB before processing.\n results = 
\nwith mp_pose.Pose(\n static_image_mode=True,\n model_complexity=2,\n min_detection_confidence=0.5) as pose:\n for idx, file in enumerate(IMAGE_FILES):\n if (file.find('drive') == -1): continue\n else: num += 1\n file = DATA_DIR + '/' + file\n image = cv2.imread(file)\n image_height, image_width, _ = image.shape\n\n # Convert the BGR image to RGB before processing.\n results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n if not results.pose_landmarks:\n if(file.find('first') != -1) : isErrorReady = 1\n elif(file.find('second') != -1) : isErrorSwing = 1\n elif(file.find('third') != -1) : isErrorFinish = 1\n continue\n annotated_image = image.copy()\n ll = [] # temporary list holding the pixel position of each landmark\n for id, lm in enumerate(results.pose_landmarks.landmark):\n h, w, c = image.shape\n p = Point()\n p.x = int(lm.x * w)\n p.y = int(lm.y * h)\n ll.append(p)\n cv2.putText(annotated_image, str(id), (p.x, p.y), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0), 1)\n\n # Draw pose landmarks on the image.\n mp_drawing.draw_landmarks(\n annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)\n cv2.imwrite('images_result/annotated_image' + str(num) + '.png', annotated_image)\n if (file.find('first') != -1):\n one = Angle2(ll[23], ll[24], ll[27], ll[28])\n two = Angle2(ll[23], ll[24], ll[28], ll[27])\n three = Angle(ll[12], ll[11], ll[13])\n four = Angle(ll[11], ll[12], ll[14])\n five = Angle(ll[11], ll[13], ll[15])\n six = Angle(ll[12], ll[14], ll[16])\n length1 = Distance(ll[0], ll[25])\n eval_ready(round(one, 2), \"왼발\", 65, 89)\n eval_ready(round(two, 2), \"오른발\", 61, 89)\n eval_ready(round(three, 2), \"왼쪽 어깨\", 50, 93)\n eval_ready(round(four, 2), \"오른쪽 어깨\", 60, 92)\n eval_ready(round(five, 2), \"왼쪽 팔꿈치\", 155, 180)\n eval_ready(round(six, 2), \"오른쪽 팔꿈치\", 160, 180)\n eval_ready(round(length1, 2), \"keyCheck\", 0, 0)\n\n if (file.find('second') != -1):\n ones = Angle(ll[12], ll[24], ll[26])\n twos = Angle(ll[24], ll[26], ll[28])\n threes = Angle(ll[23], ll[25], ll[27])\n fours = Angle(ll[11], ll[12], ll[14])\n fives = Angle(ll[12], ll[14], ll[16])\n length2 = Distance(ll[0], ll[25])\n eval_swing(round(ones, 2), \"오른쪽 허리\", 140, 180)\n eval_swing(round(twos, 2), \"오른쪽 무릎\", 167, 180)\n eval_swing(round(threes, 2), \"왼쪽 무릎\", 165, 180)\n eval_swing(round(fours, 2), \"오른쪽 어깨\", 110, 175)\n eval_swing(round(fives, 2), \"오른쪽 팔꿈치\", 70, 155)\n eval_swing(round(length2, 2), \"keyCheck\", 0, 0)\n\n if (file.find('third') != -1):\n onef = Angle(ll[24], ll[26], ll[28])\n twof = Angle(ll[12], ll[24], ll[26])\n length3 = Distance(ll[0], ll[25])\n eval_finish(round(onef, 2), \"오른쪽 무릎\", 125, 180)\n eval_finish(round(twof, 2), \"오른쪽 허리\", 155, 175)\n eval_finish(round(length3, 2), \"keyCheck\", 0, 0)\n\n rDict = {6: 'Perfect', 5: 'Good', 4: 'Good', 3: 'Notbad', 2: 'Fail', 1: 'Fail', 0: 'Fail'}\n sDict = {5: 'Perfect', 4: 'Good', 3: 'Good', 2: 'Notbad', 1: 'Fail', 0: 'Fail'}\n fDict = {2: 'Perfect', 1: 'Notbad', 0: 'Fail'}\n\n # print('This program applies to both iron and drive swings.')\n # print('---------- Ready pose ----------')\n # print('Result : ', rDict[ready_success_len])\n # print('Needs work : ', ready_feedback)\n # print(ready_success_len)\n # print('---------- Swing pose ----------')\n # print('Result : ', sDict[swing_success_len])\n # print('Needs work : ', swing_feedback)\n # print(swing_success_len)\n # print('---------- Finish pose ----------')\n # print('Result : ', fDict[finish_success_len])\n # print('Needs work : ', finish_feedback)\n # print(finish_success_len)\n\n result = {'ready_result': rDict[ready_success_len], 'ready_feedback': ready_feedback,\n 'swing_result': sDict[swing_success_len], 'swing_feedback': swing_feedback,\n 'finish_result': fDict[finish_success_len], 'finish_feedback': finish_feedback\n , 'isErrorReady': isErrorReady, 'isErrorSwing': isErrorSwing, 'isErrorFinish': isErrorFinish\n , 'length1': length1, 'length2': length2, 'length3': length3\n }\n resultText = {\"resultText\": result}\n # 
print(length1)\n # print(length2)\n # print(length3)\n print(result)\n res = requests.post('http://localhost:5000/api/feedbackdata', json=resultText)","sub_path":"getDriveImageScore.py","file_name":"getDriveImageScore.py","file_ext":"py","file_size_in_byte":8567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"51943270","text":"from django.shortcuts import render, HttpResponse, _get_queryset\nfrom django.shortcuts import render, redirect, get_object_or_404, render_to_response\nfrom .forms import WikiPostsModel, WikiPostsForm, RelatedModel, RelatedForm, AuthorModel, AuthorForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\nimport operator\nimport re\nfrom django.db.models import Q\n\n\n# function to show all entries at index\ndef index(request):\n allEntries = WikiPostsModel.objects.all()\n context = {\n \"allEntries\": allEntries\n }\n\n return render(request, 'Project2App/index.html', context)\n\n\n\n# function is redundanf...might delete...\ndef allEntries(request):\n entry_list = WikiPostsModel.objects.all()\n context = {'entry_list': entry_list}\n return render(request, 'Project2App/allEntries.html', context)\n\n# function to allow entering new wiki posts if user is logged in\n@login_required\ndef yourEntries(request):\n # If the current person is logged in, do the code below\n if request.user.is_authenticated:\n # This puts the logged in user entry into the variable author\n author= AuthorModel.objects.get(id=1)\n # This will grab all of the entries for the logged in user using the variable you just created\n allEntries = WikiPostsModel.objects.filter(foreignkeyToAuthor=author).order_by('id')\n # If the user is not logged in...\n else:\n # Make all Entries blank because you need this because both the index.html page is expecting a allEntries variable\n allEntries = \"\"\n context = {\"allEntries\": allEntries}\n return render(request, \"Project2App/yourEntries.html\", context)\n\n\n\n# This page will provide a form to add users\ndef createUser(request):\n # POST Request\n # If the form is being pushed to this function\n if request.method == \"POST\":\n print(request.method)\n # This will put all the user's information from the HTML page into this new form variable\n form = AuthorForm(request.POST)\n # Run all the validation on this form\n if form.is_valid():\n # Save the form's information in the model\n form.save()\n # Create a new Django User entry\n User.objects.create_user(request.POST[\"username\"], request.POST[\"password1\"], request.POST[\"password2\"])\n return redirect(\"index\")\n else:\n context = {\n \"errors\": form.errors,\n \"form\": form\n }\n return render(request, \"Project2App/createUser.html\", context)\n\n\n # GET Request\n else:\n # This will create a blank form using AuthorForm\n form = AuthorForm()\n context = {\"form\": form}\n return render(request, \"Project2App/createUser.html\", context)\n\n\n# Allows the edit of user information\ndef editUser(request, username):\n user = get_object_or_404(User, pk=username)\n edit_form = AuthorForm(request.POST or None, instance=AuthorModel)\n if edit_form.is_valid():\n edit_form.save()\n return redirect('index')\n\n return render(request, 'Project2App/createUser.html', {'userform': edit_form})\n\n# this function will delete a user\ndef deleteuser(request, username):\n user = get_object_or_404(User, pk=username)\n if request.method == 'POST':\n user.delete()\n return redirect('index')\n\n return render(request, 'Project2App/delete.html', {'selecteduser': user})\n\n\n# this function allow only logged in users to add new wiki posts\n@login_required()\ndef addNewEntry(request):\n # This will create a blank form using CollectorForm\n form = 
WikiPostsForm()\n context = {\n \"form\": form\n }\n return render(request, \"Project2App/addNewEntry.html\", context)\n\n\ndef gotNewEntryInfo(request):\n # This will put all the user's information from the HTML page into this new form variable\n form = WikiPostsForm(request.POST)\n # This puts the logged in user's entry into the variable author\n author = AuthorModel.objects.get(username=request.user)\n\n # create a wiki post entry from the logged in user\n if form.is_valid():\n # Create a new WikiPostsModel entry using the user's form information that was passed using the request.POST\n WikiPostsModel.objects.create(postTitle=request.POST[\"postTitle\"], postText=request.POST[\"postText\"],\n createdDateTime=request.POST[\"createdDateTime\"],\n lastUpdatedDateTime=request.POST[\"lastUpdatedDateTime\"],\n optionalPostImage=request.POST[\"optionalPostImage\"],\n foreignKeyToAuthor=author)\n return redirect(\"index\")\n else:\n context = {\"form\": form, \"errors\": form.errors}\n return render(request, \"Project2App/addNewEntry.html\", context)\n\n\n# this function allows the edit of wiki post entries\ndef editEntries(request, wikipostsID):\n # Grab an exact entry of the WikiPostsModel using the primary key\n editExistingWikiPost = get_object_or_404(WikiPostsModel, pk=wikipostsID)\n\n # Post method\n if request.method == \"POST\":\n # This will fill in the form with the user's information and use the exact WikiPostsModel with primary key\n form = WikiPostsForm(request.POST, instance=editExistingWikiPost)\n if form.is_valid():\n form.save()\n else:\n print(\"Form is not valid\")\n return redirect(\"index\")\n\n # Get method\n # Grabbed the exact wikipost form using the existing WikiPosts model using the primary key from earlier\n form = WikiPostsForm(instance=editExistingWikiPost)\n context = {\n \"form\": form,\n \"wikipostsID\": wikipostsID\n }\n return render(request, \"Project2App/editEntries.html\", context)\n\n# function to delete wiki posts\ndef deleteEntries(request, wikipostsID):\n deleteThisWikiPost = get_object_or_404(WikiPostsModel, pk=wikipostsID)\n deleteThisWikiPost.delete()\n return redirect(\"index\")\n\n# function to list related entries\ndef relatedEntries(request):\n related_list = RelatedModel.objects.all()\n context = {\n \"related_list\": related_list\n }\n return render(request, 'Project2App/index.html', context)\n\n# function to add a related entry\ndef addRelated(request):\n # This will create a blank form using RelatedForm\n form = RelatedForm()\n context = {\n \"form\": form\n }\n return render(request, \"Project2App/addRelated.html\", context)\n\n\ndef gotRelatedInfo(request):\n # Put all the user's info from the HTML page into a new form variable\n form = RelatedForm(request.POST)\n # create a related entry from the logged in user...\n if form.is_valid():\n # pass the logged-in user instance, not the User class itself\n RelatedModel.objects.create(itemTitle=request.POST[\"itemTitle\"], itemText=request.POST[\"itemText\"],\n createdDateTime=request.POST[\"createdDateTime\"],\n lastUpdatedDateTime=request.POST[\"lastUpdatedDateTime\"],\n optionalPostImage=request.POST[\"optionalPostImage\"], foreignKeyToUser=request.user)\n return redirect(\"index\")\n else:\n context = {\"form\": form, \"errors\": form.errors}\n return render(request, \"Project2App/addRelated.html\", context)\n\n\ndef editRelated(request, relatedID):\n # Grab an exact entry of the RelatedModel using the primary key\n editExistingRelated = 
get_object_or_404(RelatedModel, pk=relatedID)\n\n # Post method\n if request.method == \"POST\":\n # This will fill in the form with the user's information and use the exact RelatedModel with primary key\n form = RelatedForm(request.POST, instance=editExistingRelated)\n if form.is_valid():\n form.save()\n else:\n print(\"Form is not valid\")\n return redirect(\"index\")\n\n # Get method\n # Grabbed the exact related form using the existing Related model using the primary key from earlier\n form = RelatedForm(instance=editExistingRelated)\n context = {\n \"form\": form,\n \"relatedID\": relatedID\n }\n return render(request, \"Project2App/editRelated.html\", context)\n\n#function to delete a related entry\ndef deleteRelated(request, relatedID):\n deleteThisRelated = get_object_or_404(RelatedModel, pk=relatedID)\n deleteThisRelated.delete()\n return redirect(\"index\")\n\n#function to search wiki posts\ndef searchPosts(request):\n return HttpResponse(\"search here\")\n","sub_path":"Project2/Project2App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"366139849","text":"########################################################################################################\nimport hashlib, sys, os\n########################################################################################################\ndef hash(path, email_type):\n\tfilenames=os.listdir(path)\n\ttry:\n\t\tos.makedirs(\"/home/eward/Downloads/Dataset\")\n\texcept OSError:\n\t\tpass\n\tfor filename in filenames:\n\t\tcurrent_file=open(path+filename, \"r+\")\n\t\tparsed_file=open(\"/home/eward/Downloads/Dataset/\"+email_type+\"_\"+filename, \"w\")\n\t\tparsed_file.write(email_type+\"\\n\")\n\t\tfor line in current_file.readlines():\n\t\t\tz=line.split(\" \")\n\t\t\tz[0]=z[0].encode(\"UTF-8\")\n\t\t\thashed_word=hashlib.md5(z[0])\n\t\t\tparsed_file.write(hashed_word.hexdigest()+\" \"+str(z[1])+\" \\n\")\n\t\t\tdel hashed_word\n\t\tcurrent_file.close()\n\t\tparsed_file.close()\n\t\t\ndef main():\n\thash(sys.argv[1], sys.argv[2])\n\nif __name__==\"__main__\":\n\tmain()\n\n\n\t\n\t\t\n\t\n\t\n\n","sub_path":"hasher.py","file_name":"hasher.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"272178416","text":"from .global_config import *\n\n# store all variables from global config\ncontext_vars = vars()\n\n# folders\nnoisy_folder = 'noisy'\nnot_noisy_folder = 'notNoisy'\n\n# file or extensions\npost_image_name_separator = '___'\n\n# variables\nfeatures_choices_labels = ['static', 'svd_reconstruction', 'fast_ica_reconstruction', 'ipca_reconstruction']\n\n# parameters\nkeras_epochs = 30\nkeras_batch = 32\nval_dataset_size = 0.2\n\nkeras_img_size = (200, 200)","sub_path":"config/cnn_config.py","file_name":"cnn_config.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"31816763","text":"#!/usr/bin/env python\nfrom pylab import *\norigin = 'lower'\n#origin = 'upper'\n\n# The following controls only interior masking.\ntest_masking = False # There is a bug in filled contour masking with\n # interior masks.\n\nif test_masking:\n # Use a coarse grid so only a few masked points are needed.\n delta = 0.5\nelse:\n delta = 0.025\n\nx = y = arange(-3.0, 3.01, delta)\nX, Y = meshgrid(x, y)\nZ1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)\nZ2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)\nZ = 10 * (Z1 - Z2)\n\n# interior badmask doesn't work yet for filled contours\nif test_masking:\n badmask = zeros(shape(Z))\n\n badmask[5,5] = 1\n badmask[5,6] = 1\n Z[5,5] = 0\n Z[5,6] = 0\n\n badmask[0,0] = 1\n Z[0,0] = 0\n Z = ma.array(Z, mask=badmask)\n\nnr, nc = Z.shape\n\n# put NaNs in one corner:\nZ[-nr//6:, -nc//6:] = nan\n# contourf will convert these to masked\n\n\nZ = ma.array(Z)\n# mask another corner:\nZ[:nr//6, :nc//6] = ma.masked\n\n\n# We are using automatic selection of contour levels;\n# this is usually not such a good idea, because they don't\n# occur on nice boundaries, but we do it here for purposes\n# of illustration.\nCS = contourf(X, Y, Z, 10, # [-1, -0.1, 0, 0.1],\n #alpha=0.5,\n cmap=cm.bone,\n origin=origin)\n\n# Note that in the following, we explicitly pass in a subset of\n# the contour levels used for the filled contours. Alternatively,\n# We could pass in additional levels to provide extra resolution.\n\nCS2 = contour(X, Y, Z, CS.levels[::2],\n colors = 'r',\n origin=origin,\n hold='on')\n\ntitle('Nonsense (with 2 masked corners)')\nxlabel('word length anomaly')\nylabel('sentence length anomaly')\n\n# Make a colorbar for the ContourSet returned by the contourf call.\ncbar = colorbar(CS)\ncbar.ax.set_ylabel('verbosity coefficient')\n# Add the contour line levels to the colorbar\ncbar.add_lines(CS2)\n\nfigure()\n\n# Now make a contour plot with the levels specified,\n# and with the colormap generated automatically from a list\n# of colors.\nlevels = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5]\nCS3 = contourf(X, Y, Z, levels,\n colors = ('r', 'g', 'b'),\n origin=origin)\n\nCS4 = contour(X, Y, Z, levels,\n colors = ('k',),\n linewidths = (3,),\n origin = origin)\ntitle('Listed colors (with 2 masked corners)')\nclabel(CS4, fmt = '%2.1f', colors = 'w', fontsize=14)\ncolorbar(CS3)\n\nshow()\n\n","sub_path":"lang/python/algo/scipy/aLotMore/pylab_examples/contourf_demo.py","file_name":"contourf_demo.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"469192403","text":"import miscellaneous\nimport time\n\n\n\n\n\n\n\n\n\ndef execute(data, output_location):\n \"\"\"\n This function calls the appropriate functions in miscellaneous.py. Those functions will use the encrypt() function\n located below as the algorithm to actually encrypt the text. Then, the ciphertext will be returned back to\n cryptography_runner.py\n\n :param data: (string) the data to be encrypted\n :param output_location: (string) the file to write relevant information into\n :return: (string) the encrypted data\n \"\"\"\n\n\n # Obtain the encrypted text. Also write statistics and relevant info a file\n encrypted = miscellaneous.symmetric_encrypt_or_decrypt_with_general_key(data, output_location,\n \"Encryption\", \"vigenere_multiplicative\", \"encrypt\")\n\n # Return encrypted text to be written in cryptography_runner\n return encrypted\n\n\n\n\n\n# The actual algorithm to encrypt using a vigenere cipher(multiplication instead of addition)\ndef encrypt(plaintext, key, char_set_size):\n \"\"\"\n This function encrypts with a vigenere cipher that multiplies instead of adds. Done to access more characters in\n unicode. If ascii or extended_ascii, store as numbers. Otherwise, store as characters\n\n :param plaintext: (string) the plaintext to encrypt with\n :param key: (string) the string to encrypt with\n :param char_set_size: (integer) the number of characters in the character set used\n :return: (string) the encrypted text\n \"\"\"\n\n ciphertext = \"\"\n ciphertext_list = [] # for storing unicode values of the numbers (when char_set_size <= 256). Done for speed\n key_index = 0\n\n # if using unicode, then adjust the size of the char_set_size to be printable characters only (no surrogates)\n if char_set_size > 256:\n char_set_size = char_set_size - miscellaneous.SURROGATE_BOUND_LENGTH\n\n # Counter for printing purposes\n characters_done = 0\n\n for x in plaintext:\n\n characters_done += 1\n\n # figure out the unicode value for each of the characters\n uni_val_plain = ord(x)\n\n # figure out the unicode value for the right character in the key, then update for next iteration\n key_char = key[key_index]\n uni_val_key = ord(key_char)\n\n key_index = (key_index + 1) % len(key)\n\n\n # figure out the encrypted character val (un-modded)\n uni_val_encrypted = (uni_val_plain * uni_val_key)\n\n\n # if the encrypted_char would be a surrogate(unprintable), adjust by adding SURROGATE_BOUND_LENGTH\n if miscellaneous.SURROGATE_LOWER_BOUND <= uni_val_encrypted:\n encrypted_char = chr(uni_val_encrypted + miscellaneous.SURROGATE_BOUND_LENGTH)\n\n\n # Print updates\n if characters_done % 100 == 0:\n print (\"Percentage of text done: \" + str(characters_done / len(plaintext) * 100))\n\n\n # Add the encrypted character to the overall encrypted message (if using unicode)\n if char_set_size > 256:\n\n # Find the encrypted char value(modded)\n uni_val_encrypted = (uni_val_plain * uni_val_key) % char_set_size\n encrypted_char = chr(uni_val_encrypted)\n\n # Add to ciphertext\n ciphertext = ciphertext + encrypted_char\n\n # Otherwise, add the number to the overall encrypted message (list)\n else:\n ciphertext_list.append(str(uni_val_encrypted))\n\n\n # Build up ciphertext if necessary(when we were using list (when char_set_size <= 256))\n if ciphertext == \"\":\n ciphertext = \" \".join(ciphertext_list)\n\n return 
ciphertext\n\n\n\n\n\n","sub_path":"Encryption/vigenere_multiplicative.py","file_name":"vigenere_multiplicative.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"489427561","text":"\nfrom modules.IEnvironmentController import IEnvironmentController\nfrom python_terraform import *\nfrom modules import aws_service, splunk_sdk\nfrom tabulate import tabulate\nimport ansible_runner\n\n\nclass TerraformController(IEnvironmentController):\n\n def __init__(self, config, log, packer_amis):\n super().__init__(config, log)\n custom_dict = self.config.copy()\n rem_list = ['log_path', 'log_level', 'art_run_techniques']\n [custom_dict.pop(key) for key in rem_list]\n custom_dict['ip_whitelist'] = [custom_dict['ip_whitelist']]\n if packer_amis:\n custom_dict['use_packer_amis'] = '1'\n else:\n custom_dict['use_packer_amis'] = '0'\n custom_dict['splunk_packer_ami'] = \"packer-splunk-server-\" + self.config['key_name']\n custom_dict['phantom_packer_ami'] = \"packer-phantom-server-\" + self.config['key_name']\n custom_dict['kali_machine_packer_ami'] = \"packer-kali-machine-\" + self.config['key_name']\n custom_dict['windows_domain_controller_packer_ami'] = \"packer-windows-domain-controller-\" + self.config['key_name']\n custom_dict['windows_server_packer_ami'] = \"packer-windows-server-\" + self.config['key_name']\n custom_dict['windows_client_packer_ami'] = \"packer-windows-client-\" + self.config['key_name']\n self.terraform = Terraform(working_dir='terraform',variables=custom_dict)\n\n\n def build(self):\n self.log.info(\"[action] > build\\n\")\n return_code, stdout, stderr = self.terraform.apply(capture_output='yes', skip_plan=True, no_color=IsNotFlagged)\n if not return_code:\n self.log.info(\"attack_range has been built using terraform successfully\")\n self.list_machines()\n\n def destroy(self):\n self.log.info(\"[action] > destroy\\n\")\n return_code, stdout, stderr = self.terraform.destroy(capture_output='yes', no_color=IsNotFlagged)\n self.log.info(\"attack_range has been destroy using terraform successfully\")\n\n\n def stop(self):\n instances = aws_service.get_all_instances(self.config)\n aws_service.change_ec2_state(instances, 'stopped', self.log)\n\n\n def resume(self):\n instances = aws_service.get_all_instances(self.config)\n aws_service.change_ec2_state(instances, 'running', self.log)\n\n\n def search(self, search_name):\n instance = aws_service.get_instance_by_name(\"attack-range-splunk-server\",self.config)\n if instance['State']['Name'] == 'running':\n splunk_sdk.search(instance['NetworkInterfaces'][0]['Association']['PublicIp'],str(self.config['splunk_admin_password']), search_name, self.log)\n else:\n self.log.error('ERROR: Splunk server is not running.')\n\n\n def simulate(self, target, simulation_techniques):\n target_public_ip = aws_service.get_single_instance_public_ip(target, self.config)\n if target == 'attack-range-windows-client':\n runner = ansible_runner.run(private_data_dir='.attack_range/',\n cmdline=str('-i ' + target_public_ip + ', '),\n roles_path=\"../ansible/roles\",\n playbook='../ansible/playbooks/atomic_red_team.yml',\n extravars={'art_run_techniques': simulation_techniques, 'ansible_user': 'Administrator', 'ansible_password': self.config['win_password'], 'ansible_port': 5985, 'ansible_winrm_scheme': 'http'},\n verbosity=0)\n else:\n runner = ansible_runner.run(private_data_dir='.attack_range/',\n cmdline=str('-i ' + target_public_ip + ', '),\n roles_path=\"../ansible/roles\",\n playbook='../ansible/playbooks/atomic_red_team.yml',\n extravars={'art_run_techniques': simulation_techniques, 'ansible_user': 'Administrator', 'ansible_password': self.config['win_password']},\n verbosity=0)\n\n if runner.status == 
\"successful\":\n self.log.info(\"successfully executed technique ID {0} against target: {1}\".format(simulation_techniques, target))\n else:\n self.log.error(\"failed to executed technique ID {0} against target: {1}\".format(simulation_techniques, target))\n sys.exit(1)\n\n\n def list_machines(self):\n instances = aws_service.get_all_instances(self.config)\n response = []\n instances_running = False\n for instance in instances:\n if instance['State']['Name'] == 'running':\n instances_running = True\n response.append([instance['Tags'][0]['Value'], instance['State']['Name'], instance['NetworkInterfaces'][0]['Association']['PublicIp']])\n else:\n response.append([instance['Tags'][0]['Value'], instance['State']['Name']])\n print()\n print('Terraform Status\\n')\n if len(response) > 0:\n if instances_running:\n print(tabulate(response, headers=['Name','Status', 'IP Address']))\n else:\n print(tabulate(response, headers=['Name','Status']))\n else:\n print(\"ERROR: Can't find configured EC2 Attack Range Instances in AWS.\")\n sys.exit(1)\n print()\n\n\n def list_searches(self):\n instance = aws_service.get_instance_by_name(\"attack-range-splunk-server\",self.config)\n if instance['State']['Name'] == 'running':\n response = splunk_sdk.list_searches(instance['NetworkInterfaces'][0]['Association']['PublicIp'],str(self.config['splunk_admin_password']))\n if len(response) > 0:\n objects = []\n for object in response:\n objects.append([object.name])\n print()\n print('Available savedsearches in Splunk\\n')\n print(tabulate(objects, headers=['Name']))\n print()\n else:\n log.error('ERROR: Splunk server is not running.')\n","sub_path":"modules/TerraformController.py","file_name":"TerraformController.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"257782960","text":"# Solving a Large Linear System\n\nimport numpy\n\n# Set a random seed so the code below is reproducible\nnumpy.random.seed(1)\n\n# Create A and b matrices with random\nA = 10*numpy.random.rand(10,10)-5\nb = 10*numpy.random.rand(10)-5\n\n# Solve Ax = b\nsolution = numpy.linalg.solve(A,b)\nprint(solution)\n\n# To verify the solution works, show Ax - b is near 0\nsum(abs(numpy.dot(A,solution) - b))","sub_path":"Chapter 7/SolvingALargeLinearSystem.py","file_name":"SolvingALargeLinearSystem.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"653716600","text":"from twitter import OAuth, Twitter\nfrom datetime import datetime\n\n\ndef parse_date(_date):\n \"\"\"\n responseに含まれる日付をdatetimeに変換\n :param _date:a string of date\n :return:\n \"\"\"\n return datetime.strptime(_date, '%a %b %d %H:%M:%S %z %Y')\n\n\nclass TimelineHelper:\n def __init__(self, twitter):\n self.twitter = twitter\n\n def get_latest_tweet(self, screen_name=None):\n if not screen_name:\n screen_name = self.twitter.account.settings()['screen_name']\n return self.twitter.statuses.user_timeline(screen_name=screen_name, count=1)[0]\n\n def get_latest_id(self, screen_name=None):\n return self.get_latest_tweet(screen_name)['id']\n\n def next_mention(self, since_id=1, max_id=None):\n \"\"\"\n mentionの取得\n :param since_id:このidより大きいidのmentionを取得\n :param max_id: 取得対象idの最大値\n :return:\n \"\"\"\n if not max_id:\n max_id = self.get_latest_id()\n\n return self.__next_tweet(self.twitter.statuses.mentions_timeline, max_id, since_id=since_id)\n\n def next_tweet(self, screen_name=None, since_id=1, max_id=None):\n \"\"\"\n timelineの取得\n :param screen_name:指定ユーザーのtimelineを取得. 指定なしの場合, ログインアカウントのタイムラインを取得する\n :param since_id:このidより大きいidのツイートを取得\n :param max_id: 取得対象idの最大値\n :return:最新のツイートから降順に返却\n \"\"\"\n if not max_id:\n max_id = self.get_latest_id(screen_name)\n\n if not screen_name:\n screen_name = self.twitter.account.settings()['screen_name']\n return self.__next_tweet(self.twitter.statuses.user_timeline, max_id, screen_name=screen_name,\n since_id=since_id)\n\n def __next_tweet(self, timeline_api, max_id, **params):\n \"\"\"\n timelineを取得し、1件ごとyieldする\n :param timeline_api:ex. home_timeline / mention_timeline\n :param max_id:max_id\n :param params:max_id以外のapiパラメータ\n :return:a tweet object\n \"\"\"\n count = 200\n while True:\n tweets = timeline_api(max_id=max_id, count=count, **params)\n for tweet in tweets:\n yield tweet\n\n if len(tweets) < count:\n break\n max_id = tweets[-1]['id'] - 1\n\n\nif __name__ == \"__main__\":\n import os\n import settings\n\n settings.init_env()\n\n consumer_key = os.environ.get('TWITTER_CONSUMER_KEY')\n consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET')\n oauth_token = os.environ.get('TWITTER_OAUTH_TOKEN')\n oauth_secret = os.environ.get('TWITTER_OAUTH_SECRET')\n\n t = Twitter(auth=OAuth(\n oauth_token, oauth_secret, consumer_key, consumer_secret))\n helper = TimelineHelper(t)\n for tweet in helper.next_tweet(since_id=825000136252682240):\n print(tweet['text'])\n for tweet in helper.next_mention(since_id=709043705114570753):\n print(tweet['text'], tweet['id'])\n","sub_path":"twitterhelper.py","file_name":"twitterhelper.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"156359175","text":"#run directly in python 3.6 (install html2text via pip)\n\nimport pyodbc\nimport re\n\ndef left(s, amount):\n return s[:amount]\n\ndef right(s, amount):\n return s[-amount:]\n\ndef mid(s, offset, amount):\n return s[offset:offset+amount]\n\ndef isvalid_line(line):\n line=line.strip()\n\n #minimum length criteria\n if len(line)<=10:\n return 0\n #** email headers #! Links #- Line breaks #\\ Line breaks (orginal message)\n if left(line,2)=='**' or left(line,1)=='!' or left(line,1)=='-' or left(line,1)=='\\\\' or left(line,1)=='//':\n return 0\n \n #EMAIL HEADERS\n invalid_prefixes=['EMAIL:','E:','E-MAIL:','FROM:','TO:','CC:','SENT:','SUBJECT:','PHONE:','PH','OFFICE:','MOBILE:','TEL:','TEL','FAX:','DATE:','FROM:**','SENT:**','TO:**','SUBJECT:**','IMPORTANCE:','IMPORTANCE:**','RECEIVED:','RECEIVED:**']\n start_token=line.split(' ',1)[0]\n if start_token.upper() in invalid_prefixes:\n return 0\n \n #HTTP addresses\n if left(line,4).upper()=='HTTP':\n return 0\n\n #single word as a line\n if line.count(' ')<2:\n return 0\n\n #detecting semicolons for addresses in TO/CC line\n if line.count(';')>=2:\n return 0\n \n return 1\n\n\ncon=pyodbc.connect('DRIVER={SQL Server};Server=kach_saw;Database=WORKAREA;Trusted_Connection=yes;')\ncur=con.cursor()\n\nqry=\"SELECT [description] phrase FROM [WORKAREA].[MST].[t_StopPhrases] where isstopphrase=1\"\ncur.execute(qry)\nStopPhrases=cur.fetchall()\nStopPhrasesList=[row.phrase for row in StopPhrases]\n\nqry=\"SELECT ID,DESCRIPTION_CLEANED_EN TEXT FROM MST.t_SampleDES\" \ncur.execute(qry)\nrows=cur.fetchall()\n\nbatch=1\ncnt=0\nfor row in rows:\n cleanedtxt=\"\"\n for line in row.TEXT.splitlines():\n line=line.strip()\n if isvalid_line(line): #print(isvalid_line(line),'-',line)\n if line not in StopPhrasesList: \n cleanedtxt=cleanedtxt+line+\"\\n\"\n\n cur.execute(\"UPDATE [MST].[t_SampleDES] SET [description_cleaned_en_stopwords]=? WHERE ID=?\",(cleanedtxt,row.ID))\n cnt+=1\n if cnt%100==0:\n print(batch)\n con.commit()\n cnt=0\n batch+=1\n \ncon.commit()\ncon.close()\n\n\n## if isvalid_line(line):\n## print('-',line)\n## cur.execute(\"UPDATE [MST].[t_Sample] SET DESCRIPTION_CLEANED=? WHERE ID=?\",(cleansed,row.ID))\n\n\n##output=\"\".join(line.strip() for line in output.splitlines())\n\n##print(output)\n\n##for line in output.splitlines():\n## line=line.strip()\n##\n## #to eliminate disclaimer, instructions at the bottom of email\n## #if line=='* * *':\n## # break\n## print(line)\n","sub_path":"NLP_Normalization/Clear_Description_MSTRAVEL.py","file_name":"Clear_Description_MSTRAVEL.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"632500406","text":"# -*- coding: utf-8 -*-\n\"\"\"Sirepo setup script\n\n:copyright: Copyright (c) 2015-2023 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nimport os\nimport pykern.pksetup\n\ninstall_requires = [\n \"Flask==2.0.3\",\n \"SQLAlchemy>=1.4,<2\",\n \"aenum\",\n \"asyncssh\",\n \"cryptography>=2.8\",\n \"futures\",\n \"matplotlib\",\n \"numconv\",\n \"numpy\",\n \"Pillow\",\n \"pyIsEmail\",\n \"pykern\",\n \"pytz\",\n \"requests\",\n \"tornado\",\n \"user-agents\",\n \"werkzeug==2.0.3\",\n # Optional dependencies\n # required for email login and smtp\n \"Authlib>=0.13\",\n \"dnspython\",\n # required for sbatch agent\n \"asyncssh\",\n]\nif not os.environ.get(\"no_uwsgi\", 0):\n # had a problem installing in one environment so make optional\n install_requires.append(\"uwsgi\")\n\npykern.pksetup.setup(\n author=\"RadiaSoft LLC.\",\n author_email=\"pip@sirepo.com\",\n description=\"accelerator code gui\",\n install_requires=install_requires,\n license=\"http://www.apache.org/licenses/LICENSE-2.0.html\",\n name=\"sirepo\",\n url=\"http://sirepo.com\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"87941818","text":"from random import randint\nfrom graphviz import Digraph\nimport seaborn as sns\n\nclass RefGraphBuilder():\n\n def __init__(self):\n pass\n\n def referencepath(self,refpath):\n self.refpath = refpath\n p = Digraph(name='REFERENCE', node_attr={'shape': 'cds', 'color': 'black', 'fillcolor': 'grey', 'fixedsize':'true'}, format='png',graph_attr={'splines': 'spline', 'rankdir': 'LR'},engine='dot',edge_attr={'arrowhead': 'vee', 'arrowsize': '0.5', 'color': 'black','penwidth':'2'})\n nw = 0\n nodedata = {}\n for i in range(1, len(self.refpath) - 1): # Has to start at one, else loopback in graph\n nw = (int(refpath[i+1][1]) - int(refpath[i][1])) / 10\n if nw < 1.2:\n nw = 1.2\n ##### Return a dictionary of node attr to work with - interactive plotting\n nodedata[str(self.refpath[i - 1][1] + self.refpath[i - 1][2])] = [\"label\", str(self.refpath[i - 1][0] + ' ' + self.refpath[i - 1][1] + ' ' + self.refpath[i - 1][2]), \"width\", str(nw)]\n #####\n p.node(self.refpath[i - 1][1] + self.refpath[i - 1][2],label=str(self.refpath[i - 1][0] + ' ' + self.refpath[i - 1][1] + ' ' + self.refpath[i - 1][2]), width = str(nw))\n #### NEED TO ADD FOR EDGES AS WELL\n p.edge(self.refpath[i - 1][1] + self.refpath[i - 1][2], self.refpath[i][1] + self.refpath[i][2])\n\n p.node(self.refpath[len(self.refpath) - 1][1] + self.refpath[len(self.refpath) - 1][2],label=str(self.refpath[len(self.refpath) - 1][0] + ' ' + self.refpath[len(self.refpath) - 1][1] + ' ' + self.refpath[len(self.refpath) - 1][2]))\n p.node(self.refpath[len(self.refpath) - 2][1] + self.refpath[len(self.refpath) - 2][2],label=str(self.refpath[len(self.refpath) - 2][0] + ' ' + self.refpath[len(self.refpath) - 2][1] + ' ' + self.refpath[len(self.refpath) - 2][2]))\n p.edge(self.refpath[len(self.refpath) - 2][1] + self.refpath[len(self.refpath) - 2][2],\n self.refpath[len(self.refpath) - 1][1] + self.refpath[len(self.refpath) - 1][2])\n\n p.node(str('REF'), label=str('Reference'), width = '1.6')\n p.node(str('REF_'), label=str('Path'))\n p.edge('REF', 'REF_')\n\n #print(nodedata)\n return p, nodedata;\n\n def variantpath(self, output,graph,loci,refpath):\n\n self.output = output\n self.graph = graph\n self.loci =loci\n self.refpath = refpath\n\n colour = sns.color_palette(\"colorblind\", desat=.5) + sns.color_palette(\"Set1\", n_colors=8, desat=.5)\n del colour[7:9]\n colour = sns.color_palette(colour)\n colour = colour.as_hex()\n\n ## newvar\n allvar = {}\n testref = [list(elem) for elem in refpath]\n ## newvar\n\n for key in self.output:\n varpath = ()\n for i in self.output[key]:\n if i[0] == self.loci[0] and int(i[1]) >= self.loci[1] and int(i[1]) <= self.loci[2]:\n varpath = sorted(\n tuple(varpath) + (([i[0], i[1], i[3]]),)) # have to convert to int for sort - see refpath\n varpath = sorted(varpath, key=lambda x: int(x[1]))\n ## newvar\n nw = 0\n temp = []\n matching = []\n ## newvar\n\n x = Digraph(name=key, node_attr={'shape': 'cds', 'color': colour[randint(0, len(colour) - 1)],'fillcolor': colour[randint(0, len(colour) - 1)]} , engine='dot',edge_attr={'arrowhead': 'vee', 'arrowsize': '0.5', 'color': colour[randint(0, len(colour) - 1)],'penwidth':'4'}) # colour can also be set to use x11 names 'red'\n\n for i in range(1, len(varpath) - 1):\n # if output[key][i][0] == loci[0] and int(output[key][i][1]) >= loci[1] and int(output[key][i][1]) <= loci[2]:\n\n nw = (int(varpath[i+1][1]) - int(varpath[i][1])) / 10\n if nw < 1.2: # have to include a maximum node size as well\n nw = 1.2\n if varpath[i - 1] in testref:\n 
x.node(varpath[i - 1][1] + varpath[i - 1][2],label=str(varpath[i - 1][0] + ' ' + varpath[i - 1][1] + ' ' + varpath[i - 1][2]), width=str(nw))\n matching = list(filter(lambda k: varpath[i] in allvar[k], allvar.keys()))\n if matching: # exit pathways to a reference node\n matching.append(key)\n x.edge(varpath[i - 1][1] + varpath[i - 1][2], varpath[i][1] + varpath[i][2], label=str(' - '.join(matching)), color='black', style='dotted')\n matching = []\n else:\n x.edge(varpath[i - 1][1] + varpath[i - 1][2], varpath[i][1] + varpath[i][2], label=str(key))\n\n else:\n x.node(varpath[i - 1][1] + varpath[i - 1][2],label=str(varpath[i - 1][0] + ' ' + varpath[i - 1][1] + ' ' + varpath[i - 1][2]),width=str(nw))\n matching = list(filter(lambda k: varpath[i] in allvar[k], allvar.keys()))\n if matching: ####################\n matching.append(key)\n\n x.edge(varpath[i - 1][1] + varpath[i - 1][2], varpath[i][1] + varpath[i][2],label=str(' - '.join(matching)), color='black', style='dotted')\n matching = []\n else:\n x.edge(varpath[i - 1][1] + varpath[i - 1][2], varpath[i][1] + varpath[i][2], label=str(key))\n\n\n x.node(varpath[len(varpath) - 1][1] + varpath[len(varpath) - 1][2], label=str(varpath[len(varpath) - 1][0] + ' ' + varpath[len(varpath) - 1][1] + ' ' + varpath[len(varpath) - 1][2]))\n x.node(varpath[len(varpath) - 2][1] + varpath[len(varpath) - 2][2], label=str(varpath[len(varpath) - 2][0] + ' ' + varpath[len(varpath) - 2][1] + ' ' + varpath[len(varpath) - 2][2]))\n x.edge(varpath[len(varpath) - 2][1] + varpath[len(varpath) - 2][2],varpath[len(varpath) - 1][1] + varpath[len(varpath) - 1][2])\n\n x.node(str(key), label=str(key))\n x.node(str(key + '_'), label=str('Path'))\n x.edge(str(key), str(key + '_'))\n\n temp = [_ for _ in varpath if _[2] != 'REF']\n allvar[key] = temp\n\n self.graph.subgraph(x)\n return graph","sub_path":"Dyedot_variationGraphs/class_Grapher.py","file_name":"class_Grapher.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"219895430","text":"import requests\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport urllib\r\nimport datetime\r\nimport random\r\nimport csv\r\nimport time\r\nfrom pymongo import MongoClient\r\nimport threading\r\n\r\n\"\"\"\r\n 该脚本主要是 爬取百度招聘网站上的招聘信息,并将数据存入MongoDB数据库。\r\n 主要内容:\r\n (省, 市, 区, 工程标题, 工程描述, 详细地址, 工种, 开工日期, 完工日期, 薪酬金额, 接包要求 , 发布状态, 发布日期)\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n 数据库操作\r\n\"\"\"\r\nclient = MongoClient('localhost', 27017)\r\nbaidu = client.baidu\r\ncollection = baidu.work_1210\r\n\r\n\r\ndef save_to_mongodb(work_info):\r\n try:\r\n if collection.insert_one(work_info):\r\n print('记录成功!')\r\n except Exception:\r\n print('记录失败!')\r\n\r\n##########################################################\r\n\r\n\r\ndef get_proxy():\r\n \"\"\"\r\n 根据之前代理池系统,对外提供的web访问接口,从代理池获取代理\r\n \"\"\"\r\n try:\r\n get_proxy_utl = 'http://127.0.0.1:5000/random'\r\n res = requests.get(get_proxy_utl)\r\n if res.status_code == 200:\r\n print('从代理���中获取代理IP: %s' % res.text)\r\n proxies = {'http': 'http://' + res.text}\r\n return proxies\r\n else:\r\n return None\r\n except Exception as e:\r\n print('从代理池中获取代理IP出错了!! %s' % e)\r\n return None\r\n\r\n\r\n###########################################################\r\n\"\"\"\r\n 初始化变量\r\n\"\"\"\r\nua = ['Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\r\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\r\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',\r\n 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\r\n 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',\r\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',\r\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)']\r\n\r\n\r\ndef crawl(job_type, city, province, token, cookie):\r\n headers = {\r\n 'Accept': '*/*',\r\n 'Cookie': cookie,\r\n 'Referer': 'https://zhaopin.baidu.com/',\r\n 'User-Agent': random.choice(ua)\r\n }\r\n\r\n pn = 0\r\n for i in range(10):\r\n\r\n try:\r\n url = 'https://zhaopin.baidu.com/api/qzasync?query=%s&city=%s&is_adq=1&pcmod=1&token=%s&pn=%d&rn=10' % (\r\n urllib.parse.quote(job_type), urllib.parse.quote(city).replace('%', '%25'), token, pn)\r\n\r\n # url = 'https://zhaopin.baidu.com/api/qzasync?query=%E6%96%87%E5%91%98&city=%25E6%25AD%25A6%25E6%25B1%2589&is_adq=1&pcmod=1&token=%3D%3DAmS3tqY%2BaoEqFbtxmabe5aspJaXt1Zqd2kYS5kst5l&pn=10&rn=10'\r\n print(url)\r\n\r\n # 获取代理IP\r\n proxies = get_proxy()\r\n print('代理ip:', proxies)\r\n if not proxies:\r\n res = requests.get(url, headers=headers)\r\n else:\r\n res = requests.get(url, headers=headers, proxies=proxies)\r\n\r\n pn += 10\r\n print(res.status_code)\r\n data = res.json()\r\n\r\n for disp_data in data['data']['disp_data']:\r\n url = disp_data.get('loc', '')\r\n district = disp_data.get('district', '') # 区县\r\n\r\n detail_url = 'https://zhaopin.baidu.com/szzw?id=%s' % url\r\n print(detail_url)\r\n\r\n try:\r\n if not proxies:\r\n detail_res = requests.get(detail_url)\r\n else:\r\n detail_res = requests.get(detail_url, proxies=proxies)\r\n print(detail_res.status_code)\r\n if detail_res.status_code != 200:\r\n continue\r\n html = detail_res.text\r\n\r\n company = '' # 公司名称\r\n title = '' # 标题\r\n 
job_desc = '' # 工作描述\r\n detail_address = '' # 详细地址\r\n release_time = str(datetime.datetime.now().strftime('%Y/%m/%d')) # 开工时间(当前时间)\r\n valid_time = '' # 有效时间\r\n salary = '' # 薪水\r\n public_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) # 发布时间(当前时间)\r\n\r\n try:\r\n company = re.compile(r'class=\"bd-tt\" data-a-39d218aa>(.*?)<').findall(html)[0]\r\n company = company.replace('class=\"bd-tt\" data-a-39d218aa>', '').replace('<', '')\r\n except:\r\n company = ''\r\n\r\n try:\r\n title = re.compile(r'class=\"job-name\">(.*?)').findall(html)[0]\r\n except Exception as e:\r\n print(e)\r\n title = '招%s' % (job_type)\r\n\r\n if '管理培训生' in title \\\r\n or '销售' in title \\\r\n or '风控岗' in title \\\r\n or '临床推广' in title \\\r\n or 'android' in title \\\r\n or '推广专员' in title \\\r\n or '产品运营' in title:\r\n continue\r\n\r\n # title = '招' + job_type\r\n\r\n try:\r\n job_desc = re.compile(r'职位描述:(.*?)').findall(html)[0]\r\n job_desc = job_desc.replace('', '').replace('', '').replace('
', '').replace(\r\n '', '')\r\n except Exception as e:\r\n print(e)\r\n try:\r\n job_desc = re.compile(r'>工作内容:(.*?)
').findall(html)[0]\r\n job_desc = job_desc.replace('', '').replace('', '').replace('
', '').replace(\r\n '', '')\r\n except:\r\n job_desc = \"招%s,要熟练工\" % job_type\r\n\r\n try:\r\n detail_address = re.compile(r'工作地址:
(.*?)
', '').replace('', '')\r\n except Exception as e:\r\n print(e)\r\n try:\r\n detail_address = re.compile(r'
工作地点:\\D{2,6}
').findall(html)[0]\r\n detail_address = detail_address.replace('
', '').replace('
', '')\r\n except:\r\n detail_address = \"%s市\" % city\r\n\r\n # try:\r\n # public_time = re.compile(r'
发布时间:(2018-\\d{2}-\\d{2}).*?
').findall(html)[0]\r\n # public_time = public_time.replace('
', '').replace('
', '')\r\n # except Exception as e:\r\n # public_time = str(datetime.datetime.now().strftime('%Y/%m/%d'))\r\n\r\n try:\r\n valid_time = re.compile(r'
有效日期:(.*?)
').findall(html)[0]\r\n valid_time = valid_time.replace('
', '').replace('
', '')\r\n except Exception as e:\r\n valid_time = str((datetime.date.today() + datetime.timedelta(days=+61)).strftime(\"%Y/%m/%d\"))\r\n\r\n try:\r\n salary = re.compile(r'(\\d{3,5})-(\\d{3,5})
).\n#\n##############################################################################\n\nimport time\nfrom datetime import datetime\nfrom openerp.report import report_sxw\nimport math\n\n\nclass assets_details(report_sxw.rml_parse):\n    \"\"\"\n    assets details report\n    \"\"\"\n    _name = 'report.account.assets.detials'\n\n    def __init__(self, cr, uid, name, context=None):\n        \"\"\"\n        initiation method\n        \"\"\"\n        self.counter = 0\n        super(assets_details, self).__init__(cr, uid, name, context=context)\n        self.localcontext.update({\n            'lines': self.lines,\n            'category':self.get_name,\n            'depreciation':self.get_depreciation,\n        })\n        self.context = context\n    def get_name(self):\n        \"\"\"\n        print category name\n        \"\"\"\n        name = self.categories_names[self.counter]\n        # because this method will be called after get_depreciation\n        self.counter += 1\n        return name\n\n    def get_depreciation(self):\n        \"\"\"\n        print category depreciation percent\n        \"\"\"\n        # not every category gets a percent computed, so fall back to 0.0 instead of raising KeyError\n        percent = self.depreciation_percent.get(self.counter, 0.0)\n        return percent\n    def to_date(self, date):\n        \"\"\"\n        convert string to date\n        \"\"\"\n        return datetime.strptime(str(date), \"%Y-%m-%d\")\n\n    def compute_assets(self, company_id, category_ids, date):\n        \"\"\"\n        compute details of assets\n        category_ids: list of categories ids\n        \"\"\"\n        # wanted date\n        wanted_date = datetime.strptime(str(date), \"%Y-%m-%d\")\n        # wanted year\n        year = wanted_date.year\n        start_or_year = str(year)+\"-1-1\"\n        start_date = datetime.strptime(str(start_or_year), \"%Y-%m-%d\")\n        self.assets = {x:{} for x in category_ids}\n        self.categories = {x:{} for x in category_ids}\n        asset_obj = self.pool.get('account.asset.asset')\n        search_ids = asset_obj.search(self.cr, self.uid, [('category_id','in',category_ids),\\\n                                                          ('company_id','=',company_id),\\\n                                                          ('purchase_date','>=',start_or_year),\\\n                                                          ('purchase_date','<=',date),\\\n                                                          ('state','!=','draft'),], context=self.context)\n        for asset in asset_obj.browse(self.cr, self.uid, search_ids):\n            self.categories[asset.category_id.id] = asset.category_id.name\n            self.assets[asset.category_id.id][asset.name] = self.assets[asset.category_id.id].get(asset.name,{})\n            self.assets[asset.category_id.id][asset.name]['name'] = asset.name\n            self.assets[asset.category_id.id][asset.name]['date'] = asset.purchase_date\n            # to get the current count if exist or set to zero\n            self.assets[asset.category_id.id][asset.name]['count'] = self.assets[asset.category_id.id][asset.name].get('count',0)\n            self.assets[asset.category_id.id][asset.name]['count'] += 1\n\n            # get the current value of the asset\n            value = sum(x.amount for x in asset.history_ids\\\n                        if (x.type in ['initial']\\\n                            and self.to_date(x.date) >= start_date\\\n                            and self.to_date(x.date) <= wanted_date))\n\n            self.assets[asset.category_id.id][asset.name]['value'] = value\n            # to get the current sum if exist or set to zero\n            self.assets[asset.category_id.id][asset.name]['sum_value'] = self.assets[asset.category_id.id][asset.name].get('sum_value',0.0)\n\n            self.assets[asset.category_id.id][asset.name]['sum_value'] += value\n\n            # get the current depreciation of the asset\n            depreciation = sum(x.amount for x in asset.depreciation_line_ids\\\n                               if x.depreciation_date and\\\n                               (self.to_date(x.depreciation_date) >= start_date\\\n                                and self.to_date(x.depreciation_date) <= wanted_date))\n\n            # to get the current depreciation if exist or set to zero\n            self.assets[asset.category_id.id][asset.name]['depreciation'] = self.assets[asset.category_id.id][asset.name].get('depreciation',0.0)\n            self.assets[asset.category_id.id][asset.name]['depreciation'] += math.fabs(depreciation)\n\n            current_sum = self.assets[asset.category_id.id][asset.name]['sum_value']\n            current_depreciation = self.assets[asset.category_id.id][asset.name]['depreciation']\n            self.assets[asset.category_id.id][asset.name]['rest_value'] = current_sum - current_depreciation\n\n        self.assets_to_print = []\n        # hold categories name to print operation\n\n\n        self.categories_names = {}\n        self.depreciation_percent = {}\n        counter = 0\n        for key in self.assets.keys():\n            category_list = []\n            name = self.categories[key]\n            self.categories_names[counter] = name\n            dic = self.assets[key]\n\n            last_line = {'name':'اﻹجمالي', 'date':' ', 'count':' ',\\\n                         'value':0, 'sum_value':0, 'depreciation':0, 'rest_value':0}\n            for record in dic:\n                last_line['value'] += self.assets[key][record]['value']\n                last_line['sum_value'] += self.assets[key][record]['sum_value']\n                last_line['depreciation'] += self.assets[key][record]['depreciation']\n                last_line['rest_value'] += self.assets[key][record]['rest_value']\n                category_list.append(self.assets[key][record])\n\n\n\n            # to add the summation line\n            category_list.append(last_line)\n\n            # to round the percent to 2 digits\n            if last_line['depreciation'] and last_line['sum_value']:\n                self.depreciation_percent[counter] = round(last_line['depreciation']/last_line['sum_value'], 2)\n\n            self.assets_to_print.append(category_list)\n            counter += 1\n\n\n    def lines(self, data):\n        \"\"\"\n        return report lines\n        \"\"\"\n        company_id = data['form']['company_id']\n        category_ids = data['form']['category_id']\n        date = str(data['form']['date'])\n        # get entered company_id or the default user company_id\n        company_id = company_id and company_id[0] or\\\n            self.pool.get('res.company')._company_default_get(self.cr, self.uid, 'account.asset.asset', context=self.context)\n\n        self.compute_assets(company_id, category_ids, date)\n\n        return self.assets_to_print\n\nreport_sxw.report_sxw('report.account.assets.details.report',\n                      'account.asset.asset',\n                      'addons/account_asset_custom/report/account_asset_details.rml',\n                      parser=assets_details,\n                      header='internal landscape')\n","sub_path":"v_7/Dongola/common/account_asset_custom/report/account_asset_details.py","file_name":"account_asset_details.py","file_ext":"py","file_size_in_byte":7086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"487021245","text":"#!/usr/bin/env python\nimport os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom membrane.membrane_ops import mops as model_fun\nfrom membrane.layers.feedforward import conv\nfrom membrane.layers.feedforward import normalization\n# from layers.feedforward import pooling\nfrom membrane.layers.recurrent import feedback_hgru\n\n\ndef experiment_params(\n train_name=None,\n test_name=None,\n train_shape=None,\n test_shape=None,\n affinity=12,\n gt_idx=1,\n z=18):\n \"\"\"Parameters for the experiment.\"\"\"\n if train_shape is None:\n train_shape = [0]\n exp = {\n 'lr': [1e-2],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'training_routine': ['seung'],\n 'train_dataset': [train_name],\n 'test_dataset': [test_name],\n 'affinity': [affinity],\n 'cross_val': {'train': [0, 80], 'test': [80, 100]},\n 'gt_idx': [gt_idx], # Changes based on affinity\n 'train_input_shape': [z] + train_shape + [1],\n 'train_label_shape': [z] + train_shape + [affinity],\n 'test_input_shape': [z] + test_shape + [1],\n 'test_label_shape': [z] + test_shape + [affinity],\n 'train_stride': [1, 1, 1],\n 'test_stride': [1, 1, 1],\n 'tf_dtype': tf.float32,\n 'np_dtype': np.float32\n }\n exp['exp_label'] = __file__.split('.')[0].split(os.path.sep)[-1]\n exp['train_augmentations'] = [\n {'min_max_native_normalization': []},\n # {'normalize_volume': lambda x: x / 255.},\n # {'warp': {}},\n {'random_crop': []},\n {'pixel': {}},\n {'misalign': {}},\n {'blur': {}},\n {'missing': {}},\n {'flip_lr': []},\n {'flip_ud': []},\n ]\n exp['test_augmentations'] = [\n # {'normalize_volume': lambda x: x / 255.}\n {'min_max_native_normalization': []},\n {'random_crop': []},\n ]\n exp['train_batch_size'] = 1 # Train/val batch size.\n exp['test_batch_size'] = 1 # Train/val batch size.\n exp['top_test'] = 5 # Keep this many checkpoints/predictions\n exp['epochs'] = 100000\n exp['save_weights'] = False # True\n exp['test_iters'] = 1000\n exp['shuffle_test'] = False # Shuffle test data.\n exp['shuffle_train'] = True\n return exp\n\n\ndef build_model(data_tensor, reuse, training, output_channels):\n \"\"\"Create the hgru from Learning long-range...\"\"\"\n filters = [18]\n kernel_size = [1, 5, 5]\n with tf.variable_scope('cnn', reuse=reuse):\n # Unclear if we should include l0 in the down/upsample cascade\n with tf.variable_scope('in_embedding', reuse=reuse):\n in_emb = conv.conv3d_layer(\n bottom=data_tensor,\n name='l0',\n stride=[1, 1, 1],\n padding='SAME',\n num_filters=filters[0],\n kernel_size=[1, 5, 5],\n trainable=training,\n use_bias=True)\n in_emb = tf.nn.elu(in_emb)\n\n # hGRU down\n layer_hgru = feedback_hgru.hGRU(\n layer_name='hgru_1',\n x_shape=in_emb.get_shape().as_list(),\n timesteps=8,\n h_ext=[[1, 9, 9], [3, 5, 5], [1, 1, 1]],\n strides=[1, 1, 1, 1, 1],\n pool_strides=[1, 4, 4],\n padding='SAME',\n aux={\n 'symmetric_weights': True,\n 'dilations': [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],\n 'batch_norm': True,\n 'pooling_kernel': [1, 4, 4],\n 'intermediate_ff': filters, # + filters,\n 'intermediate_ks': [kernel_size]},\n train=training)\n h2 = layer_hgru.build(in_emb)\n nh2 = normalization.batch(\n bottom=h2,\n name='hgru_bn',\n fused=True,\n renorm=True,\n training=training)\n with tf.variable_scope('out_embedding', reuse=reuse):\n out_emb = conv.conv3d_layer(\n bottom=nh2,\n name='out_emb',\n stride=[1, 1, 1],\n padding='SAME',\n num_filters=output_channels,\n kernel_size=kernel_size,\n trainable=training,\n use_bias=True)\n return out_emb\n\n\ndef main(\n 
train=None,\n test=None,\n row_id=None,\n gpu_device='/gpu:0',\n evaluate=False,\n checkpoint=None,\n full_volume=False,\n force_meta=None,\n full_eval=False,\n bethge=None,\n adabn=False,\n test_input_shape=False,\n test_label_shape=False,\n overwrite_training_params=False,\n z=18):\n \"\"\"Run an experiment with hGRUs.\"\"\"\n version = '3d'\n tf_records = False\n if evaluate:\n return model_fun.evaluate_model(\n test=test,\n gpu_device=gpu_device,\n z=z,\n version=version,\n build_model=build_model,\n experiment_params=experiment_params,\n checkpoint=checkpoint,\n force_meta=force_meta,\n full_volume=full_volume,\n full_eval=full_eval,\n test_input_shape=test_input_shape,\n test_label_shape=test_label_shape,\n adabn=adabn,\n bethge=bethge,\n tf_records=tf_records)\n else:\n raise NotImplementedError\n","sub_path":"membrane/models/fgru.py","file_name":"fgru.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"373557464","text":"from fastapi import FastAPI, Path, Query\nfrom pydantic import BaseModel, Field\nfrom starlette.responses import JSONResponse\n\napp = FastAPI()\n\n\n@app.get(\"/profile/{name}\")\ndef get_path_parameter(name: str = Path(\"\", max_length=10)):\n return JSONResponse(\n content={\"message\": f\"My name is: {name}\"},\n status_code=200,\n )\n\n\ndefault_string = \"\"\ndefault_int = 0\n\n\nclass Books(BaseModel):\n name: str = Field(default_string, title=\"Book name\", max_length=300)\n page: int = Field(default_int, title=\"Page in book\", max_length=300)\n\n\n@app.post(\"/books\")\ndef create_books(req_body: Books):\n mock_response = {\n \"_id\": 4,\n \"name\": req_body.name,\n \"page\": req_body.page,\n }\n return JSONResponse(\n content={\"status\": \"ok\", \"data\": mock_response}, status_code=201\n )","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"610608629","text":"\n# compute dataframes d1 (Article, Brand, MC) and d2 (MC, Dept,SubDept,Class,SubClass)\n# compute d3 from primary hierarchy file (MC, Dept,SubDept,Class,SubClass) \n# concat d2 and d3 on MC column with precedence to primary hierarchy file as d4\n# merge d1 with d4 on MC( getting (Article,Brand,MC,Dept,SubDept,Class,SubClass))\n\nimport pandas as pd\nimport sys\n \nlist_df=[]\nkeep_cols = [\"Article\", \"ArticleDesc\",\"BrandDescription\",\"BrandCode\", \"MerCategory\",\"MCDescription\",\"Dept\",\"DeptDescription\",\"SubDeptDescription\",\"SubDept\", \"Class\",\"ClassDescription\",\"SubClass\",\"SubClassDescription\"]\n\n\n# read each CSV file into individual dataframe (df_each) and concat them as single df\nfor arg in sys.argv:\n\n\tif(not(\".csv\" in arg)):\n\t\tcontinue;\n\n\tprint (\"processing...\" + arg)\n\n\t#concatenate df in every iteration of for loop\n\tdf_each = pd.read_csv(arg, thousands=',' )\n\tdf_each = df_each[keep_cols]\n \n\tprint(df_each.head()) \n\tlist_df.append(df_each)\n\n \ndf = pd.concat(list_df)\n \n# filter articles beginning with A \nfilter_criteria = df['Article'].str.contains('^A', na=False)\ndf = df[filter_criteria ] \n\n\ndf_all = df.groupby(['Article']).last() \ndf_all = df_all.reset_index() \nprint(df_all.head())\n\n\n\n# retain and rename necessary columns \nkeep_cols = [\"Article\", \"ArticleDesc\",\"BrandDescription\",\"BrandCode\", \"MerCategory\"]\ndf = df_all[keep_cols] \n\ndf.rename(columns={'ArticleDesc':'Article_Desc'}, inplace=True)\ndf.rename(columns={'BrandDescription':'Brand_Desc'}, inplace=True)\ndf.rename(columns={'MerCategory':'MC'}, inplace=True)\n\ndf.to_csv(\"article_brand_mc.csv\", index=True)\n\n\n\n\n\n#read primary hierarchy file and remove duplicates\n\n\ndf_hier1 = pd.read_csv(\"hierarchy_conv.csv\")\n#article_cols = [\"MC\"] #columns to consider for removing duplicates\ndf_hier1.drop_duplicates([\"MC\"])\ndf_hier1 = df_hier1.reindex()\nkeep_cols = [\"MC\",\"MC Des\",\"Dept\",\"Dept Des\",\"Sub Dept\",\"Sub Dept Des\",\"Class\",\"Class Des\",\"Sub Class\",\"Sub Class Des\"]\ndf_hier1 = df_hier1[keep_cols]\ndf_hier1.to_csv(\"hierarchy_uniq.csv\")\n\n\n# secondary hierarchy from sales data files \n\nkeep_cols = [\"MerCategory\",\"MCDescription\",\"Dept\",\"DeptDescription\",\"SubDept\",\"SubDeptDescription\",\"Class\",\"ClassDescription\",\"SubClass\",\"SubClassDescription\"]\ndf_hier2 = df_all[keep_cols]\nprint(df_hier2.head())\ndf_hier2.rename(columns={'MerCategory':'MC'}, inplace=True)\ndf_hier2.rename(columns={'MCDescription':'MC Des'}, inplace=True)\ndf_hier2.rename(columns={'DeptDescription':'Dept Des'}, inplace=True)\ndf_hier2.rename(columns={'SubDept':'Sub Dept'}, inplace=True)\ndf_hier2.rename(columns={'SubDeptDescription':'Sub Dept Des'}, inplace=True)\ndf_hier2.rename(columns={'ClassDescription':'Class Des'}, inplace=True)\ndf_hier2.rename(columns={'SubClass':'Sub Class'}, inplace=True)\ndf_hier2.rename(columns={'SubClassDescription':'Sub Class Des'}, inplace=True)\n\n\n#sdf_hier2.columns = [\"MC\",\"MC Des\",\"Dept\",\"Dept Des\",\"Sub Dept\",\"Sub Dept Des\",\"Class\",\"Class Des\",\"Sub Class\",\"Sub Class Des\"]\n\narticle_cols = [\"MC\"] #columns to consider for removing duplicates\ngrouped = df_hier2.groupby(article_cols)\nindex = [gp_keys[0] for gp_keys in grouped.groups.values()]\ndf_hier2 = \tdf_hier2.reindex(index)\ndf_hier2.to_csv(\"hierarchy_sec.csv\")\n\n#combine both hierarchies, giving priority to primary hierarchy (using keep=first 
parameter)\n\ndf_hier1.reset_index(drop=True, inplace=True)\ndf_hier2.reset_index(drop=True, inplace=True)\n\nhier_all = pd.concat([df_hier1,df_hier2])\n\nhier_all.drop_duplicates(subset=[\"MC\"],keep='first',inplace=True)\nhier_all = hier_all.reindex()\nhier_all.to_csv(\"hier_all.csv\")","sub_path":"hierarchy_details.py","file_name":"hierarchy_details.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"144750703","text":"import scrublet as scr\nimport scipy.io\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rc('font', size=14)\nplt.rcParams['pdf.fonttype'] = 42\n\ntag = 'SmartSeq'\noutput_dir = '/home/jovyan/HB_ZIK/scrublet'\ninput_dir = '/home/jovyan/data/ZikaGlioblastomas'\ninput_file = '/home/jovyan/data/HB_ZIK/HB_ZIK/study5953-zika-star-genecounts.txt'\ncounts_matrix = pd.read_csv(input_file, sep='\\t')\ncounts_matrix = counts_matrix.iloc[0:-4,:]\ngenes = counts_matrix.iloc[:,0]\ncounts_matrix = counts_matrix.iloc[:,1:]\ncounts_matrix = np.asarray(counts_matrix)\ncounts_matrix = counts_matrix.transpose()\n\nprint('Counts matrix shape: {} rows, {} columns'.format(counts_matrix.shape[0], counts_matrix.shape[1]))\nprint('Number of genes in gene list: {}'.format(len(genes)))\n\nscrub = scr.Scrublet(counts_matrix, expected_doublet_rate=0.06, sim_doublet_ratio = 10)\ndoublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=2, \n min_cells=3, \n min_gene_variability_pctl=85, \n n_prin_comps=30)\n\npredicted_doublets = predicted_doublets*1\npredicted_doublets = predicted_doublets.astype(int)\ndetected_doublets_rate = round(scrub.detected_doublet_rate_, 4)\noverall_doublets_rate = round(scrub.overall_doublet_rate_, 4)\n\nnp.savetxt(output_dir + '/' + tag + '_' + 'doublets_scores.txt', doublet_scores) \nnp.savetxt(output_dir + '/' + tag + '_' + 'predicted_doublets.txt', predicted_doublets) \nwith open(output_dir + '/' + tag + '_' + 'detected_doublets_rate.txt', 'w') as f:\n f.write('%f' % detected_doublets_rate) \n\nwith open(output_dir + '/' + tag + '_' + 'overall_doublets_rate.txt', 'w') as f:\n f.write('%f' % overall_doublets_rate)\n\nf = scrub.plot_histogram()\nf[0].savefig(output_dir + '/' + tag + '_' + \"doubletScore_histogram.pdf\", bbox_inches='tight')\n","sub_path":"scrublet/.ipynb_checkpoints/removeDoublets_SmartSeq-checkpoint.py","file_name":"removeDoublets_SmartSeq-checkpoint.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"88778545","text":"from math import *\n\nworkdir='/Users/colin/workspace/scratch/spectralLES' # not a problem arg\n\npid = 'test_N64_K30_spectral' # problem ID\nodir = '%s/HIT_LES/%s/' % (workdir, pid) # output folder\nidir = odir # input folder for restarts\nL = 2.*pi # domain size\nN = 64 # linear grid size\ncfl = 0.33 # CFL no.\ntlimit = 12.*pi # Time Limit ~ 20tau\ntau = 0.5*pi # ~ integral turnover time\ndt_rst = 5.0*tau # restart output rate\ndt_bin = 1.0*tau # snapshot output rate\ndt_hst = 0.2*tau # histogram output rate\ndt_spec = 0.1*tau # spectrum output rate\nnu = 0.0011 # kinematic viscosity\neps_inj = 1.2 # energy injection rate (cell avg.)\nUrms = 3.0 # initial rms velocity\nk_exp = -1.0 # initial spectrum power law\nk_peak = 16 # initial spectrum decay scaling\n\n\"\"\"\nFor simulations at 256^3:\ndx/eta = 1 -> nu = 0.0071 (DNS for 2nd-3rd order statistics)\ndx/eta = 2 -> nu = 0.0028 (DNS for at best 2nd-order statistics)\ndx/eta = 4 -> nu = 0.0011 (well-resolved LES)\neps_inj = 1.2\nk_peak = 0-64 (depending on how much small-scale energy you'd like in IC)\n\n\nEverything else stays the same.\n\nEquivalence at 64^3: dx/eta = 4, 8, and 16\n\"\"\"\n","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"416778886","text":"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility functions to build input_fns for use with tf.Learn.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nimport tensorflow as tf\nfrom tensorflow_transform.saved import saved_transform_io\nfrom tensorflow_transform.tf_metadata import dataset_schema\n\nfrom tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n\n\ndef _convert_scalars_to_vectors(features):\n \"\"\"Vectorize scalar columns to meet FeatureColumns input requirements.\"\"\"\n def maybe_expand_dims(tensor):\n # Ignore the SparseTensor case. In principle it's possible to have a\n # rank-1 SparseTensor that needs to be expanded, but this is very\n # unlikely.\n if isinstance(tensor, tf.Tensor) and tensor.get_shape().ndims == 1:\n tensor = tf.expand_dims(tensor, -1)\n return tensor\n\n return {name: maybe_expand_dims(tensor)\n for name, tensor in six.iteritems(features)}\n\n\ndef build_csv_transforming_serving_input_fn(\n raw_metadata,\n transform_savedmodel_dir,\n raw_keys,\n field_delim=\",\",\n convert_scalars_to_vectors=True):\n \"\"\"Creates input_fn that applies transforms to raw data in csv format.\n\n CSV files have many restrictions and are not suitable for every input source.\n Consider using build_parsing_transforming_serving_input_fn (which is good for\n input sources of tensorflow records containing tf.example) or\n build_default_transforming_serving_input_fn (which is good for input sources\n like json that list each input tensor).\n\n CSV input sources have the following restrictions:\n * Only columns with schema tf.FixedLenFeature colums are supported\n * Text columns containing the delimiter must be wrapped in '\"'\n * If a string contains a double quote, the double quote must be escaped with\n another double quote, for example: the first column in\n '\"quick \"\"brown\"\" fox\",1,2' becomes 'quick \"brown\" fox'\n * White space is kept. So a text column \"label ,\" is parsed to 'label '\n\n Args:\n raw_metadata: a `DatasetMetadata` object describing the raw data.\n transform_savedmodel_dir: a SavedModel directory produced by tf.Transform\n embodying a transformation function to be applied to incoming raw data.\n raw_keys: A list of string keys of the raw labels to be used. The order in\n the list matches the parsing order in the csv file.\n field_delim: Delimiter to separate fields in a record.\n convert_scalars_to_vectors: Boolean specifying whether this input_fn should\n convert scalars into 1-d vectors. This is necessary if the inputs will be\n used with `FeatureColumn`s as `FeatureColumn`s cannot accept scalar\n inputs. 
Default: True.\n\n Raises:\n ValueError: if columns cannot be saved in a csv file.\n\n Returns:\n An input_fn suitable for serving that applies transforms to raw data in\n CSV format.\n \"\"\"\n if not raw_keys:\n raise ValueError(\"raw_keys must be set.\")\n\n column_schemas = raw_metadata.schema.column_schemas\n\n # Check for errors.\n for k in raw_keys:\n if k not in column_schemas:\n raise ValueError(\"Key %s does not exist in the schema\" % k)\n if not isinstance(column_schemas[k].representation,\n dataset_schema.FixedColumnRepresentation):\n raise ValueError((\"CSV files can only support tensors of fixed size\"\n \"which %s is not.\") % k)\n shape = column_schemas[k].tf_shape().as_list()\n if shape and shape != [1]:\n # Column is not a scalar-like value. shape == [] or [1] is ok.\n raise ValueError((\"CSV files can only support features that are scalars \"\n \"having shape []. %s has shape %s\")\n % (k, shape))\n\n def default_transforming_serving_input_fn():\n \"\"\"Serving input_fn that applies transforms to raw data in Tensors.\"\"\"\n\n record_defaults = []\n for k in raw_keys:\n if column_schemas[k].representation.default_value is not None:\n # Note that 0 and '' are valid defaults.\n value = tf.constant([column_schemas[k].representation.default_value],\n dtype=column_schemas[k].domain.dtype)\n else:\n value = tf.constant([], dtype=column_schemas[k].domain.dtype)\n record_defaults.append(value)\n\n placeholder = tf.placeholder(dtype=tf.string, shape=(None,),\n name=\"csv_input_placeholder\")\n parsed_tensors = tf.decode_csv(placeholder, record_defaults,\n field_delim=field_delim)\n\n raw_serving_features = {k: v for k, v in zip(raw_keys, parsed_tensors)}\n\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n transform_savedmodel_dir, raw_serving_features))\n\n if convert_scalars_to_vectors:\n transformed_features = _convert_scalars_to_vectors(transformed_features)\n\n return input_fn_utils.InputFnOps(\n transformed_features, None, {\"csv_example\": placeholder})\n\n return default_transforming_serving_input_fn\n\n\ndef build_json_example_transforming_serving_input_fn(\n raw_metadata,\n transform_savedmodel_dir,\n raw_label_keys,\n raw_feature_keys=None,\n convert_scalars_to_vectors=True):\n \"\"\"Creates input_fn that applies transforms to raw data formatted in json.\n\n The json is formatted as tf.examples. For example, one input row could contain\n the string for\n\n {\"features\": {\"feature\": {\"name\": {\"int64List\": {\"value\": [42]}}}}}\n\n which encodes an example containing only feature column 'name' with value 42.\n\n Args:\n raw_metadata: a `DatasetMetadata` object describing the raw data.\n transform_savedmodel_dir: a SavedModel directory produced by tf.Transform\n embodying a transformation function to be applied to incoming raw data.\n raw_label_keys: A list of string keys of the raw labels to be used. These\n labels are removed from the serving graph. To build a serving function\n that expects labels in the input at serving time, pass raw_labels_keys=[].\n raw_feature_keys: A list of string keys of the raw features to be used.\n If None or empty, defaults to all features except labels.\n convert_scalars_to_vectors: Boolean specifying whether this input_fn should\n convert scalars into 1-d vectors. This is necessary if the inputs will be\n used with `FeatureColumn`s as `FeatureColumn`s cannot accept scalar\n inputs. 
Default: True.\n\n Returns:\n An input_fn suitable for serving that applies transforms to raw data in\n tf.Examples.\n \"\"\"\n\n raw_feature_spec = raw_metadata.schema.as_feature_spec()\n raw_feature_keys = _prepare_feature_keys(raw_metadata,\n raw_label_keys,\n raw_feature_keys)\n raw_serving_feature_spec = {key: raw_feature_spec[key]\n for key in raw_feature_keys}\n\n def _serving_input_fn():\n \"\"\"Applies transforms to raw data in json-example strings.\"\"\"\n\n json_example_placeholder = tf.placeholder(tf.string, shape=[None])\n example_strings = tf.decode_json_example(json_example_placeholder)\n raw_features = tf.parse_example(example_strings, raw_serving_feature_spec)\n inputs = {\"json_example\": json_example_placeholder}\n\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n transform_savedmodel_dir, raw_features))\n\n if convert_scalars_to_vectors:\n transformed_features = _convert_scalars_to_vectors(transformed_features)\n\n return input_fn_utils.InputFnOps(transformed_features, None, inputs)\n\n return _serving_input_fn\n\n\ndef build_parsing_transforming_serving_input_fn(\n raw_metadata,\n transform_savedmodel_dir,\n raw_label_keys,\n raw_feature_keys=None,\n convert_scalars_to_vectors=True):\n \"\"\"Creates input_fn that applies transforms to raw data in tf.Examples.\n\n Args:\n raw_metadata: a `DatasetMetadata` object describing the raw data.\n transform_savedmodel_dir: a SavedModel directory produced by tf.Transform\n embodying a transformation function to be applied to incoming raw data.\n raw_label_keys: A list of string keys of the raw labels to be used. These\n labels are removed from the serving graph. To build a serving function\n that expects labels in the input at serving time, pass raw_labels_keys=[].\n raw_feature_keys: A list of string keys of the raw features to be used.\n If None or empty, defaults to all features except labels.\n convert_scalars_to_vectors: Boolean specifying whether this input_fn should\n convert scalars into 1-d vectors. This is necessary if the inputs will be\n used with `FeatureColumn`s as `FeatureColumn`s cannot accept scalar\n inputs. 
Default: True.\n\n Returns:\n An input_fn suitable for serving that applies transforms to raw data in\n tf.Examples.\n \"\"\"\n\n raw_feature_spec = raw_metadata.schema.as_feature_spec()\n raw_feature_keys = _prepare_feature_keys(raw_metadata,\n raw_label_keys,\n raw_feature_keys)\n raw_serving_feature_spec = {key: raw_feature_spec[key]\n for key in raw_feature_keys}\n\n def parsing_transforming_serving_input_fn():\n \"\"\"Serving input_fn that applies transforms to raw data in tf.Examples.\"\"\"\n raw_input_fn = input_fn_utils.build_parsing_serving_input_fn(\n raw_serving_feature_spec, default_batch_size=None)\n raw_features, _, inputs = raw_input_fn()\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n transform_savedmodel_dir, raw_features))\n\n if convert_scalars_to_vectors:\n transformed_features = _convert_scalars_to_vectors(transformed_features)\n\n return input_fn_utils.InputFnOps(transformed_features, None, inputs)\n\n return parsing_transforming_serving_input_fn\n\n\ndef build_default_transforming_serving_input_fn(\n raw_metadata,\n transform_savedmodel_dir,\n raw_label_keys,\n raw_feature_keys=None,\n convert_scalars_to_vectors=True):\n \"\"\"Creates input_fn that applies transforms to raw data in Tensors.\n\n Args:\n raw_metadata: a `DatasetMetadata` object describing the raw data.\n transform_savedmodel_dir: a SavedModel directory produced by tf.Transform\n embodying a transformation function to be applied to incoming raw data.\n raw_label_keys: A list of string keys of the raw labels to be used. These\n labels are removed from the serving graph. To build a serving function\n that expects labels in the input at serving time, pass raw_labels_keys=[].\n raw_feature_keys: A list of string keys of the raw features to be used.\n If None or empty, defaults to all features except labels.\n convert_scalars_to_vectors: Boolean specifying whether this input_fn should\n convert scalars into 1-d vectors. This is necessary if the inputs will be\n used with `FeatureColumn`s as `FeatureColumn`s cannot accept scalar\n inputs. 
Default: True.\n\n Returns:\n An input_fn suitable for serving that applies transforms to raw data in\n Tensors.\n\n Raises:\n ValueError: if raw_label_keys is not provided.\n \"\"\"\n if raw_label_keys is None:\n raise ValueError(\"raw_label_keys must be specified.\")\n if raw_feature_keys is None:\n raw_feature_keys = list(\n set(six.iterkeys(raw_metadata.schema.column_schemas))\n - set(raw_label_keys))\n\n def default_transforming_serving_input_fn():\n \"\"\"Serving input_fn that applies transforms to raw data in Tensors.\"\"\"\n\n raw_serving_features = {\n k: v\n for k, v in six.iteritems(raw_metadata.schema.as_batched_placeholders())\n if k in raw_feature_keys}\n sparse_serving_features = [t for t in six.itervalues(raw_serving_features)\n if isinstance(t, tf.SparseTensor)]\n if sparse_serving_features:\n raise ValueError(\"Feeding sparse tensors directly at serving time is not \"\n \"supported.\")\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n transform_savedmodel_dir, raw_serving_features))\n\n if convert_scalars_to_vectors:\n transformed_features = _convert_scalars_to_vectors(transformed_features)\n\n return input_fn_utils.InputFnOps(\n transformed_features, None, raw_serving_features)\n\n return default_transforming_serving_input_fn\n\n\ndef build_training_input_fn(metadata,\n file_pattern,\n training_batch_size,\n label_keys,\n feature_keys=None,\n reader=tf.TFRecordReader,\n key_feature_name=None,\n convert_scalars_to_vectors=True,\n **read_batch_features_args):\n \"\"\"Creates an input_fn that reads training data based on its metadata.\n\n Args:\n metadata: a `DatasetMetadata` object describing the data.\n file_pattern: List of files or pattern of file paths containing\n `Example` records. See `tf.gfile.Glob` for pattern rules.\n training_batch_size: An int or scalar `Tensor` specifying the batch size to\n use.\n label_keys: A list of string keys of the labels to be used.\n feature_keys: A list of string keys of the features to be used.\n If None or empty, defaults to all features except labels.\n reader: A function or class that returns an object with\n `read` method, (filename tensor) -> (example tensor).\n key_feature_name: A name to use to add a key column to the features dict.\n Defaults to None, meaning no key column will be created.\n convert_scalars_to_vectors: Boolean specifying whether this input_fn should\n convert scalars into 1-d vectors. This is necessary if the inputs will be\n used with `FeatureColumn`s as `FeatureColumn`s cannot accept scalar\n inputs. Default: True.\n **read_batch_features_args: any additional arguments to be passed through to\n `read_batch_features()`, including e.g. 
queue parameters.\n\n Returns:\n An input_fn suitable for training that reads training data.\n \"\"\"\n feature_spec = metadata.schema.as_feature_spec()\n feature_keys = _prepare_feature_keys(metadata, label_keys, feature_keys)\n\n training_feature_spec = {key: feature_spec[key]\n for key in feature_keys + label_keys}\n\n def training_input_fn():\n \"\"\"A training input function that reads materialized transformed data.\"\"\"\n\n if key_feature_name is not None:\n keys, data = tf.contrib.learn.io.read_keyed_batch_features(\n file_pattern, training_batch_size, training_feature_spec,\n reader, **read_batch_features_args)\n else:\n data = tf.contrib.learn.io.read_batch_features(\n file_pattern, training_batch_size, training_feature_spec, reader,\n **read_batch_features_args)\n\n features = {k: v for k, v in six.iteritems(data) if k in feature_keys}\n labels = {k: v for k, v in six.iteritems(data) if k in label_keys}\n\n if convert_scalars_to_vectors:\n features = _convert_scalars_to_vectors(features)\n labels = _convert_scalars_to_vectors(labels)\n\n if key_feature_name is not None:\n features[key_feature_name] = keys\n\n if len(labels) == 1:\n (_, labels), = labels.items()\n return features, labels\n\n return training_input_fn\n\n\ndef build_transforming_training_input_fn(raw_metadata,\n transformed_metadata,\n transform_savedmodel_dir,\n raw_data_file_pattern,\n training_batch_size,\n raw_label_keys,\n transformed_label_keys,\n raw_feature_keys=None,\n transformed_feature_keys=None,\n reader=tf.TFRecordReader,\n key_feature_name=None,\n convert_scalars_to_vectors=True,\n **read_batch_features_args):\n \"\"\"Creates training input_fn that reads raw data and applies transforms.\n\n Args:\n raw_metadata: a `DatasetMetadata` object describing the raw data.\n transformed_metadata: a `DatasetMetadata` object describing the raw data.\n transform_savedmodel_dir: a SavedModel directory produced by tf.Transform\n embodying a transformation function to be applied to incoming raw data.\n raw_data_file_pattern: List of files or pattern of file paths containing\n `Example` records. See `tf.gfile.Glob` for pattern rules.\n training_batch_size: An int or scalar `Tensor` specifying the batch size to\n use.\n raw_label_keys: A list of string keys of the raw labels to be used.\n transformed_label_keys: A list of string keys of the transformed labels to\n be used.\n raw_feature_keys: A list of string keys of the raw features to be used.\n If None or empty, defaults to all features except labels.\n transformed_feature_keys: A list of string keys of the transformed features\n to be used. If None or empty, defaults to all features except labels.\n reader: A function or class that returns an object with\n `read` method, (filename tensor) -> (example tensor).\n key_feature_name: A name to use to add a key column to the features dict.\n Defaults to None, meaning no key column will be created.\n convert_scalars_to_vectors: Boolean specifying whether this input_fn should\n convert scalars into 1-d vectors. This is necessary if the inputs will be\n used with `FeatureColumn`s as `FeatureColumn`s cannot accept scalar\n inputs. Default: True.\n **read_batch_features_args: any additional arguments to be passed through to\n `read_batch_features()`, including e.g. 
queue parameters.\n\n Returns:\n An input_fn suitable for training that reads raw training data and applies\n transforms.\n \"\"\"\n\n raw_feature_spec = raw_metadata.schema.as_feature_spec()\n raw_feature_keys = _prepare_feature_keys(raw_metadata,\n raw_label_keys, raw_feature_keys)\n raw_training_feature_spec = {\n key: raw_feature_spec[key]\n for key in raw_feature_keys + raw_label_keys}\n\n transformed_feature_keys = _prepare_feature_keys(\n transformed_metadata, transformed_label_keys, transformed_feature_keys)\n\n def raw_training_input_fn():\n \"\"\"Training input function that reads raw data and applies transforms.\"\"\"\n\n if key_feature_name is not None:\n keys, raw_data = tf.contrib.learn.io.read_keyed_batch_features(\n raw_data_file_pattern, training_batch_size, raw_training_feature_spec,\n reader, **read_batch_features_args)\n else:\n raw_data = tf.contrib.learn.io.read_batch_features(\n raw_data_file_pattern, training_batch_size, raw_training_feature_spec,\n reader, **read_batch_features_args)\n transformed_data = saved_transform_io.apply_saved_transform(\n transform_savedmodel_dir, raw_data)\n\n transformed_features = {\n k: v for k, v in six.iteritems(transformed_data)\n if k in transformed_feature_keys}\n transformed_labels = {\n k: v for k, v in six.iteritems(transformed_data)\n if k in transformed_label_keys}\n\n if convert_scalars_to_vectors:\n transformed_features = _convert_scalars_to_vectors(transformed_features)\n transformed_labels = _convert_scalars_to_vectors(transformed_labels)\n\n if key_feature_name is not None:\n transformed_features[key_feature_name] = keys\n\n if len(transformed_labels) == 1:\n (_, transformed_labels), = transformed_labels.items()\n return transformed_features, transformed_labels\n\n return raw_training_input_fn\n\n\ndef _prepare_feature_keys(metadata, label_keys, feature_keys=None):\n \"\"\"Infer feature keys if needed, and sanity-check label and feature keys.\"\"\"\n if label_keys is None:\n raise ValueError(\"label_keys must be specified.\")\n if feature_keys is None:\n feature_keys = list(\n set(six.iterkeys(metadata.schema.column_schemas)) - set(label_keys))\n overlap_keys = set(label_keys) & set(feature_keys)\n if overlap_keys:\n raise ValueError(\"Keys cannot be used as both a feature and a \"\n \"label: {}\".format(overlap_keys))\n\n return feature_keys\n","sub_path":"tensorflow_transform/saved/input_fn_maker.py","file_name":"input_fn_maker.py","file_ext":"py","file_size_in_byte":21080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
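The record above (input_fn_maker.py) builds serving input_fns that replay a tf.Transform graph over raw inputs. A minimal usage sketch, assuming a raw-metadata directory and a transform_fn directory written by an earlier tf.Transform run (the paths here are hypothetical, and the final export call depends on the tf.contrib.learn Estimator in use):

    from tensorflow_transform.saved import input_fn_maker
    from tensorflow_transform.tf_metadata import metadata_io

    # Hypothetical artifacts produced by a prior tf.Transform analysis pass.
    raw_metadata = metadata_io.read_metadata('/tmp/raw_metadata')
    serving_input_fn = input_fn_maker.build_parsing_transforming_serving_input_fn(
        raw_metadata=raw_metadata,
        transform_savedmodel_dir='/tmp/transform_fn',
        raw_label_keys=['label'])  # labels are stripped from the serving graph

    # estimator.export_savedmodel('/tmp/export', serving_input_fn)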
+{"seq_id":"585725991","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-8-11 下午3:30\n# @Author : zhe.zhang\n# @Site : \n# @File : 8-11-15-30-23JSON进阶.py\n# @Software: PyCharm Community Edition\n# @Function :\n__author__ = 'zhe.zhang'\nimport json\n\n\nclass Student(object):\n def __init__(self, name, age, score):\n self.name = name\n self.age = age\n self.score = score\n\n\ns = Student('zhe.zhang', '99', '100')\n\n\n# TypeError: <__main__.Student object at 0x7f287b72c860> is not JSON serializable\n# print(json.dumps(s))\n# 错误的原因是Student对象不是一个可序列化为JSON的对象。\n# 如果连class的实例对象都无法序列化为JSON,这肯定不合理!\n# 别急,我们仔细看看dumps()方法的参数列表,可以发现,除了第一个必须的obj参数外,dumps()方法还提供了一大堆的可选参数:\n# https://docs.python.org/3/library/json.html#json.dumps\n# 这些可选参数就是让我们来定制JSON序列化。前面的代码之所以无法把Student类实例序列化为JSON,是因为默认情况下,dumps()方法不知道如何将Student实例变为一个JSON的{}对象。\n# 可选参数default就是把任意一个对象变成一个可序列为JSON的对象,我们只需要为Student专门写一个转换函数,再把函数传进去即可:\nclass Student2(object):\n def __init__(self, name, age, score):\n self.name = name\n self.age = age\n self.score = score\n\n def student2dic(self):\n return {\n 'name': self.name,\n 'age': self.age,\n 'score': self.score\n }\n\n\n# 这样,Student实例首先被student2dict()函数转换成dict,然后再被顺利序列化为JSON:\nstd2 = Student2('zhe.zhang', '99', '100')\nprint(json.dumps(std2, default=Student2.student2dic))\n# 不过,下次如果遇到一个Teacher类的实例,照样无法序列化为JSON。我们可以偷个懒,把任意class的实例变为dict:\nprint(json.dumps(std2, default=lambda obj: obj.__dict__))\n\n\n# 因为通常class的实例都有一个__dict__属性,它就是一个dict,用来存储实例变量。也有少数例外,比如定义了__slots__的class。\n# 同样的道理,如果我们要把JSON反序列化为一个Student对象实例,loads()方法首先转换出一个dict对象,\n# 然后,我们传入的object_hook函数负责把dict转换为Student实例\n# 因为通常class的实例都有一个__dict__属性,它就是一个dict,用来存储实例变量。也有少数例外,比如定义了__slots__的class。\n\n# 同样的道理,如果我们要把JSON反序列化为一个Student对象实例,loads()方法首先转换出一个dict对象,然后,我们传入的object_hook函数负责把dict转换为Student实例:\nclass Student3(object):\n def __init__(self, name, age, score):\n self.name = name\n self.age = age\n self.score = score\n\n def student2dict(self):\n return {\n 'name': self.name,\n 'age': self.age,\n 'score': self.score\n }\n\n def dict2student(self):\n return Student3(self['name'], self['age'], self['score'])\n\n\nstd3 = Student3('name', '99', '100')\nfWrite = open('./json进阶.txt', 'w')\njson.dump(std3, fWrite, default=Student3.student2dict)\nfWrite.close()\nfRead = open('./json进阶.txt', 'r')\nprint(json.load(fRead, object_hook=Student3.dict2student))\nfRead.close()\n# 小结\n# Python语言特定的序列化模块是pickle,但如果要把序列化搞得更通用、更符合Web标准,就可以使用json模块。\n# json模块的dumps()和loads()函数是定义得非常好的接口的典范。当我们使用时,只需要传入一个必须的参数。但是,当默认的序列化或反序列机制不满足我们的要求时,我们又可以传入更多的参数来定制序列化或反序列化的规则,既做到了接口简单易用,又做到了充分的扩展性和灵活性。\n","sub_path":"learnPython/8-8-14-00-00-IO编程/8-9-14-05-26序列化/8-11-15-30-23JSON进阶.py","file_name":"8-11-15-30-23JSON进阶.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"24203520","text":"\n\n#calss header\nclass _CHAMPIONSHIP():\n\tdef __init__(self,): \n\t\tself.name = \"CHAMPIONSHIP\"\n\t\tself.definitions = [u'a high-level competition to decide who is the best, especially in a sport: ', u'the position of being a champion: ', u'the support someone gives to a person, belief, right, or principle']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_championship.py","file_name":"_championship.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"454746842","text":"import pytest\nimport os\nimport numpy as np\nimport lightgbm as lgb\n\nfrom mlserver.settings import ModelSettings, ModelParameters\nfrom mlserver.types import InferenceRequest\n\nfrom mlserver_lightgbm import LightGBMModel\n\nTESTS_PATH = os.path.dirname(__file__)\nTESTDATA_PATH = os.path.join(TESTS_PATH, \"testdata\")\n\n\ndef pytest_collection_modifyitems(items):\n \"\"\"\n Add pytest.mark.asyncio marker to every test.\n \"\"\"\n for item in items:\n item.add_marker(\"asyncio\")\n\n\n@pytest.fixture\ndef model_uri(tmp_path) -> str:\n n = 4\n d = 3\n\n train = lgb.Dataset(data=np.random.rand(n, d), label=np.random.rand(n))\n print(train)\n bst = lgb.train(params={}, train_set=train)\n\n model_uri = os.path.join(tmp_path, \"lightgbm-model.bst\")\n bst.save_model(model_uri)\n\n return model_uri\n\n\n@pytest.fixture\ndef model_settings(model_uri: str) -> ModelSettings:\n return ModelSettings(\n name=\"lightgbm-model\",\n parameters=ModelParameters(uri=model_uri, version=\"v1.2.3\"),\n )\n\n\n@pytest.fixture\nasync def model(model_settings: ModelSettings) -> LightGBMModel:\n model = LightGBMModel(model_settings)\n await model.load()\n\n return model\n\n\n@pytest.fixture\ndef inference_request() -> InferenceRequest:\n payload_path = os.path.join(TESTDATA_PATH, \"inference-request.json\")\n return InferenceRequest.parse_file(payload_path)\n","sub_path":"runtimes/lightgbm/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"456160668","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 10 13:41:12 2020\r\n\r\n@author: olga\r\n\"\"\"\r\n\r\n\r\nimport numpy\r\nimport math\r\nimport scipy\r\nimport pandas \r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\nfrom numpy import random\r\nfrom numpy import mean\r\nfrom matplotlib import pyplot\r\nfrom statsmodels.graphics.gofplots import qqplot\r\nfrom scipy import stats\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import r2_score\r\nimport statsmodels.api as sm\r\n\r\nimport seaborn as seabornInstance \r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import metrics\r\n\r\n\r\n# path = 'C:/Users/Olga Rumyantseva/Desktop/US_forest/' # home comp.\r\npath = 'C:/Users/olga/Desktop/US_forest/Bayes_vectors_for_regression/'\r\n\r\n# in the dataset below NA-s in LAT and LON were removed:\r\ndf = pandas.read_csv(path + 'BayesSimsResults_all_USA.csv')\r\ndf.columns\r\n# df = df.dropna()\r\ndata = df.values \r\nnumpy.size(data, 0)\r\n\r\n\r\n###########################################################################\r\n######### Regression BA vs 1 clim var : ######################################\r\n###########################################################################\r\n\r\n\r\ndataset = pandas.DataFrame({'Basal Area': data[:,0],\r\n 'AnnualMeanTemperature': data[:,1],\r\n 'MeanDiurnalRange': data[:,2],\r\n 'Isothermality': data[:,3],\r\n 'TemperatureSeasonality': data[:,4],\r\n 'MaxTemperatureofWarmestMonth': data[:,5],\r\n 'MinTemperatureofColdestMonth': data[:,6],\r\n 'TemperatureAnnualRange': data[:,7],\r\n 'MeanTemperatureofWettestQuarter': data[:,8],\r\n 'MeanTemperatureofDriestQuarter': data[:,9],\r\n 'MeanTemperatureofWarmestQuarter': data[:,10],\r\n 'MeanTemperatureofColdestQuarter': data[:,11],\r\n 'AnnualPrecipitation': data[:,12],\r\n 'PrecipitationofWettestMonth': data[:,13],\r\n 'PrecipitationofDriestMonth': data[:,14],\r\n 'PrecipitationSeasonality': data[:,15],\r\n 'PrecipitationofWettestQuarter': data[:,16],\r\n 'PrecipitationofDriestQuarter': data[:,17],\r\n 'PrecipitationofWarmestQuarte': data[:,18],\r\n 'PrecipitationofColdestQuarter': data[:,19]})\r\n \r\n\r\ndataset.shape\r\n\r\ny = dataset[dataset.columns[0]].values # dataset['Basal Area']\r\n\r\n\r\n\r\nR2results_for_clim_vars = numpy.array([])\r\n\r\nfor j in range(19):\r\n X = dataset[dataset.columns[j+1]].values\r\n # split 80% of the data to training set while \r\n # 20% of the data to test set:\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n X_train= X_train.reshape(-1, 1)\r\n y_train= y_train.reshape(-1, 1)\r\n X_test = X_test.reshape(-1, 1)\r\n \r\n regressor = LinearRegression() \r\n regressor.fit(X_train, y_train)\r\n \r\n y_pred = regressor.predict(X_test)\r\n \r\n R2result = r2_score(y_test, y_pred)\r\n print(round(R2result*100, 2))\r\n# print(j+1, round(R2result*100, 2))\r\n R2results_for_clim_vars = numpy.append(R2results_for_clim_vars, R2result)\r\n\r\n\r\nprint('clim. 
var ',numpy.argmax(R2results_for_clim_vars)+1,\r\n ' explains ', round(numpy.max(R2results_for_clim_vars)*100, 1), '%')\r\n\r\n\r\n\r\n# Plot outputs\r\n# plt.scatter(X_test, y_test, color='black')\r\n# plt.plot(X_test, y_pred, color='blue', linewidth=3)\r\n# plt.xticks(())\r\n# plt.yticks(())\r\n# plt.show()\r\n\r\n\r\n\r\n###########################################################################\r\n######### Regression BA vs 2 clim vars : ######################################\r\n###########################################################################\r\n\r\ny = dataset[dataset.columns[0]].values # dataset['Basal Area']\r\n\r\nR2results_for_clim_vars2 = numpy.array([])\r\n\r\n\r\nfor j in range(19):\r\n if j+1 == 15:\r\n continue\r\n X = dataset.iloc[:, [15,j+1]].values\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n \r\n regressor = LinearRegression() \r\n regressor.fit(X_train, y_train)\r\n \r\n y_pred = regressor.predict(X_test)\r\n \r\n R2result2 = r2_score(y_test, y_pred)\r\n \r\n print(round(R2result2*100, 3))\r\n# print(j+1, round(R2result2*100, 2))\r\n R2results_for_clim_vars2 = numpy.append(R2results_for_clim_vars2, R2result2)\r\n\r\n\r\nprint(round(numpy.max(R2results_for_clim_vars2)*100, 2), '%')\r\n\r\n###########################################################################\r\n######### Regression BA vs 3 clim vars : ######################################\r\n###########################################################################\r\n\r\nR2results_for_clim_vars3 = numpy.array([])\r\n\r\n\r\nfor j in range(19):\r\n if (j+1 == 15 or j+1 == 9):\r\n continue\r\n X = dataset.iloc[:, [15, 9, j+1]].values\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n \r\n regressor = LinearRegression() \r\n regressor.fit(X_train, y_train)\r\n \r\n y_pred = regressor.predict(X_test)\r\n \r\n R2result3 = r2_score(y_test, y_pred)\r\n \r\n print(round(R2result3*100, 3))\r\n# print(j+1, round(R2result3*100, 2))\r\n R2results_for_clim_vars3 = numpy.append(R2results_for_clim_vars3, R2result3)\r\n\r\n\r\nprint(round(numpy.max(R2results_for_clim_vars3)*100, 2), '%')\r\n\r\n\r\n###########################################################################\r\n######### Regression BA vs 4 clim vars : ######################################\r\n###########################################################################\r\n\r\nR2results_for_clim_vars4 = numpy.array([])\r\n\r\n\r\nfor j in range(19):\r\n if (j+1 == 2 or j+1 == 9 or j+1 == 15):\r\n continue\r\n X = dataset.iloc[:, [2, 9, 15, j+1]].values\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n \r\n regressor = LinearRegression() \r\n regressor.fit(X_train, y_train)\r\n \r\n y_pred = regressor.predict(X_test)\r\n \r\n R2result4 = r2_score(y_test, y_pred)\r\n \r\n print(round(R2result4*100, 2))\r\n# print(j+1, round(R2result4*100, 2))\r\n R2results_for_clim_vars4 = numpy.append(R2results_for_clim_vars4, R2result4)\r\n\r\n\r\nprint(round(numpy.max(R2results_for_clim_vars4)*100, 3), '%')\r\n\r\n###########################################################################\r\n######### Regression BA vs 5 clim vars : ######################################\r\n###########################################################################\r\n\r\nR2results_for_clim_vars5 = numpy.array([])\r\n\r\n\r\nfor j in range(19):\r\n if (j+1 == 2 or j+1 == 9 or j+1 == 15 or j+1 == 17):\r\n continue\r\n X = dataset.iloc[:, 
[2, 9, 15, 17, j+1]].values\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n \r\n regressor = LinearRegression() \r\n regressor.fit(X_train, y_train)\r\n \r\n y_pred = regressor.predict(X_test)\r\n \r\n R2result5 = r2_score(y_test, y_pred)\r\n \r\n print(round(R2result5*100, 2))\r\n# print(j+1, round(R2result4*100, 2))\r\n R2results_for_clim_vars5 = numpy.append(R2results_for_clim_vars5, R2result5)\r\n\r\n\r\nprint(round(numpy.max(R2results_for_clim_vars5)*100, 3), '%')\r\n\r\n","sub_path":"Regression_BayesBA_vs_BayesClimate_all_USA.py","file_name":"Regression_BayesBA_vs_BayesClimate_all_USA.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
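The five near-identical blocks in the record above grow a fixed column set by hand; the procedure is greedy forward selection. A compact sketch under the same conventions (it reuses the script's dataset, y, and sklearn imports; the function itself is new here):

    def greedy_forward_selection(dataset, y, n_steps, n_features=19):
        # Replays the hand-rolled blocks above: at each step, add the climate
        # variable whose inclusion gives the best held-out R^2.
        selected = []
        for _ in range(n_steps):
            best_j, best_r2 = None, float('-inf')
            for j in range(1, n_features + 1):
                if j in selected:
                    continue
                X = dataset.iloc[:, selected + [j]].values
                X_train, X_test, y_train, y_test = train_test_split(
                    X, y, test_size=0.2, random_state=0)
                regressor = LinearRegression().fit(X_train, y_train)
                r2 = r2_score(y_test, regressor.predict(X_test))
                if r2 > best_r2:
                    best_j, best_r2 = j, r2
            selected.append(best_j)
            print(selected, round(best_r2 * 100, 2), '%')
        return selected

    # e.g. greedy_forward_selection(dataset, y, n_steps=5)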
+{"seq_id":"106375700","text":"import logging\nimport os\nimport platform\n\nimport click\n\nfrom eyesight import __version__ # noqa\n\n__all__ = ['cli']\n\n\nPROGRAM_NAME = 'eyesight'\nMIN_MACOS_VERSION = 10.10\nLOG_VERBOSITY_MAP = {True: logging.DEBUG, False: logging.WARNING}\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass ClickFormatter(logging.Formatter):\n colors = {\n 'critical': 'red',\n 'debug': 'blue',\n 'error': 'red',\n 'exception': 'red',\n 'warning': 'yellow',\n }\n\n def format(self, record):\n if not record.exc_info:\n level = record.levelname.lower()\n msg = record.msg\n if level in self.colors:\n prefix = click.style(\n '{0}: '.format(level.title()), fg=self.colors[level]\n )\n if not isinstance(msg, (str, bytes)):\n msg = str(msg)\n msg = '\\n'.join(prefix + l for l in msg.splitlines())\n return msg\n return logging.Formatter.format(self, record)\n\n\nclass ClickHandler(logging.Handler):\n error_levels = ['critical', 'error', 'exception', 'warning']\n\n def emit(self, record):\n try:\n msg = self.format(record)\n err = record.levelname.lower() in self.error_levels\n click.echo(msg, err=err)\n except Exception:\n self.handleError(record)\n\n\nclick_handler = ClickHandler()\nclick_formatter = ClickFormatter()\nclick_handler.setFormatter(click_formatter)\nlogger.addHandler(click_handler)\n\n\nclass Camera(object):\n \"\"\" Container for camera files and routines. \"\"\"\n\n paths = [\n '/System/Library/Frameworks/CoreMediaIO.framework/Versions/A/Resources/VDC.plugin/Contents/MacOS/VDC',\n '/System/Library/PrivateFrameworks/CoreMediaIOServices.framework/Versions/A/Resources/VDC.plugin/Contents/MacOS/VDC', # noqa\n '/System/Library/PrivateFrameworks/CoreMediaIOServicesPrivate.framework/Versions/A/Resources/AVC.plugin/Contents/MacOS/AVC', # noqa\n '/System/Library/PrivateFrameworks/CoreMediaIOServicesPrivate.framework/Versions/A/Resources/VDC.plugin/Contents/MacOS/VDC', # noqa\n '/System/Library/QuickTime/QuickTimeUSBVDCDigitizer.component/Contents/MacOS/QuickTimeUSBVDCDigitizer',\n '/Library/CoreMediaIO/Plug-Ins/DAL/AppleCamera.plugin/Contents/MacOS/AppleCamera',\n '/Library/CoreMediaIO/Plug-Ins/FCP-DAL/AppleCamera.plugin/Contents/MacOS/AppleCamera',\n ]\n\n def __init__(self, enable=True):\n self.enable = enable\n self.files = self.get_files()\n\n @property\n def mode(self):\n return 0o755 if self.enable else 0o000\n\n def change_state(self):\n logger.info('{0} camera'.format('Enabling' if self.enable else 'Disabling'))\n\n for f in self.files:\n logger.debug('Processing: \"{0}\"'.format(f))\n os.chmod(f, self.mode)\n\n def get_files(self):\n logger.debug('Collecting camera files')\n files = []\n\n for p in self.paths:\n if os.path.isfile(p):\n logger.debug('Camera file found \"{0}\"'.format(p))\n files.append(p)\n else:\n logger.debug('Skipping missing camera file \"{0}\"'.format(p))\n\n if not files:\n raise click.ClickException('Could not locate camera files')\n return files\n\n\nclass Context(object):\n def __init__(self, enable, verbose):\n logger.debug('Gathering system and environment details')\n\n self.enable = enable\n self.verbose = verbose\n self.macos_version = self._get_mac_version()\n self.sip_enabled = self._get_sip_status()\n self.sudo = os.geteuid() == 0\n\n def _get_mac_version(self):\n version = platform.mac_ver()[0]\n version = float('.'.join(version.split('.')[:2])) # format as e.g., '10.10'\n return version\n\n def _get_sip_status(self):\n try:\n status = subprocess32.check_output(['csrutil', 'status'])\n except 
subprocess32.CalledProcessError:\n return None\n\n # status string format example: 'System Integrity Protection status: disabled.\\n'\n status = status.split(': ')[1].strip('.\\n').upper()\n return status == 'ENABLED'\n\n\n@click.command()\n@click.option(\n '--enable/--disable',\n '-e/-d',\n default=None,\n help='Set the camera state. No-op if missing.',\n)\n@click.option(\n '--verbose/--quiet',\n '-v/-q',\n is_flag=True,\n default=None,\n help='Specify verbosity level.',\n)\n@click.version_option()\n@click.pass_context\ndef cli(ctx, enable, verbose):\n logger.setLevel(LOG_VERBOSITY_MAP.get(verbose, logging.INFO))\n logger.debug('{0} started'.format(PROGRAM_NAME))\n\n logger.debug('Checking \"enable\" command line option')\n if enable is None:\n raise click.UsageError('Missing option (--enable/--disable)')\n\n ctx.obj = Context(enable, verbose)\n\n logger.debug('Checking macOS version')\n if ctx.obj.macos_version < MIN_MACOS_VERSION:\n raise click.ClickException(\n '{0} requires macOS {1} or higher'.format(PROGRAM_NAME, MIN_MACOS_VERSION)\n )\n\n logger.debug('Checking SIP status')\n if ctx.obj.sip_enabled is None:\n raise click.ClickException('Could not determine SIP status')\n elif ctx.obj.sip_enabled:\n raise click.ClickException('SIP is enabled')\n\n logger.debug('Checking user permissions')\n if not ctx.obj.sudo:\n raise click.ClickException('{0} must be run as root'.format(PROGRAM_NAME))\n\n camera = Camera(enable=ctx.obj.enable)\n camera.change_state()\n\n logger.info('Camera {0}'.format('enabled' if ctx.obj.enable else 'disabled'))\n\n\ndef show_exception(self, file=None):\n logger.error(self.message)\n\n\nclick.ClickException.show = show_exception\nclick.UsageError.show = show_exception\n","sub_path":"eyesight/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
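The record above exposes a click command, so it can be exercised in-process with click's test runner. A short sketch (it will stop at the guard clauses unless run as root on macOS with SIP disabled; the import path follows the record's sub_path):

    from click.testing import CliRunner
    from eyesight.core import cli

    runner = CliRunner()
    result = runner.invoke(cli, ['--disable', '--verbose'])
    print(result.exit_code, result.output)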
+{"seq_id":"566735841","text":"# coding=utf8\n\n\"\"\"\n zhiz.views.page\n ~~~~~~~~~~~~~~~\n\n routes:\n\n page\n GET, `page/`, display a page\n\"\"\"\n\nfrom flask import abort\nfrom skylark import fn\n\nfrom zhiz import app\nfrom zhiz.models import Post\nfrom zhiz.views.utils import render_public\n\n\n@app.route('/page/')\ndef page(page_number):\n if page_number <= 0:\n abort(404)\n\n n = 9\n\n query = Post.where(published=True).orderby(\n Post.datetime, desc=True).limit(n, offset=n * (page_number - 1)\n ).select()\n results = query.execute()\n count = results.count\n\n if count < 0: # no posts\n abort(404)\n\n query = Post.where(published=True).select(fn.count(Post.id))\n result = query.execute()\n total_count = result.tuples()[0][0]\n\n is_first_page = True if page_number == 1 else False\n is_last_page = True if n * page_number >= total_count else False\n\n posts = tuple(results.all())\n\n page = dict(\n number=page_number,\n posts=posts,\n first=is_first_page,\n last=is_last_page\n )\n return render_public('page.html', page=page)\n","sub_path":"zhiz/views/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"570845988","text":"from quadrature import *\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import *\nfrom numpy import *\nimport numpy as np\n\ndef make_random_partition(n):\n partition = [0]\n lower = 0\n for i in range(n):\n new_rand = random.uniform(lower, 1)\n partition.append(new_rand)\n lower = new_rand\n partition.append(1)\n return partition\n\ndef make_h_from_interval(interval, dim):\n h_arr = [interval[j + 1] - interval[j] for j in range(dim-1)]\n return h_arr\n\ndef make_elems(dim, nodes):\n elem_arr = [[nodes[j],nodes[j+1]] for j in range(dim-1)]\n return elem_arr\n\ndef make_elem_indices(dim):\n elem_arr = [[j,j+1] for j in range(dim)]\n return elem_arr\n\ndef two(x):\n return 2.\n\ndef exp4(x):\n return exp(4*x)\n\ndef make_diri(num_elems):\n diri = zeros(num_elems)\n for i in range(num_elems):\n if i == 0 or i == num_elems-1: diri[i] = True\n else: diri[i] = False\n return diri\n\ndef make_boundary_vals(num_elems, left_boundary, right_boundary):\n boundary = zeros(num_elems)\n for i in range(num_elems):\n if i == 0: boundary[i] = left_boundary #* h_arr[0]\n elif i == num_elems-1: boundary[i] = right_boundary #* h_arr[0]\n return boundary\n\n\"\"\"\nTODO: Use general quadrature routines\n\"\"\"\ndef make_local_matrix_and_local_rhs(elem, u_prime_coef):\n A_loc = zeros((2,2))\n b_loc = zeros(2)\n for i in range(2):\n for j in range(2):\n A_loc[i,j] = integ_deriv_deriv(elem, i, j)\n A_loc[i,j] += integ_deriv_basis(elem, i, j, u_prime_coef)\n b_loc[i] += integ_f_basis(elem, two)\n return A_loc, b_loc\n\n\"\"\"\nFans out the values in the submatrix local to their\ncorrect place in the global matrix, A. The boundary\nconditions are taken care of by use of the diri (short\nfor Dirichlet) array.\n\"\"\"\ndef fan_out(elem_i, A_loc, b_loc, diri, A, b, boundary):\n # number of rows\n for v_i in range(2):\n # number of columns\n for u_j in range(2):\n # if we aren't on a boundary element\n if not diri[elem_i[v_i]] and not diri[elem_i[u_j]]:\n # fan out to the corresponding matrix entry\n A[elem_i[v_i]][elem_i[u_j]] += A_loc[v_i][u_j]\n # fan out to the RHS exactly once\n if u_j == 0: b[elem_i[v_i]] += b_loc[v_i]\n # fan out to the RHS vector if symmetric\n # if just the row is on the boundary\n elif diri[elem_i[u_j]] and not diri[elem_i[v_i]]:\n b[elem_i[v_i]] -= boundary[elem_i[u_j]] * A_loc[v_i][u_j]\n elif v_i == u_j:\n A[elem_i[v_i]][elem_i[u_j]] = 1\n b[elem_i[v_i]] = boundary[elem_i[v_i]]\n\n\n\"\"\"\nBuild the global A matrix by first constructing local\nsubmatrices and then \"fanning them out\" to the correct\nplaces.\n\"\"\"\ndef make_global_matrix_and_rhs(elems_i, elems, diri, boundary, u_prime_coef):\n dim = len(elems_i)\n #rhs = set_rhs(h_arr)\n b = zeros(dim)\n A = zeros((dim,dim))\n for i in range(dim-1):\n A_loc, b_loc = make_local_matrix_and_local_rhs(elems[i], u_prime_coef)\n fan_out(elems_i[i], A_loc, b_loc, diri, A, b, boundary)\n return A, b\n\ndef parabolic_soln(x, c_1, c_2):\n return -x**2+c_1*x+c_2\n\nerrors = []\nnum_tests = 2\nstart_dim = 1\nnum_intervals = 10\n\n#for num_intervals in range(start_dim, num_tests):\nh = 1.0/num_intervals\nnodes = linspace(0.0, 1.0, num_intervals + 1)\n#nodes = make_random_partition(num_intervals+1)\n#nodes = [0.0, 0.2, 0.27, 0.32, 0.4, 0.45, 0.46, 0.51, 0.54, 0.56, 0.59, 0.63, 0.78, 0.84, 0.9, 1.0]\ndim = len(nodes)\nelems_i = make_elem_indices(dim)\nelems = make_elems(dim, nodes)\ndiri = make_diri(len(elems_i))\n#h_arr = make_h_from_interval(nodes, dim)\nleft_boundary = 0.#/h_arr[0]\nright_boundary = 
0.#/h_arr[dim-2]\n#boundary = zeros(dim)\nboundary = make_boundary_vals(len(elems_i), left_boundary, right_boundary)\n#rhs = ones(dim)\nu_prime_coef = 0\nA_fem, rhs = make_global_matrix_and_rhs(elems_i, elems, diri, boundary, u_prime_coef)\nsoln = solve(A_fem, rhs)\n#print zip(nodes, soln)\n#true_soln = [parabolic_soln(x, 1, -2) for x in nodes]\n#true_soln = [2 * (x + 5) + 23 * exp(5 - 5 *x) - exp(5 (2*x + 33))/(5 - 5 e^5)\n\nx = nodes\nplt.xlabel('x')\nplt.ylabel('Solution')\nplt.plot(x , soln)\n# plt.plot(x , true_soln)\nplt.show()\n","sub_path":"python_code/finelem.py","file_name":"finelem.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
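Since the local matrices integrate u'v' (plus a convection term u'v that is zero here because u_prime_coef = 0) against f(x) = 2 with homogeneous Dirichlet data, the problem being assembled is -u'' = 2 on (0, 1), whose exact solution is u(x) = x(1 - x), assuming the helpers in the quadrature module implement the standard weak form. A quick check that could be appended to the script above:

    # Compare FEM nodal values with the exact solution of -u'' = 2, u(0) = u(1) = 0.
    exact = [xi * (1 - xi) for xi in nodes]
    max_err = max(abs(s - e) for s, e in zip(soln, exact))
    print('max nodal error:', max_err)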
+{"seq_id":"290276163","text":"# Keesha Erickson, Nov 2018\r\n# plot heatmaps from bngl files\r\n# plot rankings of recruitment\r\n# clustering by cell line and protein recruitment\r\n\r\nfrom heatmaps_demo_def import heatmapsIGF1R, clusteringIGF1R, rankIGF1R\r\n\r\n# location of gdat files\r\n# there can only be gdat files in here (rm cdat and net)\r\nloc = 'C:/Users/Keesha/PycharmProjects/IGF1R/NCI60/bnglout/'\r\n\r\n# plot heatmaps of protein recruitment\r\nheatmapsIGF1R(loc)\r\n\r\n# rank analysis\r\nrankIGF1R(loc)\r\n\r\n# cluster cell lines\r\nclusteringIGF1R(loc)\r\n\r\n","sub_path":"igf1r_demo.py","file_name":"igf1r_demo.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"134578735","text":"# 画图,学用rectangle画方形。 \n\nimport turtle as tt\n\n# 参数n多边形边数\ndef drawLine(n):\n t=tt.Pen()\n t.color('yellow')\n t.width(3)\n t.begin_fill()\n t.shape('turtle')\n\n for i in range(n):\n t.forward(100)\n t.left(360/n)\n t.end_fill() \n\ndef drawRactangle():\n t=tt.Pen()\n t.color('blue')\n t.width(3)\n t.shape('turtle')\n\n t.begin_fill()\n for i in range(4):\n t.forward(100)\n t.left(90)\n t.end_fill()\n\n\n \ndef run():\n #drawLine(8)\n drawRactangle()\n \nrun()\n","sub_path":"Practices/Practice58.py","file_name":"Practice58.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"30581247","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/11/28 1:58 下午\n# @Author : Yijia Zheng\n# @FileName: tools.py\n\n# Useful Functions that will be repeatedly used\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\n\n\ndef pca(XMat, k):\n average = np.mean(XMat)\n m, n = np.shape(XMat)\n avgs = np.tile(average, (m, 1))\n data_adjust = XMat - avgs\n covX = np.cov(data_adjust.T)\n featValue, featVec= np.linalg.eig(covX)\n index = np.argsort(-featValue)\n if k > n:\n print(\"k must lower than feature number\")\n return\n else:\n selectVec = np.matrix(featVec.T[index[:k]]) #转置\n finalData = data_adjust * selectVec.T\n reconData = (finalData * selectVec) + average\n return finalData.real, reconData\n\n\ndef confusion_matrix(preds, labels):\n conf_matrix = np.zeros((len(labels), len(labels)))\n for p, t in zip(preds, labels):\n conf_matrix[p, t] += 1\n return conf_matrix\n\n\ndef plot_confusion_matrix(confusion_mat, save_path):\n plt.rcParams['figure.dpi'] = 500\n plt.imshow(confusion_mat, interpolation='nearest', cmap=plt.cm.gray)\n thresh = confusion_mat.max() / 2.\n for i, j in itertools.product(range(confusion_mat.shape[0]), range(confusion_mat.shape[1])):\n plt.text(j, i, confusion_mat[i, j],\n horizontalalignment=\"center\",\n color=\"black\" if confusion_mat[i, j] > thresh else \"white\")\n plt.title('Confusion matrix')\n plt.colorbar()\n tick_marks = np.arange(confusion_mat.shape[0])\n plt.xticks(tick_marks, tick_marks)\n plt.yticks(tick_marks, tick_marks)\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig(save_path)\n plt.show()\n\n\n\n","sub_path":"models/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"376030728","text":"import cv2\nimport csv\nimport time\nimport numpy as np\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom select_points import select_points\nfrom warp_image import warp_image\n\ntime.sleep(30)\n\ndef drawBox(image, boundary):\n x, y, w, h = int(boundary[0]), int(boundary[1]), int(boundary[2]), int(boundary[3])\n cv2.rectangle(image, (x, y), ((x + w), (y + h)), (255, 0, 255), 3, 1)\n cv2.putText(image, \"Tracking...\", (75, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)\n\n#tracker = cv2.TrackerMOSSE_create()\n# tracker = cv2.TrackerCSRT_create()\n#tracker = cv2.TrackerMedianFlow_create()\n\ncamera = PiCamera() # initialize the camera and grab a reference to the raw camera capture\ncamera.resolution = (1280,720)\ncamera.framerate = 10\nrawCapture = PiRGBArray(camera, size=(1280,720))\ntime.sleep(0.1) # allows the camera to warmup\n\ncamera.capture(rawCapture, format=\"bgr\")\nimage = rawCapture.array\nrawCapture.truncate(0)\n\n#_, img = cap.read() # get initial image\n\n#coords, image1 = select_points(image)\ncoords = [[55, 326], [805, 277], [48, 636], [831, 559]] # these two lines are for when you know the coordinates\nimage1 = warp_image(image,coords) # I want to run this automatically at boot without human intervention\n\n#camera.capture(rawCapture, format=\"bgr\")\n#image = rawCapture.array\n#rawCapture.truncate(0)\n#image2 = warp_image(image,coords)\n\n#bbox = cv2.selectROI(\"Tracking\", image, False) # select bounding box\n#tracker.init(image, bbox) # initialize the tracker on the selected bounding box\n#ok, img = cap.read() # get the next image\n\n#img = warp_image(img, coords) ## maybe i'll use this\n\n# fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n# video = cv2.VideoWriter('Resources/geno_detect.mp4', fourcc, 30, (1080, 1920))\n\nt = time.localtime()\ncurrent_date = time.strftime(\"%Y%m%d\", t)\ncsv_file = 'logged_data/' + current_date + '_location.csv'\n\nwith open(csv_file, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"time\",\"x_pixel\", \"y_pixel\",])\n \nx_pixel = np.zeros(5)\ny_pixel = np.zeros(5)\ncounter = 0\n\nfor frame in camera.capture_continuous(rawCapture, format = 'bgr', use_video_port=True):\n timer = cv2.getTickCount() # this is for the fps counter\n\n img = frame.array\n image2 = warp_image(img,coords)\n \n diff = cv2.absdiff(image1,image2)\n gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n blur= cv2.GaussianBlur(gray, (5,5),0)\n _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh,None,iterations=3)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n largest = 0\n largest_contour = None\n \n for contour in contours:\n #(x,y,w,h) = cv2.boundingRect(contour)\n if cv2.contourArea(contour) > largest:\n largest = cv2.contourArea(contour)\n largest_contour = contour\n if largest_contour is not None:\n\n (x,y,w,h) = cv2.boundingRect(largest_contour)\n cv2.rectangle(image1,(x,y),(x+w,y+h),(0,255,0),2)\n \n\n #x, y, w, h = int(contours[0]), int(contours[1]), int(contours[2]), int(contours[3])\n x_pos = x + w / 2\n y_pos = y + h / 2\n \n if counter < 4:\n x_pixel[counter] = x_pos\n y_pixel[counter] = y_pos\n counter += 1\n \n else:\n \n x_pixel[counter] = x_pos\n y_pixel[counter] = y_pos\n \n t = time.localtime()\n current_time = time.strftime(\"%Y/%m/%d %H:%M:%S\", t)\n \n with open(csv_file, 'a', newline='') as file:\n writer = csv.writer(file)\n writer.writerow([current_time, np.average(x_pixel), 
np.average(y_pixel)])\n \n x_pixel = np.zeros(5)\n y_pixel = np.zeros(5)\n counter = 0\n #cv2.drawContours(image1, contours, -1, (0,255,0),2)\n \n #ok, new_bbox = tracker.update(img) # updates with a new bounding box in the next frame\n\n# if contours:\n# \n# print(contours)\n# #drawBox(img, new_bbox) # if the object is found, draw the new box on the image\n#\n#\n\n\n \n# else:\n# cv2.putText(img, \"Lost\", (75, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)\n\n \n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer) # fps junk\n\n cv2.putText(img, str(int(fps)), (75, 50), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)\n\n #cv2.imshow(\"Motion Detection\", image1)\n #cv2.imshow(\"diff\",diff)\n rawCapture.truncate(0)\n\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n \n image1 = image2\n\n #video.write(img)\n #ok, img = cap.read() # get the next image in the stream for tracking\n #if ok:\n # img = warp_image(img, coords)\n\ncv2.destroyAllWindows()\n# video.release()\n","sub_path":"track_geno.py","file_name":"track_geno.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"470664702","text":"import sys, os\nimport re\nfrom difflib import SequenceMatcher\nfrom PyTib.common import open_file, write_file, pre_process\nfrom xlwt import Workbook\nimport yaml\nfrom pathlib import Path\n\n\nparentDir = Path(__file__).resolve().parent\ninDir = parentDir / 'input'\noutDir = parentDir / 'output'\n\n\ndef is_punct(string):\n # put in common\n if '༄' in string or '༅' in string or '༆' in string or '༇' in string or '༈' in string or \\\n '།' in string or '༎' in string or '༏' in string or '༐' in string or '༑' in string or \\\n '༔' in string or '_' in string:\n return True\n else:\n return False\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\ndef strip_particle(word):\n particles = ['འི','འུ','འོ','ར','འམ']\n for particle in particles:\n word = word.strip(particle)\n return word\n\n\ndef reinsert_notes(raw_text, raw_notes, basis_edition='སྡེ་'):\n global note_num\n raw_text = raw_text.replace('a', '').replace('\\t', ' ').split('\\n')\n raw_notes = re.sub(r'《([^《》་]+)》', r'《\\1་》', raw_notes) # add a tsek in the edition names that lack one.\n raw_notes = raw_notes.strip().split('\\n')[1:]\n\n text = {}\n for t in raw_text:\n parts = re.split(r'([0-9]+)\\.[\\t\\s]', t)[1:]\n if parts:\n note_number = parts[0]\n note_text = pre_process(parts[1], mode='syls')\n if note_text == []:\n note_text = ['']\n text[note_number] = note_text\n\n edition_regex = r'《([^《》]+)》'\n\n # finding all the editions that exist for that text\n edition_names = set([e for r in raw_notes for e in re.findall(edition_regex, r)])\n editions = {basis_edition: []}\n for e in edition_names:\n editions[e] = []\n\n error = False\n for n in raw_notes:\n #if debug == 1:\n if show_note == 1:\n print('\\t\\t'+n)\n if error:\n break\n if n.replace(',', '').replace(' ', '') == '':\n continue\n parts = n.split(',')\n number = str(int(parts[2])-1)\n # DEBUG. Enables to start debugging at a given note\n #note_num = 304\n if number == str(note_num-1):\n print('ok')\n page_number = parts[1]\n content = parts[4:]\n note = ''\n # keep track of which edition has already been replaced\n generated_versions = {basis_edition: False}\n for e in edition_names:\n generated_versions[e] = False\n # loop through tuples of (edition-s, note)\n max_pairs = len(content)-1\n if max_pairs > len(content):\n max_pairs = len(content)-1\n tuple_idx = [c for c in range(0, max_pairs) if c % 2 == 0]\n for a in tuple_idx:\n if error:\n break\n if content[a]:\n # filters the cases where the second tuple is empty\n note = content[a+1]\n if note == 'ལ་ཆུག་མོད་ཀྱི་ཞེས་བྱ་བ་ན།':\n print('check')\n if '(' in note:\n print('there is a note on top of the comparison.')\n print('\\t'.join(parts))\n note = note.split('(')[0].strip()\n if '《' in note:\n print('The following note needs to be edited. 
The execution will stop now.')\n print('\\t'.join(parts))\n error = True\n break\n # 0 prepare\n # separate in syllables not separating the fusioned particles\n modif_type = ''\n if note.startswith('m'):\n modif_type = 'm'\n elif note.startswith('p'):\n modif_type = 'p'\n version = pre_process(note.replace(modif_type, ''), mode='syls')\n # delete the last element in the list of the note\n #if is_punct(version[-1]):\n if is_punct(version[-1]) and len(version) > 1:\n del version[-1]\n # reconstitute the punctuation for comparing the syllables:\n\n # add a tsek to it if the original text has one\n # if the last syllable is not a punctuation\n if not version[-1].endswith('་'):\n if not is_punct(text[number][-1]):\n if not text[number][-1].endswith('་'):\n version[-1] += '་' \n # if the last syllable is a punctuation\n elif is_punct(text[number][-1]) and len(text[number]) > 1:\n if text[number][-2].endswith('་'):\n version[-1] += '་'\n\n # 1 find index\n # 1.a\n # find the index of the syllable from which to start replacing the original\n index = len(text[number]) - len(version)\n # go one syllable left if the last syllable of the original text is a punctuation\n if is_punct(text[number][-1]):\n index -= 1\n # put the index at 0 if the replacement text is longer than the original\n if index < 0:\n index = 0\n\n # 1.b\n # try to find a point of correspondence in case there are more than a few syllables that are added\n orig_sync_idx = False\n version_sync_idx = False\n window_size = 4\n maximum = len(text[number]) - 1\n # attempts_num becomes 0 if window_size is larger than the length of version, making window_indexes an empty list.\n # this way, window_size decides wether we search for a syncronisation point or not.\n attempts_num = len(version[window_size:])\n window_indexes = [(a, a + window_size) for a in range(attempts_num)]\n # for v_w in window_indexes:\n # for a_n in range(attempts_num):\n # orig_window = text[number][maximum - window_size - a_n:maximum - a_n]\n # version_window = version[v_w[0]:v_w[1]]\n # if orig_window == version_window:\n # if not orig_sync_idx:\n # orig_sync_idx = maximum - window_size - a_n\n # version_sync_idx = v_w[0]\n\n # finding the sync point if it is the last syllable\n if not orig_sync_idx:\n # detects which of the two syls is the longest to check if both start the same way\n if len(text[number][-1]) > len(version[0]):\n long = text[number][-1]\n #long = ''.join(text[number][index:])\n short = version[0].rstrip('་')\n #short = ''.join(version)\n else:\n long = version[0]\n short = text[number][-1].rstrip('་')\n # long = ''.join(version)\n # short = ''.join(text[number][index:])\n # finds if long is short with an addition. This deals with བདེའང་ being replaced by བདེ་བའང་.\n # Todo: similar replacements may occur elsewhere than the last syllable. 
implementation needed.\n # in case both syllables are identical, the condition is also met.\n if short in long:\n if short+'་' != long and (len(version)==1 or short == strip_particle(long.strip('་'))):\n if modif_type == 'p': # ,,9,4,《པེ་》《སྣར་》,pཔོ།,\n orig_sync_idx = len(text[number])\n else:\n orig_sync_idx = len(text[number])-1\n else:\n orig_sync_idx = index\n version_sync_idx = 0\n\n # 2\n # generating the versions of the different editions\n edition_text = [b for b in text[number]]\n\n # A.1 for subsequent addition, keep the last syllable if it is a punctuation to add it at the end\n edition_text_last_syl = False\n if is_punct(edition_text[-1]):\n edition_text_last_syl = edition_text[-1]\n #orig_sync_idx -= 1 # as note's conjuction are removed\n\n # remove the ending tsek in version if it was not there in the original\n if edition_text[-1].endswith('་'):\n if not version[-1].endswith('་'):\n version[-1] += '་'\n if version[-1].endswith('ང'):\n version[-1] += '་'\n else:\n if version[-1].endswith('་') and not version[-1].endswith('ང་'):\n version[-1] = version[-1].rstrip('་')\n\n # 2.1 if the operation is a deletion (m stands for minus)\n if modif_type == 'm':\n # a if there is a synchronizing point between the original and the version\n if orig_sync_idx:\n del edition_text[orig_sync_idx:]\n # b if there is no sync point\n else:\n del edition_text[len(edition_text)-len(version):]\n\n # 2.2 if the operation is an addition (p stands for plus)\n elif modif_type == 'p':\n # a if there is a synchronizing point between the original and the version\n if orig_sync_idx:\n # replace the part that precedes the synchronising point\n edition_text[orig_sync_idx - version_sync_idx:orig_sync_idx] = version[:version_sync_idx]\n # replacing from the synchronising point onwards\n edition_text[orig_sync_idx:orig_sync_idx] = version[version_sync_idx:]\n # b if there is no sync point\n else:\n # add a tsek if there is none on the last syllable\n if not edition_text[-1].endswith('་'):\n edition_text[-1] += '་'\n # remove the ending tsek of version\n if version[-1].endswith('་'):\n version[-1] = version[-1].rstrip('་')\n edition_text.extend(version)\n\n # 2.3 if the operation is a replacement\n else:\n if orig_sync_idx:\n # replace the part that precedes the synchronising point\n edition_text[orig_sync_idx - version_sync_idx:orig_sync_idx] = version[:version_sync_idx]\n # replacing from the synchronising point onwards\n edition_text[orig_sync_idx:] = version[version_sync_idx:]\n # 2.b if there is no synchronising point\n else:\n ad = 0\n prev_similarity = similar(edition_text[index-1].strip('་'), version[0].strip('་'))\n diff = len(edition_text[index-1].strip('་'))-len(version[0].strip('་'))\n if prev_similarity >= 0.5 and len(version)==1:\n ad = 1\n index -= 1\n elif strip_particle(edition_text[index-1].strip('་')) == strip_particle(version[0].strip('་')):\n ad = 1\n index -= 1\n # if len(version)>1:\n # if edition_text[index+1] in version:\n # index +=1\n # edition_text.append('')\n for e in range(len(version)):\n #print(e) # གཞུང་འདིའི་བསླབ་པ་ལ་ནི་བསླབ་པར་ དབུ་མ་རིན་པོ་ཆེའི་སྒྲོན་མ།.txt\n #print(version[e])\n edition_text[index + e] = version[e]\n if ad:\n del edition_text[-1]\n\n # A.2 add the punctuation to the end if needed\n # if a punctuation was saved in A.1 and if it is not the same as the last syllable of edition_text\n if edition_text_last_syl and len(edition_text) > 0:\n if edition_text_last_syl != edition_text[-1]:\n # if the last syllable ends with a tsek\n if 
edition_text[-1].endswith('་'):\n # if there is a ང་\n if not edition_text[-1].endswith('ང་'):\n edition_text[-1] = edition_text[-1][:-1]\n elif edition_text[-len(version)] == version[-1]:\n edition_text[-2] = edition_text[-1]\n edition_text[-1] = ''\n edition_text.append(edition_text_last_syl)\n\n\n # 2.4 if a sync point was found, i.e. if the size of version is longer than window_size,\n # add '%' to manually check the replacement has been correctly done\n #if orig_sync_idx:\n # edition_text[-1] += '%'\n\n # 3 Add the text to the respective editions\n #\n edition_refs = re.findall(edition_regex, content[a])\n # 3.a add the versions of all the editions that require modifications from Derge and notify the edition is added\n for e in edition_refs:\n chunk = ''.join(edition_text)\n # remove the extra spaces inserted between the shad and the next verse\n chunk = chunk.replace('_།_', '_།').replace('_', ' ')\n editions[e].append((chunk, len(version), page_number, note))\n generated_versions[e] = True\n\n # 3.b add the original version of the text to the remaining\n for g in generated_versions:\n if not generated_versions[g]:\n chunk = ''.join(text[number])\n # remove the extra spaces inserted between the shad and the next verse\n chunk = chunk.replace('_།_', '_།').replace('_', ' ')\n editions[g].append((chunk, '', page_number, note))\n\n # 4 add the last bit of the text that corresponds to no note\n for g in editions:\n chunk = ''.join(text[str(len(text))])\n chunk = chunk.replace('_།_', '_།').replace('_', ' ')\n editions[g].append((chunk, '', '', ''))\n return editions\n\n\ndef generate_editions(editions, out_dir, work_name):\n # writing all the editions in their respective folder\n for e in editions:\n path = out_dir / 'editions' / e.replace('་', '།') \n file_name = work_name+'_'+e+'.txt'\n content = ''.join([e[0] for e in editions[e]]).replace('_', ' ')\n write_file(path / file_name, content)\n\n\ndef generate_unified_version(editions):\n '''\n :param editions:\n :return: a list with common syllables as separate elements, differing parts within a dict\n '''\n total = []\n # a. generate the list of editions’ names\n ed_names = [a for a in editions]\n for syl_num in range(1, len(editions['སྡེ་'])):\n pre_processed = {}\n common = []\n # b. segment in syllables and seperate on the punctuation for each version\n for ed in ed_names:\n chunk = editions[ed][syl_num][0].replace('_', ' ')\n pre_processed[ed] = pre_process(chunk, mode='syls')\n # c. 
add to common the syls that are the same in all editions and leave the others in pre_processed\n while len({pre_processed[ed][0] if pre_processed[ed] != [] else '' for ed in ed_names}) == 1:\n if pre_processed[ed_names[0]]:\n common.append(pre_processed[ed_names[0]][0])\n for ed in ed_names:\n del pre_processed[ed][0]\n else:\n break\n\n total.extend(common)\n total.append(pre_processed)\n return total\n\n\ndef generate_context_versions(editions, file_name, out_dir, left=5, right=5, base_ed='སྡེ་'):\n def calculate_contexts(unified_version, left=5, right=5, base_ed='སྡེ་'):\n all_versions = []\n c = 0\n for num, syl in enumerate(unified_version):\n if type(syl) == dict:\n if c == 137:\n print('ok')\n versions = {}\n for ed in syl:\n # add left context\n n_l = num-left\n if n_l < 0:\n n_l = 0\n left_context = unified_version[n_l:num]\n # add note\n note = syl[ed]\n # add right context\n n_r = num+right+1\n if n_r > len(unified_version)-1:\n n_r = len(unified_version)-1\n right_context = unified_version[num+1:n_r]\n version = left_context + note + right_context\n # if there is a note (if version[v] == dict), choose the base_ed version\n no_note_version = []\n for v in version:\n if type(v) == dict:\n for base_syl in v[base_ed]:\n no_note_version.append(base_syl)\n else:\n no_note_version.append(v)\n # add the versions in the versions\n versions[ed] = ''.join(no_note_version).replace('_', ' ')\n c += 1\n versions[str(c)] = ''\n all_versions.append(versions)\n return all_versions\n\n unified = generate_unified_version(editions)\n with_context = calculate_contexts(unified, left=left, right=right, base_ed=base_ed)\n for i in range(len(with_context)):\n with_context[i] = [[a, with_context[i][a]] for a in sorted(with_context[i])]\n output = yaml.dump_all(with_context, allow_unicode=True, default_flow_style=False, width=float(\"inf\"))\n # reformat the page number\n output = re.sub(r'\\n- -([^\\n]+)\\n -', r'\\n\\1: ', output)\n output = re.sub(r\"---\\n '([0-9]+)': ''\", r'-\\1-', output)\n output = re.sub(r\"- - '1'\\n - ''\", r'-1-', output).replace(\" '\", '').replace(\"'\", '')\n output = re.sub(r'\\n', r',,,,,,,,,,,,,,,\\n', output) # Todo\n write_file(out_dir / f'/conc_yaml/{file_name}_conc.txt', output)\n\n\ndef export_unified_structure(editions, text_name, out_dir=outDir/'unified_structure'):\n unified = generate_unified_version(editions)\n out = yaml.dump(unified, allow_unicode=True, default_flow_style=False, width=float(\"inf\"))\n write_file(out_dir / f'{text_name}_unified_structure.yaml', out)\n\n\ndef generate_outputs(text_name, notes_name, context, in_dir=inDir, out_dir=outDir):\n\n # extract text and reinsert notes\n editions = reinsert_notes(open_file(in_dir/text_name), open_file(in_dir/notes_name).replace(';', ','))\n\n work_name = text_name.split('.')[0].replace(' ', '_')\n print(work_name)\n\n\n generate_editions(editions, out_dir, work_name)\n \n export_unified_structure(editions, work_name)\n\n generate_context_versions(editions, work_name, out_dir, left=context, right=context)\n\n\nexcluded = [#'11-20_ཆོས་མངོན་པའི་འགྲེལ་པ་གནད་ཀྱི་སྒྲོན་མ།.txt',\n ]\nvol_num = 0\n\nworks = []\nfor f in sorted(os.listdir(inDir)):\n if f.endswith('txt') and f not in excluded:\n csv = f.replace('.txt', '')+'.csv'\n works.append((f, csv))\n\ndef debug_files(vol_num):\n c = 0\n for w in works:\n c += 1\n print(c, w[0])\n if c >= vol_num:\n generate_outputs(w[0], w[1], 5)\n\n\nnote_num = 738\ndebug = 1\nshow_note = 0\nif debug:\n debug_files(vol_num)\nelse:\n for w in works:\n if 'N5000' not in 
w[0]:\n continue\n print(w[0])\n generate_outputs(w[0], w[1], 5)\n","sub_path":"1-a-reinsert_notes/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":20569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
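The alignment in insertion.py leans on two small fuzzy-matching helpers defined near the top of the record: similar(), a difflib SequenceMatcher ratio, and strip_particle(), which trims trailing Tibetan particles. A micro-example of what they return (the ratio is difflib's 2*M/T formula):

    from difflib import SequenceMatcher
    print(SequenceMatcher(None, 'abcd', 'abce').ratio())  # 0.75: 3 matches, 8 chars total
    # strip_particle('བདེའི') -> 'བདེ' : the trailing genitive particle is removed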
+{"seq_id":"3572824","text":"\n# coding: utf-8\n\n# In[112]:\n\n\nimport pprint as pp\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport time\nimport copy\nimport os\nimport json\n\n# For defining network\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n# For load data\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets,transforms\nfrom torch.utils.data.dataset import Dataset\n\n# For optimizer\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\n\n\n# In[73]:\n\n\ndef target2tensor(img):\n y = np.array(img['y'])\n tensor = torch.zeros((13,13,5))\n for box in img['boxes']:\n M = box['matrix_cell']\n r_y = box['region_y']\n tensor[M[1],M[0],:] = torch.Tensor(y[r_y[0]:r_y[1]+1]) \n return tensor\n\ndef open_json(file):\n with open(file) as data_file: \n dic = json.load(data_file)\n return dic\n\n\n# In[32]:\n\n\ndef mse_loss(input, target):\n return torch.sum((input - target) ** 2)\n\n\n# In[33]:\n\n\npreprocess = transforms.Compose([\n transforms.Resize((416,416)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n\n#
\n\n#